diff --git a/app/artifact-cas/api/buf.gen.yaml b/app/artifact-cas/api/buf.gen.yaml
index a232f9fa7..335d8c631 100644
--- a/app/artifact-cas/api/buf.gen.yaml
+++ b/app/artifact-cas/api/buf.gen.yaml
@@ -1,15 +1,14 @@
-version: v1
+version: v2
 plugins:
-  - name: go
+  - local: protoc-gen-go
     out: .
     opt: paths=source_relative
-  - name: go-errors
+  - local: protoc-gen-go-errors
     out: .
     opt: paths=source_relative
-  - name: go-grpc
+  - local: protoc-gen-go-grpc
     out: .
-    opt:
-      - paths=source_relative
-  - name: go-http
+    opt: paths=source_relative
+  - local: protoc-gen-go-http
     out: .
     opt: paths=source_relative
diff --git a/app/artifact-cas/api/buf.lock b/app/artifact-cas/api/buf.lock
deleted file mode 100644
index 4a5f100ea..000000000
--- a/app/artifact-cas/api/buf.lock
+++ /dev/null
@@ -1,18 +0,0 @@
-# Generated by buf. DO NOT EDIT.
-version: v1
-deps:
-  - remote: buf.build
-    owner: bufbuild
-    repository: protovalidate
-    commit: b983156c5e994cc9892e0ce3e64e17e0
-    digest: shake256:fb47a62989d38c2529bcc5cd86ded43d800eb84cee82b42b9e8a9e815d4ee8134a0fb9d0ce8299b27c2d2bbb7d6ade0c4ad5a8a4d467e1e2c7ca619ae9f634e2
-  - remote: buf.build
-    owner: googleapis
-    repository: googleapis
-    commit: 4ed3bc159a8b4ac68fe253218760d035
-    digest: shake256:7149cf5e9955c692d381e557830555d4e93f205a0f1b8e2dfdae46d029369aa3fc1980e35df0d310f7cc3b622f93e19ad276769a283a967dd3065ddfd3a40e13
-  - remote: buf.build
-    owner: grpc-ecosystem
-    repository: grpc-gateway
-    commit: 4c5ba75caaf84e928b7137ae5c18c26a
-    digest: shake256:e174ad9408f3e608f6157907153ffec8d310783ee354f821f57178ffbeeb8faa6bb70b41b61099c1783c82fe16210ebd1279bc9c9ee6da5cffba9f0e675b8b99
diff --git a/app/artifact-cas/api/buf.yaml b/app/artifact-cas/api/buf.yaml
deleted file mode 100644
index 590e81281..000000000
--- a/app/artifact-cas/api/buf.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-version: v1
-breaking:
-  use:
-    - FILE
-deps:
-  - buf.build/googleapis/googleapis:4ed3bc159a8b4ac68fe253218760d035
-  - buf.build/bufbuild/protovalidate:b983156c5e994cc9892e0ce3e64e17e0
-lint:
-  use:
-    - STANDARD
diff --git a/app/artifact-cas/cmd/main.go b/app/artifact-cas/cmd/main.go
index dd63e5245..5483653d9 100644
--- a/app/artifact-cas/cmd/main.go
+++ b/app/artifact-cas/cmd/main.go
@@ -20,7 +20,7 @@ import (
 	"os"
 	"time"

-	"github.com/bufbuild/protovalidate-go"
+	"buf.build/go/protovalidate"
 	"github.com/getsentry/sentry-go"

 	"github.com/chainloop-dev/chainloop/app/artifact-cas/internal/conf"
@@ -145,7 +145,7 @@ func main() {
 	}
 }

-func newProtoValidator() (*protovalidate.Validator, error) {
+func newProtoValidator() (protovalidate.Validator, error) {
 	return protovalidate.New()
 }

diff --git a/app/artifact-cas/internal/conf/buf.gen.yaml b/app/artifact-cas/internal/conf/buf.gen.yaml
index 404d2d082..7b15ad0f9 100644
--- a/app/artifact-cas/internal/conf/buf.gen.yaml
+++ b/app/artifact-cas/internal/conf/buf.gen.yaml
@@ -1,5 +1,5 @@
-version: v1
+version: v2
 plugins:
-  - name: go
+  - local: protoc-gen-go
     out: .
     opt: paths=source_relative
diff --git a/app/artifact-cas/internal/conf/buf.lock b/app/artifact-cas/internal/conf/buf.lock
deleted file mode 100644
index ead3743a2..000000000
--- a/app/artifact-cas/internal/conf/buf.lock
+++ /dev/null
@@ -1,12 +0,0 @@
-# Generated by buf. DO NOT EDIT.
-version: v1
-deps:
-  - remote: buf.build
-    owner: bufbuild
-    repository: protovalidate
-    commit: b983156c5e994cc9892e0ce3e64e17e0
-    digest: shake256:fb47a62989d38c2529bcc5cd86ded43d800eb84cee82b42b9e8a9e815d4ee8134a0fb9d0ce8299b27c2d2bbb7d6ade0c4ad5a8a4d467e1e2c7ca619ae9f634e2
-  - remote: buf.build
-    owner: googleapis
-    repository: googleapis
-    commit: 7a6bc1e3207144b38e9066861e1de0ff
diff --git a/app/artifact-cas/internal/conf/buf.yaml b/app/artifact-cas/internal/conf/buf.yaml
deleted file mode 100644
index 19b961ff8..000000000
--- a/app/artifact-cas/internal/conf/buf.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-version: v1
-breaking:
-  use:
-    - FILE
-deps:
-  - buf.build/googleapis/googleapis:4ed3bc159a8b4ac68fe253218760d035
-  - buf.build/bufbuild/protovalidate:b983156c5e994cc9892e0ce3e64e17e0
-lint:
-  use:
-    - STANDARD
-  ignore_only:
-    PACKAGE_DEFINED:
-      - ./conf.proto
diff --git a/app/artifact-cas/internal/server/grpc.go b/app/artifact-cas/internal/server/grpc.go
index 48f5dcee3..78d314bfd 100644
--- a/app/artifact-cas/internal/server/grpc.go
+++ b/app/artifact-cas/internal/server/grpc.go
@@ -35,7 +35,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"google.golang.org/genproto/googleapis/bytestream"

-	"github.com/bufbuild/protovalidate-go"
+	"buf.build/go/protovalidate"
 	"github.com/go-kratos/kratos/v2/log"
 	"github.com/go-kratos/kratos/v2/middleware/logging"
 	"github.com/go-kratos/kratos/v2/middleware/recovery"
@@ -49,7 +49,7 @@
 )

 // NewGRPCServer new a gRPC server.
-func NewGRPCServer(c *conf.Server, authConf *conf.Auth, byteService *service.ByteStreamService, rSvc *service.ResourceService, providers backend.Providers, validator *protovalidate.Validator, logger log.Logger) (*grpc.Server, error) {
+func NewGRPCServer(c *conf.Server, authConf *conf.Auth, byteService *service.ByteStreamService, rSvc *service.ResourceService, providers backend.Providers, validator protovalidate.Validator, logger log.Logger) (*grpc.Server, error) {
 	log := log.NewHelper(logger)
 	// Load the key on initialization instead of on every request
 	// TODO: implement jwks endpoint
diff --git a/app/cli/pkg/action/attestation_verify.go b/app/cli/pkg/action/attestation_verify.go
index f172f1a7e..b7c5feb06 100644
--- a/app/cli/pkg/action/attestation_verify.go
+++ b/app/cli/pkg/action/attestation_verify.go
@@ -22,7 +22,7 @@ import (
 	pb "github.com/chainloop-dev/chainloop/app/controlplane/api/controlplane/v1"
 	"github.com/chainloop-dev/chainloop/pkg/attestation/verifier"
-	"github.com/sigstore/cosign/v2/pkg/blob"
+	"github.com/sigstore/cosign/v3/pkg/blob"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
diff --git a/app/cli/pkg/action/workflow_run_describe.go b/app/cli/pkg/action/workflow_run_describe.go
index 7b5c9ff2b..630e3e2b2 100644
--- a/app/cli/pkg/action/workflow_run_describe.go
+++ b/app/cli/pkg/action/workflow_run_describe.go
@@ -29,9 +29,9 @@ import (
 	"github.com/chainloop-dev/chainloop/pkg/attestation/verifier"
 	intoto "github.com/in-toto/attestation/go/v1"
 	"github.com/secure-systems-lab/go-securesystemslib/dsse"
-	"github.com/sigstore/cosign/v2/pkg/blob"
-	"github.com/sigstore/cosign/v2/pkg/cosign"
-	sigs "github.com/sigstore/cosign/v2/pkg/signature"
+	"github.com/sigstore/cosign/v3/pkg/blob"
+	"github.com/sigstore/cosign/v3/pkg/cosign"
+	sigs "github.com/sigstore/cosign/v3/pkg/signature"
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 	"github.com/sigstore/sigstore/pkg/signature"
 	sigdsee "github.com/sigstore/sigstore/pkg/signature/dsse"
diff --git a/app/controlplane/api/buf.gen.yaml b/app/controlplane/api/buf.gen.yaml
index e00f01394..305fce580 100644
--- a/app/controlplane/api/buf.gen.yaml
+++ b/app/controlplane/api/buf.gen.yaml
@@ -1,27 +1,26 @@
-version: v1
+version: v2
 plugins:
-  - name: go
+  - local: protoc-gen-go
     out: .
     opt: paths=source_relative
-  - name: go-errors
+  - local: protoc-gen-go-errors
     out: .
     opt: paths=source_relative
-  - name: go-grpc
+  - local: protoc-gen-go-grpc
     out: .
-    opt:
-      - paths=source_relative
-  - name: go-http
+    opt: paths=source_relative
+  - local: protoc-gen-go-http
     out: .
     opt: paths=source_relative
-  - plugin: buf.build/community/stephenh-ts-proto:v1.151.1
+  - remote: buf.build/community/stephenh-ts-proto:v1.151.1
     out: ./gen/frontend
     opt:
-      - outputClientImpl=grpc-web # client implementation it generates
-      - esModuleInterop=true # use imports as required in modern ts setups
-      - useOptionals=messages # use optional TypeScript properties instead of undefined
-  - plugin: buf.build/bufbuild/protoschema-jsonschema:v0.2.0
+      - outputClientImpl=grpc-web
+      - esModuleInterop=true
+      - useOptionals=messages
+  - remote: buf.build/bufbuild/protoschema-jsonschema:v0.2.0
     out: ./gen/jsonschema
-  - plugin: buf.build/grpc-ecosystem/openapiv2:v2.26.3
+  - remote: buf.build/grpc-ecosystem/openapiv2:v2.26.3
     out: gen/temp-openapi
     opt:
       - allow_merge=true
diff --git a/app/controlplane/api/buf.lock b/app/controlplane/api/buf.lock
deleted file mode 100644
index 235dcb4aa..000000000
--- a/app/controlplane/api/buf.lock
+++ /dev/null
@@ -1,23 +0,0 @@
-# Generated by buf. DO NOT EDIT.
-version: v1
-deps:
-  - remote: buf.build
-    owner: bufbuild
-    repository: protovalidate
-    commit: b983156c5e994cc9892e0ce3e64e17e0
-    digest: shake256:436ce453801917c11bc7b21d66bcfae87da2aceb804a041487be1e51dc9fbc219e61ea6a552db7a7aa6d63bb5efd0f3ed5fe3d4c42d4f750d0eb35f14144e3b6
-  - remote: buf.build
-    owner: googleapis
-    repository: googleapis
-    commit: 4ed3bc159a8b4ac68fe253218760d035
-    digest: shake256:7149cf5e9955c692d381e557830555d4e93f205a0f1b8e2dfdae46d029369aa3fc1980e35df0d310f7cc3b622f93e19ad276769a283a967dd3065ddfd3a40e13
-  - remote: buf.build
-    owner: grpc-ecosystem
-    repository: grpc-gateway
-    commit: 4c5ba75caaf84e928b7137ae5c18c26a
-    digest: shake256:e174ad9408f3e608f6157907153ffec8d310783ee354f821f57178ffbeeb8faa6bb70b41b61099c1783c82fe16210ebd1279bc9c9ee6da5cffba9f0e675b8b99
-  - remote: buf.build
-    owner: kratos-go
-    repository: kratos
-    commit: e1d52e944e3845c6862a566db322432d
-    digest: shake256:f7a0c398ccbb951aa222af7f1d822bfc5a9978fbaef040679e7d85c98568198c88e0727f6d2837a3e0fbb853d3ece0f9c3c8e92a90709e05a3bf69144137b48e
diff --git a/app/controlplane/api/buf.yaml b/app/controlplane/api/buf.yaml
deleted file mode 100644
index a23776e07..000000000
--- a/app/controlplane/api/buf.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-version: v1
-breaking:
-  use:
-    - FILE
-deps:
-  - buf.build/googleapis/googleapis:4ed3bc159a8b4ac68fe253218760d035
-  - buf.build/bufbuild/protovalidate:b983156c5e994cc9892e0ce3e64e17e0
-  - buf.build/kratos-go/kratos:e1d52e944e3845c6862a566db322432d
-  - buf.build/grpc-ecosystem/grpc-gateway:v2.26.3
-lint:
-  use:
-    - STANDARD
-  ignore_only:
-    ENUM_ZERO_VALUE_SUFFIX:
-      - controlplane/v1/pagination.proto
-    ENUM_VALUE_PREFIX:
-      # We want to keep these enums human friendly
-      - workflowcontract/v1/crafting_schema.proto
-  allow_comment_ignores: true
diff --git a/app/controlplane/api/controlplane/v1/cas_credentials.pb.go b/app/controlplane/api/controlplane/v1/cas_credentials.pb.go
index ce6ac84d2..200629ee1 100644
---
a/app/controlplane/api/controlplane/v1/cas_credentials.pb.go +++ b/app/controlplane/api/controlplane/v1/cas_credentials.pb.go @@ -260,8 +260,8 @@ var file_controlplane_v1_cas_credentials_proto_rawDesc = []byte{ 0x32, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x41, 0x53, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x2e, 0x52, 0x6f, 0x6c, 0x65, 0x42, 0x0a, 0xba, 0x48, 0x07, 0x82, 0x01, 0x04, 0x1a, - 0x02, 0x01, 0x02, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, + 0x73, 0x74, 0x2e, 0x52, 0x6f, 0x6c, 0x65, 0x42, 0x0a, 0xba, 0x48, 0x07, 0x82, 0x01, 0x04, 0x18, + 0x01, 0x18, 0x02, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x44, 0x0a, 0x04, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, diff --git a/app/controlplane/api/controlplane/v1/group.pb.go b/app/controlplane/api/controlplane/v1/group.pb.go index 770054283..b840c1e7c 100644 --- a/app/controlplane/api/controlplane/v1/group.pb.go +++ b/app/controlplane/api/controlplane/v1/group.pb.go @@ -1674,10 +1674,10 @@ var file_controlplane_v1_group_proto_rawDesc = []byte{ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x06, 0xba, 0x48, 0x03, 0xd0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x07, 0x6e, 0x65, + 0x28, 0x09, 0x42, 0x06, 0xba, 0x48, 0x03, 0xd8, 0x01, 0x01, 0x48, 0x00, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x0f, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x06, 0xba, 0x48, 0x03, 0xd0, 0x01, 0x01, 0x48, 0x01, 0x52, 0x0e, 0x6e, 0x65, 0x77, + 0x09, 0x42, 0x06, 0xba, 0x48, 0x03, 0xd8, 0x01, 0x01, 0x48, 0x01, 0x52, 0x0e, 0x6e, 0x65, 0x77, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, @@ -1714,10 +1714,10 @@ var file_controlplane_v1_group_proto_rawDesc = []byte{ 0x63, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2d, 0x0a, 0x0b, 0x6d, 0x61, 0x69, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x06, 0xba, 0x48, 0x03, 0xd0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x69, 0x6e, 0x74, + 0x06, 0xba, 0x48, 0x03, 0xd8, 0x01, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x69, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x0c, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x06, 0xba, 0x48, 0x03, 0xd0, 0x01, 0x01, 0x48, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x6d, 0x62, 0x65, + 0x06, 0xba, 0x48, 0x03, 
0xd8, 0x01, 0x01, 0x48, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, diff --git a/app/controlplane/api/controlplane/v1/group.proto b/app/controlplane/api/controlplane/v1/group.proto index 74215d7cd..3a922b5dd 100644 --- a/app/controlplane/api/controlplane/v1/group.proto +++ b/app/controlplane/api/controlplane/v1/group.proto @@ -103,9 +103,9 @@ message GroupServiceUpdateRequest { IdentityReference group_reference = 1 [(buf.validate.field).required = true]; // New name for the group (if provided) - optional string new_name = 3 [(buf.validate.field).ignore_empty = true]; + optional string new_name = 3 [(buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE]; // New description for the group (if provided) - optional string new_description = 4 [(buf.validate.field).ignore_empty = true]; + optional string new_description = 4 [(buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE]; } // GroupServiceUpdateResponse contains the updated group information @@ -135,9 +135,9 @@ message GroupServiceListMembersRequest { // IdentityReference is used to specify the group by either its ID or name IdentityReference group_reference = 1 [(buf.validate.field).required = true]; // Optional filter to search only by maintainers or not - optional bool maintainers = 3 [(buf.validate.field).ignore_empty = true]; + optional bool maintainers = 3 [(buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE]; // Optional filter to search by member email address - optional string member_email = 4 [(buf.validate.field).ignore_empty = true]; + optional string member_email = 4 [(buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE]; // Pagination parameters to limit and offset results OffsetPaginationRequest pagination = 5; } diff --git a/app/controlplane/api/controlplane/v1/integrations.pb.go b/app/controlplane/api/controlplane/v1/integrations.pb.go index 418d73ed2..b6aa78321 100644 --- a/app/controlplane/api/controlplane/v1/integrations.pb.go +++ b/app/controlplane/api/controlplane/v1/integrations.pb.go @@ -1298,7 +1298,7 @@ var file_controlplane_v1_integrations_proto_rawDesc = []byte{ 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, - 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0xd0, 0x01, 0x01, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0xd8, 0x01, 0x01, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, diff --git a/app/controlplane/api/controlplane/v1/integrations.proto b/app/controlplane/api/controlplane/v1/integrations.proto index 7bf17b59f..07fdb3a12 100644 --- a/app/controlplane/api/controlplane/v1/integrations.proto +++ b/app/controlplane/api/controlplane/v1/integrations.proto @@ -144,7 +144,7 @@ message IntegrationsServiceDetachResponse {} message ListAttachmentsRequest { // Filter by workflow string 
workflow_name = 1 [(buf.validate.field) = { - ignore_empty: true + ignore: IGNORE_IF_ZERO_VALUE cel: { message: "must contain only lowercase letters, numbers, and hyphens." expression: "this.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')" diff --git a/app/controlplane/api/controlplane/v1/org_metrics.pb.go b/app/controlplane/api/controlplane/v1/org_metrics.pb.go index 90d31a110..b98f9ad87 100644 --- a/app/controlplane/api/controlplane/v1/org_metrics.pb.go +++ b/app/controlplane/api/controlplane/v1/org_metrics.pb.go @@ -689,141 +689,141 @@ var file_controlplane_v1_org_metrics_proto_rawDesc = []byte{ 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, 0x01, 0x0a, 0x15, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa6, 0x01, 0x0a, 0x15, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xba, 0x48, 0x05, 0x72, 0x03, 0xb0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, - 0x4e, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x02, + 0x4d, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x54, 0x69, - 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x42, 0x09, 0xba, 0x48, 0x06, 0x82, 0x01, 0x03, - 0x22, 0x01, 0x00, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x42, - 0x0e, 0x0a, 0x0c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x22, - 0xc5, 0x01, 0x0a, 0x16, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x69, - 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x44, 0x61, 0x79, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x5f, 0x0a, 0x0a, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, - 0x79, 0x44, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x07, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x70, 0x0a, 0x1e, 0x4f, 0x72, 0x67, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x6f, 0x74, 0x61, - 0x6c, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0b, 0x74, 0x69, 0x6d, - 0x65, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, - 0x6f, 0x77, 0x42, 0x09, 0xba, 0x48, 0x06, 0x82, 0x01, 0x03, 0x22, 0x01, 0x00, 0x52, 0x0a, 0x74, - 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0xd1, 0x02, 0x0a, 0x1f, 0x4f, 0x72, - 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, - 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, + 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x42, 0x08, 0xba, 0x48, 0x05, 0x82, 0x01, 0x02, + 0x20, 0x00, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x42, 0x0e, + 0x0a, 0x0c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x22, 0xc5, + 0x01, 0x0a, 0x16, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x69, 0x6c, + 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x44, 0x61, 0x79, 0x52, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x5f, 0x0a, 0x0a, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, + 0x44, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x07, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x6f, 0x0a, 0x1e, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x6f, 0x74, 0x61, 0x6c, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0xdc, - 0x01, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x75, 0x6e, - 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x72, - 0x75, 0x6e, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x54, 0x0a, 0x14, 0x72, 0x75, 0x6e, 0x73, - 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 
0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x11, 0x72, 0x75, 0x6e, - 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x5d, - 0x0a, 0x19, 0x72, 0x75, 0x6e, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x5f, - 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x65, - 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x15, 0x72, 0x75, 0x6e, 0x73, 0x54, 0x6f, 0x74, 0x61, - 0x6c, 0x42, 0x79, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x22, 0x5e, 0x0a, - 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x82, 0x01, - 0x0a, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x56, 0x0a, 0x0b, 0x72, 0x75, - 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x35, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, - 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, 0x6e, - 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, - 0x70, 0x65, 0x22, 0xa0, 0x01, 0x0a, 0x1e, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x42, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x0d, 0x6e, 0x75, 0x6d, 0x5f, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x09, 0xba, 0x48, - 0x06, 0x1a, 0x04, 0x18, 0x14, 0x28, 0x01, 0x52, 0x0c, 0x6e, 0x75, 0x6d, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x77, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x42, 0x09, - 0xba, 0x48, 0x06, 0x82, 0x01, 0x03, 0x22, 0x01, 0x00, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x57, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0x9c, 0x02, 0x0a, 0x1f, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x06, 
0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x6f, 0x74, 0x61, - 0x6c, 0x42, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x1a, 0xa0, 0x01, 0x0a, 0x0d, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x49, 0x74, 0x65, 0x6d, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x54, - 0x0a, 0x14, 0x72, 0x75, 0x6e, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, + 0x77, 0x42, 0x08, 0xba, 0x48, 0x05, 0x82, 0x01, 0x02, 0x20, 0x00, 0x52, 0x0a, 0x74, 0x69, 0x6d, + 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0xd1, 0x02, 0x0a, 0x1f, 0x4f, 0x72, 0x67, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x6f, 0x74, + 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, + 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, + 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0xdc, 0x01, 0x0a, + 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x75, 0x6e, 0x73, 0x5f, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x72, 0x75, 0x6e, + 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x54, 0x0a, 0x14, 0x72, 0x75, 0x6e, 0x73, 0x5f, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x11, 0x72, 0x75, 0x6e, 0x73, 0x54, + 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x5d, 0x0a, 0x19, + 0x72, 0x75, 0x6e, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x5f, 0x72, 0x75, + 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x15, 0x72, 0x75, 0x6e, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, + 0x79, 
0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x22, 0x5e, 0x0a, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x52, 0x11, 0x72, 0x75, 0x6e, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2a, 0xcb, 0x01, 0x0a, 0x11, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x23, 0x0a, 0x1f, 0x4d, 0x45, - 0x54, 0x52, 0x49, 0x43, 0x53, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x57, 0x49, 0x4e, 0x44, 0x4f, - 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x20, 0x0a, 0x1c, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x53, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, - 0x57, 0x49, 0x4e, 0x44, 0x4f, 0x57, 0x5f, 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x44, 0x41, 0x59, 0x10, - 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x53, 0x5f, 0x54, 0x49, 0x4d, - 0x45, 0x5f, 0x57, 0x49, 0x4e, 0x44, 0x4f, 0x57, 0x5f, 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x37, 0x5f, - 0x44, 0x41, 0x59, 0x53, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, - 0x53, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x57, 0x49, 0x4e, 0x44, 0x4f, 0x57, 0x5f, 0x4c, 0x41, - 0x53, 0x54, 0x5f, 0x33, 0x30, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, - 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x53, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x57, 0x49, 0x4e, - 0x44, 0x4f, 0x57, 0x5f, 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x39, 0x30, 0x5f, 0x44, 0x41, 0x59, 0x53, - 0x10, 0x04, 0x32, 0xe1, 0x02, 0x0a, 0x11, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6b, 0x0a, 0x06, 0x54, 0x6f, 0x74, 0x61, - 0x6c, 0x73, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x17, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x12, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x56, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x6e, + 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x75, 
0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x22, 0x9f, 0x01, 0x0a, 0x1e, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x73, 0x42, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x0d, 0x6e, 0x75, 0x6d, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x09, 0xba, 0x48, 0x06, 0x1a, + 0x04, 0x18, 0x14, 0x28, 0x01, 0x52, 0x0c, 0x6e, 0x75, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x73, 0x12, 0x4d, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x77, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x42, 0x08, 0xba, 0x48, + 0x05, 0x82, 0x01, 0x02, 0x20, 0x00, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x22, 0x9c, 0x02, 0x0a, 0x1f, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x73, 0x42, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, - 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x42, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, 0x0e, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, 0x75, 0x6e, - 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0xa0, + 0x01, 0x0a, 0x0d, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x39, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x74, 0x65, + 0x6d, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x54, 0x0a, 0x14, 0x72, + 0x75, 0x6e, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x11, + 0x72, 0x75, 0x6e, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2a, 0xcb, 0x01, 0x0a, 0x11, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x54, 0x69, 0x6d, + 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x23, 0x0a, 0x1f, 0x4d, 0x45, 0x54, 0x52, 0x49, + 0x43, 0x53, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x57, 0x49, 0x4e, 0x44, 0x4f, 0x57, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, + 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x53, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x57, 0x49, 0x4e, + 0x44, 0x4f, 0x57, 0x5f, 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x44, 0x41, 0x59, 0x10, 0x01, 0x12, 0x23, + 0x0a, 0x1f, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x53, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x57, + 0x49, 0x4e, 0x44, 0x4f, 0x57, 0x5f, 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x37, 0x5f, 0x44, 0x41, 0x59, + 0x53, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x53, 0x5f, 0x54, + 0x49, 0x4d, 0x45, 0x5f, 0x57, 0x49, 0x4e, 0x44, 0x4f, 0x57, 0x5f, 0x4c, 0x41, 0x53, 0x54, 0x5f, + 0x33, 0x30, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x4d, 0x45, 0x54, + 0x52, 0x49, 0x43, 0x53, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x57, 0x49, 0x4e, 0x44, 0x4f, 0x57, + 0x5f, 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x39, 0x30, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x04, 0x32, + 0xe1, 0x02, 0x0a, 0x11, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6b, 0x0a, 0x06, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x12, + 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x17, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x73, 0x42, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, - 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, - 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, - 0x31, 0x3b, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x52, 0x75, + 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, + 0x2e, 
0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x6f, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x52, + 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x61, 0x0a, 0x0e, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x69, + 0x6c, 0x79, 0x52, 0x75, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x76, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/app/controlplane/api/controlplane/v1/organization.pb.go b/app/controlplane/api/controlplane/v1/organization.pb.go index 1af61fee6..5b2135278 100644 --- a/app/controlplane/api/controlplane/v1/organization.pb.go +++ b/app/controlplane/api/controlplane/v1/organization.pb.go @@ -693,7 +693,7 @@ var file_controlplane_v1_organization_proto_rawDesc = []byte{ 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd2, 0x02, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd1, 0x02, 0x0a, 0x29, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x0d, 0x6d, @@ -703,166 +703,166 @@ var file_controlplane_v1_organization_proto_rawDesc = []byte{ 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x88, 0x01, 0x01, 0x12, 0x45, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x88, 0x01, 0x01, 0x12, 0x44, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x6f, - 0x6c, 0x65, 0x42, 0x0b, 0xba, 0x48, 0x08, 0x82, 0x01, 0x05, 0x10, 0x01, 0x22, 0x01, 0x00, 0x48, - 0x03, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, - 0x67, 0x69, 0x6e, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x68, 0x69, 0x70, 0x5f, 0x69, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, - 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x72, 0x6f, - 0x6c, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x2a, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x3a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, - 0x70, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x49, 0x0a, - 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x70, 0x61, - 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5b, 0x0a, 0x2a, 0x4f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, - 0x73, 0x68, 0x69, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xba, - 0x48, 0x05, 0x72, 0x03, 0xb0, 0x01, 0x01, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x68, 0x69, 0x70, 0x49, 0x64, 0x22, 0x2d, 0x0a, 0x2b, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x2a, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, - 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xba, 0x48, 0x05, 0x72, - 0x03, 0xb0, 0x01, 0x01, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, - 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x6f, 0x6c, - 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, 0x82, 0x01, 
0x02, 0x10, 0x01, 0x52, 0x04, 0x72, 0x6f, 0x6c, - 0x65, 0x22, 0x69, 0x0a, 0x2b, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x3a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x6c, 0x65, 0x42, 0x0a, 0xba, 0x48, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x00, 0x48, 0x03, + 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, + 0x69, 0x70, 0x5f, 0x69, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x08, + 0x0a, 0x06, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x72, 0x6f, 0x6c, + 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x2a, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, - 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3f, 0x0a, 0x20, + 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x70, 0x61, 0x67, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5b, 0x0a, 0x2a, 0x4f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x68, 0x69, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xba, 0x48, + 0x05, 0x72, 0x03, 0xb0, 0x01, 0x01, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, + 0x69, 0x70, 0x49, 0x64, 0x22, 0x2d, 0x0a, 0x2b, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x2a, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 
0x64, 0x61, 0x74, + 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xba, 0x48, 0x05, 0x72, 0x03, + 0xb0, 0x01, 0x01, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x49, + 0x64, 0x12, 0x3d, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x6f, 0x6c, 0x65, + 0x42, 0x08, 0xba, 0x48, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x22, 0x69, 0x0a, 0x2b, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x49, + 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3f, 0x0a, 0x20, 0x4f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, + 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x55, 0x0a, 0x21, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x55, 0x0a, - 0x21, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa8, 0x04, 0x0a, 0x20, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, + 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x30, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x22, 0xa8, 0x04, 0x0a, 0x20, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x19, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, + 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x16, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x4f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x1a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1e, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x41, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x50, + 0x0a, 0x22, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, + 0x69, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x1f, 0x70, 0x72, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, + 0x12, 0x5a, 0x0a, 0x28, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x6f, 0x5f, 0x6f, 0x72, 0x67, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x02, 0x52, 0x23, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, + 0x4f, 0x72, 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x42, 0x1c, 0x0a, 0x1a, + 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x70, + 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x2b, 0x0a, 0x29, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x6f, 0x5f, 0x6f, 0x72, 0x67, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x22, 0x55, + 0x0a, 0x21, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 
0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3f, 0x0a, 0x20, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x19, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, - 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x16, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x4f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x1a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, - 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x1e, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x41, - 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, - 0x50, 0x0a, 0x22, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x69, - 0x63, 0x69, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x1f, 0x70, - 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, - 0x01, 0x12, 0x5a, 0x0a, 0x28, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x6f, 0x5f, 0x6f, 0x72, 0x67, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x23, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x43, - 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x6f, 0x4f, 0x72, 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x42, 0x1c, 0x0a, - 0x1a, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x25, 0x0a, 0x23, 0x5f, - 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, - 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x2b, 0x0a, 0x29, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x74, 0x6f, 0x5f, 0x6f, 0x72, 0x67, 0x5f, 0x61, 
0x64, 0x6d, 0x69, 0x6e, 0x73, 0x22, - 0x55, 0x0a, 0x21, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3f, 0x0a, 0x20, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x4f, 0x72, 0x67, 0x61, 0x6e, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x95, 0x06, 0x0a, - 0x13, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x31, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x95, 0x06, 0x0a, 0x13, + 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 
0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, - 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, 0x12, 0x3a, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, - 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, - 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x12, 0x3b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x4d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, 0x12, 0x3a, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, - 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x12, 0x3b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, - 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, - 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, + 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x12, 0x3b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x12, 0x3b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 
0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x76, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/app/controlplane/api/controlplane/v1/pagination.proto b/app/controlplane/api/controlplane/v1/pagination.proto index 1627227e3..f49beff0d 100644 --- a/app/controlplane/api/controlplane/v1/pagination.proto +++ b/app/controlplane/api/controlplane/v1/pagination.proto @@ -33,7 +33,7 @@ message CursorPaginationRequest { gte: 1 lte: 100 }, - (buf.validate.field).ignore = IGNORE_IF_UNPOPULATED + (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE ]; } diff --git a/app/controlplane/api/controlplane/v1/project.pb.go b/app/controlplane/api/controlplane/v1/project.pb.go index 09ad7553e..1efe23e21 100644 --- a/app/controlplane/api/controlplane/v1/project.pb.go +++ b/app/controlplane/api/controlplane/v1/project.pb.go @@ -937,7 +937,7 @@ var file_controlplane_v1_project_proto_rawDesc = []byte{ 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x22, 0xa1, 0x02, 0x0a, 0x1e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, + 0x69, 0x64, 0x22, 0xa0, 0x02, 0x0a, 0x1e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, @@ -950,150 +950,150 @@ var file_controlplane_v1_project_proto_rawDesc = []byte{ 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0f, 0x6d, - 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x46, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, - 0x42, 0x0e, 0xba, 0x48, 0x0b, 0xc8, 0x01, 0x01, 0x82, 0x01, 0x05, 0x10, 0x01, 0x22, 0x01, 0x00, - 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdc, 0x01, 0x0a, 0x21, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, - 0x57, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, - 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x10, 0x6d, 0x65, 0x6d, 0x62, - 0x65, 0x72, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x42, 0x0d, 0xba, 0x48, 0x0a, 0xc8, 0x01, 0x01, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x00, 0x52, + 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdc, 0x01, 0x0a, 0x21, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x57, + 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, 0xba, + 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x10, 0x6d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, + 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x24, 0x0a, 0x22, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xad, 0x01, + 0x0a, 0x1a, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x68, 0x69, 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, 0x0a, 0x0a, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x60, 0x01, 0x48, 0x00, 0x52, 0x09, 0x75, 0x73, 0x65, + 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x4d, 0x0a, 0x0f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 
0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x16, 0x0a, 0x14, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x68, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0xa7, 0x02, + 0x0a, 0x25, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x10, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x12, 0x5e, 0x0a, 0x10, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, + 0x0f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, 0x62, - 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x24, 0x0a, 0x22, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x07, + 0x6e, 0x65, 0x77, 0x52, 0x6f, 0x6c, 0x65, 0x22, 0x28, 0x0a, 0x26, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x2b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x49, + 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x57, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x69, 0x6e, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc6, 0x01, 0x0a, 0x2c, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0b, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x76, 0x69, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x50, + 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xec, 0x01, + 0x0a, 0x18, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xba, 0x48, 0x04, 0x72, 0x02, 0x60, 0x01, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, + 0x69, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x09, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x64, 0x42, 0x79, 0x88, 0x01, 0x01, 0x12, 0x39, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x76, 0x69, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x42, 0x0d, 0x0a, + 0x0b, 0x5f, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x32, 0x8f, 0x05, 0x0a, + 0x0e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x74, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x31, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 
0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, 0x62, + 0x65, 0x72, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x65, 0x6d, 0x62, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xad, - 0x01, 0x0a, 0x1a, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, - 0x73, 0x68, 0x69, 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, 0x0a, - 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x60, 0x01, 0x48, 0x00, 0x52, 0x09, 0x75, 0x73, - 0x65, 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x4d, 0x0a, 0x0f, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x16, 0x0a, 0x14, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, - 0x73, 0x68, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0xa7, - 0x02, 0x0a, 0x25, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 
0xc8, 0x01, 0x01, 0x52, - 0x10, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x12, 0x5e, 0x0a, 0x10, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, + 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x83, + 0x01, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, + 0x6f, 0x6c, 0x65, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, - 0x52, 0x0f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x12, 0x45, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, - 0x07, 0x6e, 0x65, 0x77, 0x52, 0x6f, 0x6c, 0x65, 0x22, 0x28, 0x0a, 0x26, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x2b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, - 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x57, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0a, 0x70, - 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc6, 0x01, 0x0a, 0x2c, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0b, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x76, 0x69, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xec, - 0x01, 0x0a, 0x18, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0a, 0x75, - 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x60, 0x01, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x45, 0x6d, - 0x61, 0x69, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x62, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x09, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x64, 0x42, 0x79, 0x88, 0x01, 0x01, 0x12, 0x39, - 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x76, - 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x42, 0x0d, - 0x0a, 0x0b, 0x5f, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x32, 0x8f, 0x05, - 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x74, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2e, 0x76, 0x31, 
0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x83, 0x01, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, - 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x76, 0x69, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, - 0x69, 0x73, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, - 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 
0x64, 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x76, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x4c, 0x69, 0x73, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x76, 0x69, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4c, + 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/app/controlplane/api/controlplane/v1/response_messages.pb.go b/app/controlplane/api/controlplane/v1/response_messages.pb.go index 5cf5cb783..67df08ab7 100644 --- a/app/controlplane/api/controlplane/v1/response_messages.pb.go +++ b/app/controlplane/api/controlplane/v1/response_messages.pb.go @@ -2947,7 +2947,7 @@ var file_controlplane_v1_response_messages_proto_rawDesc = []byte{ 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xbf, 0x04, 0x0a, 0x1b, 0x57, 0x6f, 0x72, + 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xbe, 0x04, 0x0a, 0x1b, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, @@ -2969,246 +2969,246 @@ var file_controlplane_v1_response_messages_proto_rawDesc = []byte{ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd1, 0x01, 0x0a, 0x07, 0x52, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd0, 0x01, 0x0a, 0x07, 0x52, 0x61, 0x77, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x5e, 0x0a, 0x06, 0x66, 0x6f, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x5d, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x2e, 0x52, 0x61, 0x77, 0x42, 0x6f, 0x64, 0x79, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x09, 0xba, 0x48, 0x06, 0x82, 0x01, 0x03, 0x22, - 0x01, 0x00, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x52, 0x0a, 0x06, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, - 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f, 0x0a, - 0x0b, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x59, 0x41, 0x4d, 0x4c, 0x10, 0x02, 0x12, 0x0e, - 0x0a, 0x0a, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x43, 0x55, 0x45, 0x10, 0x03, 0x42, 0x0a, - 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x22, 0xde, 0x01, 0x0a, 0x04, 0x55, - 0x73, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, - 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xbf, 0x02, 0x0a, 0x11, - 0x4f, 0x72, 0x67, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x49, 0x74, 0x65, - 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x6f, 0x72, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x08, 0xba, 0x48, 0x05, 0x82, 0x01, 0x02, 0x20, + 0x00, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x52, 0x0a, 0x06, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x46, + 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x59, 0x41, 0x4d, 0x4c, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x43, 0x55, 0x45, 0x10, 0x03, 0x42, 0x0a, 0x0a, + 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x22, 
0xde, 0x01, 0x0a, 0x04, 0x55, 0x73, + 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xbf, 0x02, 0x0a, 0x11, 0x4f, + 0x72, 0x67, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x49, 0x74, 0x65, 0x6d, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x2a, 0x0a, 0x03, 0x6f, 0x72, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x12, 0x29, 0x0a, 0x04, + 0x75, 0x73, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, + 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x33, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, + 0x69, 0x70, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x22, 0xbe, 0x05, 0x0a, + 0x07, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 
0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x12, 0x83, 0x01, 0x0a, 0x21, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x12, 0x29, 0x0a, - 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, - 0x65, 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x2e, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, + 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x69, 0x6e, 0x67, + 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x22, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, + 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x55, 0x0a, 0x28, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, + 0x5f, 0x6f, 0x72, 0x67, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x23, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x4f, 0x72, 0x67, + 0x41, 0x64, 
0x6d, 0x69, 0x6e, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x1f, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x69, + 0x6e, 0x67, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x32, 0x0a, 0x2e, 0x50, 0x4f, + 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x42, + 0x4c, 0x4f, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x52, 0x41, 0x54, 0x45, 0x47, 0x59, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x2c, + 0x0a, 0x28, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x52, 0x41, + 0x54, 0x45, 0x47, 0x59, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x2f, 0x0a, 0x2b, + 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x52, 0x41, 0x54, 0x45, + 0x47, 0x59, 0x5f, 0x41, 0x44, 0x56, 0x49, 0x53, 0x4f, 0x52, 0x59, 0x10, 0x02, 0x22, 0xf5, 0x05, + 0x0a, 0x0e, 0x43, 0x41, 0x53, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x74, 0x65, 0x6d, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x33, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x68, 0x69, 0x70, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x22, 0xbe, 0x05, - 0x0a, 0x07, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, - 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 
0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x64, 0x41, 0x74, 0x12, 0x83, 0x01, 0x0a, 0x21, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x49, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x69, 0x6e, - 0x67, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1e, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x22, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x1f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x55, 0x0a, 0x28, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, - 0x6f, 0x5f, 0x6f, 0x72, 0x67, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x23, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x61, 0x63, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x4f, 0x72, - 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x1f, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x69, 0x6e, 0x67, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x32, 0x0a, 0x2e, 0x50, - 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x52, 0x41, 0x54, 0x45, 0x47, - 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x2c, 0x0a, 0x28, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x52, - 0x41, 0x54, 0x45, 0x47, 0x59, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x2f, 0x0a, - 0x2b, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 
0x53, 0x54, 0x52, 0x41, 0x54, - 0x45, 0x47, 0x59, 0x5f, 0x41, 0x44, 0x56, 0x49, 0x53, 0x4f, 0x52, 0x59, 0x10, 0x02, 0x22, 0xf5, - 0x05, 0x0a, 0x0e, 0x43, 0x41, 0x53, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x74, 0x65, - 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, - 0x0a, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5d, 0x0a, - 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x41, 0x53, 0x42, 0x61, - 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x3e, 0x0a, 0x06, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x41, 0x53, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, - 0x74, 0x65, 0x6d, 0x2e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x06, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x12, - 0x2e, 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, - 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0d, 0x20, + 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 
0x0a, + 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x25, 0x0a, 0x06, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x22, 0x6e, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x1d, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x56, 0x41, 0x4c, 0x49, - 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x4f, 0x4b, - 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, - 0x02, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xdd, 0x03, 0x0a, 0x0c, 0x41, 0x50, 0x49, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, - 0x0f, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, - 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x73, 0x63, 0x6f, 0x70, 0x65, - 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x5f, 0x61, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 
0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, - 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6c, 0x61, 0x73, 0x74, - 0x55, 0x73, 0x65, 0x64, 0x41, 0x74, 0x2a, 0xa6, 0x01, 0x0a, 0x09, 0x52, 0x75, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, - 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, - 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, - 0x45, 0x44, 0x45, 0x44, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x16, 0x0a, - 0x12, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x58, 0x50, 0x49, - 0x52, 0x45, 0x44, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x05, 0x2a, - 0xa1, 0x01, 0x0a, 0x16, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x24, 0x50, 0x4f, - 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, - 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x2c, 0x0a, 0x28, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, - 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, - 0x10, 0x01, 0x12, 0x2f, 0x0a, 0x2b, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, - 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, 0x57, - 0x49, 0x54, 0x48, 0x4f, 0x55, 0x54, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x53, 0x10, 0x02, 0x2a, 0xd4, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, - 0x69, 0x70, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, - 0x53, 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x45, 0x4d, 0x42, 0x45, - 0x52, 0x53, 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x56, - 0x49, 0x45, 0x57, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x4d, 0x45, 0x4d, 0x42, 0x45, - 0x52, 0x53, 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x41, - 0x44, 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x12, 
0x1d, 0x0a, 0x19, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, - 0x53, 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x4f, 0x57, - 0x4e, 0x45, 0x52, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x53, - 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x4d, 0x45, 0x4d, - 0x42, 0x45, 0x52, 0x10, 0x04, 0x12, 0x23, 0x0a, 0x1f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x53, - 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x43, 0x4f, 0x4e, - 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x4f, 0x52, 0x10, 0x05, 0x2a, 0x60, 0x0a, 0x0e, 0x41, 0x6c, - 0x6c, 0x6f, 0x77, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x1c, - 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x26, - 0x0a, 0x1c, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x5f, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4e, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01, - 0x1a, 0x04, 0xa8, 0x45, 0x93, 0x03, 0x1a, 0x04, 0xa0, 0x45, 0xf4, 0x03, 0x2a, 0x6d, 0x0a, 0x12, - 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x41, 0x75, 0x74, 0x68, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x12, 0x24, 0x0a, 0x20, 0x46, 0x45, 0x44, 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x5f, - 0x41, 0x55, 0x54, 0x48, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x2b, 0x0a, 0x21, 0x46, 0x45, 0x44, 0x45, - 0x52, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x55, 0x4e, 0x41, 0x55, 0x54, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x01, 0x1a, - 0x04, 0xa8, 0x45, 0x93, 0x03, 0x1a, 0x04, 0xa0, 0x45, 0xf4, 0x03, 0x2a, 0x84, 0x01, 0x0a, 0x19, - 0x55, 0x73, 0x65, 0x72, 0x57, 0x69, 0x74, 0x68, 0x4e, 0x6f, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, - 0x73, 0x68, 0x69, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x29, 0x55, 0x53, 0x45, - 0x52, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, - 0x53, 0x48, 0x49, 0x50, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x32, 0x0a, 0x28, 0x55, 0x53, 0x45, 0x52, + 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5d, 0x0a, 0x11, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x41, 0x53, 0x42, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x3e, 0x0a, 0x06, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 
0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x41, 0x53, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x74, + 0x65, 0x6d, 0x2e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x06, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x2e, + 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x39, + 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x25, 0x0a, 0x06, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x22, 0x6e, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x1d, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x4f, 0x4b, 0x10, + 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, + 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xdd, 0x03, 0x0a, 0x0c, 0x41, 0x50, 0x49, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x73, 0x63, 0x6f, 0x70, 0x65, 
0x64, + 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, + 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, + 0x75, 0x73, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x55, + 0x73, 0x65, 0x64, 0x41, 0x74, 0x2a, 0xa6, 0x01, 0x0a, 0x09, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x1a, 0x0a, 0x16, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, + 0x49, 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x52, + 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, + 0x44, 0x45, 0x44, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, + 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x58, 0x50, 0x49, 0x52, + 0x45, 0x44, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x55, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x05, 0x2a, 0xa1, + 0x01, 0x0a, 0x16, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x24, 0x50, 0x4f, 0x4c, + 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x46, + 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x2c, 0x0a, 0x28, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, + 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, + 0x57, 0x49, 0x54, 0x48, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, + 0x01, 0x12, 0x2f, 0x0a, 0x2b, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x56, 0x49, 0x4f, 0x4c, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, 0x57, 0x49, + 0x54, 0x48, 0x4f, 0x55, 0x54, 0x5f, 0x56, 0x49, 0x4f, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x53, + 0x10, 0x02, 0x2a, 
0xd4, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, + 0x70, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x53, + 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, + 0x53, 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x56, 0x49, + 0x45, 0x57, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, + 0x53, 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x41, 0x44, + 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x53, + 0x48, 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x4f, 0x57, 0x4e, + 0x45, 0x52, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x53, 0x48, + 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x4d, 0x45, 0x4d, 0x42, + 0x45, 0x52, 0x10, 0x04, 0x12, 0x23, 0x0a, 0x1f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x53, 0x48, + 0x49, 0x50, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x43, 0x4f, 0x4e, 0x54, + 0x52, 0x49, 0x42, 0x55, 0x54, 0x4f, 0x52, 0x10, 0x05, 0x2a, 0x60, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, + 0x6f, 0x77, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x1c, 0x41, + 0x4c, 0x4c, 0x4f, 0x57, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x26, 0x0a, + 0x1c, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4e, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01, 0x1a, + 0x04, 0xa8, 0x45, 0x93, 0x03, 0x1a, 0x04, 0xa0, 0x45, 0xf4, 0x03, 0x2a, 0x6d, 0x0a, 0x12, 0x46, + 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x41, 0x75, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x24, 0x0a, 0x20, 0x46, 0x45, 0x44, 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, + 0x55, 0x54, 0x48, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x2b, 0x0a, 0x21, 0x46, 0x45, 0x44, 0x45, 0x52, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x55, 0x4e, 0x41, 0x55, 0x54, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x01, 0x1a, 0x04, + 0xa8, 0x45, 0x93, 0x03, 0x1a, 0x04, 0xa0, 0x45, 0xf4, 0x03, 0x2a, 0x84, 0x01, 0x0a, 0x19, 0x55, + 0x73, 0x65, 0x72, 0x57, 0x69, 0x74, 0x68, 0x4e, 0x6f, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x68, 0x69, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x29, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x53, - 0x48, 0x49, 0x50, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4e, - 0x5f, 0x4f, 0x52, 0x47, 0x10, 0x01, 0x1a, 0x04, 0xa8, 0x45, 0x93, 0x03, 0x1a, 0x04, 0xa0, 0x45, - 0xf4, 0x03, 0x2a, 0x80, 0x01, 0x0a, 0x17, 0x55, 0x73, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x4d, 0x65, - 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4f, 0x72, 0x67, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2c, - 0x0a, 0x28, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, - 0x52, 0x5f, 0x4f, 0x46, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 
0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x31, 0x0a, 0x27, - 0x55, 0x53, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x5f, - 0x4f, 0x46, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, - 0x5f, 0x49, 0x4e, 0x5f, 0x4f, 0x52, 0x47, 0x10, 0x01, 0x1a, 0x04, 0xa8, 0x45, 0x93, 0x03, 0x1a, - 0x04, 0xa0, 0x45, 0xf4, 0x03, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, - 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, - 0x3b, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x48, 0x49, 0x50, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x32, 0x0a, 0x28, 0x55, 0x53, 0x45, 0x52, 0x5f, + 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x53, 0x48, + 0x49, 0x50, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4e, 0x5f, + 0x4f, 0x52, 0x47, 0x10, 0x01, 0x1a, 0x04, 0xa8, 0x45, 0x93, 0x03, 0x1a, 0x04, 0xa0, 0x45, 0xf4, + 0x03, 0x2a, 0x80, 0x01, 0x0a, 0x17, 0x55, 0x73, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x4d, 0x65, 0x6d, + 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4f, 0x72, 0x67, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2c, 0x0a, + 0x28, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, + 0x5f, 0x4f, 0x46, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x31, 0x0a, 0x27, 0x55, + 0x53, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x4f, + 0x46, 0x5f, 0x4f, 0x52, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, + 0x49, 0x4e, 0x5f, 0x4f, 0x52, 0x47, 0x10, 0x01, 0x1a, 0x04, 0xa8, 0x45, 0x93, 0x03, 0x1a, 0x04, + 0xa0, 0x45, 0xf4, 0x03, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, + 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, + 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/app/controlplane/api/controlplane/v1/shared_message.pb.go b/app/controlplane/api/controlplane/v1/shared_message.pb.go index 95b4e2afc..0fdf794cd 100644 --- a/app/controlplane/api/controlplane/v1/shared_message.pb.go +++ b/app/controlplane/api/controlplane/v1/shared_message.pb.go @@ -157,9 +157,9 @@ var file_controlplane_v1_shared_message_proto_rawDesc = []byte{ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x02, 0x0a, 0x11, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xba, 0x48, 0x08, 0xd0, 0x01, 0x01, 0x72, 0x03, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 
0xba, 0x48, 0x08, 0xd8, 0x01, 0x01, 0x72, 0x03, 0xb0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x02, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xba, 0x48, 0x07, 0xd0, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xba, 0x48, 0x07, 0xd8, 0x01, 0x01, 0x72, 0x02, 0x10, 0x01, 0x48, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x3a, 0x9b, 0x01, 0xba, 0x48, 0x97, 0x01, 0x1a, 0x94, 0x01, 0x0a, 0x13, 0x69, 0x64, 0x5f, 0x6f, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, diff --git a/app/controlplane/api/controlplane/v1/shared_message.proto b/app/controlplane/api/controlplane/v1/shared_message.proto index 8fd881bab..ecfec4602 100644 --- a/app/controlplane/api/controlplane/v1/shared_message.proto +++ b/app/controlplane/api/controlplane/v1/shared_message.proto @@ -26,12 +26,12 @@ message IdentityReference { // ID is optional, but if provided, it must be a valid UUID. optional string id = 1 [ (buf.validate.field).string.uuid = true, - (buf.validate.field).ignore_empty = true + (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE ]; // Name is optional, but if provided, it must be a non-empty string. optional string name = 2 [ (buf.validate.field).string.min_len = 1, - (buf.validate.field).ignore_empty = true + (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE ]; // Custom validation to ensure that either id or name is provided diff --git a/app/controlplane/api/controlplane/v1/workflow.pb.go b/app/controlplane/api/controlplane/v1/workflow.pb.go index 9a6c97725..82b2c040f 100644 --- a/app/controlplane/api/controlplane/v1/workflow.pb.go +++ b/app/controlplane/api/controlplane/v1/workflow.pb.go @@ -791,7 +791,7 @@ var file_controlplane_v1_workflow_proto_rawDesc = []byte{ 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, - 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0xd0, 0x01, 0x01, 0x52, 0x0c, 0x63, 0x6f, + 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0xd8, 0x01, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x42, 0x79, 0x74, 0x65, @@ -862,7 +862,7 @@ var file_controlplane_v1_workflow_proto_rawDesc = []byte{ 0x02, 0x10, 0x01, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1f, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0xe3, 0x05, 0x0a, 0x1a, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x65, + 0x65, 0x22, 0xd3, 0x05, 0x0a, 0x1a, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, @@ -877,26 +877,25 @@ var file_controlplane_v1_workflow_proto_rawDesc = []byte{ 0x6f, 
0x6e, 0x12, 0x2c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x88, 0x01, 0x01, - 0x12, 0x7c, 0x0a, 0x18, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, + 0x12, 0x6e, 0x0a, 0x18, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, - 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0c, 0xba, 0x48, 0x09, 0xd0, 0x01, - 0x01, 0x82, 0x01, 0x03, 0x22, 0x01, 0x00, 0x52, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x75, 0x6e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x61, - 0x0a, 0x18, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0c, 0xba, 0x48, - 0x09, 0xd0, 0x01, 0x01, 0x82, 0x01, 0x03, 0x22, 0x01, 0x00, 0x52, 0x15, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x78, 0x0a, 0x1d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6c, 0x61, - 0x73, 0x74, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x69, 0x6e, 0x64, - 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x57, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x42, 0x0c, 0xba, 0x48, 0x09, 0xd0, 0x01, 0x01, 0x82, 0x01, 0x03, 0x22, 0x01, 0x00, 0x52, + 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x60, 0x0a, 0x18, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, + 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0b, + 0xba, 0x48, 0x08, 0xd8, 0x01, 0x01, 0x82, 0x01, 0x02, 0x20, 0x00, 0x52, 0x15, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x77, 0x0a, 0x1d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6c, + 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x69, 0x6e, + 0x64, 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x41, 0x63, 0x74, 
0x69, 0x76, 0x69, 0x74, 0x79, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x42, 0x0b, 0xba, 0x48, 0x08, 0xd8, 0x01, 0x01, 0x82, 0x01, 0x02, 0x20, 0x00, 0x52, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, diff --git a/app/controlplane/api/controlplane/v1/workflow.proto b/app/controlplane/api/controlplane/v1/workflow.proto index 5e18c479d..ee15c87a2 100644 --- a/app/controlplane/api/controlplane/v1/workflow.proto +++ b/app/controlplane/api/controlplane/v1/workflow.proto @@ -45,7 +45,7 @@ message WorkflowServiceCreateRequest { string project_name = 2 [(buf.validate.field).string = {min_len: 1}]; // The name of the workflow contract string contract_name = 3 [(buf.validate.field) = { - ignore_empty: true + ignore: IGNORE_IF_ZERO_VALUE cel: { message: "must contain only lowercase letters, numbers, and hyphens." expression: "this.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')" @@ -120,25 +120,20 @@ message WorkflowServiceListRequest { // If the workflow is public optional bool workflow_public = 5; // The type of runner that ran the workflow - workflowcontract.v1.CraftingSchema.Runner.RunnerType workflow_run_runner_type = 6 [ - (buf.validate.field).enum = { - not_in: [0] - }, - (buf.validate.field).ignore_empty = true - ]; + workflowcontract.v1.CraftingSchema.Runner.RunnerType workflow_run_runner_type = 6; // The status of the last workflow run RunStatus workflow_run_last_status = 7 [ (buf.validate.field).enum = { not_in: [0] }, - (buf.validate.field).ignore_empty = true + (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE ]; // The time window for the last known workflow activity WorkflowActivityWindow workflow_last_activity_window = 8 [ (buf.validate.field).enum = { not_in: [0] }, - (buf.validate.field).ignore_empty = true + (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE ]; // Pagination options OffsetPaginationRequest pagination = 9; diff --git a/app/controlplane/api/controlplane/v1/workflow_contract.pb.go b/app/controlplane/api/controlplane/v1/workflow_contract.pb.go index 663bca79b..5eaded6cc 100644 --- a/app/controlplane/api/controlplane/v1/workflow_contract.pb.go +++ b/app/controlplane/api/controlplane/v1/workflow_contract.pb.go @@ -683,7 +683,7 @@ var file_controlplane_v1_workflow_contract_proto_rawDesc = []byte{ 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, - 0xd0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x61, 0x77, + 0xd8, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x61, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x61, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, @@ -712,7 +712,7 @@ var file_controlplane_v1_workflow_contract_proto_rawDesc = []byte{ 0x20, 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 
0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, - 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0xd0, 0x01, 0x01, 0x52, 0x04, 0x6e, + 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0xd8, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x61, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x61, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, diff --git a/app/controlplane/api/controlplane/v1/workflow_contract.proto b/app/controlplane/api/controlplane/v1/workflow_contract.proto index 8e6510aae..42284712e 100644 --- a/app/controlplane/api/controlplane/v1/workflow_contract.proto +++ b/app/controlplane/api/controlplane/v1/workflow_contract.proto @@ -39,7 +39,7 @@ message WorkflowContractServiceListResponse { message WorkflowContractServiceCreateRequest { string name = 1 [(buf.validate.field) = { - ignore_empty: true + ignore: IGNORE_IF_ZERO_VALUE cel: { id: "name.dns-1123" message: "must contain only lowercase letters, numbers, and hyphens." @@ -62,7 +62,7 @@ message WorkflowContractServiceCreateResponse { message WorkflowContractServiceUpdateRequest { string name = 1 [(buf.validate.field) = { - ignore_empty: true + ignore: IGNORE_IF_ZERO_VALUE cel: { id: "name.dns-1123" message: "must contain only lowercase letters, numbers, and hyphens." diff --git a/app/controlplane/api/controlplane/v1/workflow_run.pb.go b/app/controlplane/api/controlplane/v1/workflow_run.pb.go index 4cff15282..6b0898f55 100644 --- a/app/controlplane/api/controlplane/v1/workflow_run.pb.go +++ b/app/controlplane/api/controlplane/v1/workflow_run.pb.go @@ -1986,218 +1986,217 @@ var file_controlplane_v1_workflow_run_proto_rawDesc = []byte{ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x20, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0xb3, 0x02, 0x0a, 0x1f, 0x41, 0x74, 0x74, 0x65, + 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0xb2, 0x02, 0x0a, 0x1f, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x07, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x60, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x09, 0xba, 0x48, 0x06, - 0x82, 0x01, 0x03, 0x22, 0x01, 0x00, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, - 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x0b, 0x54, 0x72, 0x69, 0x67, 0x67, - 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, - 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x12, 0x1d, - 0x0a, 0x19, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, - 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x22, 0x22, 0x0a, - 0x20, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x8c, 0x05, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, - 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0xac, 0x01, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x86, 0x01, 0xba, 0x48, - 0x82, 0x01, 0xba, 0x01, 0x7c, 0x0a, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, - 0x31, 0x31, 0x32, 0x33, 0x12, 0x3a, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, - 0x65, 0x20, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, - 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, - 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, - 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, - 0x29, 0xd0, 0x01, 0x01, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x0f, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x0b, 0xba, 0x48, 0x08, 0xd0, 0x01, 0x01, 0x72, 0x03, 0xb0, 0x01, 0x01, 0x52, - 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x54, 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x52, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x69, 
0x6f, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x73, - 0x6f, 0x72, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, - 0x8e, 0x01, 0xba, 0x48, 0x8a, 0x01, 0x1a, 0x87, 0x01, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x30, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x74, - 0x20, 0x69, 0x66, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x20, 0x69, 0x73, 0x20, 0x73, 0x65, 0x74, 0x1a, 0x36, 0x21, 0x28, 0x74, 0x68, 0x69, 0x73, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x21, - 0x3d, 0x20, 0x27, 0x27, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x27, 0x29, - 0x22, 0xa5, 0x01, 0x0a, 0x1e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, - 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x49, 0x0a, - 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x70, 0x61, - 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x01, 0x0a, 0x1d, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, - 0x69, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xba, 0x48, 0x05, 0x72, 0x03, 0xb0, 0x01, 0x01, - 0x48, 0x00, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, - 0x00, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x76, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x42, 0x0c, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x12, 0x05, 0xba, 0x48, 0x02, 0x08, 0x01, 0x22, - 0xc5, 0x03, 0x0a, 0x1e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, + 0x82, 0x01, 0x02, 0x20, 0x00, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, + 0x0a, 
0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x0b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x12, 0x1d, 0x0a, + 0x19, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, + 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x22, 0x22, 0x0a, 0x20, + 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x8c, 0x05, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0xac, 0x01, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x86, 0x01, 0xba, 0x48, 0x82, + 0x01, 0xba, 0x01, 0x7c, 0x0a, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, 0x31, + 0x31, 0x32, 0x33, 0x12, 0x3a, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, + 0x20, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, + 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, + 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, + 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, + 0xd8, 0x01, 0x01, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x0b, 0xba, 0x48, 0x08, 0xd8, 0x01, 0x01, 0x72, 0x03, 0xb0, 0x01, 0x01, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x54, + 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x52, 0x10, 0x70, 
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, + 0x72, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x8e, + 0x01, 0xba, 0x48, 0x8a, 0x01, 0x1a, 0x87, 0x01, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x30, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x74, 0x20, + 0x69, 0x66, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x20, 0x69, 0x73, 0x20, 0x73, 0x65, 0x74, 0x1a, 0x36, 0x21, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x21, 0x3d, + 0x20, 0x27, 0x27, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x27, 0x29, 0x22, + 0xa5, 0x01, 0x0a, 0x1e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, + 0x49, 0x74, 0x65, 0x6d, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x70, 0x61, 0x67, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x01, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x69, + 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xba, 0x48, 0x05, 0x72, 0x03, 0xb0, 0x01, 0x01, 0x48, + 0x00, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, + 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x42, 0x0c, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x12, 0x05, 0xba, 0x48, 0x02, 0x08, 0x01, 0x22, 0xc5, + 0x03, 0x0a, 0x1e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 
0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x1a, 0xf9, 0x01, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x43, 0x0a, 0x0c, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x1a, 0xf9, 0x01, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x43, 0x0a, - 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, - 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x75, 0x6e, 0x12, 0x42, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x66, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x56, 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x57, - 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x51, 0x0a, 0x27, 0x41, 0x74, 0x74, 0x65, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, - 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, - 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, - 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x22, 0xdf, 0x01, 0x0a, 0x28, 0x41, - 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, + 0x6e, 0x12, 0x42, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x66, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x56, 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x57, 0x0a, + 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, + 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x51, 0x0a, 0x27, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x55, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x1a, 0x59, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x39, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x41, 0x53, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, - 0x74, 0x65, 0x6d, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x32, 0xd3, 0x07, 0x0a, - 0x12, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 
0x65, 0x12, 0x73, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x64, 0x4f, 0x72, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2c, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, - 0x6e, 0x64, 0x4f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, - 0x4f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x22, 0xdf, 0x01, 0x0a, 0x28, 0x41, 0x74, + 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x1a, 0x59, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x39, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x41, 0x53, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x74, + 0x65, 0x6d, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x32, 0xd3, 0x07, 0x0a, 0x12, + 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x73, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x64, 0x4f, 0x72, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, + 0x64, 0x4f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x4f, + 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 
0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x2e, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, + 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x2e, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x6a, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x6f, 0x72, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x6f, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, - 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, - 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x12, 0x85, 0x01, 0x0a, 0x0e, - 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x12, 0x38, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, - 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x30, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x76, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x0e, 0x47, - 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x2e, + 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x0e, 0x47, + 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x12, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x63, 0x65, 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x32, 0xe6, 0x01, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x4c, 0x69, 0x73, - 
0x74, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, - 0x69, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, - 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4c, 0x5a, 0x4a, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, - 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, - 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x55, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x72, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x30, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x76, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x0e, 0x47, 0x65, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 
0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x32, 0xe6, 0x01, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, + 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x67, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x69, + 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x69, + 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, + 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, + 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/app/controlplane/api/controlplane/v1/workflow_run.proto b/app/controlplane/api/controlplane/v1/workflow_run.proto index 732d1f414..4651b13d6 100644 --- a/app/controlplane/api/controlplane/v1/workflow_run.proto +++ b/app/controlplane/api/controlplane/v1/workflow_run.proto @@ -191,7 +191,7 @@ message WorkflowRunServiceListRequest { // Filters // by workflow string workflow_name = 1 [(buf.validate.field) = { - ignore_empty: true + ignore: IGNORE_IF_ZERO_VALUE cel: { message: "must contain only lowercase letters, numbers, and hyphens." 
expression: "this.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')" @@ -206,7 +206,7 @@ message WorkflowRunServiceListRequest { // by project version string project_version = 5 [(buf.validate.field) = { string: {uuid: true} - ignore_empty: true + ignore: IGNORE_IF_ZERO_VALUE }]; // by policy violations status PolicyViolationsFilter policy_violations = 6; diff --git a/app/controlplane/api/gen/frontend/buf/validate/validate.ts b/app/controlplane/api/gen/frontend/buf/validate/validate.ts index 8441923fd..a6ea4e069 100644 --- a/app/controlplane/api/gen/frontend/buf/validate/validate.ts +++ b/app/controlplane/api/gen/frontend/buf/validate/validate.ts @@ -1,176 +1,110 @@ /* eslint-disable */ import Long from "long"; import _m0 from "protobufjs/minimal"; +import { + FieldDescriptorProto_Type, + fieldDescriptorProto_TypeFromJSON, + fieldDescriptorProto_TypeToJSON, +} from "../../google/protobuf/descriptor"; import { Duration } from "../../google/protobuf/duration"; +import { FieldMask } from "../../google/protobuf/field_mask"; import { Timestamp } from "../../google/protobuf/timestamp"; -import { Constraint } from "./expression"; export const protobufPackage = "buf.validate"; /** - * Specifies how FieldConstraints.ignore behaves. See the documentation for - * FieldConstraints.required for definitions of "populated" and "nullable". + * Specifies how `FieldRules.ignore` behaves, depending on the field's value, and + * whether the field tracks presence. */ export enum Ignore { /** - * IGNORE_UNSPECIFIED - Validation is only skipped if it's an unpopulated nullable fields. + * IGNORE_UNSPECIFIED - Ignore rules if the field tracks presence and is unset. This is the default + * behavior. + * + * In proto3, only message fields, members of a Protobuf `oneof`, and fields + * with the `optional` label track presence. Consequently, the following fields + * are always validated, whether a value is set or not: * * ```proto * syntax="proto3"; * - * message Request { - * // The uri rule applies to any value, including the empty string. - * string foo = 1 [ - * (buf.validate.field).string.uri = true - * ]; - * - * // The uri rule only applies if the field is set, including if it's - * // set to the empty string. - * optional string bar = 2 [ - * (buf.validate.field).string.uri = true + * message RulesApply { + * string email = 1 [ + * (buf.validate.field).string.email = true * ]; - * - * // The min_items rule always applies, even if the list is empty. - * repeated string baz = 3 [ - * (buf.validate.field).repeated.min_items = 3 + * int32 age = 2 [ + * (buf.validate.field).int32.gt = 0 * ]; - * - * // The custom CEL rule applies only if the field is set, including if - * // it's the "zero" value of that message. - * SomeMessage quux = 4 [ - * (buf.validate.field).cel = {/* ... * /} + * repeated string labels = 3 [ + * (buf.validate.field).repeated.min_items = 1 * ]; * } * ``` - */ - IGNORE_UNSPECIFIED = 0, - /** - * IGNORE_IF_UNPOPULATED - Validation is skipped if the field is unpopulated. This rule is redundant - * if the field is already nullable. This value is equivalent behavior to the - * deprecated ignore_empty rule. - * - * ```proto - * syntax="proto3 - * - * message Request { - * // The uri rule applies only if the value is not the empty string. 
- * string foo = 1 [ - * (buf.validate.field).string.uri = true, - * (buf.validate.field).ignore = IGNORE_IF_UNPOPULATED - * ]; - * - * // IGNORE_IF_UNPOPULATED is equivalent to IGNORE_UNSPECIFIED in this - * // case: the uri rule only applies if the field is set, including if - * // it's set to the empty string. - * optional string bar = 2 [ - * (buf.validate.field).string.uri = true, - * (buf.validate.field).ignore = IGNORE_IF_UNPOPULATED - * ]; - * - * // The min_items rule only applies if the list has at least one item. - * repeated string baz = 3 [ - * (buf.validate.field).repeated.min_items = 3, - * (buf.validate.field).ignore = IGNORE_IF_UNPOPULATED - * ]; * - * // IGNORE_IF_UNPOPULATED is equivalent to IGNORE_UNSPECIFIED in this - * // case: the custom CEL rule applies only if the field is set, including - * // if it's the "zero" value of that message. - * SomeMessage quux = 4 [ - * (buf.validate.field).cel = {/* ... * /}, - * (buf.validate.field).ignore = IGNORE_IF_UNPOPULATED - * ]; - * } - * ``` - */ - IGNORE_IF_UNPOPULATED = 1, - /** - * IGNORE_IF_DEFAULT_VALUE - Validation is skipped if the field is unpopulated or if it is a nullable - * field populated with its default value. This is typically the zero or - * empty value, but proto2 scalars support custom defaults. For messages, the - * default is a non-null message with all its fields unpopulated. + * In contrast, the following fields track presence, and are only validated if + * a value is set: * * ```proto - * syntax="proto3 - * - * message Request { - * // IGNORE_IF_DEFAULT_VALUE is equivalent to IGNORE_IF_UNPOPULATED in - * // this case; the uri rule applies only if the value is not the empty - * // string. - * string foo = 1 [ - * (buf.validate.field).string.uri = true, - * (buf.validate.field).ignore = IGNORE_IF_DEFAULT_VALUE - * ]; - * - * // The uri rule only applies if the field is set to a value other than - * // the empty string. - * optional string bar = 2 [ - * (buf.validate.field).string.uri = true, - * (buf.validate.field).ignore = IGNORE_IF_DEFAULT_VALUE - * ]; + * syntax="proto3"; * - * // IGNORE_IF_DEFAULT_VALUE is equivalent to IGNORE_IF_UNPOPULATED in - * // this case; the min_items rule only applies if the list has at least - * // one item. - * repeated string baz = 3 [ - * (buf.validate.field).repeated.min_items = 3, - * (buf.validate.field).ignore = IGNORE_IF_DEFAULT_VALUE + * message RulesApplyIfSet { + * optional string email = 1 [ + * (buf.validate.field).string.email = true * ]; - * - * // The custom CEL rule only applies if the field is set to a value other - * // than an empty message (i.e., fields are unpopulated). - * SomeMessage quux = 4 [ - * (buf.validate.field).cel = {/* ... * /}, - * (buf.validate.field).ignore = IGNORE_IF_DEFAULT_VALUE + * oneof ref { + * string reference = 2 [ + * (buf.validate.field).string.uuid = true + * ]; + * string name = 3 [ + * (buf.validate.field).string.min_len = 4 + * ]; + * } + * SomeMessage msg = 4 [ + * (buf.validate.field).cel = {/* ... * /} * ]; * } * ``` * - * This rule is affected by proto2 custom default values: + * To ensure that such a field is set, add the `required` rule. * - * ```proto - * syntax="proto2"; + * To learn which fields track presence, see the + * [Field Presence cheat sheet](https://protobuf.dev/programming-guides/field_presence/#cheat). + */ + IGNORE_UNSPECIFIED = 0, + /** + * IGNORE_IF_ZERO_VALUE - Ignore rules if the field is unset, or set to the zero value. 
* - * message Request { - * // The gt rule only applies if the field is set and it's value is not - * the default (i.e., not -42). The rule even applies if the field is set - * to zero since the default value differs. - * optional int32 value = 1 [ - * default = -42, - * (buf.validate.field).int32.gt = 0, - * (buf.validate.field).ignore = IGNORE_IF_DEFAULT_VALUE - * ]; - * } + * The zero value depends on the field type: + * - For strings, the zero value is the empty string. + * - For bytes, the zero value is empty bytes. + * - For bool, the zero value is false. + * - For numeric types, the zero value is zero. + * - For enums, the zero value is the first defined enum value. + * - For repeated fields, the zero is an empty list. + * - For map fields, the zero is an empty map. + * - For message fields, absence of the message (typically a null-value) is considered zero value. + * + * For fields that track presence (e.g. adding the `optional` label in proto3), + * this a no-op and behavior is the same as the default `IGNORE_UNSPECIFIED`. */ - IGNORE_IF_DEFAULT_VALUE = 2, + IGNORE_IF_ZERO_VALUE = 1, /** - * IGNORE_ALWAYS - The validation rules of this field will be skipped and not evaluated. This - * is useful for situations that necessitate turning off the rules of a field - * containing a message that may not make sense in the current context, or to - * temporarily disable constraints during development. + * IGNORE_ALWAYS - Always ignore rules, including the `required` rule. + * + * This is useful for ignoring the rules of a referenced message, or to + * temporarily ignore rules during development. * * ```proto * message MyMessage { - * // The field's rules will always be ignored, including any validation's + * // The field's rules will always be ignored, including any validations * // on value's fields. * MyOtherMessage value = 1 [ - * (buf.validate.field).ignore = IGNORE_ALWAYS]; + * (buf.validate.field).ignore = IGNORE_ALWAYS + * ]; * } * ``` */ IGNORE_ALWAYS = 3, - /** - * IGNORE_EMPTY - Deprecated: Use IGNORE_IF_UNPOPULATED instead. TODO: Remove this value pre-v1. - * - * @deprecated - */ - IGNORE_EMPTY = 1, - /** - * IGNORE_DEFAULT - Deprecated: Use IGNORE_IF_DEFAULT_VALUE. TODO: Remove this value pre-v1. 
- * - * @deprecated - */ - IGNORE_DEFAULT = 2, UNRECOGNIZED = -1, } @@ -180,20 +114,11 @@ export function ignoreFromJSON(object: any): Ignore { case "IGNORE_UNSPECIFIED": return Ignore.IGNORE_UNSPECIFIED; case 1: - case "IGNORE_IF_UNPOPULATED": - return Ignore.IGNORE_IF_UNPOPULATED; - case 2: - case "IGNORE_IF_DEFAULT_VALUE": - return Ignore.IGNORE_IF_DEFAULT_VALUE; + case "IGNORE_IF_ZERO_VALUE": + return Ignore.IGNORE_IF_ZERO_VALUE; case 3: case "IGNORE_ALWAYS": return Ignore.IGNORE_ALWAYS; - case 1: - case "IGNORE_EMPTY": - return Ignore.IGNORE_EMPTY; - case 2: - case "IGNORE_DEFAULT": - return Ignore.IGNORE_DEFAULT; case -1: case "UNRECOGNIZED": default: @@ -205,28 +130,22 @@ export function ignoreToJSON(object: Ignore): string { switch (object) { case Ignore.IGNORE_UNSPECIFIED: return "IGNORE_UNSPECIFIED"; - case Ignore.IGNORE_IF_UNPOPULATED: - return "IGNORE_IF_UNPOPULATED"; - case Ignore.IGNORE_IF_DEFAULT_VALUE: - return "IGNORE_IF_DEFAULT_VALUE"; + case Ignore.IGNORE_IF_ZERO_VALUE: + return "IGNORE_IF_ZERO_VALUE"; case Ignore.IGNORE_ALWAYS: return "IGNORE_ALWAYS"; - case Ignore.IGNORE_EMPTY: - return "IGNORE_EMPTY"; - case Ignore.IGNORE_DEFAULT: - return "IGNORE_DEFAULT"; case Ignore.UNRECOGNIZED: default: return "UNRECOGNIZED"; } } -/** WellKnownRegex contain some well-known patterns. */ +/** KnownRegex contains some well-known patterns. */ export enum KnownRegex { KNOWN_REGEX_UNSPECIFIED = 0, - /** KNOWN_REGEX_HTTP_HEADER_NAME - HTTP header name as defined by [RFC 7230](https://tools.ietf.org/html/rfc7230#section-3.2). */ + /** KNOWN_REGEX_HTTP_HEADER_NAME - HTTP header name as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2). */ KNOWN_REGEX_HTTP_HEADER_NAME = 1, - /** KNOWN_REGEX_HTTP_HEADER_VALUE - HTTP header value as defined by [RFC 7230](https://tools.ietf.org/html/rfc7230#section-3.2.4). */ + /** KNOWN_REGEX_HTTP_HEADER_VALUE - HTTP header value as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.4). */ KNOWN_REGEX_HTTP_HEADER_VALUE = 2, UNRECOGNIZED = -1, } @@ -264,28 +183,74 @@ export function knownRegexToJSON(object: KnownRegex): string { } /** - * MessageConstraints represents validation rules that are applied to the entire message. - * It includes disabling options and a list of Constraint messages representing Common Expression Language (CEL) validation rules. + * `Rule` represents a validation rule written in the Common Expression + * Language (CEL) syntax. Each Rule includes a unique identifier, an + * optional error message, and the CEL expression to evaluate. For more + * information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + * + * ```proto + * message Foo { + * option (buf.validate.message).cel = { + * id: "foo.bar" + * message: "bar must be greater than 0" + * expression: "this.bar > 0" + * }; + * int32 bar = 1; + * } + * ``` + */ +export interface Rule { + /** + * `id` is a string that serves as a machine-readable name for this Rule. + * It should be unique within its scope, which could be either a message or a field. + */ + id: string; + /** + * `message` is an optional field that provides a human-readable error message + * for this Rule when the CEL expression evaluates to false. If a + * non-empty message is provided, any strings resulting from the CEL + * expression evaluation are ignored. + */ + message: string; + /** + * `expression` is the actual CEL expression that will be evaluated for + * validation. 
This string must resolve to either a boolean or a string + * value. If the expression evaluates to false or a non-empty string, the + * validation is considered failed, and the message is rejected. + */ + expression: string; +} + +/** + * MessageRules represents validation rules that are applied to the entire message. + * It includes disabling options and a list of Rule messages representing Common Expression Language (CEL) validation rules. */ -export interface MessageConstraints { +export interface MessageRules { /** - * `disabled` is a boolean flag that, when set to true, nullifies any validation rules for this message. - * This includes any fields within the message that would otherwise support validation. + * `cel_expression` is a repeated field CEL expressions. Each expression specifies a validation + * rule to be applied to this message. These rules are written in Common Expression Language (CEL) syntax. + * + * This is a simplified form of the `cel` Rule field, where only `expression` is set. This allows for + * simpler syntax when defining CEL Rules where `id` and `message` derived from the `expression`. `id` will + * be same as the `expression`. + * + * For more information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). * * ```proto * message MyMessage { - * // validation will be bypassed for this message - * option (buf.validate.message).disabled = true; + * // The field `foo` must be greater than 42. + * option (buf.validate.message).cel_expression = "this.foo > 42"; + * // The field `foo` must be less than 84. + * option (buf.validate.message).cel_expression = "this.foo < 84"; + * optional int32 foo = 1; * } * ``` */ - disabled?: - | boolean - | undefined; + celExpression: string[]; /** - * `cel` is a repeated field of type Constraint. Each Constraint specifies a validation rule to be applied to this message. - * These constraints are written in Common Expression Language (CEL) syntax. For more information on - * CEL, [see our documentation](https://github.com/bufbuild/protovalidate/blob/main/docs/cel.md). + * `cel` is a repeated field of type Rule. Each Rule specifies a validation rule to be applied to this message. + * These rules are written in Common Expression Language (CEL) syntax. For more information, + * [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). * * ```proto * message MyMessage { @@ -299,18 +264,65 @@ export interface MessageConstraints { * } * ``` */ - cel: Constraint[]; + cel: Rule[]; + /** + * `oneof` is a repeated field of type MessageOneofRule that specifies a list of fields + * of which at most one can be present. If `required` is also specified, then exactly one + * of the specified fields _must_ be present. + * + * This will enforce oneof-like constraints with a few features not provided by + * actual Protobuf oneof declarations: + * 1. Repeated and map fields are allowed in this validation. In a Protobuf oneof, + * only scalar fields are allowed. + * 2. Fields with implicit presence are allowed. In a Protobuf oneof, all member + * fields have explicit presence. This means that, for the purpose of determining + * how many fields are set, explicitly setting such a field to its zero value is + * effectively the same as not setting it at all. + * 3. This will always generate validation errors for a message unmarshalled from + * serialized data that sets more than one field. 
With a Protobuf oneof, when + * multiple fields are present in the serialized form, earlier values are usually + * silently ignored when unmarshalling, with only the last field being set when + * unmarshalling completes. + * + * Note that adding a field to a `oneof` will also set the IGNORE_IF_ZERO_VALUE on the fields. This means + * only the field that is set will be validated and the unset fields are not validated according to the field rules. + * This behavior can be overridden by setting `ignore` against a field. + * + * ```proto + * message MyMessage { + * // Only one of `field1` or `field2` _can_ be present in this message. + * option (buf.validate.message).oneof = { fields: ["field1", "field2"] }; + * // Exactly one of `field3` or `field4` _must_ be present in this message. + * option (buf.validate.message).oneof = { fields: ["field3", "field4"], required: true }; + * string field1 = 1; + * bytes field2 = 2; + * bool field3 = 3; + * int32 field4 = 4; + * } + * ``` + */ + oneof: MessageOneofRule[]; +} + +export interface MessageOneofRule { + /** + * A list of field names to include in the oneof. All field names must be + * defined in the message. At least one field must be specified, and + * duplicates are not permitted. + */ + fields: string[]; + /** If true, one of the fields specified _must_ be set. */ + required: boolean; } /** - * The `OneofConstraints` message type enables you to manage constraints for + * The `OneofRules` message type enables you to manage rules for * oneof fields in your protobuf messages. */ -export interface OneofConstraints { +export interface OneofRules { /** - * If `required` is true, exactly one field of the oneof must be present. A - * validation error is returned if no fields in the oneof are present. The - * field itself may still be a default value; further constraints + * If `required` is true, exactly one field of the oneof must be set. A + * validation error is returned if no fields in the oneof are set. Further rules * should be placed on the fields themselves to ensure they are valid values, * such as `min_len` or `gt`. * @@ -326,18 +338,36 @@ export interface OneofConstraints { * } * ``` */ - required?: boolean | undefined; + required: boolean; } /** - * FieldConstraints encapsulates the rules for each type of field. Depending on + * FieldRules encapsulates the rules for each type of field. Depending on * the field, the correct set should be used to ensure proper validations. */ -export interface FieldConstraints { +export interface FieldRules { + /** + * `cel_expression` is a repeated field CEL expressions. Each expression specifies a validation + * rule to be applied to this message. These rules are written in Common Expression Language (CEL) syntax. + * + * This is a simplified form of the `cel` Rule field, where only `expression` is set. This allows for + * simpler syntax when defining CEL Rules where `id` and `message` derived from the `expression`. `id` will + * be same as the `expression`. + * + * For more information, [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). + * + * ```proto + * message MyMessage { + * // The field `value` must be greater than 42. + * optional int32 value = 1 [(buf.validate.field).cel_expression = "this > 42"]; + * } + * ``` + */ + celExpression: string[]; /** * `cel` is a repeated field used to represent a textual expression - * in the Common Expression Language (CEL) syntax. 
For more information on - * CEL, [see our documentation](https://github.com/bufbuild/protovalidate/blob/main/docs/cel.md). + * in the Common Expression Language (CEL) syntax. For more information, + * [see our documentation](https://buf.build/docs/protovalidate/schemas/custom-rules/). * * ```proto * message MyMessage { @@ -350,38 +380,76 @@ export interface FieldConstraints { * } * ``` */ - cel: Constraint[]; + cel: Rule[]; /** - * If `required` is true, the field must be populated. A populated field can be - * described as "serialized in the wire format," which includes: + * If `required` is true, the field must be set. A validation error is returned + * if the field is not set. + * + * ```proto + * syntax="proto3"; + * + * message FieldsWithPresence { + * // Requires any string to be set, including the empty string. + * optional string link = 1 [ + * (buf.validate.field).required = true + * ]; + * // Requires true or false to be set. + * optional bool disabled = 2 [ + * (buf.validate.field).required = true + * ]; + * // Requires a message to be set, including the empty message. + * SomeMessage msg = 4 [ + * (buf.validate.field).required = true + * ]; + * } + * ``` * - * - the following "nullable" fields must be explicitly set to be considered populated: - * - singular message fields (whose fields may be unpopulated/default values) - * - member fields of a oneof (may be their default value) - * - proto3 optional fields (may be their default value) - * - proto2 scalar fields (both optional and required) - * - proto3 scalar fields must be non-zero to be considered populated - * - repeated and map fields must be non-empty to be considered populated + * All fields in the example above track presence. By default, Protovalidate + * ignores rules on those fields if no value is set. `required` ensures that + * the fields are set and valid. + * + * Fields that don't track presence are always validated by Protovalidate, + * whether they are set or not. It is not necessary to add `required`. It + * can be added to indicate that the field cannot be the zero value. * * ```proto - * message MyMessage { - * // The field `value` must be set to a non-null value. - * optional MyOtherMessage value = 1 [(buf.validate.field).required = true]; + * syntax="proto3"; + * + * message FieldsWithoutPresence { + * // `string.email` always applies, even to an empty string. + * string link = 1 [ + * (buf.validate.field).string.email = true + * ]; + * // `repeated.min_items` always applies, even to an empty list. + * repeated string labels = 2 [ + * (buf.validate.field).repeated.min_items = 1 + * ]; + * // `required`, for fields that don't track presence, indicates + * // the value of the field can't be the zero value. + * int32 zero_value_not_allowed = 3 [ + * (buf.validate.field).required = true + * ]; * } * ``` + * + * To learn which fields track presence, see the + * [Field Presence cheat sheet](https://protobuf.dev/programming-guides/field_presence/#cheat). + * + * Note: While field rules can be applied to repeated items, map keys, and map + * values, the elements are always considered to be set. Consequently, + * specifying `repeated.items.required` is redundant. */ required: boolean; /** - * Skip validation on the field if its value matches the specified criteria. - * See Ignore enum for details. + * Ignore validation rules on the field if its value matches the specified + * criteria. See the `Ignore` enum for details. 
* * ```proto * message UpdateRequest { - * // The uri rule only applies if the field is populated and not an empty - * // string. - * optional string url = 1 [ - * (buf.validate.field).ignore = IGNORE_IF_DEFAULT_VALUE, - * (buf.validate.field).string.uri = true, + * // The uri rule only applies if the field is not an empty string. + * string url = 1 [ + * (buf.validate.field).ignore = IGNORE_IF_ZERO_VALUE, + * (buf.validate.field).string.uri = true * ]; * } * ``` @@ -414,25 +482,36 @@ export interface FieldConstraints { /** Well-Known Field Types */ any?: AnyRules | undefined; duration?: DurationRules | undefined; - timestamp?: - | TimestampRules - | undefined; - /** - * DEPRECATED: use ignore=IGNORE_ALWAYS instead. TODO: remove this field pre-v1. - * - * @deprecated - */ - skipped: boolean; + fieldMask?: FieldMaskRules | undefined; + timestamp?: TimestampRules | undefined; +} + +/** + * PredefinedRules are custom rules that can be re-used with + * multiple fields. + */ +export interface PredefinedRules { /** - * DEPRECATED: use ignore=IGNORE_IF_UNPOPULATED instead. TODO: remove this field pre-v1. + * `cel` is a repeated field used to represent a textual expression + * in the Common Expression Language (CEL) syntax. For more information, + * [see our documentation](https://buf.build/docs/protovalidate/schemas/predefined-rules/). * - * @deprecated + * ```proto + * message MyMessage { + * // The field `value` must be greater than 42. + * optional int32 value = 1 [(buf.validate.predefined).cel = { + * id: "my_message.value", + * message: "value must be greater than 42", + * expression: "this > 42", + * }]; + * } + * ``` */ - ignoreEmpty: boolean; + cel: Rule[]; } /** - * FloatRules describes the constraints applied to `float` values. These + * FloatRules describes the rules applied to `float` values. These * rules may also be applied to the `google.protobuf.FloatValue` Well-Known-Type. */ export interface FloatRules { @@ -447,9 +526,7 @@ export interface FloatRules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). If the field value is equal to or greater than the specified value, @@ -534,7 +611,7 @@ export interface FloatRules { * ```proto * message MyFloat { * // value must be in list [1.0, 2.0, 3.0] - * repeated float value = 1 (buf.validate.field).float = { in: [1.0, 2.0, 3.0] }; + * float value = 1 [(buf.validate.field).float = { in: [1.0, 2.0, 3.0] }]; * } * ``` */ @@ -547,7 +624,7 @@ export interface FloatRules { * ```proto * message MyFloat { * // value must not be in list [1.0, 2.0, 3.0] - * repeated float value = 1 (buf.validate.field).float = { not_in: [1.0, 2.0, 3.0] }; + * float value = 1 [(buf.validate.field).float = { not_in: [1.0, 2.0, 3.0] }]; * } * ``` */ @@ -557,10 +634,25 @@ export interface FloatRules { * infinite or NaN, an error message is generated. */ finite: boolean; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyFloat { + * float value = 1 [ + * (buf.validate.field).float.example = 1.0, + * (buf.validate.field).float.example = inf + * ]; + * } + * ``` + */ + example: number[]; } /** - * DoubleRules describes the constraints applied to `double` values. These + * DoubleRules describes the rules applied to `double` values. 
These * rules may also be applied to the `google.protobuf.DoubleValue` Well-Known-Type. */ export interface DoubleRules { @@ -575,9 +667,7 @@ export interface DoubleRules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). If the field value is equal to or greater than the specified @@ -662,7 +752,7 @@ export interface DoubleRules { * ```proto * message MyDouble { * // value must be in list [1.0, 2.0, 3.0] - * repeated double value = 1 (buf.validate.field).double = { in: [1.0, 2.0, 3.0] }; + * double value = 1 [(buf.validate.field).double = { in: [1.0, 2.0, 3.0] }]; * } * ``` */ @@ -675,7 +765,7 @@ export interface DoubleRules { * ```proto * message MyDouble { * // value must not be in list [1.0, 2.0, 3.0] - * repeated double value = 1 (buf.validate.field).double = { not_in: [1.0, 2.0, 3.0] }; + * double value = 1 [(buf.validate.field).double = { not_in: [1.0, 2.0, 3.0] }]; * } * ``` */ @@ -685,10 +775,25 @@ export interface DoubleRules { * infinite or NaN, an error message is generated. */ finite: boolean; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyDouble { + * double value = 1 [ + * (buf.validate.field).double.example = 1.0, + * (buf.validate.field).double.example = inf + * ]; + * } + * ``` + */ + example: number[]; } /** - * Int32Rules describes the constraints applied to `int32` values. These + * Int32Rules describes the rules applied to `int32` values. These * rules may also be applied to the `google.protobuf.Int32Value` Well-Known-Type. */ export interface Int32Rules { @@ -703,9 +808,7 @@ export interface Int32Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field * < value). If the field value is equal to or greater than the specified @@ -790,7 +893,7 @@ export interface Int32Rules { * ```proto * message MyInt32 { * // value must be in list [1, 2, 3] - * repeated int32 value = 1 (buf.validate.field).int32 = { in: [1, 2, 3] }; + * int32 value = 1 [(buf.validate.field).int32 = { in: [1, 2, 3] }]; * } * ``` */ @@ -803,15 +906,30 @@ export interface Int32Rules { * ```proto * message MyInt32 { * // value must not be in list [1, 2, 3] - * repeated int32 value = 1 (buf.validate.field).int32 = { not_in: [1, 2, 3] }; + * int32 value = 1 [(buf.validate.field).int32 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyInt32 { + * int32 value = 1 [ + * (buf.validate.field).int32.example = 1, + * (buf.validate.field).int32.example = -10 + * ]; + * } + * ``` + */ + example: number[]; } /** - * Int64Rules describes the constraints applied to `int64` values. These + * Int64Rules describes the rules applied to `int64` values. These * rules may also be applied to the `google.protobuf.Int64Value` Well-Known-Type. 
*/ export interface Int64Rules { @@ -826,9 +944,7 @@ export interface Int64Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). If the field value is equal to or greater than the specified value, @@ -913,7 +1029,7 @@ export interface Int64Rules { * ```proto * message MyInt64 { * // value must be in list [1, 2, 3] - * repeated int64 value = 1 (buf.validate.field).int64 = { in: [1, 2, 3] }; + * int64 value = 1 [(buf.validate.field).int64 = { in: [1, 2, 3] }]; * } * ``` */ @@ -926,15 +1042,30 @@ export interface Int64Rules { * ```proto * message MyInt64 { * // value must not be in list [1, 2, 3] - * repeated int64 value = 1 (buf.validate.field).int64 = { not_in: [1, 2, 3] }; + * int64 value = 1 [(buf.validate.field).int64 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyInt64 { + * int64 value = 1 [ + * (buf.validate.field).int64.example = 1, + * (buf.validate.field).int64.example = -10 + * ]; + * } + * ``` + */ + example: number[]; } /** - * UInt32Rules describes the constraints applied to `uint32` values. These + * UInt32Rules describes the rules applied to `uint32` values. These * rules may also be applied to the `google.protobuf.UInt32Value` Well-Known-Type. */ export interface UInt32Rules { @@ -949,9 +1080,7 @@ export interface UInt32Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). If the field value is equal to or greater than the specified value, @@ -1036,7 +1165,7 @@ export interface UInt32Rules { * ```proto * message MyUInt32 { * // value must be in list [1, 2, 3] - * repeated uint32 value = 1 (buf.validate.field).uint32 = { in: [1, 2, 3] }; + * uint32 value = 1 [(buf.validate.field).uint32 = { in: [1, 2, 3] }]; * } * ``` */ @@ -1049,15 +1178,30 @@ export interface UInt32Rules { * ```proto * message MyUInt32 { * // value must not be in list [1, 2, 3] - * repeated uint32 value = 1 (buf.validate.field).uint32 = { not_in: [1, 2, 3] }; + * uint32 value = 1 [(buf.validate.field).uint32 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyUInt32 { + * uint32 value = 1 [ + * (buf.validate.field).uint32.example = 1, + * (buf.validate.field).uint32.example = 10 + * ]; + * } + * ``` + */ + example: number[]; } /** - * UInt64Rules describes the constraints applied to `uint64` values. These + * UInt64Rules describes the rules applied to `uint64` values. These * rules may also be applied to the `google.protobuf.UInt64Value` Well-Known-Type. */ export interface UInt64Rules { @@ -1072,9 +1216,7 @@ export interface UInt64Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). 
If the field value is equal to or greater than the specified value, @@ -1159,7 +1301,7 @@ export interface UInt64Rules { * ```proto * message MyUInt64 { * // value must be in list [1, 2, 3] - * repeated uint64 value = 1 (buf.validate.field).uint64 = { in: [1, 2, 3] }; + * uint64 value = 1 [(buf.validate.field).uint64 = { in: [1, 2, 3] }]; * } * ``` */ @@ -1172,14 +1314,29 @@ export interface UInt64Rules { * ```proto * message MyUInt64 { * // value must not be in list [1, 2, 3] - * repeated uint64 value = 1 (buf.validate.field).uint64 = { not_in: [1, 2, 3] }; + * uint64 value = 1 [(buf.validate.field).uint64 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyUInt64 { + * uint64 value = 1 [ + * (buf.validate.field).uint64.example = 1, + * (buf.validate.field).uint64.example = -10 + * ]; + * } + * ``` + */ + example: number[]; } -/** SInt32Rules describes the constraints applied to `sint32` values. */ +/** SInt32Rules describes the rules applied to `sint32` values. */ export interface SInt32Rules { /** * `const` requires the field value to exactly match the specified value. If @@ -1192,9 +1349,7 @@ export interface SInt32Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field * < value). If the field value is equal to or greater than the specified @@ -1279,7 +1434,7 @@ export interface SInt32Rules { * ```proto * message MySInt32 { * // value must be in list [1, 2, 3] - * repeated sint32 value = 1 (buf.validate.field).sint32 = { in: [1, 2, 3] }; + * sint32 value = 1 [(buf.validate.field).sint32 = { in: [1, 2, 3] }]; * } * ``` */ @@ -1292,14 +1447,29 @@ export interface SInt32Rules { * ```proto * message MySInt32 { * // value must not be in list [1, 2, 3] - * repeated sint32 value = 1 (buf.validate.field).sint32 = { not_in: [1, 2, 3] }; + * sint32 value = 1 [(buf.validate.field).sint32 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MySInt32 { + * sint32 value = 1 [ + * (buf.validate.field).sint32.example = 1, + * (buf.validate.field).sint32.example = -10 + * ]; + * } + * ``` + */ + example: number[]; } -/** SInt64Rules describes the constraints applied to `sint64` values. */ +/** SInt64Rules describes the rules applied to `sint64` values. */ export interface SInt64Rules { /** * `const` requires the field value to exactly match the specified value. If @@ -1312,9 +1482,7 @@ export interface SInt64Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field * < value). 
If the field value is equal to or greater than the specified @@ -1399,7 +1567,7 @@ export interface SInt64Rules { * ```proto * message MySInt64 { * // value must be in list [1, 2, 3] - * repeated sint64 value = 1 (buf.validate.field).sint64 = { in: [1, 2, 3] }; + * sint64 value = 1 [(buf.validate.field).sint64 = { in: [1, 2, 3] }]; * } * ``` */ @@ -1412,14 +1580,29 @@ export interface SInt64Rules { * ```proto * message MySInt64 { * // value must not be in list [1, 2, 3] - * repeated sint64 value = 1 (buf.validate.field).sint64 = { not_in: [1, 2, 3] }; + * sint64 value = 1 [(buf.validate.field).sint64 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MySInt64 { + * sint64 value = 1 [ + * (buf.validate.field).sint64.example = 1, + * (buf.validate.field).sint64.example = -10 + * ]; + * } + * ``` + */ + example: number[]; } -/** Fixed32Rules describes the constraints applied to `fixed32` values. */ +/** Fixed32Rules describes the rules applied to `fixed32` values. */ export interface Fixed32Rules { /** * `const` requires the field value to exactly match the specified value. @@ -1432,9 +1615,7 @@ export interface Fixed32Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). If the field value is equal to or greater than the specified value, @@ -1519,7 +1700,7 @@ export interface Fixed32Rules { * ```proto * message MyFixed32 { * // value must be in list [1, 2, 3] - * repeated fixed32 value = 1 (buf.validate.field).fixed32 = { in: [1, 2, 3] }; + * fixed32 value = 1 [(buf.validate.field).fixed32 = { in: [1, 2, 3] }]; * } * ``` */ @@ -1532,14 +1713,29 @@ export interface Fixed32Rules { * ```proto * message MyFixed32 { * // value must not be in list [1, 2, 3] - * repeated fixed32 value = 1 (buf.validate.field).fixed32 = { not_in: [1, 2, 3] }; + * fixed32 value = 1 [(buf.validate.field).fixed32 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyFixed32 { + * fixed32 value = 1 [ + * (buf.validate.field).fixed32.example = 1, + * (buf.validate.field).fixed32.example = 2 + * ]; + * } + * ``` + */ + example: number[]; } -/** Fixed64Rules describes the constraints applied to `fixed64` values. */ +/** Fixed64Rules describes the rules applied to `fixed64` values. */ export interface Fixed64Rules { /** * `const` requires the field value to exactly match the specified value. If @@ -1552,9 +1748,7 @@ export interface Fixed64Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). 
If the field value is equal to or greater than the specified value, @@ -1639,7 +1833,7 @@ export interface Fixed64Rules { * ```proto * message MyFixed64 { * // value must be in list [1, 2, 3] - * repeated fixed64 value = 1 (buf.validate.field).fixed64 = { in: [1, 2, 3] }; + * fixed64 value = 1 [(buf.validate.field).fixed64 = { in: [1, 2, 3] }]; * } * ``` */ @@ -1652,14 +1846,29 @@ export interface Fixed64Rules { * ```proto * message MyFixed64 { * // value must not be in list [1, 2, 3] - * repeated fixed64 value = 1 (buf.validate.field).fixed64 = { not_in: [1, 2, 3] }; + * fixed64 value = 1 [(buf.validate.field).fixed64 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyFixed64 { + * fixed64 value = 1 [ + * (buf.validate.field).fixed64.example = 1, + * (buf.validate.field).fixed64.example = 2 + * ]; + * } + * ``` + */ + example: number[]; } -/** SFixed32Rules describes the constraints applied to `fixed32` values. */ +/** SFixed32Rules describes the rules applied to `fixed32` values. */ export interface SFixed32Rules { /** * `const` requires the field value to exactly match the specified value. If @@ -1672,9 +1881,7 @@ export interface SFixed32Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). If the field value is equal to or greater than the specified value, @@ -1759,7 +1966,7 @@ export interface SFixed32Rules { * ```proto * message MySFixed32 { * // value must be in list [1, 2, 3] - * repeated sfixed32 value = 1 (buf.validate.field).sfixed32 = { in: [1, 2, 3] }; + * sfixed32 value = 1 [(buf.validate.field).sfixed32 = { in: [1, 2, 3] }]; * } * ``` */ @@ -1772,14 +1979,29 @@ export interface SFixed32Rules { * ```proto * message MySFixed32 { * // value must not be in list [1, 2, 3] - * repeated sfixed32 value = 1 (buf.validate.field).sfixed32 = { not_in: [1, 2, 3] }; + * sfixed32 value = 1 [(buf.validate.field).sfixed32 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MySFixed32 { + * sfixed32 value = 1 [ + * (buf.validate.field).sfixed32.example = 1, + * (buf.validate.field).sfixed32.example = 2 + * ]; + * } + * ``` + */ + example: number[]; } -/** SFixed64Rules describes the constraints applied to `fixed64` values. */ +/** SFixed64Rules describes the rules applied to `fixed64` values. */ export interface SFixed64Rules { /** * `const` requires the field value to exactly match the specified value. If @@ -1792,9 +2014,7 @@ export interface SFixed64Rules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `lt` requires the field value to be less than the specified value (field < * value). 
If the field value is equal to or greater than the specified value, @@ -1879,7 +2099,7 @@ export interface SFixed64Rules { * ```proto * message MySFixed64 { * // value must be in list [1, 2, 3] - * repeated sfixed64 value = 1 (buf.validate.field).sfixed64 = { in: [1, 2, 3] }; + * sfixed64 value = 1 [(buf.validate.field).sfixed64 = { in: [1, 2, 3] }]; * } * ``` */ @@ -1892,15 +2112,30 @@ export interface SFixed64Rules { * ```proto * message MySFixed64 { * // value must not be in list [1, 2, 3] - * repeated sfixed64 value = 1 (buf.validate.field).sfixed64 = { not_in: [1, 2, 3] }; + * sfixed64 value = 1 [(buf.validate.field).sfixed64 = { not_in: [1, 2, 3] }]; * } * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MySFixed64 { + * sfixed64 value = 1 [ + * (buf.validate.field).sfixed64.example = 1, + * (buf.validate.field).sfixed64.example = 2 + * ]; + * } + * ``` + */ + example: number[]; } /** - * BoolRules describes the constraints applied to `bool` values. These rules + * BoolRules describes the rules applied to `bool` values. These rules * may also be applied to the `google.protobuf.BoolValue` Well-Known-Type. */ export interface BoolRules { @@ -1915,11 +2150,26 @@ export interface BoolRules { * } * ``` */ - const?: boolean | undefined; + const: boolean; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyBool { + * bool value = 1 [ + * (buf.validate.field).bool.example = 1, + * (buf.validate.field).bool.example = 2 + * ]; + * } + * ``` + */ + example: boolean[]; } /** - * StringRules describes the constraints applied to `string` values These + * StringRules describes the rules applied to `string` values These * rules may also be applied to the `google.protobuf.StringValue` Well-Known-Type. */ export interface StringRules { @@ -1934,9 +2184,7 @@ export interface StringRules { * } * ``` */ - const?: - | string - | undefined; + const: string; /** * `len` dictates that the field value must have the specified * number of characters (Unicode code points), which may differ from the number @@ -1950,9 +2198,7 @@ export interface StringRules { * } * ``` */ - len?: - | number - | undefined; + len: number; /** * `min_len` specifies that the field value must have at least the specified * number of characters (Unicode code points), which may differ from the number @@ -1966,9 +2212,7 @@ export interface StringRules { * } * ``` */ - minLen?: - | number - | undefined; + minLen: number; /** * `max_len` specifies that the field value must have no more than the specified * number of characters (Unicode code points), which may differ from the @@ -1982,9 +2226,7 @@ export interface StringRules { * } * ``` */ - maxLen?: - | number - | undefined; + maxLen: number; /** * `len_bytes` dictates that the field value must have the specified number of * bytes. If the field value does not match the specified length in bytes, @@ -1997,9 +2239,7 @@ export interface StringRules { * } * ``` */ - lenBytes?: - | number - | undefined; + lenBytes: number; /** * `min_bytes` specifies that the field value must have at least the specified * number of bytes. 
If the field value contains fewer bytes, an error message @@ -2013,9 +2253,7 @@ export interface StringRules { * * ``` */ - minBytes?: - | number - | undefined; + minBytes: number; /** * `max_bytes` specifies that the field value must have no more than the * specified number of bytes. If the field value contains more bytes, an @@ -2028,9 +2266,7 @@ export interface StringRules { * } * ``` */ - maxBytes?: - | number - | undefined; + maxBytes: number; /** * `pattern` specifies that the field value must match the specified * regular expression (RE2 syntax), with the expression provided without any @@ -2044,9 +2280,7 @@ export interface StringRules { * } * ``` */ - pattern?: - | string - | undefined; + pattern: string; /** * `prefix` specifies that the field value must have the * specified substring at the beginning of the string. If the field value @@ -2060,9 +2294,7 @@ export interface StringRules { * } * ``` */ - prefix?: - | string - | undefined; + prefix: string; /** * `suffix` specifies that the field value must have the * specified substring at the end of the string. If the field value doesn't @@ -2075,9 +2307,7 @@ export interface StringRules { * } * ``` */ - suffix?: - | string - | undefined; + suffix: string; /** * `contains` specifies that the field value must have the * specified substring anywhere in the string. If the field value doesn't @@ -2090,9 +2320,7 @@ export interface StringRules { * } * ``` */ - contains?: - | string - | undefined; + contains: string; /** * `not_contains` specifies that the field value must not have the * specified substring anywhere in the string. If the field value contains @@ -2105,9 +2333,7 @@ export interface StringRules { * } * ``` */ - notContains?: - | string - | undefined; + notContains: string; /** * `in` specifies that the field value must be equal to one of the specified * values. If the field value isn't one of the specified values, an error @@ -2116,7 +2342,7 @@ export interface StringRules { * ```proto * message MyString { * // value must be in list ["apple", "banana"] - * repeated string value = 1 [(buf.validate.field).string.in = "apple", (buf.validate.field).string.in = "banana"]; + * string value = 1 [(buf.validate.field).string.in = "apple", (buf.validate.field).string.in = "banana"]; * } * ``` */ @@ -2128,14 +2354,20 @@ export interface StringRules { * ```proto * message MyString { * // value must not be in list ["orange", "grape"] - * repeated string value = 1 [(buf.validate.field).string.not_in = "orange", (buf.validate.field).string.not_in = "grape"]; + * string value = 1 [(buf.validate.field).string.not_in = "orange", (buf.validate.field).string.not_in = "grape"]; * } * ``` */ notIn: string[]; /** - * `email` specifies that the field value must be a valid email address - * (addr-spec only) as defined by [RFC 5322](https://tools.ietf.org/html/rfc5322#section-3.4.1). + * `email` specifies that the field value must be a valid email address, for + * example "foo@example.com". + * + * Conforms to the definition for a valid email address from the [HTML standard](https://html.spec.whatwg.org/multipage/input.html#valid-e-mail-address). + * Note that this standard willfully deviates from [RFC 5322](https://datatracker.ietf.org/doc/html/rfc5322), + * which allows many unexpected forms of email addresses and will easily match + * a typographical error. + * * If the field value isn't a valid email address, an error message will be generated. 
* * ```proto @@ -2149,10 +2381,18 @@ export interface StringRules { | boolean | undefined; /** - * `hostname` specifies that the field value must be a valid - * hostname as defined by [RFC 1034](https://tools.ietf.org/html/rfc1034#section-3.5). This constraint doesn't support - * internationalized domain names (IDNs). If the field value isn't a - * valid hostname, an error message will be generated. + * `hostname` specifies that the field value must be a valid hostname, for + * example "foo.example.com". + * + * A valid hostname follows the rules below: + * - The name consists of one or more labels, separated by a dot ("."). + * - Each label can be 1 to 63 alphanumeric characters. + * - A label can contain hyphens ("-"), but must not start or end with a hyphen. + * - The right-most label must not be digits only. + * - The name can have a trailing dot—for example, "foo.example.com.". + * - The name can be 253 characters at most, excluding the optional trailing dot. + * + * If the field value isn't a valid hostname, an error message will be generated. * * ```proto * message MyString { @@ -2165,8 +2405,15 @@ export interface StringRules { | boolean | undefined; /** - * `ip` specifies that the field value must be a valid IP - * (v4 or v6) address, without surrounding square brackets for IPv6 addresses. + * `ip` specifies that the field value must be a valid IP (v4 or v6) address. + * + * IPv4 addresses are expected in the dotted decimal format—for example, "192.168.5.21". + * IPv6 addresses are expected in their text representation—for example, "::1", + * or "2001:0DB8:ABCD:0012::0". + * + * Both formats are well-defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). + * Zone identifiers for IPv6 addresses (for example, "fe80::a%en1") are supported. + * * If the field value isn't a valid IP address, an error message will be * generated. * @@ -2181,9 +2428,9 @@ export interface StringRules { | boolean | undefined; /** - * `ipv4` specifies that the field value must be a valid IPv4 - * address. If the field value isn't a valid IPv4 address, an error message - * will be generated. + * `ipv4` specifies that the field value must be a valid IPv4 address—for + * example "192.168.5.21". If the field value isn't a valid IPv4 address, an + * error message will be generated. * * ```proto * message MyString { @@ -2196,9 +2443,9 @@ export interface StringRules { | boolean | undefined; /** - * `ipv6` specifies that the field value must be a valid - * IPv6 address, without surrounding square brackets. If the field value is - * not a valid IPv6 address, an error message will be generated. + * `ipv6` specifies that the field value must be a valid IPv6 address—for + * example "::1", or "d7a:115c:a1e0:ab12:4843:cd96:626b:430b". If the field + * value is not a valid IPv6 address, an error message will be generated. * * ```proto * message MyString { @@ -2211,9 +2458,13 @@ export interface StringRules { | boolean | undefined; /** - * `uri` specifies that the field value must be a valid, - * absolute URI as defined by [RFC 3986](https://tools.ietf.org/html/rfc3986#section-3). If the field value isn't a valid, - * absolute URI, an error message will be generated. + * `uri` specifies that the field value must be a valid URI, for example + * "https://example.com/foo/bar?baz=quux#frag". + * + * URI is defined in the internet standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). 
+ * Zone Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + * + * If the field value isn't a valid URI, an error message will be generated. * * ```proto * message MyString { @@ -2226,13 +2477,20 @@ export interface StringRules { | boolean | undefined; /** - * `uri_ref` specifies that the field value must be a valid URI - * as defined by [RFC 3986](https://tools.ietf.org/html/rfc3986#section-3) and may be either relative or absolute. If the - * field value isn't a valid URI, an error message will be generated. + * `uri_ref` specifies that the field value must be a valid URI Reference—either + * a URI such as "https://example.com/foo/bar?baz=quux#frag", or a Relative + * Reference such as "./foo/bar?query". + * + * URI, URI Reference, and Relative Reference are defined in the internet + * standard [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986). Zone + * Identifiers in IPv6 address literals are supported ([RFC 6874](https://datatracker.ietf.org/doc/html/rfc6874)). + * + * If the field value isn't a valid URI Reference, an error message will be + * generated. * * ```proto * message MyString { - * // value must be a valid URI + * // value must be a valid URI Reference * string value = 1 [(buf.validate.field).string.uri_ref = true]; * } * ``` @@ -2242,10 +2500,9 @@ export interface StringRules { | undefined; /** * `address` specifies that the field value must be either a valid hostname - * as defined by [RFC 1034](https://tools.ietf.org/html/rfc1034#section-3.5) - * (which doesn't support internationalized domain names or IDNs) or a valid - * IP (v4 or v6). If the field value isn't a valid hostname or IP, an error - * message will be generated. + * (for example, "example.com"), or a valid IP (v4 or v6) address (for example, + * "192.168.0.1", or "::1"). If the field value isn't a valid hostname or IP, + * an error message will be generated. * * ```proto * message MyString { @@ -2259,7 +2516,7 @@ export interface StringRules { | undefined; /** * `uuid` specifies that the field value must be a valid UUID as defined by - * [RFC 4122](https://tools.ietf.org/html/rfc4122#section-4.1.2). If the + * [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). If the * field value isn't a valid UUID, an error message will be generated. * * ```proto @@ -2274,7 +2531,7 @@ export interface StringRules { | undefined; /** * `tuuid` (trimmed UUID) specifies that the field value must be a valid UUID as - * defined by [RFC 4122](https://tools.ietf.org/html/rfc4122#section-4.1.2) with all dashes + * defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2) with all dashes * omitted. If the field value isn't a valid UUID without dashes, an error message * will be generated. * @@ -2289,9 +2546,10 @@ export interface StringRules { | boolean | undefined; /** - * `ip_with_prefixlen` specifies that the field value must be a valid IP (v4 or v6) - * address with prefix length. If the field value isn't a valid IP with prefix - * length, an error message will be generated. + * `ip_with_prefixlen` specifies that the field value must be a valid IP + * (v4 or v6) address with prefix length—for example, "192.168.5.21/16" or + * "2001:0DB8:ABCD:0012::F1/64". If the field value isn't a valid IP with + * prefix length, an error message will be generated. 
* * ```proto * message MyString { @@ -2305,9 +2563,9 @@ export interface StringRules { | undefined; /** * `ipv4_with_prefixlen` specifies that the field value must be a valid - * IPv4 address with prefix. - * If the field value isn't a valid IPv4 address with prefix length, - * an error message will be generated. + * IPv4 address with prefix length—for example, "192.168.5.21/16". If the + * field value isn't a valid IPv4 address with prefix length, an error + * message will be generated. * * ```proto * message MyString { @@ -2321,7 +2579,7 @@ export interface StringRules { | undefined; /** * `ipv6_with_prefixlen` specifies that the field value must be a valid - * IPv6 address with prefix length. + * IPv6 address with prefix length—for example, "2001:0DB8:ABCD:0012::F1/64". * If the field value is not a valid IPv6 address with prefix length, * an error message will be generated. * @@ -2336,10 +2594,15 @@ export interface StringRules { | boolean | undefined; /** - * `ip_prefix` specifies that the field value must be a valid IP (v4 or v6) prefix. + * `ip_prefix` specifies that the field value must be a valid IP (v4 or v6) + * prefix—for example, "192.168.0.0/16" or "2001:0DB8:ABCD:0012::0/64". + * + * The prefix must have all zeros for the unmasked bits. For example, + * "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + * prefix, and the remaining 64 bits must be zero. + * * If the field value isn't a valid IP prefix, an error message will be - * generated. The prefix must have all zeros for the masked bits of the prefix (e.g., - * `127.0.0.0/16`, not `127.0.0.1/16`). + * generated. * * ```proto * message MyString { @@ -2353,9 +2616,14 @@ export interface StringRules { | undefined; /** * `ipv4_prefix` specifies that the field value must be a valid IPv4 - * prefix. If the field value isn't a valid IPv4 prefix, an error message - * will be generated. The prefix must have all zeros for the masked bits of - * the prefix (e.g., `127.0.0.0/16`, not `127.0.0.1/16`). + * prefix, for example "192.168.0.0/16". + * + * The prefix must have all zeros for the unmasked bits. For example, + * "192.168.0.0/16" designates the left-most 16 bits for the prefix, + * and the remaining 16 bits must be zero. + * + * If the field value isn't a valid IPv4 prefix, an error message + * will be generated. * * ```proto * message MyString { @@ -2368,10 +2636,15 @@ export interface StringRules { | boolean | undefined; /** - * `ipv6_prefix` specifies that the field value must be a valid IPv6 prefix. + * `ipv6_prefix` specifies that the field value must be a valid IPv6 prefix—for + * example, "2001:0DB8:ABCD:0012::0/64". + * + * The prefix must have all zeros for the unmasked bits. For example, + * "2001:0DB8:ABCD:0012::0/64" designates the left-most 64 bits for the + * prefix, and the remaining 64 bits must be zero. + * * If the field value is not a valid IPv6 prefix, an error message will be - * generated. The prefix must have all zeros for the masked bits of the prefix - * (e.g., `2001:db8::/48`, not `2001:db8::1/48`). + * generated. * * ```proto * message MyString { @@ -2384,14 +2657,35 @@ export interface StringRules { | boolean | undefined; /** - * `host_and_port` specifies the field value must be a valid host and port - * pair. The host must be a valid hostname or IP address while the port - * must be in the range of 0-65535, inclusive. IPv6 addresses must be delimited - * with square brackets (e.g., `[::1]:1234`). 
+ * `host_and_port` specifies that the field value must be valid host/port + * pair—for example, "example.com:8080". + * + * The host can be one of: + * - An IPv4 address in dotted decimal format—for example, "192.168.5.21". + * - An IPv6 address enclosed in square brackets—for example, "[2001:0DB8:ABCD:0012::F1]". + * - A hostname—for example, "example.com". + * + * The port is separated by a colon. It must be non-empty, with a decimal number + * in the range of 0-65535, inclusive. */ hostAndPort?: | boolean | undefined; + /** + * `ulid` specifies that the field value must be a valid ULID (Universally Unique + * Lexicographically Sortable Identifier) as defined by the [ULID specification](https://github.com/ulid/spec). + * If the field value isn't a valid ULID, an error message will be generated. + * + * ```proto + * message MyString { + * // value must be a valid ULID + * string value = 1 [(buf.validate.field).string.ulid = true]; + * } + * ``` + */ + ulid?: + | boolean + | undefined; /** * `well_known_regex` specifies a common well-known pattern * defined as a regex. If the field value doesn't match the well-known @@ -2411,8 +2705,8 @@ export interface StringRules { * | Name | Number | Description | * |-------------------------------|--------|-------------------------------------------| * | KNOWN_REGEX_UNSPECIFIED | 0 | | - * | KNOWN_REGEX_HTTP_HEADER_NAME | 1 | HTTP header name as defined by [RFC 7230](https://tools.ietf.org/html/rfc7230#section-3.2) | - * | KNOWN_REGEX_HTTP_HEADER_VALUE | 2 | HTTP header value as defined by [RFC 7230](https://tools.ietf.org/html/rfc7230#section-3.2.4) | + * | KNOWN_REGEX_HTTP_HEADER_NAME | 1 | HTTP header name as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2) | + * | KNOWN_REGEX_HTTP_HEADER_VALUE | 2 | HTTP header value as defined by [RFC 7230](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.4) | */ wellKnownRegex?: | KnownRegex @@ -2420,7 +2714,7 @@ export interface StringRules { /** * This applies to regexes `HTTP_HEADER_NAME` and `HTTP_HEADER_VALUE` to * enable strict header validation. By default, this is true, and HTTP header - * validations are [RFC-compliant](https://tools.ietf.org/html/rfc7230#section-3). Setting to false will enable looser + * validations are [RFC-compliant](https://datatracker.ietf.org/doc/html/rfc7230#section-3). Setting to false will enable looser * validations that only disallow `\r\n\0` characters, which can be used to * bypass header matching rules. * @@ -2431,11 +2725,26 @@ export interface StringRules { * } * ``` */ - strict?: boolean | undefined; + strict: boolean; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyString { + * string value = 1 [ + * (buf.validate.field).string.example = "hello", + * (buf.validate.field).string.example = "world" + * ]; + * } + * ``` + */ + example: string[]; } /** - * BytesRules describe the constraints applied to `bytes` values. These rules + * BytesRules describe the rules applied to `bytes` values. These rules * may also be applied to the `google.protobuf.BytesValue` Well-Known-Type. */ export interface BytesRules { @@ -2450,9 +2759,7 @@ export interface BytesRules { * } * ``` */ - const?: - | Uint8Array - | undefined; + const: Uint8Array; /** * `len` requires the field value to have the specified length in bytes. 
* If the field value doesn't match, an error message is generated. @@ -2464,9 +2771,7 @@ export interface BytesRules { * } * ``` */ - len?: - | number - | undefined; + len: number; /** * `min_len` requires the field value to have at least the specified minimum * length in bytes. @@ -2479,9 +2784,7 @@ export interface BytesRules { * } * ``` */ - minLen?: - | number - | undefined; + minLen: number; /** * `max_len` requires the field value to have at most the specified maximum * length in bytes. @@ -2494,9 +2797,7 @@ export interface BytesRules { * } * ``` */ - maxLen?: - | number - | undefined; + maxLen: number; /** * `pattern` requires the field value to match the specified regular * expression ([RE2 syntax](https://github.com/google/re2/wiki/Syntax)). @@ -2511,9 +2812,7 @@ export interface BytesRules { * } * ``` */ - pattern?: - | string - | undefined; + pattern: string; /** * `prefix` requires the field value to have the specified bytes at the * beginning of the string. @@ -2526,9 +2825,7 @@ export interface BytesRules { * } * ``` */ - prefix?: - | Uint8Array - | undefined; + prefix: Uint8Array; /** * `suffix` requires the field value to have the specified bytes at the end * of the string. @@ -2541,30 +2838,26 @@ export interface BytesRules { * } * ``` */ - suffix?: - | Uint8Array - | undefined; + suffix: Uint8Array; /** * `contains` requires the field value to have the specified bytes anywhere in * the string. * If the field value doesn't meet the requirement, an error message is generated. * - * ```protobuf + * ```proto * message MyBytes { * // value does not contain \x02\x03 * optional bytes value = 1 [(buf.validate.field).bytes.contains = "\x02\x03"]; * } * ``` */ - contains?: - | Uint8Array - | undefined; + contains: Uint8Array; /** * `in` requires the field value to be equal to one of the specified * values. If the field value doesn't match any of the specified values, an * error message is generated. * - * ```protobuf + * ```proto * message MyBytes { * // value must in ["\x01\x02", "\x02\x03", "\x03\x04"] * optional bytes value = 1 [(buf.validate.field).bytes.in = {"\x01\x02", "\x02\x03", "\x03\x04"}]; @@ -2588,7 +2881,7 @@ export interface BytesRules { notIn: Uint8Array[]; /** * `ip` ensures that the field `value` is a valid IP address (v4 or v6) in byte format. - * If the field value doesn't meet this constraint, an error message is generated. + * If the field value doesn't meet this rule, an error message is generated. * * ```proto * message MyBytes { @@ -2602,7 +2895,7 @@ export interface BytesRules { | undefined; /** * `ipv4` ensures that the field `value` is a valid IPv4 address in byte format. - * If the field value doesn't meet this constraint, an error message is generated. + * If the field value doesn't meet this rule, an error message is generated. * * ```proto * message MyBytes { @@ -2616,7 +2909,7 @@ export interface BytesRules { | undefined; /** * `ipv6` ensures that the field `value` is a valid IPv6 address in byte format. - * If the field value doesn't meet this constraint, an error message is generated. + * If the field value doesn't meet this rule, an error message is generated. * ```proto * message MyBytes { * // value must be a valid IPv6 address @@ -2624,10 +2917,44 @@ export interface BytesRules { * } * ``` */ - ipv6?: boolean | undefined; + ipv6?: + | boolean + | undefined; + /** + * `uuid` ensures that the field `value` encodes the 128-bit UUID data as + * defined by [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.2). 
+ * The field must contain exactly 16 bytes + * representing the UUID. If the field value isn't a valid UUID, an error + * message will be generated. + * + * ```proto + * message MyBytes { + * // value must be a valid UUID + * optional bytes value = 1 [(buf.validate.field).bytes.uuid = true]; + * } + * ``` + */ + uuid?: + | boolean + | undefined; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyBytes { + * bytes value = 1 [ + * (buf.validate.field).bytes.example = "\x01\x02", + * (buf.validate.field).bytes.example = "\x02\x03" + * ]; + * } + * ``` + */ + example: Uint8Array[]; } -/** EnumRules describe the constraints applied to `enum` values. */ +/** EnumRules describe the rules applied to `enum` values. */ export interface EnumRules { /** * `const` requires the field value to exactly match the specified enum value. @@ -2646,9 +2973,7 @@ export interface EnumRules { * } * ``` */ - const?: - | number - | undefined; + const: number; /** * `defined_only` requires the field value to be one of the defined values for * this enum, failing on any undefined value. @@ -2666,9 +2991,7 @@ export interface EnumRules { * } * ``` */ - definedOnly?: - | boolean - | undefined; + definedOnly: boolean; /** * `in` requires the field value to be equal to one of the * specified enum values. If the field value doesn't match any of the @@ -2707,9 +3030,28 @@ export interface EnumRules { * ``` */ notIn: number[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * enum MyEnum { + * MY_ENUM_UNSPECIFIED = 0; + * MY_ENUM_VALUE1 = 1; + * MY_ENUM_VALUE2 = 2; + * } + * + * message MyMessage { + * (buf.validate.field).enum.example = 1, + * (buf.validate.field).enum.example = 2 + * } + * ``` + */ + example: number[]; } -/** RepeatedRules describe the constraints applied to `repeated` values. */ +/** RepeatedRules describe the rules applied to `repeated` values. */ export interface RepeatedRules { /** * `min_items` requires that this field must contain at least the specified @@ -2724,9 +3066,7 @@ export interface RepeatedRules { * } * ``` */ - minItems?: - | number - | undefined; + minItems: number; /** * `max_items` denotes that this field must not exceed a * certain number of items as the upper limit. If the field contains more @@ -2740,12 +3080,10 @@ export interface RepeatedRules { * } * ``` */ - maxItems?: - | number - | undefined; + maxItems: number; /** * `unique` indicates that all elements in this field must - * be unique. This constraint is strictly applicable to scalar and enum + * be unique. This rule is strictly applicable to scalar and enum * types, with message types not being supported. * * ```proto @@ -2755,17 +3093,15 @@ export interface RepeatedRules { * } * ``` */ - unique?: - | boolean - | undefined; + unique: boolean; /** - * `items` details the constraints to be applied to each item + * `items` details the rules to be applied to each item * in the field. Even for repeated message fields, validation is executed - * against each item unless skip is explicitly specified. + * against each item unless `ignore` is specified. 
* * ```proto * message MyRepeated { - * // The items in the field `value` must follow the specified constraints. + * // The items in the field `value` must follow the specified rules. * repeated string value = 1 [(buf.validate.field).repeated.items = { * string: { * min_len: 3 @@ -2774,11 +3110,14 @@ export interface RepeatedRules { * }]; * } * ``` + * + * Note that the `required` rule does not apply. Repeated items + * cannot be unset. */ - items?: FieldConstraints | undefined; + items?: FieldRules; } -/** MapRules describe the constraints applied to `map` values. */ +/** MapRules describe the rules applied to `map` values. */ export interface MapRules { /** * Specifies the minimum number of key-value pairs allowed. If the field has @@ -2791,9 +3130,7 @@ export interface MapRules { * } * ``` */ - minPairs?: - | number - | undefined; + minPairs: number; /** * Specifies the maximum number of key-value pairs allowed. If the field has * more key-value pairs than specified, an error message is generated. @@ -2805,15 +3142,13 @@ export interface MapRules { * } * ``` */ - maxPairs?: - | number - | undefined; + maxPairs: number; /** - * Specifies the constraints to be applied to each key in the field. + * Specifies the rules to be applied to each key in the field. * * ```proto * message MyMap { - * // The keys in the field `value` must follow the specified constraints. + * // The keys in the field `value` must follow the specified rules. * map value = 1 [(buf.validate.field).map.keys = { * string: { * min_len: 3 @@ -2822,18 +3157,18 @@ export interface MapRules { * }]; * } * ``` + * + * Note that the `required` rule does not apply. Map keys cannot be unset. */ - keys?: - | FieldConstraints - | undefined; + keys?: FieldRules; /** - * Specifies the constraints to be applied to the value of each key in the + * Specifies the rules to be applied to the value of each key in the * field. Message values will still have their validations evaluated unless - * skip is specified here. + * `ignore` is specified. * * ```proto * message MyMap { - * // The values in the field `value` must follow the specified constraints. + * // The values in the field `value` must follow the specified rules. * map value = 1 [(buf.validate.field).map.values = { * string: { * min_len: 5 @@ -2842,11 +3177,12 @@ export interface MapRules { * }]; * } * ``` + * Note that the `required` rule does not apply. Map values cannot be unset. */ - values?: FieldConstraints | undefined; + values?: FieldRules; } -/** AnyRules describe constraints applied exclusively to the `google.protobuf.Any` well-known type. */ +/** AnyRules describe rules applied exclusively to the `google.protobuf.Any` well-known type. */ export interface AnyRules { /** * `in` requires the field's `type_url` to be equal to one of the @@ -2856,7 +3192,9 @@ export interface AnyRules { * ```proto * message MyAny { * // The `value` field must have a `type_url` equal to one of the specified values. - * google.protobuf.Any value = 1 [(buf.validate.field).any.in = ["type.googleapis.com/MyType1", "type.googleapis.com/MyType2"]]; + * google.protobuf.Any value = 1 [(buf.validate.field).any = { + * in: ["type.googleapis.com/MyType1", "type.googleapis.com/MyType2"] + * }]; * } * ``` */ @@ -2866,15 +3204,17 @@ export interface AnyRules { * * ```proto * message MyAny { - * // The field `value` must not have a `type_url` equal to any of the specified values. 
- * google.protobuf.Any value = 1 [(buf.validate.field).any.not_in = ["type.googleapis.com/ForbiddenType1", "type.googleapis.com/ForbiddenType2"]]; + * // The `value` field must not have a `type_url` equal to any of the specified values. + * google.protobuf.Any value = 1 [(buf.validate.field).any = { + * not_in: ["type.googleapis.com/ForbiddenType1", "type.googleapis.com/ForbiddenType2"] + * }]; * } * ``` */ notIn: string[]; } -/** DurationRules describe the constraints applied exclusively to the `google.protobuf.Duration` well-known type. */ +/** DurationRules describe the rules applied exclusively to the `google.protobuf.Duration` well-known type. */ export interface DurationRules { /** * `const` dictates that the field must match the specified value of the `google.protobuf.Duration` type exactly. @@ -2888,9 +3228,7 @@ export interface DurationRules { * } * ``` */ - const?: - | Duration - | undefined; + const?: Duration; /** * `lt` stipulates that the field must be less than the specified value of the `google.protobuf.Duration` type, * exclusive. If the field's value is greater than or equal to the specified @@ -2994,9 +3332,92 @@ export interface DurationRules { * ``` */ notIn: Duration[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyDuration { + * google.protobuf.Duration value = 1 [ + * (buf.validate.field).duration.example = { seconds: 1 }, + * (buf.validate.field).duration.example = { seconds: 2 }, + * ]; + * } + * ``` + */ + example: Duration[]; +} + +/** FieldMaskRules describe rules applied exclusively to the `google.protobuf.FieldMask` well-known type. */ +export interface FieldMaskRules { + /** + * `const` dictates that the field must match the specified value of the `google.protobuf.FieldMask` type exactly. + * If the field's value deviates from the specified value, an error message + * will be generated. + * + * ```proto + * message MyFieldMask { + * // value must equal ["a"] + * google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask.const = { + * paths: ["a"] + * }]; + * } + * ``` + */ + const?: string[]; + /** + * `in` requires the field value to only contain paths matching specified + * values or their subpaths. + * If any of the field value's paths doesn't match the rule, + * an error message is generated. + * See: https://protobuf.dev/reference/protobuf/google.protobuf/#field-mask + * + * ```proto + * message MyFieldMask { + * // The `value` FieldMask must only contain paths listed in `in`. + * google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask = { + * in: ["a", "b", "c.a"] + * }]; + * } + * ``` + */ + in: string[]; + /** + * `not_in` requires the field value to not contain paths matching specified + * values or their subpaths. + * If any of the field value's paths matches the rule, + * an error message is generated. + * See: https://protobuf.dev/reference/protobuf/google.protobuf/#field-mask + * + * ```proto + * message MyFieldMask { + * // The `value` FieldMask shall not contain paths listed in `not_in`. + * google.protobuf.FieldMask value = 1 [(buf.validate.field).field_mask = { + * not_in: ["forbidden", "immutable", "c.a"] + * }]; + * } + * ``` + */ + notIn: string[]; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. 
`example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyFieldMask { + * google.protobuf.FieldMask value = 1 [ + * (buf.validate.field).field_mask.example = { paths: ["a", "b"] }, + * (buf.validate.field).field_mask.example = { paths: ["c.a", "d"] }, + * ]; + * } + * ``` + */ + example: string[][]; } -/** TimestampRules describe the constraints applied exclusively to the `google.protobuf.Timestamp` well-known type. */ +/** TimestampRules describe the rules applied exclusively to the `google.protobuf.Timestamp` well-known type. */ export interface TimestampRules { /** * `const` dictates that this field, of the `google.protobuf.Timestamp` type, must exactly match the specified value. If the field value doesn't correspond to the specified timestamp, an error message will be generated. @@ -3008,9 +3429,7 @@ export interface TimestampRules { * } * ``` */ - const?: - | Date - | undefined; + const?: Date; /** * requires the duration field value to be less than the specified value (field < value). If the field value doesn't meet the required conditions, an error message is generated. * @@ -3119,44 +3538,346 @@ export interface TimestampRules { * } * ``` */ - within?: Duration | undefined; + within?: Duration; + /** + * `example` specifies values that the field may have. These values SHOULD + * conform to other rules. `example` values will not impact validation + * but may be used as helpful guidance on how to populate the given field. + * + * ```proto + * message MyTimestamp { + * google.protobuf.Timestamp value = 1 [ + * (buf.validate.field).timestamp.example = { seconds: 1672444800 }, + * (buf.validate.field).timestamp.example = { seconds: 1672531200 }, + * ]; + * } + * ``` + */ + example: Date[]; +} + +/** + * `Violations` is a collection of `Violation` messages. This message type is returned by + * Protovalidate when a proto message fails to meet the requirements set by the `Rule` validation rules. + * Each individual violation is represented by a `Violation` message. + */ +export interface Violations { + /** `violations` is a repeated field that contains all the `Violation` messages corresponding to the violations detected. */ + violations: Violation[]; +} + +/** + * `Violation` represents a single instance where a validation rule, expressed + * as a `Rule`, was not met. It provides information about the field that + * caused the violation, the specific rule that wasn't fulfilled, and a + * human-readable error message. + * + * For example, consider the following message: + * + * ```proto + * message User { + * int32 age = 1 [(buf.validate.field).cel = { + * id: "user.age", + * expression: "this < 18 ? 'User must be at least 18 years old' : ''", + * }]; + * } + * ``` + * + * It could produce the following violation: + * + * ```json + * { + * "ruleId": "user.age", + * "message": "User must be at least 18 years old", + * "field": { + * "elements": [ + * { + * "fieldNumber": 1, + * "fieldName": "age", + * "fieldType": "TYPE_INT32" + * } + * ] + * }, + * "rule": { + * "elements": [ + * { + * "fieldNumber": 23, + * "fieldName": "cel", + * "fieldType": "TYPE_MESSAGE", + * "index": "0" + * } + * ] + * } + * } + * ``` + */ +export interface Violation { + /** + * `field` is a machine-readable path to the field that failed validation. + * This could be a nested field, in which case the path will include all the parent fields leading to the actual field that caused the violation. 
+ * + * For example, consider the following message: + * + * ```proto + * message Message { + * bool a = 1 [(buf.validate.field).required = true]; + * } + * ``` + * + * It could produce the following violation: + * + * ```textproto + * violation { + * field { element { field_number: 1, field_name: "a", field_type: 8 } } + * ... + * } + * ``` + */ + field?: FieldPath; + /** + * `rule` is a machine-readable path that points to the specific rule that failed validation. + * This will be a nested field starting from the FieldRules of the field that failed validation. + * For custom rules, this will provide the path of the rule, e.g. `cel[0]`. + * + * For example, consider the following message: + * + * ```proto + * message Message { + * bool a = 1 [(buf.validate.field).required = true]; + * bool b = 2 [(buf.validate.field).cel = { + * id: "custom_rule", + * expression: "!this ? 'b must be true': ''" + * }] + * } + * ``` + * + * It could produce the following violations: + * + * ```textproto + * violation { + * rule { element { field_number: 25, field_name: "required", field_type: 8 } } + * ... + * } + * violation { + * rule { element { field_number: 23, field_name: "cel", field_type: 11, index: 0 } } + * ... + * } + * ``` + */ + rule?: FieldPath; + /** + * `rule_id` is the unique identifier of the `Rule` that was not fulfilled. + * This is the same `id` that was specified in the `Rule` message, allowing easy tracing of which rule was violated. + */ + ruleId: string; + /** + * `message` is a human-readable error message that describes the nature of the violation. + * This can be the default error message from the violated `Rule`, or it can be a custom message that gives more context about the violation. + */ + message: string; + /** `for_key` indicates whether the violation was caused by a map key, rather than a value. */ + forKey: boolean; +} + +/** + * `FieldPath` provides a path to a nested protobuf field. + * + * This message provides enough information to render a dotted field path even without protobuf descriptors. + * It also provides enough information to resolve a nested field through unknown wire data. + */ +export interface FieldPath { + /** `elements` contains each element of the path, starting from the root and recursing downward. */ + elements: FieldPathElement[]; +} + +/** + * `FieldPathElement` provides enough information to nest through a single protobuf field. + * + * If the selected field is a map or repeated field, the `subscript` value selects a specific element from it. + * A path that refers to a value nested under a map key or repeated field index will have a `subscript` value. + * The `field_type` field allows unambiguous resolution of a field even if descriptors are not available. + */ +export interface FieldPathElement { + /** `field_number` is the field number this path element refers to. */ + fieldNumber: number; + /** + * `field_name` contains the field name this path element refers to. + * This can be used to display a human-readable path even if the field number is unknown. + */ + fieldName: string; + /** + * `field_type` specifies the type of this field. When using reflection, this value is not needed. + * + * This value is provided to make it possible to traverse unknown fields through wire data. + * When traversing wire data, be mindful of both packed[1] and delimited[2] encoding schemes. 
+ * + * [1]: https://protobuf.dev/programming-guides/encoding/#packed + * [2]: https://protobuf.dev/programming-guides/encoding/#groups + * + * N.B.: Although groups are deprecated, the corresponding delimited encoding scheme is not, and + * can be explicitly used in Protocol Buffers 2023 Edition. + */ + fieldType: FieldDescriptorProto_Type; + /** + * `key_type` specifies the map key type of this field. This value is useful when traversing + * unknown fields through wire data: specifically, it allows handling the differences between + * different integer encodings. + */ + keyType: FieldDescriptorProto_Type; + /** + * `value_type` specifies map value type of this field. This is useful if you want to display a + * value inside unknown fields through wire data. + */ + valueType: FieldDescriptorProto_Type; + /** `index` specifies a 0-based index into a repeated field. */ + index?: + | number + | undefined; + /** `bool_key` specifies a map key of type bool. */ + boolKey?: + | boolean + | undefined; + /** `int_key` specifies a map key of type int32, int64, sint32, sint64, sfixed32 or sfixed64. */ + intKey?: + | number + | undefined; + /** `uint_key` specifies a map key of type uint32, uint64, fixed32 or fixed64. */ + uintKey?: + | number + | undefined; + /** `string_key` specifies a map key of type string. */ + stringKey?: string | undefined; } -function createBaseMessageConstraints(): MessageConstraints { - return { disabled: undefined, cel: [] }; +function createBaseRule(): Rule { + return { id: "", message: "", expression: "" }; } -export const MessageConstraints = { - encode(message: MessageConstraints, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.disabled !== undefined) { - writer.uint32(8).bool(message.disabled); +export const Rule = { + encode(message: Rule, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); } - for (const v of message.cel) { - Constraint.encode(v!, writer.uint32(26).fork()).ldelim(); + if (message.message !== "") { + writer.uint32(18).string(message.message); + } + if (message.expression !== "") { + writer.uint32(26).string(message.expression); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): MessageConstraints { + decode(input: _m0.Reader | Uint8Array, length?: number): Rule { const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMessageConstraints(); + const message = createBaseRule(); while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - if (tag !== 8) { + if (tag !== 10) { break; } - message.disabled = reader.bool(); + message.id = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.message = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.expression = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Rule { + return { + id: isSet(object.id) ? String(object.id) : "", + message: isSet(object.message) ? String(object.message) : "", + expression: isSet(object.expression) ? 
String(object.expression) : "", + }; + }, + + toJSON(message: Rule): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.message !== undefined && (obj.message = message.message); + message.expression !== undefined && (obj.expression = message.expression); + return obj; + }, + + create, I>>(base?: I): Rule { + return Rule.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): Rule { + const message = createBaseRule(); + message.id = object.id ?? ""; + message.message = object.message ?? ""; + message.expression = object.expression ?? ""; + return message; + }, +}; + +function createBaseMessageRules(): MessageRules { + return { celExpression: [], cel: [], oneof: [] }; +} + +export const MessageRules = { + encode(message: MessageRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + for (const v of message.celExpression) { + writer.uint32(42).string(v!); + } + for (const v of message.cel) { + Rule.encode(v!, writer.uint32(26).fork()).ldelim(); + } + for (const v of message.oneof) { + MessageOneofRule.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MessageRules { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMessageRules(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 5: + if (tag !== 42) { + break; + } + + message.celExpression.push(reader.string()); continue; case 3: if (tag !== 26) { break; } - message.cel.push(Constraint.decode(reader, reader.uint32())); + message.cel.push(Rule.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.oneof.push(MessageOneofRule.decode(reader, reader.uint32())); continue; } if ((tag & 7) === 4 || tag === 0) { @@ -3167,52 +3888,138 @@ export const MessageConstraints = { return message; }, - fromJSON(object: any): MessageConstraints { + fromJSON(object: any): MessageRules { return { - disabled: isSet(object.disabled) ? Boolean(object.disabled) : undefined, - cel: Array.isArray(object?.cel) ? object.cel.map((e: any) => Constraint.fromJSON(e)) : [], + celExpression: Array.isArray(object?.celExpression) ? object.celExpression.map((e: any) => String(e)) : [], + cel: Array.isArray(object?.cel) ? object.cel.map((e: any) => Rule.fromJSON(e)) : [], + oneof: Array.isArray(object?.oneof) ? object.oneof.map((e: any) => MessageOneofRule.fromJSON(e)) : [], }; }, - toJSON(message: MessageConstraints): unknown { + toJSON(message: MessageRules): unknown { const obj: any = {}; - message.disabled !== undefined && (obj.disabled = message.disabled); + if (message.celExpression) { + obj.celExpression = message.celExpression.map((e) => e); + } else { + obj.celExpression = []; + } if (message.cel) { - obj.cel = message.cel.map((e) => e ? Constraint.toJSON(e) : undefined); + obj.cel = message.cel.map((e) => e ? Rule.toJSON(e) : undefined); } else { obj.cel = []; } + if (message.oneof) { + obj.oneof = message.oneof.map((e) => e ? MessageOneofRule.toJSON(e) : undefined); + } else { + obj.oneof = []; + } + return obj; + }, + + create, I>>(base?: I): MessageRules { + return MessageRules.fromPartial(base ?? 
{}); + }, + + fromPartial, I>>(object: I): MessageRules { + const message = createBaseMessageRules(); + message.celExpression = object.celExpression?.map((e) => e) || []; + message.cel = object.cel?.map((e) => Rule.fromPartial(e)) || []; + message.oneof = object.oneof?.map((e) => MessageOneofRule.fromPartial(e)) || []; + return message; + }, +}; + +function createBaseMessageOneofRule(): MessageOneofRule { + return { fields: [], required: false }; +} + +export const MessageOneofRule = { + encode(message: MessageOneofRule, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + for (const v of message.fields) { + writer.uint32(10).string(v!); + } + if (message.required === true) { + writer.uint32(16).bool(message.required); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MessageOneofRule { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMessageOneofRule(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.fields.push(reader.string()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.required = reader.bool(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MessageOneofRule { + return { + fields: Array.isArray(object?.fields) ? object.fields.map((e: any) => String(e)) : [], + required: isSet(object.required) ? Boolean(object.required) : false, + }; + }, + + toJSON(message: MessageOneofRule): unknown { + const obj: any = {}; + if (message.fields) { + obj.fields = message.fields.map((e) => e); + } else { + obj.fields = []; + } + message.required !== undefined && (obj.required = message.required); return obj; }, - create, I>>(base?: I): MessageConstraints { - return MessageConstraints.fromPartial(base ?? {}); + create, I>>(base?: I): MessageOneofRule { + return MessageOneofRule.fromPartial(base ?? {}); }, - fromPartial, I>>(object: I): MessageConstraints { - const message = createBaseMessageConstraints(); - message.disabled = object.disabled ?? undefined; - message.cel = object.cel?.map((e) => Constraint.fromPartial(e)) || []; + fromPartial, I>>(object: I): MessageOneofRule { + const message = createBaseMessageOneofRule(); + message.fields = object.fields?.map((e) => e) || []; + message.required = object.required ?? false; return message; }, }; -function createBaseOneofConstraints(): OneofConstraints { - return { required: undefined }; +function createBaseOneofRules(): OneofRules { + return { required: false }; } -export const OneofConstraints = { - encode(message: OneofConstraints, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.required !== undefined) { +export const OneofRules = { + encode(message: OneofRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.required === true) { writer.uint32(8).bool(message.required); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): OneofConstraints { + decode(input: _m0.Reader | Uint8Array, length?: number): OneofRules { const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); let end = length === undefined ? 
 reader.len : reader.pos + length;
-    const message = createBaseOneofConstraints();
+    const message = createBaseOneofRules();
     while (reader.pos < end) {
       const tag = reader.uint32();
       switch (tag >>> 3) {
@@ -3232,29 +4039,30 @@ export const OneofConstraints = {
     return message;
   },

-  fromJSON(object: any): OneofConstraints {
-    return { required: isSet(object.required) ? Boolean(object.required) : undefined };
+  fromJSON(object: any): OneofRules {
+    return { required: isSet(object.required) ? Boolean(object.required) : false };
   },

-  toJSON(message: OneofConstraints): unknown {
+  toJSON(message: OneofRules): unknown {
     const obj: any = {};
     message.required !== undefined && (obj.required = message.required);
     return obj;
   },

-  create<I extends Exact<DeepPartial<OneofConstraints>, I>>(base?: I): OneofConstraints {
-    return OneofConstraints.fromPartial(base ?? {});
+  create<I extends Exact<DeepPartial<OneofRules>, I>>(base?: I): OneofRules {
+    return OneofRules.fromPartial(base ?? {});
   },

-  fromPartial<I extends Exact<DeepPartial<OneofConstraints>, I>>(object: I): OneofConstraints {
-    const message = createBaseOneofConstraints();
-    message.required = object.required ?? undefined;
+  fromPartial<I extends Exact<DeepPartial<OneofRules>, I>>(object: I): OneofRules {
+    const message = createBaseOneofRules();
+    message.required = object.required ?? false;
     return message;
   },
 };

-function createBaseFieldConstraints(): FieldConstraints {
+function createBaseFieldRules(): FieldRules {
   return {
+    celExpression: [],
     cel: [],
     required: false,
     ignore: 0,
@@ -3278,16 +4086,18 @@ function createBaseFieldConstraints(): FieldConstraints {
     map: undefined,
     any: undefined,
     duration: undefined,
+    fieldMask: undefined,
     timestamp: undefined,
-    skipped: false,
-    ignoreEmpty: false,
   };
 }

-export const FieldConstraints = {
-  encode(message: FieldConstraints, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
+export const FieldRules = {
+  encode(message: FieldRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
+    for (const v of message.celExpression) {
+      writer.uint32(234).string(v!);
+    }
     for (const v of message.cel) {
-      Constraint.encode(v!, writer.uint32(186).fork()).ldelim();
+      Rule.encode(v!, writer.uint32(186).fork()).ldelim();
     }
     if (message.required === true) {
       writer.uint32(200).bool(message.required);
@@ -3355,31 +4165,35 @@ export const FieldConstraints = {
     if (message.duration !== undefined) {
       DurationRules.encode(message.duration, writer.uint32(170).fork()).ldelim();
     }
+    if (message.fieldMask !== undefined) {
+      FieldMaskRules.encode(message.fieldMask, writer.uint32(226).fork()).ldelim();
+    }
     if (message.timestamp !== undefined) {
       TimestampRules.encode(message.timestamp, writer.uint32(178).fork()).ldelim();
     }
-    if (message.skipped === true) {
-      writer.uint32(192).bool(message.skipped);
-    }
-    if (message.ignoreEmpty === true) {
-      writer.uint32(208).bool(message.ignoreEmpty);
-    }
     return writer;
   },

-  decode(input: _m0.Reader | Uint8Array, length?: number): FieldConstraints {
+  decode(input: _m0.Reader | Uint8Array, length?: number): FieldRules {
     const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input);
     let end = length === undefined ?
reader.len : reader.pos + length; - const message = createBaseFieldConstraints(); + const message = createBaseFieldRules(); while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { + case 29: + if (tag !== 234) { + break; + } + + message.celExpression.push(reader.string()); + continue; case 23: if (tag !== 186) { break; } - message.cel.push(Constraint.decode(reader, reader.uint32())); + message.cel.push(Rule.decode(reader, reader.uint32())); continue; case 25: if (tag !== 200) { @@ -3535,26 +4349,19 @@ export const FieldConstraints = { message.duration = DurationRules.decode(reader, reader.uint32()); continue; - case 22: - if (tag !== 178) { - break; - } - - message.timestamp = TimestampRules.decode(reader, reader.uint32()); - continue; - case 24: - if (tag !== 192) { + case 28: + if (tag !== 226) { break; } - message.skipped = reader.bool(); + message.fieldMask = FieldMaskRules.decode(reader, reader.uint32()); continue; - case 26: - if (tag !== 208) { + case 22: + if (tag !== 178) { break; } - message.ignoreEmpty = reader.bool(); + message.timestamp = TimestampRules.decode(reader, reader.uint32()); continue; } if ((tag & 7) === 4 || tag === 0) { @@ -3565,9 +4372,10 @@ export const FieldConstraints = { return message; }, - fromJSON(object: any): FieldConstraints { + fromJSON(object: any): FieldRules { return { - cel: Array.isArray(object?.cel) ? object.cel.map((e: any) => Constraint.fromJSON(e)) : [], + celExpression: Array.isArray(object?.celExpression) ? object.celExpression.map((e: any) => String(e)) : [], + cel: Array.isArray(object?.cel) ? object.cel.map((e: any) => Rule.fromJSON(e)) : [], required: isSet(object.required) ? Boolean(object.required) : false, ignore: isSet(object.ignore) ? ignoreFromJSON(object.ignore) : 0, float: isSet(object.float) ? FloatRules.fromJSON(object.float) : undefined, @@ -3590,16 +4398,20 @@ export const FieldConstraints = { map: isSet(object.map) ? MapRules.fromJSON(object.map) : undefined, any: isSet(object.any) ? AnyRules.fromJSON(object.any) : undefined, duration: isSet(object.duration) ? DurationRules.fromJSON(object.duration) : undefined, + fieldMask: isSet(object.fieldMask) ? FieldMaskRules.fromJSON(object.fieldMask) : undefined, timestamp: isSet(object.timestamp) ? TimestampRules.fromJSON(object.timestamp) : undefined, - skipped: isSet(object.skipped) ? Boolean(object.skipped) : false, - ignoreEmpty: isSet(object.ignoreEmpty) ? Boolean(object.ignoreEmpty) : false, }; }, - toJSON(message: FieldConstraints): unknown { + toJSON(message: FieldRules): unknown { const obj: any = {}; + if (message.celExpression) { + obj.celExpression = message.celExpression.map((e) => e); + } else { + obj.celExpression = []; + } if (message.cel) { - obj.cel = message.cel.map((e) => e ? Constraint.toJSON(e) : undefined); + obj.cel = message.cel.map((e) => e ? Rule.toJSON(e) : undefined); } else { obj.cel = []; } @@ -3629,20 +4441,21 @@ export const FieldConstraints = { message.any !== undefined && (obj.any = message.any ? AnyRules.toJSON(message.any) : undefined); message.duration !== undefined && (obj.duration = message.duration ? DurationRules.toJSON(message.duration) : undefined); + message.fieldMask !== undefined && + (obj.fieldMask = message.fieldMask ? FieldMaskRules.toJSON(message.fieldMask) : undefined); message.timestamp !== undefined && (obj.timestamp = message.timestamp ? 
 TimestampRules.toJSON(message.timestamp) : undefined);
-    message.skipped !== undefined && (obj.skipped = message.skipped);
-    message.ignoreEmpty !== undefined && (obj.ignoreEmpty = message.ignoreEmpty);
     return obj;
   },

-  create<I extends Exact<DeepPartial<FieldConstraints>, I>>(base?: I): FieldConstraints {
-    return FieldConstraints.fromPartial(base ?? {});
+  create<I extends Exact<DeepPartial<FieldRules>, I>>(base?: I): FieldRules {
+    return FieldRules.fromPartial(base ?? {});
   },

-  fromPartial<I extends Exact<DeepPartial<FieldConstraints>, I>>(object: I): FieldConstraints {
-    const message = createBaseFieldConstraints();
-    message.cel = object.cel?.map((e) => Constraint.fromPartial(e)) || [];
+  fromPartial<I extends Exact<DeepPartial<FieldRules>, I>>(object: I): FieldRules {
+    const message = createBaseFieldRules();
+    message.celExpression = object.celExpression?.map((e) => e) || [];
+    message.cel = object.cel?.map((e) => Rule.fromPartial(e)) || [];
     message.required = object.required ?? false;
     message.ignore = object.ignore ?? 0;
     message.float = (object.float !== undefined && object.float !== null)
@@ -3697,18 +4510,79 @@ export const FieldConstraints = {
     message.duration = (object.duration !== undefined && object.duration !== null)
       ? DurationRules.fromPartial(object.duration)
       : undefined;
+    message.fieldMask = (object.fieldMask !== undefined && object.fieldMask !== null)
+      ? FieldMaskRules.fromPartial(object.fieldMask)
+      : undefined;
     message.timestamp = (object.timestamp !== undefined && object.timestamp !== null)
       ? TimestampRules.fromPartial(object.timestamp)
       : undefined;
-    message.skipped = object.skipped ?? false;
-    message.ignoreEmpty = object.ignoreEmpty ?? false;
+    return message;
+  },
+};
+
+function createBasePredefinedRules(): PredefinedRules {
+  return { cel: [] };
+}
+
+export const PredefinedRules = {
+  encode(message: PredefinedRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
+    for (const v of message.cel) {
+      Rule.encode(v!, writer.uint32(10).fork()).ldelim();
+    }
+    return writer;
+  },
+
+  decode(input: _m0.Reader | Uint8Array, length?: number): PredefinedRules {
+    const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input);
+    let end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBasePredefinedRules();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1:
+          if (tag !== 10) {
+            break;
+          }
+
+          message.cel.push(Rule.decode(reader, reader.uint32()));
+          continue;
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skipType(tag & 7);
+    }
+    return message;
+  },
+
+  fromJSON(object: any): PredefinedRules {
+    return { cel: Array.isArray(object?.cel) ? object.cel.map((e: any) => Rule.fromJSON(e)) : [] };
+  },
+
+  toJSON(message: PredefinedRules): unknown {
+    const obj: any = {};
+    if (message.cel) {
+      obj.cel = message.cel.map((e) => e ? Rule.toJSON(e) : undefined);
+    } else {
+      obj.cel = [];
+    }
+    return obj;
+  },
+
+  create<I extends Exact<DeepPartial<PredefinedRules>, I>>(base?: I): PredefinedRules {
+    return PredefinedRules.fromPartial(base ?? 
{}); + }, + + fromPartial, I>>(object: I): PredefinedRules { + const message = createBasePredefinedRules(); + message.cel = object.cel?.map((e) => Rule.fromPartial(e)) || []; return message; }, }; function createBaseFloatRules(): FloatRules { return { - const: undefined, + const: 0, lt: undefined, lte: undefined, gt: undefined, @@ -3716,12 +4590,13 @@ function createBaseFloatRules(): FloatRules { in: [], notIn: [], finite: false, + example: [], }; } export const FloatRules = { encode(message: FloatRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(13).float(message.const); } if (message.lt !== undefined) { @@ -3749,6 +4624,11 @@ export const FloatRules = { if (message.finite === true) { writer.uint32(64).bool(message.finite); } + writer.uint32(74).fork(); + for (const v of message.example) { + writer.float(v); + } + writer.ldelim(); return writer; }, @@ -3835,6 +4715,23 @@ export const FloatRules = { message.finite = reader.bool(); continue; + case 9: + if (tag === 77) { + message.example.push(reader.float()); + + continue; + } + + if (tag === 74) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.float()); + } + + continue; + } + + break; } if ((tag & 7) === 4 || tag === 0) { break; @@ -3846,7 +4743,7 @@ export const FloatRules = { fromJSON(object: any): FloatRules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, @@ -3854,6 +4751,7 @@ export const FloatRules = { in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], finite: isSet(object.finite) ? Boolean(object.finite) : false, + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -3875,6 +4773,11 @@ export const FloatRules = { obj.notIn = []; } message.finite !== undefined && (obj.finite = message.finite); + if (message.example) { + obj.example = message.example.map((e) => e); + } else { + obj.example = []; + } return obj; }, @@ -3884,7 +4787,7 @@ export const FloatRules = { fromPartial, I>>(object: I): FloatRules { const message = createBaseFloatRules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; @@ -3892,13 +4795,14 @@ export const FloatRules = { message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; message.finite = object.finite ?? 
false; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseDoubleRules(): DoubleRules { return { - const: undefined, + const: 0, lt: undefined, lte: undefined, gt: undefined, @@ -3906,12 +4810,13 @@ function createBaseDoubleRules(): DoubleRules { in: [], notIn: [], finite: false, + example: [], }; } export const DoubleRules = { encode(message: DoubleRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(9).double(message.const); } if (message.lt !== undefined) { @@ -3939,6 +4844,11 @@ export const DoubleRules = { if (message.finite === true) { writer.uint32(64).bool(message.finite); } + writer.uint32(74).fork(); + for (const v of message.example) { + writer.double(v); + } + writer.ldelim(); return writer; }, @@ -4025,6 +4935,23 @@ export const DoubleRules = { message.finite = reader.bool(); continue; + case 9: + if (tag === 73) { + message.example.push(reader.double()); + + continue; + } + + if (tag === 74) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.double()); + } + + continue; + } + + break; } if ((tag & 7) === 4 || tag === 0) { break; @@ -4036,7 +4963,7 @@ export const DoubleRules = { fromJSON(object: any): DoubleRules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, @@ -4044,6 +4971,7 @@ export const DoubleRules = { in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], finite: isSet(object.finite) ? Boolean(object.finite) : false, + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -4065,6 +4993,11 @@ export const DoubleRules = { obj.notIn = []; } message.finite !== undefined && (obj.finite = message.finite); + if (message.example) { + obj.example = message.example.map((e) => e); + } else { + obj.example = []; + } return obj; }, @@ -4074,7 +5007,7 @@ export const DoubleRules = { fromPartial, I>>(object: I): DoubleRules { const message = createBaseDoubleRules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; @@ -4082,17 +5015,18 @@ export const DoubleRules = { message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; message.finite = object.finite ?? 
false; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseInt32Rules(): Int32Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const Int32Rules = { encode(message: Int32Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(8).int32(message.const); } if (message.lt !== undefined) { @@ -4117,6 +5051,11 @@ export const Int32Rules = { writer.int32(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.int32(v); + } + writer.ldelim(); return writer; }, @@ -4195,6 +5134,23 @@ export const Int32Rules = { continue; } + break; + case 8: + if (tag === 64) { + message.example.push(reader.int32()); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.int32()); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -4207,13 +5163,14 @@ export const Int32Rules = { fromJSON(object: any): Int32Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -4234,6 +5191,11 @@ export const Int32Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -4243,24 +5205,25 @@ export const Int32Rules = { fromPartial, I>>(object: I): Int32Rules { const message = createBaseInt32Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseInt64Rules(): Int64Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const Int64Rules = { encode(message: Int64Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(8).int64(message.const); } if (message.lt !== undefined) { @@ -4285,8 +5248,13 @@ export const Int64Rules = { writer.int64(v); } writer.ldelim(); - return writer; - }, + writer.uint32(74).fork(); + for (const v of message.example) { + writer.int64(v); + } + writer.ldelim(); + return writer; + }, decode(input: _m0.Reader | Uint8Array, length?: number): Int64Rules { const reader = input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); @@ -4363,6 +5331,23 @@ export const Int64Rules = { continue; } + break; + case 9: + if (tag === 72) { + message.example.push(longToNumber(reader.int64() as Long)); + + continue; + } + + if (tag === 74) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(longToNumber(reader.int64() as Long)); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -4375,13 +5360,14 @@ export const Int64Rules = { fromJSON(object: any): Int64Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -4402,6 +5388,11 @@ export const Int64Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -4411,24 +5402,25 @@ export const Int64Rules = { fromPartial, I>>(object: I): Int64Rules { const message = createBaseInt64Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseUInt32Rules(): UInt32Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const UInt32Rules = { encode(message: UInt32Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(8).uint32(message.const); } if (message.lt !== undefined) { @@ -4453,6 +5445,11 @@ export const UInt32Rules = { writer.uint32(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.uint32(v); + } + writer.ldelim(); return writer; }, @@ -4531,6 +5528,23 @@ export const UInt32Rules = { continue; } + break; + case 8: + if (tag === 64) { + message.example.push(reader.uint32()); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.uint32()); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -4543,13 +5557,14 @@ export const UInt32Rules = { fromJSON(object: any): UInt32Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? 
object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -4570,6 +5585,11 @@ export const UInt32Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -4579,24 +5599,25 @@ export const UInt32Rules = { fromPartial, I>>(object: I): UInt32Rules { const message = createBaseUInt32Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseUInt64Rules(): UInt64Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const UInt64Rules = { encode(message: UInt64Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(8).uint64(message.const); } if (message.lt !== undefined) { @@ -4621,6 +5642,11 @@ export const UInt64Rules = { writer.uint64(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.uint64(v); + } + writer.ldelim(); return writer; }, @@ -4699,6 +5725,23 @@ export const UInt64Rules = { continue; } + break; + case 8: + if (tag === 64) { + message.example.push(longToNumber(reader.uint64() as Long)); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(longToNumber(reader.uint64() as Long)); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -4711,13 +5754,14 @@ export const UInt64Rules = { fromJSON(object: any): UInt64Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -4738,6 +5782,11 @@ export const UInt64Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -4747,24 +5796,25 @@ export const UInt64Rules = { fromPartial, I>>(object: I): UInt64Rules { const message = createBaseUInt64Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? 
undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseSInt32Rules(): SInt32Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const SInt32Rules = { encode(message: SInt32Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(8).sint32(message.const); } if (message.lt !== undefined) { @@ -4789,6 +5839,11 @@ export const SInt32Rules = { writer.sint32(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.sint32(v); + } + writer.ldelim(); return writer; }, @@ -4867,6 +5922,23 @@ export const SInt32Rules = { continue; } + break; + case 8: + if (tag === 64) { + message.example.push(reader.sint32()); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.sint32()); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -4879,13 +5951,14 @@ export const SInt32Rules = { fromJSON(object: any): SInt32Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -4906,6 +5979,11 @@ export const SInt32Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -4915,24 +5993,25 @@ export const SInt32Rules = { fromPartial, I>>(object: I): SInt32Rules { const message = createBaseSInt32Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? 
undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseSInt64Rules(): SInt64Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const SInt64Rules = { encode(message: SInt64Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(8).sint64(message.const); } if (message.lt !== undefined) { @@ -4957,6 +6036,11 @@ export const SInt64Rules = { writer.sint64(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.sint64(v); + } + writer.ldelim(); return writer; }, @@ -5035,6 +6119,23 @@ export const SInt64Rules = { continue; } + break; + case 8: + if (tag === 64) { + message.example.push(longToNumber(reader.sint64() as Long)); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(longToNumber(reader.sint64() as Long)); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -5047,13 +6148,14 @@ export const SInt64Rules = { fromJSON(object: any): SInt64Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -5074,6 +6176,11 @@ export const SInt64Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -5083,24 +6190,25 @@ export const SInt64Rules = { fromPartial, I>>(object: I): SInt64Rules { const message = createBaseSInt64Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? 
undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseFixed32Rules(): Fixed32Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const Fixed32Rules = { encode(message: Fixed32Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(13).fixed32(message.const); } if (message.lt !== undefined) { @@ -5125,6 +6233,11 @@ export const Fixed32Rules = { writer.fixed32(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.fixed32(v); + } + writer.ldelim(); return writer; }, @@ -5203,6 +6316,23 @@ export const Fixed32Rules = { continue; } + break; + case 8: + if (tag === 69) { + message.example.push(reader.fixed32()); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.fixed32()); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -5215,13 +6345,14 @@ export const Fixed32Rules = { fromJSON(object: any): Fixed32Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -5242,6 +6373,11 @@ export const Fixed32Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -5251,24 +6387,25 @@ export const Fixed32Rules = { fromPartial, I>>(object: I): Fixed32Rules { const message = createBaseFixed32Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? 
undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseFixed64Rules(): Fixed64Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const Fixed64Rules = { encode(message: Fixed64Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(9).fixed64(message.const); } if (message.lt !== undefined) { @@ -5293,6 +6430,11 @@ export const Fixed64Rules = { writer.fixed64(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.fixed64(v); + } + writer.ldelim(); return writer; }, @@ -5371,6 +6513,23 @@ export const Fixed64Rules = { continue; } + break; + case 8: + if (tag === 65) { + message.example.push(longToNumber(reader.fixed64() as Long)); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(longToNumber(reader.fixed64() as Long)); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -5383,13 +6542,14 @@ export const Fixed64Rules = { fromJSON(object: any): Fixed64Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -5410,6 +6570,11 @@ export const Fixed64Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -5419,24 +6584,25 @@ export const Fixed64Rules = { fromPartial, I>>(object: I): Fixed64Rules { const message = createBaseFixed64Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? 
undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseSFixed32Rules(): SFixed32Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const SFixed32Rules = { encode(message: SFixed32Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(13).sfixed32(message.const); } if (message.lt !== undefined) { @@ -5461,6 +6627,11 @@ export const SFixed32Rules = { writer.sfixed32(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.sfixed32(v); + } + writer.ldelim(); return writer; }, @@ -5539,6 +6710,23 @@ export const SFixed32Rules = { continue; } + break; + case 8: + if (tag === 69) { + message.example.push(reader.sfixed32()); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.sfixed32()); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -5551,13 +6739,14 @@ export const SFixed32Rules = { fromJSON(object: any): SFixed32Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -5578,6 +6767,11 @@ export const SFixed32Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -5587,24 +6781,25 @@ export const SFixed32Rules = { fromPartial, I>>(object: I): SFixed32Rules { const message = createBaseSFixed32Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? 
undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseSFixed64Rules(): SFixed64Rules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { const: 0, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [], example: [] }; } export const SFixed64Rules = { encode(message: SFixed64Rules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(9).sfixed64(message.const); } if (message.lt !== undefined) { @@ -5629,6 +6824,11 @@ export const SFixed64Rules = { writer.sfixed64(v); } writer.ldelim(); + writer.uint32(66).fork(); + for (const v of message.example) { + writer.sfixed64(v); + } + writer.ldelim(); return writer; }, @@ -5707,6 +6907,23 @@ export const SFixed64Rules = { continue; } + break; + case 8: + if (tag === 65) { + message.example.push(longToNumber(reader.sfixed64() as Long)); + + continue; + } + + if (tag === 66) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(longToNumber(reader.sfixed64() as Long)); + } + + continue; + } + break; } if ((tag & 7) === 4 || tag === 0) { @@ -5719,13 +6936,14 @@ export const SFixed64Rules = { fromJSON(object: any): SFixed64Rules { return { - const: isSet(object.const) ? Number(object.const) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, lt: isSet(object.lt) ? Number(object.lt) : undefined, lte: isSet(object.lte) ? Number(object.lte) : undefined, gt: isSet(object.gt) ? Number(object.gt) : undefined, gte: isSet(object.gte) ? Number(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -5746,6 +6964,11 @@ export const SFixed64Rules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -5755,26 +6978,32 @@ export const SFixed64Rules = { fromPartial, I>>(object: I): SFixed64Rules { const message = createBaseSFixed64Rules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 0; message.lt = object.lt ?? undefined; message.lte = object.lte ?? undefined; message.gt = object.gt ?? undefined; message.gte = object.gte ?? 
undefined; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseBoolRules(): BoolRules { - return { const: undefined }; + return { const: false, example: [] }; } export const BoolRules = { encode(message: BoolRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const === true) { writer.uint32(8).bool(message.const); } + writer.uint32(18).fork(); + for (const v of message.example) { + writer.bool(v); + } + writer.ldelim(); return writer; }, @@ -5792,6 +7021,23 @@ export const BoolRules = { message.const = reader.bool(); continue; + case 2: + if (tag === 16) { + message.example.push(reader.bool()); + + continue; + } + + if (tag === 18) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.bool()); + } + + continue; + } + + break; } if ((tag & 7) === 4 || tag === 0) { break; @@ -5802,12 +7048,20 @@ export const BoolRules = { }, fromJSON(object: any): BoolRules { - return { const: isSet(object.const) ? Boolean(object.const) : undefined }; + return { + const: isSet(object.const) ? Boolean(object.const) : false, + example: Array.isArray(object?.example) ? object.example.map((e: any) => Boolean(e)) : [], + }; }, toJSON(message: BoolRules): unknown { const obj: any = {}; message.const !== undefined && (obj.const = message.const); + if (message.example) { + obj.example = message.example.map((e) => e); + } else { + obj.example = []; + } return obj; }, @@ -5817,25 +7071,26 @@ export const BoolRules = { fromPartial, I>>(object: I): BoolRules { const message = createBaseBoolRules(); - message.const = object.const ?? undefined; + message.const = object.const ?? 
false; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseStringRules(): StringRules { return { - const: undefined, - len: undefined, - minLen: undefined, - maxLen: undefined, - lenBytes: undefined, - minBytes: undefined, - maxBytes: undefined, - pattern: undefined, - prefix: undefined, - suffix: undefined, - contains: undefined, - notContains: undefined, + const: "", + len: 0, + minLen: 0, + maxLen: 0, + lenBytes: 0, + minBytes: 0, + maxBytes: 0, + pattern: "", + prefix: "", + suffix: "", + contains: "", + notContains: "", in: [], notIn: [], email: undefined, @@ -5855,47 +7110,49 @@ function createBaseStringRules(): StringRules { ipv4Prefix: undefined, ipv6Prefix: undefined, hostAndPort: undefined, + ulid: undefined, wellKnownRegex: undefined, - strict: undefined, + strict: false, + example: [], }; } export const StringRules = { encode(message: StringRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== "") { writer.uint32(10).string(message.const); } - if (message.len !== undefined) { + if (message.len !== 0) { writer.uint32(152).uint64(message.len); } - if (message.minLen !== undefined) { + if (message.minLen !== 0) { writer.uint32(16).uint64(message.minLen); } - if (message.maxLen !== undefined) { + if (message.maxLen !== 0) { writer.uint32(24).uint64(message.maxLen); } - if (message.lenBytes !== undefined) { + if (message.lenBytes !== 0) { writer.uint32(160).uint64(message.lenBytes); } - if (message.minBytes !== undefined) { + if (message.minBytes !== 0) { writer.uint32(32).uint64(message.minBytes); } - if (message.maxBytes !== undefined) { + if (message.maxBytes !== 0) { writer.uint32(40).uint64(message.maxBytes); } - if (message.pattern !== undefined) { + if (message.pattern !== "") { writer.uint32(50).string(message.pattern); } - if (message.prefix !== undefined) { + if (message.prefix !== "") { writer.uint32(58).string(message.prefix); } - if (message.suffix !== undefined) { + if (message.suffix !== "") { writer.uint32(66).string(message.suffix); } - if (message.contains !== undefined) { + if (message.contains !== "") { writer.uint32(74).string(message.contains); } - if (message.notContains !== undefined) { + if (message.notContains !== "") { writer.uint32(186).string(message.notContains); } for (const v of message.in) { @@ -5955,12 +7212,18 @@ export const StringRules = { if (message.hostAndPort !== undefined) { writer.uint32(256).bool(message.hostAndPort); } + if (message.ulid !== undefined) { + writer.uint32(280).bool(message.ulid); + } if (message.wellKnownRegex !== undefined) { writer.uint32(192).int32(message.wellKnownRegex); } - if (message.strict !== undefined) { + if (message.strict === true) { writer.uint32(200).bool(message.strict); } + for (const v of message.example) { + writer.uint32(274).string(v!); + } return writer; }, @@ -6188,6 +7451,13 @@ export const StringRules = { message.hostAndPort = reader.bool(); continue; + case 35: + if (tag !== 280) { + break; + } + + message.ulid = reader.bool(); + continue; case 24: if (tag !== 192) { break; @@ -6202,6 +7472,13 @@ export const StringRules = { message.strict = reader.bool(); continue; + case 34: + if (tag !== 274) { + break; + } + + message.example.push(reader.string()); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -6213,18 +7490,18 @@ export const StringRules = { fromJSON(object: any): StringRules { return { - const: isSet(object.const) ? 
String(object.const) : undefined, - len: isSet(object.len) ? Number(object.len) : undefined, - minLen: isSet(object.minLen) ? Number(object.minLen) : undefined, - maxLen: isSet(object.maxLen) ? Number(object.maxLen) : undefined, - lenBytes: isSet(object.lenBytes) ? Number(object.lenBytes) : undefined, - minBytes: isSet(object.minBytes) ? Number(object.minBytes) : undefined, - maxBytes: isSet(object.maxBytes) ? Number(object.maxBytes) : undefined, - pattern: isSet(object.pattern) ? String(object.pattern) : undefined, - prefix: isSet(object.prefix) ? String(object.prefix) : undefined, - suffix: isSet(object.suffix) ? String(object.suffix) : undefined, - contains: isSet(object.contains) ? String(object.contains) : undefined, - notContains: isSet(object.notContains) ? String(object.notContains) : undefined, + const: isSet(object.const) ? String(object.const) : "", + len: isSet(object.len) ? Number(object.len) : 0, + minLen: isSet(object.minLen) ? Number(object.minLen) : 0, + maxLen: isSet(object.maxLen) ? Number(object.maxLen) : 0, + lenBytes: isSet(object.lenBytes) ? Number(object.lenBytes) : 0, + minBytes: isSet(object.minBytes) ? Number(object.minBytes) : 0, + maxBytes: isSet(object.maxBytes) ? Number(object.maxBytes) : 0, + pattern: isSet(object.pattern) ? String(object.pattern) : "", + prefix: isSet(object.prefix) ? String(object.prefix) : "", + suffix: isSet(object.suffix) ? String(object.suffix) : "", + contains: isSet(object.contains) ? String(object.contains) : "", + notContains: isSet(object.notContains) ? String(object.notContains) : "", in: Array.isArray(object?.in) ? object.in.map((e: any) => String(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => String(e)) : [], email: isSet(object.email) ? Boolean(object.email) : undefined, @@ -6244,8 +7521,10 @@ export const StringRules = { ipv4Prefix: isSet(object.ipv4Prefix) ? Boolean(object.ipv4Prefix) : undefined, ipv6Prefix: isSet(object.ipv6Prefix) ? Boolean(object.ipv6Prefix) : undefined, hostAndPort: isSet(object.hostAndPort) ? Boolean(object.hostAndPort) : undefined, + ulid: isSet(object.ulid) ? Boolean(object.ulid) : undefined, wellKnownRegex: isSet(object.wellKnownRegex) ? knownRegexFromJSON(object.wellKnownRegex) : undefined, - strict: isSet(object.strict) ? Boolean(object.strict) : undefined, + strict: isSet(object.strict) ? Boolean(object.strict) : false, + example: Array.isArray(object?.example) ? object.example.map((e: any) => String(e)) : [], }; }, @@ -6290,11 +7569,17 @@ export const StringRules = { message.ipv4Prefix !== undefined && (obj.ipv4Prefix = message.ipv4Prefix); message.ipv6Prefix !== undefined && (obj.ipv6Prefix = message.ipv6Prefix); message.hostAndPort !== undefined && (obj.hostAndPort = message.hostAndPort); + message.ulid !== undefined && (obj.ulid = message.ulid); message.wellKnownRegex !== undefined && (obj.wellKnownRegex = message.wellKnownRegex !== undefined ? knownRegexToJSON(message.wellKnownRegex) : undefined); message.strict !== undefined && (obj.strict = message.strict); + if (message.example) { + obj.example = message.example.map((e) => e); + } else { + obj.example = []; + } return obj; }, @@ -6304,18 +7589,18 @@ export const StringRules = { fromPartial, I>>(object: I): StringRules { const message = createBaseStringRules(); - message.const = object.const ?? undefined; - message.len = object.len ?? undefined; - message.minLen = object.minLen ?? undefined; - message.maxLen = object.maxLen ?? undefined; - message.lenBytes = object.lenBytes ?? 
undefined; - message.minBytes = object.minBytes ?? undefined; - message.maxBytes = object.maxBytes ?? undefined; - message.pattern = object.pattern ?? undefined; - message.prefix = object.prefix ?? undefined; - message.suffix = object.suffix ?? undefined; - message.contains = object.contains ?? undefined; - message.notContains = object.notContains ?? undefined; + message.const = object.const ?? ""; + message.len = object.len ?? 0; + message.minLen = object.minLen ?? 0; + message.maxLen = object.maxLen ?? 0; + message.lenBytes = object.lenBytes ?? 0; + message.minBytes = object.minBytes ?? 0; + message.maxBytes = object.maxBytes ?? 0; + message.pattern = object.pattern ?? ""; + message.prefix = object.prefix ?? ""; + message.suffix = object.suffix ?? ""; + message.contains = object.contains ?? ""; + message.notContains = object.notContains ?? ""; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; message.email = object.email ?? undefined; @@ -6335,54 +7620,58 @@ export const StringRules = { message.ipv4Prefix = object.ipv4Prefix ?? undefined; message.ipv6Prefix = object.ipv6Prefix ?? undefined; message.hostAndPort = object.hostAndPort ?? undefined; + message.ulid = object.ulid ?? undefined; message.wellKnownRegex = object.wellKnownRegex ?? undefined; - message.strict = object.strict ?? undefined; + message.strict = object.strict ?? false; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseBytesRules(): BytesRules { return { - const: undefined, - len: undefined, - minLen: undefined, - maxLen: undefined, - pattern: undefined, - prefix: undefined, - suffix: undefined, - contains: undefined, + const: new Uint8Array(0), + len: 0, + minLen: 0, + maxLen: 0, + pattern: "", + prefix: new Uint8Array(0), + suffix: new Uint8Array(0), + contains: new Uint8Array(0), in: [], notIn: [], ip: undefined, ipv4: undefined, ipv6: undefined, + uuid: undefined, + example: [], }; } export const BytesRules = { encode(message: BytesRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const.length !== 0) { writer.uint32(10).bytes(message.const); } - if (message.len !== undefined) { + if (message.len !== 0) { writer.uint32(104).uint64(message.len); } - if (message.minLen !== undefined) { + if (message.minLen !== 0) { writer.uint32(16).uint64(message.minLen); } - if (message.maxLen !== undefined) { + if (message.maxLen !== 0) { writer.uint32(24).uint64(message.maxLen); } - if (message.pattern !== undefined) { + if (message.pattern !== "") { writer.uint32(34).string(message.pattern); } - if (message.prefix !== undefined) { + if (message.prefix.length !== 0) { writer.uint32(42).bytes(message.prefix); } - if (message.suffix !== undefined) { + if (message.suffix.length !== 0) { writer.uint32(50).bytes(message.suffix); } - if (message.contains !== undefined) { + if (message.contains.length !== 0) { writer.uint32(58).bytes(message.contains); } for (const v of message.in) { @@ -6400,6 +7689,12 @@ export const BytesRules = { if (message.ipv6 !== undefined) { writer.uint32(96).bool(message.ipv6); } + if (message.uuid !== undefined) { + writer.uint32(120).bool(message.uuid); + } + for (const v of message.example) { + writer.uint32(114).bytes(v!); + } return writer; }, @@ -6501,6 +7796,20 @@ export const BytesRules = { message.ipv6 = reader.bool(); continue; + case 15: + if (tag !== 120) { + break; + } + + message.uuid = reader.bool(); + continue; + case 14: + if (tag !== 114) { + 
break; + } + + message.example.push(reader.bytes()); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -6512,36 +7821,38 @@ export const BytesRules = { fromJSON(object: any): BytesRules { return { - const: isSet(object.const) ? bytesFromBase64(object.const) : undefined, - len: isSet(object.len) ? Number(object.len) : undefined, - minLen: isSet(object.minLen) ? Number(object.minLen) : undefined, - maxLen: isSet(object.maxLen) ? Number(object.maxLen) : undefined, - pattern: isSet(object.pattern) ? String(object.pattern) : undefined, - prefix: isSet(object.prefix) ? bytesFromBase64(object.prefix) : undefined, - suffix: isSet(object.suffix) ? bytesFromBase64(object.suffix) : undefined, - contains: isSet(object.contains) ? bytesFromBase64(object.contains) : undefined, + const: isSet(object.const) ? bytesFromBase64(object.const) : new Uint8Array(0), + len: isSet(object.len) ? Number(object.len) : 0, + minLen: isSet(object.minLen) ? Number(object.minLen) : 0, + maxLen: isSet(object.maxLen) ? Number(object.maxLen) : 0, + pattern: isSet(object.pattern) ? String(object.pattern) : "", + prefix: isSet(object.prefix) ? bytesFromBase64(object.prefix) : new Uint8Array(0), + suffix: isSet(object.suffix) ? bytesFromBase64(object.suffix) : new Uint8Array(0), + contains: isSet(object.contains) ? bytesFromBase64(object.contains) : new Uint8Array(0), in: Array.isArray(object?.in) ? object.in.map((e: any) => bytesFromBase64(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => bytesFromBase64(e)) : [], ip: isSet(object.ip) ? Boolean(object.ip) : undefined, ipv4: isSet(object.ipv4) ? Boolean(object.ipv4) : undefined, ipv6: isSet(object.ipv6) ? Boolean(object.ipv6) : undefined, + uuid: isSet(object.uuid) ? Boolean(object.uuid) : undefined, + example: Array.isArray(object?.example) ? object.example.map((e: any) => bytesFromBase64(e)) : [], }; }, toJSON(message: BytesRules): unknown { const obj: any = {}; message.const !== undefined && - (obj.const = message.const !== undefined ? base64FromBytes(message.const) : undefined); + (obj.const = base64FromBytes(message.const !== undefined ? message.const : new Uint8Array(0))); message.len !== undefined && (obj.len = Math.round(message.len)); message.minLen !== undefined && (obj.minLen = Math.round(message.minLen)); message.maxLen !== undefined && (obj.maxLen = Math.round(message.maxLen)); message.pattern !== undefined && (obj.pattern = message.pattern); message.prefix !== undefined && - (obj.prefix = message.prefix !== undefined ? base64FromBytes(message.prefix) : undefined); + (obj.prefix = base64FromBytes(message.prefix !== undefined ? message.prefix : new Uint8Array(0))); message.suffix !== undefined && - (obj.suffix = message.suffix !== undefined ? base64FromBytes(message.suffix) : undefined); + (obj.suffix = base64FromBytes(message.suffix !== undefined ? message.suffix : new Uint8Array(0))); message.contains !== undefined && - (obj.contains = message.contains !== undefined ? base64FromBytes(message.contains) : undefined); + (obj.contains = base64FromBytes(message.contains !== undefined ? message.contains : new Uint8Array(0))); if (message.in) { obj.in = message.in.map((e) => base64FromBytes(e !== undefined ? 
e : new Uint8Array(0))); } else { @@ -6555,6 +7866,12 @@ export const BytesRules = { message.ip !== undefined && (obj.ip = message.ip); message.ipv4 !== undefined && (obj.ipv4 = message.ipv4); message.ipv6 !== undefined && (obj.ipv6 = message.ipv6); + message.uuid !== undefined && (obj.uuid = message.uuid); + if (message.example) { + obj.example = message.example.map((e) => base64FromBytes(e !== undefined ? e : new Uint8Array(0))); + } else { + obj.example = []; + } return obj; }, @@ -6564,33 +7881,35 @@ export const BytesRules = { fromPartial, I>>(object: I): BytesRules { const message = createBaseBytesRules(); - message.const = object.const ?? undefined; - message.len = object.len ?? undefined; - message.minLen = object.minLen ?? undefined; - message.maxLen = object.maxLen ?? undefined; - message.pattern = object.pattern ?? undefined; - message.prefix = object.prefix ?? undefined; - message.suffix = object.suffix ?? undefined; - message.contains = object.contains ?? undefined; + message.const = object.const ?? new Uint8Array(0); + message.len = object.len ?? 0; + message.minLen = object.minLen ?? 0; + message.maxLen = object.maxLen ?? 0; + message.pattern = object.pattern ?? ""; + message.prefix = object.prefix ?? new Uint8Array(0); + message.suffix = object.suffix ?? new Uint8Array(0); + message.contains = object.contains ?? new Uint8Array(0); message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; message.ip = object.ip ?? undefined; message.ipv4 = object.ipv4 ?? undefined; message.ipv6 = object.ipv6 ?? undefined; + message.uuid = object.uuid ?? undefined; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseEnumRules(): EnumRules { - return { const: undefined, definedOnly: undefined, in: [], notIn: [] }; + return { const: 0, definedOnly: false, in: [], notIn: [], example: [] }; } export const EnumRules = { encode(message: EnumRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.const !== undefined) { + if (message.const !== 0) { writer.uint32(8).int32(message.const); } - if (message.definedOnly !== undefined) { + if (message.definedOnly === true) { writer.uint32(16).bool(message.definedOnly); } writer.uint32(26).fork(); @@ -6603,6 +7922,11 @@ export const EnumRules = { writer.int32(v); } writer.ldelim(); + writer.uint32(42).fork(); + for (const v of message.example) { + writer.int32(v); + } + writer.ldelim(); return writer; }, @@ -6661,21 +7985,39 @@ export const EnumRules = { } break; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, + case 5: + if (tag === 40) { + message.example.push(reader.int32()); - fromJSON(object: any): EnumRules { + continue; + } + + if (tag === 42) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.example.push(reader.int32()); + } + + continue; + } + + break; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): EnumRules { return { - const: isSet(object.const) ? Number(object.const) : undefined, - definedOnly: isSet(object.definedOnly) ? Boolean(object.definedOnly) : undefined, + const: isSet(object.const) ? Number(object.const) : 0, + definedOnly: isSet(object.definedOnly) ? Boolean(object.definedOnly) : false, in: Array.isArray(object?.in) ? object.in.map((e: any) => Number(e)) : [], notIn: Array.isArray(object?.notIn) ? 
object.notIn.map((e: any) => Number(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Number(e)) : [], }; }, @@ -6693,6 +8035,11 @@ export const EnumRules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => Math.round(e)); + } else { + obj.example = []; + } return obj; }, @@ -6702,31 +8049,32 @@ export const EnumRules = { fromPartial, I>>(object: I): EnumRules { const message = createBaseEnumRules(); - message.const = object.const ?? undefined; - message.definedOnly = object.definedOnly ?? undefined; + message.const = object.const ?? 0; + message.definedOnly = object.definedOnly ?? false; message.in = object.in?.map((e) => e) || []; message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; function createBaseRepeatedRules(): RepeatedRules { - return { minItems: undefined, maxItems: undefined, unique: undefined, items: undefined }; + return { minItems: 0, maxItems: 0, unique: false, items: undefined }; } export const RepeatedRules = { encode(message: RepeatedRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.minItems !== undefined) { + if (message.minItems !== 0) { writer.uint32(8).uint64(message.minItems); } - if (message.maxItems !== undefined) { + if (message.maxItems !== 0) { writer.uint32(16).uint64(message.maxItems); } - if (message.unique !== undefined) { + if (message.unique === true) { writer.uint32(24).bool(message.unique); } if (message.items !== undefined) { - FieldConstraints.encode(message.items, writer.uint32(34).fork()).ldelim(); + FieldRules.encode(message.items, writer.uint32(34).fork()).ldelim(); } return writer; }, @@ -6764,7 +8112,7 @@ export const RepeatedRules = { break; } - message.items = FieldConstraints.decode(reader, reader.uint32()); + message.items = FieldRules.decode(reader, reader.uint32()); continue; } if ((tag & 7) === 4 || tag === 0) { @@ -6777,10 +8125,10 @@ export const RepeatedRules = { fromJSON(object: any): RepeatedRules { return { - minItems: isSet(object.minItems) ? Number(object.minItems) : undefined, - maxItems: isSet(object.maxItems) ? Number(object.maxItems) : undefined, - unique: isSet(object.unique) ? Boolean(object.unique) : undefined, - items: isSet(object.items) ? FieldConstraints.fromJSON(object.items) : undefined, + minItems: isSet(object.minItems) ? Number(object.minItems) : 0, + maxItems: isSet(object.maxItems) ? Number(object.maxItems) : 0, + unique: isSet(object.unique) ? Boolean(object.unique) : false, + items: isSet(object.items) ? FieldRules.fromJSON(object.items) : undefined, }; }, @@ -6789,7 +8137,7 @@ export const RepeatedRules = { message.minItems !== undefined && (obj.minItems = Math.round(message.minItems)); message.maxItems !== undefined && (obj.maxItems = Math.round(message.maxItems)); message.unique !== undefined && (obj.unique = message.unique); - message.items !== undefined && (obj.items = message.items ? FieldConstraints.toJSON(message.items) : undefined); + message.items !== undefined && (obj.items = message.items ? FieldRules.toJSON(message.items) : undefined); return obj; }, @@ -6799,33 +8147,33 @@ export const RepeatedRules = { fromPartial, I>>(object: I): RepeatedRules { const message = createBaseRepeatedRules(); - message.minItems = object.minItems ?? undefined; - message.maxItems = object.maxItems ?? undefined; - message.unique = object.unique ?? undefined; + message.minItems = object.minItems ?? 
0; + message.maxItems = object.maxItems ?? 0; + message.unique = object.unique ?? false; message.items = (object.items !== undefined && object.items !== null) - ? FieldConstraints.fromPartial(object.items) + ? FieldRules.fromPartial(object.items) : undefined; return message; }, }; function createBaseMapRules(): MapRules { - return { minPairs: undefined, maxPairs: undefined, keys: undefined, values: undefined }; + return { minPairs: 0, maxPairs: 0, keys: undefined, values: undefined }; } export const MapRules = { encode(message: MapRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.minPairs !== undefined) { + if (message.minPairs !== 0) { writer.uint32(8).uint64(message.minPairs); } - if (message.maxPairs !== undefined) { + if (message.maxPairs !== 0) { writer.uint32(16).uint64(message.maxPairs); } if (message.keys !== undefined) { - FieldConstraints.encode(message.keys, writer.uint32(34).fork()).ldelim(); + FieldRules.encode(message.keys, writer.uint32(34).fork()).ldelim(); } if (message.values !== undefined) { - FieldConstraints.encode(message.values, writer.uint32(42).fork()).ldelim(); + FieldRules.encode(message.values, writer.uint32(42).fork()).ldelim(); } return writer; }, @@ -6856,14 +8204,14 @@ export const MapRules = { break; } - message.keys = FieldConstraints.decode(reader, reader.uint32()); + message.keys = FieldRules.decode(reader, reader.uint32()); continue; case 5: if (tag !== 42) { break; } - message.values = FieldConstraints.decode(reader, reader.uint32()); + message.values = FieldRules.decode(reader, reader.uint32()); continue; } if ((tag & 7) === 4 || tag === 0) { @@ -6876,10 +8224,10 @@ export const MapRules = { fromJSON(object: any): MapRules { return { - minPairs: isSet(object.minPairs) ? Number(object.minPairs) : undefined, - maxPairs: isSet(object.maxPairs) ? Number(object.maxPairs) : undefined, - keys: isSet(object.keys) ? FieldConstraints.fromJSON(object.keys) : undefined, - values: isSet(object.values) ? FieldConstraints.fromJSON(object.values) : undefined, + minPairs: isSet(object.minPairs) ? Number(object.minPairs) : 0, + maxPairs: isSet(object.maxPairs) ? Number(object.maxPairs) : 0, + keys: isSet(object.keys) ? FieldRules.fromJSON(object.keys) : undefined, + values: isSet(object.values) ? FieldRules.fromJSON(object.values) : undefined, }; }, @@ -6887,8 +8235,8 @@ export const MapRules = { const obj: any = {}; message.minPairs !== undefined && (obj.minPairs = Math.round(message.minPairs)); message.maxPairs !== undefined && (obj.maxPairs = Math.round(message.maxPairs)); - message.keys !== undefined && (obj.keys = message.keys ? FieldConstraints.toJSON(message.keys) : undefined); - message.values !== undefined && (obj.values = message.values ? FieldConstraints.toJSON(message.values) : undefined); + message.keys !== undefined && (obj.keys = message.keys ? FieldRules.toJSON(message.keys) : undefined); + message.values !== undefined && (obj.values = message.values ? FieldRules.toJSON(message.values) : undefined); return obj; }, @@ -6898,13 +8246,13 @@ export const MapRules = { fromPartial, I>>(object: I): MapRules { const message = createBaseMapRules(); - message.minPairs = object.minPairs ?? undefined; - message.maxPairs = object.maxPairs ?? undefined; + message.minPairs = object.minPairs ?? 0; + message.maxPairs = object.maxPairs ?? 0; message.keys = (object.keys !== undefined && object.keys !== null) - ? FieldConstraints.fromPartial(object.keys) + ? 
FieldRules.fromPartial(object.keys) : undefined; message.values = (object.values !== undefined && object.values !== null) - ? FieldConstraints.fromPartial(object.values) + ? FieldRules.fromPartial(object.values) : undefined; return message; }, @@ -6990,7 +8338,16 @@ export const AnyRules = { }; function createBaseDurationRules(): DurationRules { - return { const: undefined, lt: undefined, lte: undefined, gt: undefined, gte: undefined, in: [], notIn: [] }; + return { + const: undefined, + lt: undefined, + lte: undefined, + gt: undefined, + gte: undefined, + in: [], + notIn: [], + example: [], + }; } export const DurationRules = { @@ -7016,6 +8373,9 @@ export const DurationRules = { for (const v of message.notIn) { Duration.encode(v!, writer.uint32(66).fork()).ldelim(); } + for (const v of message.example) { + Duration.encode(v!, writer.uint32(74).fork()).ldelim(); + } return writer; }, @@ -7075,6 +8435,13 @@ export const DurationRules = { message.notIn.push(Duration.decode(reader, reader.uint32())); continue; + case 9: + if (tag !== 74) { + break; + } + + message.example.push(Duration.decode(reader, reader.uint32())); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -7093,6 +8460,7 @@ export const DurationRules = { gte: isSet(object.gte) ? Duration.fromJSON(object.gte) : undefined, in: Array.isArray(object?.in) ? object.in.map((e: any) => Duration.fromJSON(e)) : [], notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => Duration.fromJSON(e)) : [], + example: Array.isArray(object?.example) ? object.example.map((e: any) => Duration.fromJSON(e)) : [], }; }, @@ -7113,6 +8481,11 @@ export const DurationRules = { } else { obj.notIn = []; } + if (message.example) { + obj.example = message.example.map((e) => e ? Duration.toJSON(e) : undefined); + } else { + obj.example = []; + } return obj; }, @@ -7131,6 +8504,118 @@ export const DurationRules = { message.gte = (object.gte !== undefined && object.gte !== null) ? Duration.fromPartial(object.gte) : undefined; message.in = object.in?.map((e) => Duration.fromPartial(e)) || []; message.notIn = object.notIn?.map((e) => Duration.fromPartial(e)) || []; + message.example = object.example?.map((e) => Duration.fromPartial(e)) || []; + return message; + }, +}; + +function createBaseFieldMaskRules(): FieldMaskRules { + return { const: undefined, in: [], notIn: [], example: [] }; +} + +export const FieldMaskRules = { + encode(message: FieldMaskRules, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.const !== undefined) { + FieldMask.encode(FieldMask.wrap(message.const), writer.uint32(10).fork()).ldelim(); + } + for (const v of message.in) { + writer.uint32(18).string(v!); + } + for (const v of message.notIn) { + writer.uint32(26).string(v!); + } + for (const v of message.example) { + FieldMask.encode(FieldMask.wrap(v!), writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): FieldMaskRules { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseFieldMaskRules(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.const = FieldMask.unwrap(FieldMask.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.in.push(reader.string()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.notIn.push(reader.string()); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.example.push(FieldMask.unwrap(FieldMask.decode(reader, reader.uint32()))); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): FieldMaskRules { + return { + const: isSet(object.const) ? FieldMask.unwrap(FieldMask.fromJSON(object.const)) : undefined, + in: Array.isArray(object?.in) ? object.in.map((e: any) => String(e)) : [], + notIn: Array.isArray(object?.notIn) ? object.notIn.map((e: any) => String(e)) : [], + example: Array.isArray(object?.example) + ? object.example.map((e: any) => FieldMask.unwrap(FieldMask.fromJSON(e))) + : [], + }; + }, + + toJSON(message: FieldMaskRules): unknown { + const obj: any = {}; + message.const !== undefined && (obj.const = FieldMask.toJSON(FieldMask.wrap(message.const))); + if (message.in) { + obj.in = message.in.map((e) => e); + } else { + obj.in = []; + } + if (message.notIn) { + obj.notIn = message.notIn.map((e) => e); + } else { + obj.notIn = []; + } + if (message.example) { + obj.example = message.example.map((e) => FieldMask.toJSON(FieldMask.wrap(e))); + } else { + obj.example = []; + } + return obj; + }, + + create, I>>(base?: I): FieldMaskRules { + return FieldMaskRules.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): FieldMaskRules { + const message = createBaseFieldMaskRules(); + message.const = object.const ?? undefined; + message.in = object.in?.map((e) => e) || []; + message.notIn = object.notIn?.map((e) => e) || []; + message.example = object.example?.map((e) => e) || []; return message; }, }; @@ -7145,6 +8630,7 @@ function createBaseTimestampRules(): TimestampRules { gte: undefined, gtNow: undefined, within: undefined, + example: [], }; } @@ -7174,6 +8660,9 @@ export const TimestampRules = { if (message.within !== undefined) { Duration.encode(message.within, writer.uint32(74).fork()).ldelim(); } + for (const v of message.example) { + Timestamp.encode(toTimestamp(v!), writer.uint32(82).fork()).ldelim(); + } return writer; }, @@ -7240,6 +8729,13 @@ export const TimestampRules = { message.within = Duration.decode(reader, reader.uint32()); continue; + case 10: + if (tag !== 82) { + break; + } + + message.example.push(fromTimestamp(Timestamp.decode(reader, reader.uint32()))); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -7259,6 +8755,7 @@ export const TimestampRules = { gte: isSet(object.gte) ? fromJsonTimestamp(object.gte) : undefined, gtNow: isSet(object.gtNow) ? Boolean(object.gtNow) : undefined, within: isSet(object.within) ? Duration.fromJSON(object.within) : undefined, + example: Array.isArray(object?.example) ? object.example.map((e: any) => fromJsonTimestamp(e)) : [], }; }, @@ -7272,6 +8769,11 @@ export const TimestampRules = { message.gte !== undefined && (obj.gte = message.gte.toISOString()); message.gtNow !== undefined && (obj.gtNow = message.gtNow); message.within !== undefined && (obj.within = message.within ? 
Duration.toJSON(message.within) : undefined); + if (message.example) { + obj.example = message.example.map((e) => e.toISOString()); + } else { + obj.example = []; + } return obj; }, @@ -7291,6 +8793,429 @@ export const TimestampRules = { message.within = (object.within !== undefined && object.within !== null) ? Duration.fromPartial(object.within) : undefined; + message.example = object.example?.map((e) => e) || []; + return message; + }, +}; + +function createBaseViolations(): Violations { + return { violations: [] }; +} + +export const Violations = { + encode(message: Violations, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + for (const v of message.violations) { + Violation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Violations { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseViolations(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.violations.push(Violation.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Violations { + return { + violations: Array.isArray(object?.violations) ? object.violations.map((e: any) => Violation.fromJSON(e)) : [], + }; + }, + + toJSON(message: Violations): unknown { + const obj: any = {}; + if (message.violations) { + obj.violations = message.violations.map((e) => e ? Violation.toJSON(e) : undefined); + } else { + obj.violations = []; + } + return obj; + }, + + create, I>>(base?: I): Violations { + return Violations.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): Violations { + const message = createBaseViolations(); + message.violations = object.violations?.map((e) => Violation.fromPartial(e)) || []; + return message; + }, +}; + +function createBaseViolation(): Violation { + return { field: undefined, rule: undefined, ruleId: "", message: "", forKey: false }; +} + +export const Violation = { + encode(message: Violation, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.field !== undefined) { + FieldPath.encode(message.field, writer.uint32(42).fork()).ldelim(); + } + if (message.rule !== undefined) { + FieldPath.encode(message.rule, writer.uint32(50).fork()).ldelim(); + } + if (message.ruleId !== "") { + writer.uint32(18).string(message.ruleId); + } + if (message.message !== "") { + writer.uint32(26).string(message.message); + } + if (message.forKey === true) { + writer.uint32(32).bool(message.forKey); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Violation { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseViolation(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 5: + if (tag !== 42) { + break; + } + + message.field = FieldPath.decode(reader, reader.uint32()); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.rule = FieldPath.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.ruleId = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.message = reader.string(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.forKey = reader.bool(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Violation { + return { + field: isSet(object.field) ? FieldPath.fromJSON(object.field) : undefined, + rule: isSet(object.rule) ? FieldPath.fromJSON(object.rule) : undefined, + ruleId: isSet(object.ruleId) ? String(object.ruleId) : "", + message: isSet(object.message) ? String(object.message) : "", + forKey: isSet(object.forKey) ? Boolean(object.forKey) : false, + }; + }, + + toJSON(message: Violation): unknown { + const obj: any = {}; + message.field !== undefined && (obj.field = message.field ? FieldPath.toJSON(message.field) : undefined); + message.rule !== undefined && (obj.rule = message.rule ? FieldPath.toJSON(message.rule) : undefined); + message.ruleId !== undefined && (obj.ruleId = message.ruleId); + message.message !== undefined && (obj.message = message.message); + message.forKey !== undefined && (obj.forKey = message.forKey); + return obj; + }, + + create, I>>(base?: I): Violation { + return Violation.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): Violation { + const message = createBaseViolation(); + message.field = (object.field !== undefined && object.field !== null) + ? FieldPath.fromPartial(object.field) + : undefined; + message.rule = (object.rule !== undefined && object.rule !== null) ? FieldPath.fromPartial(object.rule) : undefined; + message.ruleId = object.ruleId ?? ""; + message.message = object.message ?? ""; + message.forKey = object.forKey ?? false; + return message; + }, +}; + +function createBaseFieldPath(): FieldPath { + return { elements: [] }; +} + +export const FieldPath = { + encode(message: FieldPath, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + for (const v of message.elements) { + FieldPathElement.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): FieldPath { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseFieldPath(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.elements.push(FieldPathElement.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): FieldPath { + return { + elements: Array.isArray(object?.elements) ? object.elements.map((e: any) => FieldPathElement.fromJSON(e)) : [], + }; + }, + + toJSON(message: FieldPath): unknown { + const obj: any = {}; + if (message.elements) { + obj.elements = message.elements.map((e) => e ? 
FieldPathElement.toJSON(e) : undefined); + } else { + obj.elements = []; + } + return obj; + }, + + create, I>>(base?: I): FieldPath { + return FieldPath.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): FieldPath { + const message = createBaseFieldPath(); + message.elements = object.elements?.map((e) => FieldPathElement.fromPartial(e)) || []; + return message; + }, +}; + +function createBaseFieldPathElement(): FieldPathElement { + return { + fieldNumber: 0, + fieldName: "", + fieldType: 1, + keyType: 1, + valueType: 1, + index: undefined, + boolKey: undefined, + intKey: undefined, + uintKey: undefined, + stringKey: undefined, + }; +} + +export const FieldPathElement = { + encode(message: FieldPathElement, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.fieldNumber !== 0) { + writer.uint32(8).int32(message.fieldNumber); + } + if (message.fieldName !== "") { + writer.uint32(18).string(message.fieldName); + } + if (message.fieldType !== 1) { + writer.uint32(24).int32(message.fieldType); + } + if (message.keyType !== 1) { + writer.uint32(32).int32(message.keyType); + } + if (message.valueType !== 1) { + writer.uint32(40).int32(message.valueType); + } + if (message.index !== undefined) { + writer.uint32(48).uint64(message.index); + } + if (message.boolKey !== undefined) { + writer.uint32(56).bool(message.boolKey); + } + if (message.intKey !== undefined) { + writer.uint32(64).int64(message.intKey); + } + if (message.uintKey !== undefined) { + writer.uint32(72).uint64(message.uintKey); + } + if (message.stringKey !== undefined) { + writer.uint32(82).string(message.stringKey); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): FieldPathElement { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseFieldPathElement(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.fieldNumber = reader.int32(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.fieldName = reader.string(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.fieldType = reader.int32() as any; + continue; + case 4: + if (tag !== 32) { + break; + } + + message.keyType = reader.int32() as any; + continue; + case 5: + if (tag !== 40) { + break; + } + + message.valueType = reader.int32() as any; + continue; + case 6: + if (tag !== 48) { + break; + } + + message.index = longToNumber(reader.uint64() as Long); + continue; + case 7: + if (tag !== 56) { + break; + } + + message.boolKey = reader.bool(); + continue; + case 8: + if (tag !== 64) { + break; + } + + message.intKey = longToNumber(reader.int64() as Long); + continue; + case 9: + if (tag !== 72) { + break; + } + + message.uintKey = longToNumber(reader.uint64() as Long); + continue; + case 10: + if (tag !== 82) { + break; + } + + message.stringKey = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): FieldPathElement { + return { + fieldNumber: isSet(object.fieldNumber) ? Number(object.fieldNumber) : 0, + fieldName: isSet(object.fieldName) ? String(object.fieldName) : "", + fieldType: isSet(object.fieldType) ? fieldDescriptorProto_TypeFromJSON(object.fieldType) : 1, + keyType: isSet(object.keyType) ? 
fieldDescriptorProto_TypeFromJSON(object.keyType) : 1, + valueType: isSet(object.valueType) ? fieldDescriptorProto_TypeFromJSON(object.valueType) : 1, + index: isSet(object.index) ? Number(object.index) : undefined, + boolKey: isSet(object.boolKey) ? Boolean(object.boolKey) : undefined, + intKey: isSet(object.intKey) ? Number(object.intKey) : undefined, + uintKey: isSet(object.uintKey) ? Number(object.uintKey) : undefined, + stringKey: isSet(object.stringKey) ? String(object.stringKey) : undefined, + }; + }, + + toJSON(message: FieldPathElement): unknown { + const obj: any = {}; + message.fieldNumber !== undefined && (obj.fieldNumber = Math.round(message.fieldNumber)); + message.fieldName !== undefined && (obj.fieldName = message.fieldName); + message.fieldType !== undefined && (obj.fieldType = fieldDescriptorProto_TypeToJSON(message.fieldType)); + message.keyType !== undefined && (obj.keyType = fieldDescriptorProto_TypeToJSON(message.keyType)); + message.valueType !== undefined && (obj.valueType = fieldDescriptorProto_TypeToJSON(message.valueType)); + message.index !== undefined && (obj.index = Math.round(message.index)); + message.boolKey !== undefined && (obj.boolKey = message.boolKey); + message.intKey !== undefined && (obj.intKey = Math.round(message.intKey)); + message.uintKey !== undefined && (obj.uintKey = Math.round(message.uintKey)); + message.stringKey !== undefined && (obj.stringKey = message.stringKey); + return obj; + }, + + create, I>>(base?: I): FieldPathElement { + return FieldPathElement.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): FieldPathElement { + const message = createBaseFieldPathElement(); + message.fieldNumber = object.fieldNumber ?? 0; + message.fieldName = object.fieldName ?? ""; + message.fieldType = object.fieldType ?? 1; + message.keyType = object.keyType ?? 1; + message.valueType = object.valueType ?? 1; + message.index = object.index ?? undefined; + message.boolKey = object.boolKey ?? undefined; + message.intKey = object.intKey ?? undefined; + message.uintKey = object.uintKey ?? undefined; + message.stringKey = object.stringKey ?? undefined; return message; }, }; diff --git a/app/controlplane/api/gen/frontend/google/protobuf/field_mask.ts b/app/controlplane/api/gen/frontend/google/protobuf/field_mask.ts new file mode 100644 index 000000000..987af18f8 --- /dev/null +++ b/app/controlplane/api/gen/frontend/google/protobuf/field_mask.ts @@ -0,0 +1,290 @@ +/* eslint-disable */ +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "google.protobuf"; + +/** + * `FieldMask` represents a set of symbolic field paths, for example: + * + * paths: "f.a" + * paths: "f.b.d" + * + * Here `f` represents a field in some root message, `a` and `b` + * fields in the message found in `f`, and `d` a field found in the + * message in `f.b`. + * + * Field masks are used to specify a subset of fields that should be + * returned by a get operation or modified by an update operation. + * Field masks also have a custom JSON encoding (see below). + * + * # Field Masks in Projections + * + * When used in the context of a projection, a response message or + * sub-message is filtered by the API to only contain those fields as + * specified in the mask. 
For example, if the mask in the previous + * example is applied to a response message as follows: + * + * f { + * a : 22 + * b { + * d : 1 + * x : 2 + * } + * y : 13 + * } + * z: 8 + * + * The result will not contain specific values for fields x,y and z + * (their value will be set to the default, and omitted in proto text + * output): + * + * f { + * a : 22 + * b { + * d : 1 + * } + * } + * + * A repeated field is not allowed except at the last position of a + * paths string. + * + * If a FieldMask object is not present in a get operation, the + * operation applies to all fields (as if a FieldMask of all fields + * had been specified). + * + * Note that a field mask does not necessarily apply to the + * top-level response message. In case of a REST get operation, the + * field mask applies directly to the response, but in case of a REST + * list operation, the mask instead applies to each individual message + * in the returned resource list. In case of a REST custom method, + * other definitions may be used. Where the mask applies will be + * clearly documented together with its declaration in the API. In + * any case, the effect on the returned resource/resources is required + * behavior for APIs. + * + * # Field Masks in Update Operations + * + * A field mask in update operations specifies which fields of the + * targeted resource are going to be updated. The API is required + * to only change the values of the fields as specified in the mask + * and leave the others untouched. If a resource is passed in to + * describe the updated values, the API ignores the values of all + * fields not covered by the mask. + * + * If a repeated field is specified for an update operation, new values will + * be appended to the existing repeated field in the target resource. Note that + * a repeated field is only allowed in the last position of a `paths` string. + * + * If a sub-message is specified in the last position of the field mask for an + * update operation, then new value will be merged into the existing sub-message + * in the target resource. + * + * For example, given the target message: + * + * f { + * b { + * d: 1 + * x: 2 + * } + * c: [1] + * } + * + * And an update message: + * + * f { + * b { + * d: 10 + * } + * c: [2] + * } + * + * then if the field mask is: + * + * paths: ["f.b", "f.c"] + * + * then the result will be: + * + * f { + * b { + * d: 10 + * x: 2 + * } + * c: [1, 2] + * } + * + * An implementation may provide options to override this default behavior for + * repeated and message fields. + * + * In order to reset a field's value to the default, the field must + * be in the mask and set to the default value in the provided resource. + * Hence, in order to reset all fields of a resource, provide a default + * instance of the resource and set all fields in the mask, or do + * not provide a mask as described below. + * + * If a field mask is not present on update, the operation applies to + * all fields (as if a field mask of all fields has been specified). + * Note that in the presence of schema evolution, this may mean that + * fields the client does not know and has therefore not filled into + * the request will be reset to their default. If this is unwanted + * behavior, a specific service may require a client to always specify + * a field mask, producing an error if not. + * + * As with get operations, the location of the resource which + * describes the updated values in the request message depends on the + * operation kind. 
In any case, the effect of the field mask is + * required to be honored by the API. + * + * ## Considerations for HTTP REST + * + * The HTTP kind of an update operation which uses a field mask must + * be set to PATCH instead of PUT in order to satisfy HTTP semantics + * (PUT must only be used for full updates). + * + * # JSON Encoding of Field Masks + * + * In JSON, a field mask is encoded as a single string where paths are + * separated by a comma. Fields name in each path are converted + * to/from lower-camel naming conventions. + * + * As an example, consider the following message declarations: + * + * message Profile { + * User user = 1; + * Photo photo = 2; + * } + * message User { + * string display_name = 1; + * string address = 2; + * } + * + * In proto a field mask for `Profile` may look as such: + * + * mask { + * paths: "user.display_name" + * paths: "photo" + * } + * + * In JSON, the same mask is represented as below: + * + * { + * mask: "user.displayName,photo" + * } + * + * # Field Masks and Oneof Fields + * + * Field masks treat fields in oneofs just as regular fields. Consider the + * following message: + * + * message SampleMessage { + * oneof test_oneof { + * string name = 4; + * SubMessage sub_message = 9; + * } + * } + * + * The field mask can be: + * + * mask { + * paths: "name" + * } + * + * Or: + * + * mask { + * paths: "sub_message" + * } + * + * Note that oneof type names ("test_oneof" in this case) cannot be used in + * paths. + * + * ## Field Mask Verification + * + * The implementation of any API method which has a FieldMask type field in the + * request should verify the included field paths, and return an + * `INVALID_ARGUMENT` error if any path is unmappable. + */ +export interface FieldMask { + /** The set of field mask paths. */ + paths: string[]; +} + +function createBaseFieldMask(): FieldMask { + return { paths: [] }; +} + +export const FieldMask = { + encode(message: FieldMask, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + for (const v of message.paths) { + writer.uint32(10).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): FieldMask { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseFieldMask(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.paths.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): FieldMask { + return { + paths: typeof (object) === "string" + ? object.split(",").filter(Boolean) + : Array.isArray(object?.paths) + ? object.paths.map(String) + : [], + }; + }, + + toJSON(message: FieldMask): string { + return message.paths.join(","); + }, + + create, I>>(base?: I): FieldMask { + return FieldMask.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): FieldMask { + const message = createBaseFieldMask(); + message.paths = object.paths?.map((e) => e) || []; + return message; + }, + + wrap(paths: string[]): FieldMask { + const result = createBaseFieldMask(); + result.paths = paths; + return result; + }, + + unwrap(message: FieldMask): string[] { + return message.paths; + }, +}; + +type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; + +export type DeepPartial = T extends Builtin ? T + : T extends Array ? 
Array> : T extends ReadonlyArray ? ReadonlyArray> + : T extends {} ? { [K in keyof T]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin ? P + : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; diff --git a/app/controlplane/api/workflowcontract/v1/crafting_schema.pb.go b/app/controlplane/api/workflowcontract/v1/crafting_schema.pb.go index f0c59e450..710ca4b34 100644 --- a/app/controlplane/api/workflowcontract/v1/crafting_schema.pb.go +++ b/app/controlplane/api/workflowcontract/v1/crafting_schema.pb.go @@ -1836,7 +1836,7 @@ var file_workflowcontract_v1_crafting_schema_proto_rawDesc = []byte{ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x62, 0x75, 0x66, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x0e, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x82, 0x0e, 0x0a, 0x0e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x32, 0x0a, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xba, 0x48, 0x06, 0x72, 0x04, 0x0a, @@ -1867,360 +1867,360 @@ var file_workflowcontract_v1_crafting_schema_proto_rawDesc = []byte{ 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x73, 0x1a, 0x9f, 0x02, 0x0a, 0x06, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x04, + 0x73, 0x1a, 0x9e, 0x02, 0x0a, 0x06, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, - 0x65, 0x42, 0x0d, 0xba, 0x48, 0x08, 0x82, 0x01, 0x05, 0x10, 0x01, 0x22, 0x01, 0x00, 0x18, 0x01, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0a, 0x52, 0x75, 0x6e, 0x6e, 0x65, - 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x55, 0x4e, 0x4e, 0x45, 0x52, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x5f, 0x41, 0x43, 0x54, - 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x49, 0x54, 0x4c, 0x41, 0x42, 0x5f, - 0x50, 0x49, 0x50, 0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x5a, - 0x55, 0x52, 0x45, 0x5f, 0x50, 0x49, 0x50, 0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x4a, 0x45, 0x4e, 0x4b, 0x49, 0x4e, 0x53, 0x5f, 0x4a, 0x4f, 0x42, 0x10, 0x04, 0x12, - 0x12, 0x0a, 0x0e, 0x43, 0x49, 0x52, 0x43, 0x4c, 0x45, 0x43, 0x49, 0x5f, 0x42, 0x55, 0x49, 0x4c, - 0x44, 0x10, 0x05, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x41, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x50, 0x49, - 0x50, 0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x06, 0x12, 0x15, 
0x0a, 0x11, 0x54, 0x45, 0x41, 0x4d, - 0x43, 0x49, 0x54, 0x59, 0x5f, 0x50, 0x49, 0x50, 0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x07, 0x3a, - 0x02, 0x18, 0x01, 0x1a, 0xfa, 0x07, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x12, 0x5c, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x4d, 0x61, 0x74, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0d, 0xba, 0x48, 0x08, 0x82, 0x01, - 0x05, 0x10, 0x01, 0x22, 0x01, 0x00, 0x18, 0x01, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x99, - 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x84, 0x01, - 0xba, 0x48, 0x7f, 0xba, 0x01, 0x7c, 0x0a, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, - 0x2d, 0x31, 0x31, 0x32, 0x33, 0x12, 0x3a, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x42, 0x0c, 0xba, 0x48, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x00, 0x18, 0x01, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0a, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x55, 0x4e, 0x4e, 0x45, 0x52, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x5f, 0x41, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x49, 0x54, 0x4c, 0x41, 0x42, 0x5f, 0x50, + 0x49, 0x50, 0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x5a, 0x55, + 0x52, 0x45, 0x5f, 0x50, 0x49, 0x50, 0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x4a, 0x45, 0x4e, 0x4b, 0x49, 0x4e, 0x53, 0x5f, 0x4a, 0x4f, 0x42, 0x10, 0x04, 0x12, 0x12, + 0x0a, 0x0e, 0x43, 0x49, 0x52, 0x43, 0x4c, 0x45, 0x43, 0x49, 0x5f, 0x42, 0x55, 0x49, 0x4c, 0x44, + 0x10, 0x05, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x41, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x50, 0x49, 0x50, + 0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x45, 0x41, 0x4d, 0x43, + 0x49, 0x54, 0x59, 0x5f, 0x50, 0x49, 0x50, 0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x07, 0x3a, 0x02, + 0x18, 0x01, 0x1a, 0xf9, 0x07, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, + 0x5b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x4d, 0x61, 0x74, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0c, 0xba, 0x48, 0x07, 0x82, 0x01, 0x04, + 0x10, 0x01, 0x20, 0x00, 0x18, 0x01, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x99, 0x01, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x84, 0x01, 0xba, 0x48, + 0x7f, 0xba, 0x01, 0x7c, 0x0a, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, 0x31, + 0x31, 0x32, 0x33, 0x12, 0x3a, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, + 0x20, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 
0x65, 0x72, + 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, + 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, + 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, + 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, + 0x18, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0b, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6b, 0x69, 0x70, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x73, 0x6b, 0x69, 0x70, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xcb, 0x04, 0x0a, + 0x0c, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, + 0x19, 0x4d, 0x41, 0x54, 0x45, 0x52, 0x49, 0x41, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4f, 0x4e, 0x54, + 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x49, 0x4d, 0x41, 0x47, 0x45, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x41, 0x52, 0x54, 0x49, 0x46, 0x41, 0x43, 0x54, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, + 0x42, 0x4f, 0x4d, 0x5f, 0x43, 0x59, 0x43, 0x4c, 0x4f, 0x4e, 0x45, 0x44, 0x58, 0x5f, 0x4a, 0x53, + 0x4f, 0x4e, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x42, 0x4f, 0x4d, 0x5f, 0x53, 0x50, 0x44, + 0x58, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x55, 0x4e, 0x49, + 0x54, 0x5f, 0x58, 0x4d, 0x4c, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x45, 0x4e, 0x56, + 0x45, 0x58, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x48, 0x45, 0x4c, 0x4d, 0x5f, 0x43, 0x48, 0x41, + 0x52, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x41, 0x52, 0x49, 0x46, 0x10, 0x09, 0x12, + 0x0c, 0x0a, 0x08, 0x45, 0x56, 0x49, 0x44, 0x45, 0x4e, 0x43, 0x45, 0x10, 0x0b, 0x12, 0x0f, 0x0a, + 0x0b, 0x41, 0x54, 0x54, 0x45, 0x53, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x0c, 0x12, 0x0c, + 0x0a, 0x08, 0x43, 0x53, 0x41, 0x46, 0x5f, 0x56, 0x45, 0x58, 0x10, 0x08, 0x12, 0x1f, 0x0a, 0x1b, + 0x43, 0x53, 0x41, 0x46, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x5f, 0x41, 0x44, 0x56, 0x49, 0x53, 0x4f, 0x52, 0x59, 0x10, 0x0d, 0x12, 0x1a, 0x0a, + 0x16, 0x43, 0x53, 0x41, 0x46, 0x5f, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x41, + 0x44, 0x56, 0x49, 0x53, 0x4f, 0x52, 0x59, 0x10, 0x0e, 0x12, 0x23, 0x0a, 0x1f, 0x43, 0x53, 0x41, + 0x46, 0x5f, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x49, 0x4e, 0x43, 0x49, 0x44, + 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x0f, 0x12, 0x1a, + 0x0a, 0x16, 
0x47, 0x49, 0x54, 0x4c, 0x41, 0x42, 0x5f, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, + 0x59, 0x5f, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, 0x5a, 0x41, + 0x50, 0x5f, 0x44, 0x41, 0x53, 0x54, 0x5f, 0x5a, 0x49, 0x50, 0x10, 0x11, 0x12, 0x16, 0x0a, 0x12, + 0x42, 0x4c, 0x41, 0x43, 0x4b, 0x44, 0x55, 0x43, 0x4b, 0x5f, 0x53, 0x43, 0x41, 0x5f, 0x4a, 0x53, + 0x4f, 0x4e, 0x10, 0x12, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x57, 0x49, 0x53, 0x54, 0x43, 0x4c, 0x49, + 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, + 0x47, 0x48, 0x41, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x10, 0x14, + 0x12, 0x14, 0x0a, 0x10, 0x47, 0x48, 0x41, 0x53, 0x5f, 0x53, 0x45, 0x43, 0x52, 0x45, 0x54, 0x5f, + 0x53, 0x43, 0x41, 0x4e, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x47, 0x48, 0x41, 0x53, 0x5f, 0x44, + 0x45, 0x50, 0x45, 0x4e, 0x44, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x10, 0x16, + 0x12, 0x0e, 0x0a, 0x0a, 0x4a, 0x41, 0x43, 0x4f, 0x43, 0x4f, 0x5f, 0x58, 0x4d, 0x4c, 0x10, 0x17, + 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x4c, 0x53, 0x41, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x45, 0x4e, 0x41, + 0x4e, 0x43, 0x45, 0x10, 0x18, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x4c, 0x4f, + 0x4f, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, + 0x54, 0x10, 0x19, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x4c, 0x4f, 0x4f, 0x50, + 0x5f, 0x50, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x1a, 0x3a, 0x02, 0x18, 0x01, 0x3a, 0x02, + 0x18, 0x01, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, 0x12, 0x38, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x17, 0xba, 0x48, + 0x14, 0x72, 0x12, 0x0a, 0x10, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 0x76, 0x31, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x23, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x0f, 0xba, 0x48, 0x0c, 0x72, 0x0a, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x45, 0x0a, 0x04, 0x73, 0x70, 0x65, + 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, + 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, 0x53, 0x70, + 0x65, 0x63, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, + 0x22, 0x9b, 0x03, 0x0a, 0x14, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x56, 0x32, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4a, 0x0a, 0x09, 0x6d, 0x61, 0x74, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 
0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x6e, 0x76, 0x5f, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x65, + 0x6e, 0x76, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x06, 0x72, + 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x52, 0x06, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x12, + 0x39, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x0d, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0c, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x46, + 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0e, 0xba, 0x48, 0x0b, 0x72, + 0x09, 0x32, 0x07, 0x5e, 0x5b, 0x5c, 0x77, 0x5d, 0x2b, 0x24, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x09, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x09, 0x6d, + 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x47, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, + 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x8a, 0x04, 0x0a, 0x10, 0x50, 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x41, 0x74, 0x74, 0x61, + 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, + 0x72, 0x65, 0x66, 0x12, 0x39, 0x0a, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x48, 0x00, 0x52, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x12, 0x52, + 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x36, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, + 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x43, + 0x0a, 0x04, 0x77, 0x69, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x77, + 0x69, 0x74, 0x68, 0x12, 0x63, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x42, 0x3f, 0xba, 0x48, 0x3c, 0x92, 0x01, + 0x39, 0x22, 0x37, 0x72, 0x35, 0x32, 0x33, 0x5e, 0x28, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, + 0x2d, 0x5d, 0x2b, 0x5c, 0x2f, 0x29, 0x3f, 0x28, 0x5b, 0x5e, 0x5c, 0x73, 0x5c, 0x2f, 0x5d, 0x2b, + 0x5c, 0x2f, 0x29, 0x28, 0x5b, 0x5e, 0x5c, 0x73, 0x40, 0x5c, 0x2f, 0x5d, 0x2b, 0x29, 0x28, 0x40, + 0x5b, 0x5e, 0x5c, 0x73, 0x40, 0x5d, 0x2b, 0x29, 0x3f, 0x24, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x61, 0x74, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x67, 0x61, 0x74, 0x65, 0x1a, 0x37, 0x0a, 0x09, + 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x26, 0x0a, 0x10, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x0f, 0x0a, + 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x05, 0xba, 0x48, 0x02, 0x08, 0x01, 0x22, 0xf6, + 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x49, 0x0a, 0x0b, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, + 0xba, 0x48, 0x25, 0x72, 0x23, 0x0a, 0x21, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 
0x6f, 0x6f, + 0x70, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x76, 0x31, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x0d, 0xba, 0x48, 0x0a, 0x72, 0x08, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x04, 0x73, 0x70, + 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, + 0x01, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0x92, 0x03, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x97, 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x82, 0x01, 0xba, 0x48, 0x7f, 0xba, 0x01, 0x7c, 0x0a, 0x0d, 0x6e, 0x61, + 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, 0x31, 0x31, 0x32, 0x33, 0x12, 0x3a, 0x6d, 0x75, 0x73, + 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6c, + 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x73, + 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, + 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, + 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, 0x30, + 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, + 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x50, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, + 0x6f, 0x72, 
0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xf8, 0x03, 0x0a, + 0x0a, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x12, 0x18, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x08, 0x65, + 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x12, 0x5d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, + 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, + 0x42, 0x0e, 0xba, 0x48, 0x09, 0x82, 0x01, 0x06, 0x20, 0x01, 0x20, 0x03, 0x20, 0x0b, 0x18, 0x01, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x56, 0x32, 0x52, 0x08, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, + 0x3d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x8c, + 0x01, 0xba, 0x48, 0x88, 0x01, 0x1a, 0x85, 0x01, 0x0a, 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x73, 0x70, 0x65, 0x63, 0x12, 0x36, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72, 0x20, 0x73, 0x70, 0x65, + 0x63, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x6f, 0x72, 0x20, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x20, 0x6d, 0x75, 0x73, 0x74, + 0x20, 0x62, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x1a, 0x3f, 0x68, 0x61, + 0x73, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x61, 0x74, 0x68, 0x29, 0x20, 0x7c, 0x7c, 0x20, + 0x68, 0x61, 0x73, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, + 0x64, 0x29, 0x20, 0x7c, 0x7c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x29, 0x20, 0x3e, 0x20, 0x30, 0x42, 0x08, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x96, 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x81, 0x01, 0xba, 0x48, 0x7e, 0xba, 0x01, 0x7b, 0x0a, + 0x14, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x67, 0x6f, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x76, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 
0x3a, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, - 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, - 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, - 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, - 0x27, 0x29, 0x18, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x06, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x77, 0x6f, + 0x2e, 0x1a, 0x27, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, + 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x5d, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, + 0x5a, 0x30, 0x2d, 0x39, 0x5f, 0x5d, 0x2a, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x18, + 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x56, 0x32, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, + 0x1c, 0x0a, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x12, 0x57, 0x0a, + 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, - 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0a, 0x73, 0x6b, 0x69, 0x70, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xcb, - 0x04, 0x0a, 0x0c, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1d, 0x0a, 0x19, 0x4d, 0x41, 0x54, 0x45, 0x52, 0x49, 0x41, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4f, - 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x49, 0x4d, 0x41, 
0x47, 0x45, 0x10, 0x02, 0x12, - 0x0c, 0x0a, 0x08, 0x41, 0x52, 0x54, 0x49, 0x46, 0x41, 0x43, 0x54, 0x10, 0x03, 0x12, 0x17, 0x0a, - 0x13, 0x53, 0x42, 0x4f, 0x4d, 0x5f, 0x43, 0x59, 0x43, 0x4c, 0x4f, 0x4e, 0x45, 0x44, 0x58, 0x5f, - 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x42, 0x4f, 0x4d, 0x5f, 0x53, - 0x50, 0x44, 0x58, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x55, - 0x4e, 0x49, 0x54, 0x5f, 0x58, 0x4d, 0x4c, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x45, - 0x4e, 0x56, 0x45, 0x58, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x48, 0x45, 0x4c, 0x4d, 0x5f, 0x43, - 0x48, 0x41, 0x52, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x41, 0x52, 0x49, 0x46, 0x10, - 0x09, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x56, 0x49, 0x44, 0x45, 0x4e, 0x43, 0x45, 0x10, 0x0b, 0x12, - 0x0f, 0x0a, 0x0b, 0x41, 0x54, 0x54, 0x45, 0x53, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x0c, - 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x53, 0x41, 0x46, 0x5f, 0x56, 0x45, 0x58, 0x10, 0x08, 0x12, 0x1f, - 0x0a, 0x1b, 0x43, 0x53, 0x41, 0x46, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x41, 0x4c, 0x5f, 0x41, 0x44, 0x56, 0x49, 0x53, 0x4f, 0x52, 0x59, 0x10, 0x0d, 0x12, - 0x1a, 0x0a, 0x16, 0x43, 0x53, 0x41, 0x46, 0x5f, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, - 0x5f, 0x41, 0x44, 0x56, 0x49, 0x53, 0x4f, 0x52, 0x59, 0x10, 0x0e, 0x12, 0x23, 0x0a, 0x1f, 0x43, - 0x53, 0x41, 0x46, 0x5f, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x49, 0x4e, 0x43, - 0x49, 0x44, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x0f, - 0x12, 0x1a, 0x0a, 0x16, 0x47, 0x49, 0x54, 0x4c, 0x41, 0x42, 0x5f, 0x53, 0x45, 0x43, 0x55, 0x52, - 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, - 0x5a, 0x41, 0x50, 0x5f, 0x44, 0x41, 0x53, 0x54, 0x5f, 0x5a, 0x49, 0x50, 0x10, 0x11, 0x12, 0x16, - 0x0a, 0x12, 0x42, 0x4c, 0x41, 0x43, 0x4b, 0x44, 0x55, 0x43, 0x4b, 0x5f, 0x53, 0x43, 0x41, 0x5f, - 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x12, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x57, 0x49, 0x53, 0x54, 0x43, - 0x4c, 0x49, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x13, 0x12, 0x12, - 0x0a, 0x0e, 0x47, 0x48, 0x41, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x43, 0x41, 0x4e, - 0x10, 0x14, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x48, 0x41, 0x53, 0x5f, 0x53, 0x45, 0x43, 0x52, 0x45, - 0x54, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x47, 0x48, 0x41, 0x53, - 0x5f, 0x44, 0x45, 0x50, 0x45, 0x4e, 0x44, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x53, 0x43, 0x41, 0x4e, - 0x10, 0x16, 0x12, 0x0e, 0x0a, 0x0a, 0x4a, 0x41, 0x43, 0x4f, 0x43, 0x4f, 0x5f, 0x58, 0x4d, 0x4c, - 0x10, 0x17, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x4c, 0x53, 0x41, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x45, - 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x18, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x49, 0x4e, - 0x4c, 0x4f, 0x4f, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x54, - 0x45, 0x58, 0x54, 0x10, 0x19, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x4c, 0x4f, - 0x4f, 0x50, 0x5f, 0x50, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x1a, 0x3a, 0x02, 0x18, 0x01, - 0x3a, 0x02, 0x18, 0x01, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, - 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, 0x12, 0x38, 0x0a, 0x0b, 0x61, 0x70, 0x69, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x17, - 0xba, 0x48, 0x14, 0x72, 0x12, 0x0a, 0x10, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 
0x70, - 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x76, 0x31, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0f, 0xba, 0x48, 0x0c, 0x72, 0x0a, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, - 0x63, 0x74, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, 0x82, 0x01, 0x02, 0x20, 0x03, + 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x05, 0xba, 0x48, 0x02, 0x08, 0x01, 0x22, 0x50, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x08, 0x65, 0x6d, + 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, + 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x42, 0x0f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x05, 0xba, 0x48, 0x02, 0x08, 0x01, 0x22, 0xc9, 0x01, 0x0a, 0x15, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x48, + 0x0a, 0x04, 0x77, 0x69, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x74, + 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x04, 0x77, 0x69, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x69, 0x70, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x1a, 0x37, 0x0a, 0x09, + 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb5, 0x07, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x49, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xba, 0x48, 0x25, 0x72, + 0x23, 0x0a, 0x21, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x76, 0x31, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x26, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, + 0xba, 0x48, 0x0f, 0x72, 0x0d, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 
0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, - 0x01, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x45, 0x0a, 0x04, 0x73, - 0x70, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x77, 0x6f, 0x72, 0x6b, + 0x01, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x04, 0x73, + 0x70, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, - 0x53, 0x70, 0x65, 0x63, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x04, 0x73, 0x70, - 0x65, 0x63, 0x22, 0x9b, 0x03, 0x0a, 0x14, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4a, 0x0a, 0x09, 0x6d, - 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x09, 0x6d, 0x61, - 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x6e, 0x76, 0x5f, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0c, 0x65, 0x6e, 0x76, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x42, 0x0a, - 0x06, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x52, 0x06, 0x72, 0x75, 0x6e, 0x6e, 0x65, - 0x72, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x0d, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x52, - 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x41, 0x0a, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x46, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x42, 0x0e, 0xba, 0x48, - 0x0b, 0x72, 0x09, 0x32, 0x07, 0x5e, 0x5b, 0x5c, 0x77, 0x5d, 0x2b, 0x24, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x08, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x09, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, - 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x42, 0x06, 0xba, 0x48, 0x03, + 0xc8, 0x01, 0x01, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x1a, 0x9d, 0x01, 0x0a, 0x0f, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x50, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x34, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, + 0x38, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x1a, 0xa7, 0x01, 0x0a, 0x13, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x12, 0x47, 0x0a, 0x09, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x47, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x8a, 0x04, 0x0a, 0x10, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, - 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, - 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x39, 0x0a, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 
0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, - 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, - 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x43, 0x0a, 0x04, 0x77, 0x69, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, + 0x69, 0x6f, 0x6e, 0x1a, 0xd7, 0x02, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x12, 0x57, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, 0x61, 0x63, - 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x04, 0x77, 0x69, 0x74, 0x68, 0x12, 0x63, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x42, 0x3f, 0xba, 0x48, 0x3c, - 0x92, 0x01, 0x39, 0x22, 0x37, 0x72, 0x35, 0x32, 0x33, 0x5e, 0x28, 0x5b, 0x61, 0x2d, 0x7a, 0x30, - 0x2d, 0x39, 0x2d, 0x5d, 0x2b, 0x5c, 0x2f, 0x29, 0x3f, 0x28, 0x5b, 0x5e, 0x5c, 0x73, 0x5c, 0x2f, - 0x5d, 0x2b, 0x5c, 0x2f, 0x29, 0x28, 0x5b, 0x5e, 0x5c, 0x73, 0x40, 0x5c, 0x2f, 0x5d, 0x2b, 0x29, - 0x28, 0x40, 0x5b, 0x5e, 0x5c, 0x73, 0x40, 0x5d, 0x2b, 0x29, 0x3f, 0x24, 0x52, 0x0c, 0x72, 0x65, - 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x61, - 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x67, 0x61, 0x74, 0x65, 0x1a, 0x37, - 0x0a, 0x09, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x26, 0x0a, 0x10, 0x4d, 0x61, 0x74, 0x65, 0x72, - 0x69, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, - 0x0f, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x05, 0xba, 0x48, 0x02, 0x08, 0x01, - 0x22, 0xf6, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x49, 0x0a, 0x0b, 0x61, - 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x28, 0xba, 0x48, 0x25, 0x72, 0x23, 0x0a, 0x21, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, - 0x6f, 0x6f, 0x70, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x76, 0x31, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xba, 0x48, 0x0a, 0x72, 0x08, 0x0a, 0x06, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 
0x79, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, - 0x01, 0x01, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x04, - 0x73, 0x70, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x42, 0x06, 0xba, 0x48, 0x03, - 0xc8, 0x01, 0x01, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0x92, 0x03, 0x0a, 0x08, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x97, 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x82, 0x01, 0xba, 0x48, 0x7f, 0xba, 0x01, 0x7c, 0x0a, 0x0d, - 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, 0x31, 0x31, 0x32, 0x33, 0x12, 0x3a, 0x6d, - 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, - 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6c, 0x65, 0x74, 0x74, 0x65, - 0x72, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, - 0x20, 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, - 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, - 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x1a, 0x3e, 0x0a, - 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x0f, 0x0a, - 0x0d, 0x5f, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xf7, - 0x03, 0x0a, 0x0a, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x12, 0x18, 0x0a, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x48, - 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 
0x68, 0x12, 0x20, 0x0a, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, - 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, - 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x12, 0x5c, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, - 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, - 0x70, 0x65, 0x42, 0x0d, 0xba, 0x48, 0x08, 0x82, 0x01, 0x05, 0x22, 0x03, 0x01, 0x03, 0x0b, 0x18, - 0x01, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x56, 0x32, 0x52, 0x08, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x12, 0x3d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, - 0x8c, 0x01, 0xba, 0x48, 0x88, 0x01, 0x1a, 0x85, 0x01, 0x0a, 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x73, 0x70, 0x65, 0x63, 0x12, 0x36, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72, 0x20, 0x73, 0x70, - 0x65, 0x63, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x6f, 0x72, 0x20, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x20, 0x6d, 0x75, 0x73, - 0x74, 0x20, 0x62, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x1a, 0x3f, 0x68, - 0x61, 0x73, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x61, 0x74, 0x68, 0x29, 0x20, 0x7c, 0x7c, - 0x20, 0x68, 0x61, 0x73, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, - 0x65, 0x64, 0x29, 0x20, 0x7c, 0x7c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x28, 0x74, 0x68, 0x69, 0x73, - 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x29, 0x20, 0x3e, 0x20, 0x30, 0x42, 0x08, - 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x96, 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x81, 0x01, 0xba, 0x48, 0x7e, 0xba, 0x01, 0x7b, - 0x0a, 0x14, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x67, 0x6f, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x76, 0x61, - 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x3a, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, - 0x61, 0x73, 0x65, 0x20, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 
0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, - 0x73, 0x2e, 0x1a, 0x27, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, - 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x5d, 0x5b, 0x61, 0x2d, 0x7a, 0x41, - 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5f, 0x5d, 0x2a, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, - 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x22, 0xad, 0x01, 0x0a, 0x0c, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x56, 0x32, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x1c, 0x0a, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, - 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x42, 0x09, 0xba, 0x48, 0x06, 0x82, 0x01, 0x03, 0x22, - 0x01, 0x03, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x05, 0xba, 0x48, 0x02, 0x08, 0x01, 0x22, 0x50, 0x0a, 0x09, 0x41, 0x75, 0x74, - 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x08, - 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x08, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x42, 0x0f, 0x0a, 0x06, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x05, 0xba, 0x48, 0x02, 0x08, 0x01, 0x22, 0xc9, 0x01, 0x0a, 0x15, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x74, 0x74, 0x61, 0x63, - 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x72, 0x65, 0x66, - 0x12, 0x48, 0x0a, 0x04, 0x77, 0x69, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x77, 0x69, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, - 0x69, 0x70, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x1a, 0x37, - 0x0a, 0x09, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb5, 0x07, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x49, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xba, 0x48, - 0x25, 0x72, 0x23, 0x0a, 0x21, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2e, - 0x64, 0x65, 0x76, 0x2f, 0x76, 0x31, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x12, 0xba, 0x48, 0x0f, 0x72, 0x0d, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x06, 0xba, 0x48, 0x03, - 0xc8, 0x01, 0x01, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, - 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x77, 0x6f, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x4d, 0x61, 0x74, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x41, 0x0a, 0x08, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x42, 0x06, 0xba, - 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x1a, 0x9d, 0x01, 0x0a, 0x0f, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, - 0x50, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x34, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x12, 0x38, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, - 0x70, 0x75, 
0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x1a, 0xa7, 0x01, 0x0a, 0x13, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x09, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, - 0x6c, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x47, 0x0a, 0x0b, - 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, - 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd7, 0x02, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x39, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x4d, - 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, - 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x41, 0x0a, 0x08, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, - 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x3a, 0x7f, - 0xba, 0x48, 0x7c, 0x1a, 0x7a, 0x0a, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6d, 0x61, 0x74, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x33, 0x69, 0x66, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x69, - 0x73, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x2c, 0x20, 0x74, 0x79, 0x70, 0x65, - 0x20, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x61, 0x20, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x33, 0x21, 0x68, 0x61, 0x73, - 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x7c, 0x7c, 0x20, 0x68, - 0x61, 0x73, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x26, 0x26, - 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x42, - 0x4d, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x70, 0x6c, 0x61, 0x6e, 0x65, 
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2f, 0x76, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x3a, 0x7f, 0xba, 0x48, + 0x7c, 0x1a, 0x7a, 0x0a, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x12, 0x33, 0x69, 0x66, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x69, 0x73, 0x20, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x2c, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x73, + 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x61, 0x20, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x33, 0x21, 0x68, 0x61, 0x73, 0x28, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x7c, 0x7c, 0x20, 0x68, 0x61, 0x73, + 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x26, 0x26, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x42, 0x4d, 0x5a, + 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, + 0x6f, 0x6f, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/app/controlplane/api/workflowcontract/v1/crafting_schema_test.go b/app/controlplane/api/workflowcontract/v1/crafting_schema_test.go index dd013f775..d20b28125 100644 --- a/app/controlplane/api/workflowcontract/v1/crafting_schema_test.go +++ b/app/controlplane/api/workflowcontract/v1/crafting_schema_test.go @@ -1,5 +1,5 @@ // -// Copyright 2023 The Chainloop Authors. +// Copyright 2023-2025 The Chainloop Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ import ( "errors" "testing" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" v1 "github.com/chainloop-dev/chainloop/app/controlplane/api/workflowcontract/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -107,7 +107,7 @@ func TestPolicyAttachment(t *testing.T) { policy: &v1.PolicyAttachment{}, wantErr: true, nviolations: 1, - firstViolation: "policy", + firstViolation: "policy: exactly one field is required in oneof", }, { desc: "policy ref", @@ -143,7 +143,7 @@ func TestPolicyAttachment(t *testing.T) { Requirements: []string{"foo bar", "foo@bar@1.2.3", "foo @1.2", "123@123 ", "foo"}, }, nviolations: 5, - firstViolation: "requirements[0]", + firstViolation: "requirements[0]: value does not match regex pattern `^([a-z0-9-]+\\/)?([^\\s\\/]+\\/)([^\\s@\\/]+)(@[^\\s@]+)?$`", wantErr: true, }, } @@ -160,7 +160,7 @@ func TestPolicyAttachment(t *testing.T) { valErr := &protovalidate.ValidationError{} errors.As(err, &valErr) assert.Equal(t, tc.nviolations, len(valErr.Violations)) - assert.Equal(t, tc.firstViolation, valErr.Violations[0].FieldPath) + assert.Equal(t, tc.firstViolation, valErr.Violations[0].String()) assert.Contains(t, err.Error(), tc.firstViolation) return diff --git a/app/controlplane/api/workflowcontract/v1/policy_test.go b/app/controlplane/api/workflowcontract/v1/policy_test.go index 42c6a425f..451bc0cee 100644 --- a/app/controlplane/api/workflowcontract/v1/policy_test.go +++ b/app/controlplane/api/workflowcontract/v1/policy_test.go @@ -20,7 +20,7 @@ package v1_test import ( "testing" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" v1 "github.com/chainloop-dev/chainloop/app/controlplane/api/workflowcontract/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/app/controlplane/cmd/main.go b/app/controlplane/cmd/main.go index 7c1e501f7..a4423e0d7 100644 --- a/app/controlplane/cmd/main.go +++ b/app/controlplane/cmd/main.go @@ -23,7 +23,7 @@ import ( "os" "time" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" "github.com/getsentry/sentry-go" "github.com/nats-io/nats.go" flag "github.com/spf13/pflag" @@ -280,6 +280,6 @@ func initSentry(c *conf.Bootstrap, logger log.Logger) (cleanupFunc func(), err e return } -func newProtoValidator() (*protovalidate.Validator, error) { +func newProtoValidator() (protovalidate.Validator, error) { return protovalidate.New() } diff --git a/app/controlplane/internal/conf/buf.gen.yaml b/app/controlplane/internal/conf/buf.gen.yaml index 404d2d082..7b15ad0f9 100644 --- a/app/controlplane/internal/conf/buf.gen.yaml +++ b/app/controlplane/internal/conf/buf.gen.yaml @@ -1,5 +1,5 @@ -version: v1 +version: v2 plugins: - - name: go + - local: protoc-gen-go out: . opt: paths=source_relative diff --git a/app/controlplane/internal/conf/buf.lock b/app/controlplane/internal/conf/buf.lock deleted file mode 100644 index 612343998..000000000 --- a/app/controlplane/internal/conf/buf.lock +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by buf. DO NOT EDIT. 
-version: v1 -deps: - - remote: buf.build - owner: bufbuild - repository: protovalidate - commit: 46a4cf4ba1094a34bcd89a6c67163b4b - digest: shake256:436ce453801917c11bc7b21d66bcfae87da2aceb804a041487be1e51dc9fbc219e61ea6a552db7a7aa6d63bb5efd0f3ed5fe3d4c42d4f750d0eb35f14144e3b6 - - remote: buf.build - owner: googleapis - repository: googleapis - commit: f0e53af8f2fc4556b94f482688b57223 - digest: shake256:de26a277fc28b8b411ecf58729d78d32fcf15090ffd998a4469225b17889bfb51442eaab04bb7a8d88d203ecdf0a9febd4ffd52c18ed1c2229160c7bd353ca95 - - remote: buf.build - owner: kratos-go - repository: kratos - commit: e1d52e944e3845c6862a566db322432d diff --git a/app/controlplane/internal/conf/buf.yaml b/app/controlplane/internal/conf/buf.yaml deleted file mode 100644 index 74ccb2956..000000000 --- a/app/controlplane/internal/conf/buf.yaml +++ /dev/null @@ -1,14 +0,0 @@ -version: v1 -breaking: - use: - - FILE -deps: - - buf.build/googleapis/googleapis:4ed3bc159a8b4ac68fe253218760d035 - - buf.build/bufbuild/protovalidate:b983156c5e994cc9892e0ce3e64e17e0 - - buf.build/kratos-go/kratos:e1d52e944e3845c6862a566db322432d -lint: - use: - - STANDARD - ignore_only: - PACKAGE_DEFINED: - - ./conf.proto diff --git a/app/controlplane/internal/server/grpc.go b/app/controlplane/internal/server/grpc.go index 2a830b2dc..4fac0922a 100644 --- a/app/controlplane/internal/server/grpc.go +++ b/app/controlplane/internal/server/grpc.go @@ -29,7 +29,7 @@ import ( "github.com/chainloop-dev/chainloop/app/controlplane/pkg/biz" "github.com/chainloop-dev/chainloop/app/controlplane/pkg/jwt/user" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" "github.com/chainloop-dev/chainloop/app/controlplane/internal/service" "github.com/chainloop-dev/chainloop/app/controlplane/internal/usercontext" "github.com/chainloop-dev/chainloop/pkg/credentials" @@ -91,7 +91,7 @@ type Opts struct { FederatedConfig *conf.FederatedAuthentication BootstrapConfig *conf.Bootstrap Credentials credentials.ReaderWriter - Validator *protovalidate.Validator + Validator protovalidate.Validator } // NewGRPCServer new a gRPC server. 
diff --git a/app/controlplane/internal/server/http.go b/app/controlplane/internal/server/http.go index 7e5e8c5f5..00936f6ad 100644 --- a/app/controlplane/internal/server/http.go +++ b/app/controlplane/internal/server/http.go @@ -23,7 +23,7 @@ import ( middlewares_http "github.com/chainloop-dev/chainloop/pkg/middlewares/http" "github.com/golang-jwt/jwt/v4" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" v1 "github.com/chainloop-dev/chainloop/app/controlplane/api/controlplane/v1" "github.com/chainloop-dev/chainloop/app/controlplane/internal/service" "github.com/go-kratos/kratos/v2/middleware" @@ -104,7 +104,7 @@ func NewHTTPServer(opts *Opts, grpcSrv *grpc.Server) (*http.Server, error) { // Custom kratos middleware based on the protovalidate middleware // https://pkg.go.dev/github.com/grpc-ecosystem/go-grpc-middleware/v2@v2.1.0/interceptors/protovalidate#UnaryServerInterceptor // but tailored specifically for the http server -func protoValidateHTTPMiddleware(validator *protovalidate.Validator) middleware.Middleware { +func protoValidateHTTPMiddleware(validator protovalidate.Validator) middleware.Middleware { return func(handler middleware.Handler) middleware.Handler { return func(ctx context.Context, req interface{}) (reply interface{}, err error) { switch msg := req.(type) { diff --git a/app/controlplane/pkg/biz/casclient.go b/app/controlplane/pkg/biz/casclient.go index 1a155aaab..b72dbd238 100644 --- a/app/controlplane/pkg/biz/casclient.go +++ b/app/controlplane/pkg/biz/casclient.go @@ -21,7 +21,7 @@ import ( "fmt" "io" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" conf "github.com/chainloop-dev/chainloop/app/controlplane/internal/conf/controlplane/config/v1" casJWT "github.com/chainloop-dev/chainloop/internal/robotaccount/cas" diff --git a/app/controlplane/pkg/biz/workflowcontract.go b/app/controlplane/pkg/biz/workflowcontract.go index 3f596803c..db4aa5a9d 100644 --- a/app/controlplane/pkg/biz/workflowcontract.go +++ b/app/controlplane/pkg/biz/workflowcontract.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/bufbuild/protoyaml-go" + "buf.build/go/protoyaml" "github.com/chainloop-dev/chainloop/app/controlplane/pkg/auditor/events" schemav1 "github.com/chainloop-dev/chainloop/app/controlplane/api/workflowcontract/v1" diff --git a/app/controlplane/pkg/conf/buf.gen.yaml b/app/controlplane/pkg/conf/buf.gen.yaml index 404d2d082..7b15ad0f9 100644 --- a/app/controlplane/pkg/conf/buf.gen.yaml +++ b/app/controlplane/pkg/conf/buf.gen.yaml @@ -1,5 +1,5 @@ -version: v1 +version: v2 plugins: - - name: go + - local: protoc-gen-go out: . opt: paths=source_relative diff --git a/app/controlplane/pkg/conf/buf.lock b/app/controlplane/pkg/conf/buf.lock deleted file mode 100644 index 612343998..000000000 --- a/app/controlplane/pkg/conf/buf.lock +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by buf. DO NOT EDIT. 
-version: v1 -deps: - - remote: buf.build - owner: bufbuild - repository: protovalidate - commit: 46a4cf4ba1094a34bcd89a6c67163b4b - digest: shake256:436ce453801917c11bc7b21d66bcfae87da2aceb804a041487be1e51dc9fbc219e61ea6a552db7a7aa6d63bb5efd0f3ed5fe3d4c42d4f750d0eb35f14144e3b6 - - remote: buf.build - owner: googleapis - repository: googleapis - commit: f0e53af8f2fc4556b94f482688b57223 - digest: shake256:de26a277fc28b8b411ecf58729d78d32fcf15090ffd998a4469225b17889bfb51442eaab04bb7a8d88d203ecdf0a9febd4ffd52c18ed1c2229160c7bd353ca95 - - remote: buf.build - owner: kratos-go - repository: kratos - commit: e1d52e944e3845c6862a566db322432d diff --git a/app/controlplane/pkg/conf/buf.yaml b/app/controlplane/pkg/conf/buf.yaml deleted file mode 100644 index 590e81281..000000000 --- a/app/controlplane/pkg/conf/buf.yaml +++ /dev/null @@ -1,10 +0,0 @@ -version: v1 -breaking: - use: - - FILE -deps: - - buf.build/googleapis/googleapis:4ed3bc159a8b4ac68fe253218760d035 - - buf.build/bufbuild/protovalidate:b983156c5e994cc9892e0ce3e64e17e0 -lint: - use: - - STANDARD diff --git a/app/controlplane/pkg/data/ent/apitoken.go b/app/controlplane/pkg/data/ent/apitoken.go index a74ae30a2..518b7b6fe 100644 --- a/app/controlplane/pkg/data/ent/apitoken.go +++ b/app/controlplane/pkg/data/ent/apitoken.go @@ -101,7 +101,7 @@ func (*APIToken) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the APIToken fields. -func (at *APIToken) assignValues(columns []string, values []any) error { +func (_m *APIToken) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -111,66 +111,66 @@ func (at *APIToken) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - at.ID = *value + _m.ID = *value } case apitoken.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - at.Name = value.String + _m.Name = value.String } case apitoken.FieldDescription: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field description", values[i]) } else if value.Valid { - at.Description = value.String + _m.Description = value.String } case apitoken.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - at.CreatedAt = value.Time + _m.CreatedAt = value.Time } case apitoken.FieldExpiresAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field expires_at", values[i]) } else if value.Valid { - at.ExpiresAt = value.Time + _m.ExpiresAt = value.Time } case apitoken.FieldRevokedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field revoked_at", values[i]) } else if value.Valid { - at.RevokedAt = value.Time + _m.RevokedAt = value.Time } case apitoken.FieldLastUsedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field last_used_at", values[i]) } else if value.Valid { - at.LastUsedAt = value.Time + _m.LastUsedAt = value.Time } case apitoken.FieldOrganizationID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field 
organization_id", values[i]) } else if value != nil { - at.OrganizationID = *value + _m.OrganizationID = *value } case apitoken.FieldProjectID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field project_id", values[i]) } else if value != nil { - at.ProjectID = *value + _m.ProjectID = *value } case apitoken.FieldPolicies: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field policies", values[i]) } else if value != nil && len(*value) > 0 { - if err := json.Unmarshal(*value, &at.Policies); err != nil { + if err := json.Unmarshal(*value, &_m.Policies); err != nil { return fmt.Errorf("unmarshal field policies: %w", err) } } default: - at.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -178,69 +178,69 @@ func (at *APIToken) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the APIToken. // This includes values selected through modifiers, order, etc. -func (at *APIToken) Value(name string) (ent.Value, error) { - return at.selectValues.Get(name) +func (_m *APIToken) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryOrganization queries the "organization" edge of the APIToken entity. -func (at *APIToken) QueryOrganization() *OrganizationQuery { - return NewAPITokenClient(at.config).QueryOrganization(at) +func (_m *APIToken) QueryOrganization() *OrganizationQuery { + return NewAPITokenClient(_m.config).QueryOrganization(_m) } // QueryProject queries the "project" edge of the APIToken entity. -func (at *APIToken) QueryProject() *ProjectQuery { - return NewAPITokenClient(at.config).QueryProject(at) +func (_m *APIToken) QueryProject() *ProjectQuery { + return NewAPITokenClient(_m.config).QueryProject(_m) } // Update returns a builder for updating this APIToken. // Note that you need to call APIToken.Unwrap() before calling this method if this APIToken // was returned from a transaction, and the transaction was committed or rolled back. -func (at *APIToken) Update() *APITokenUpdateOne { - return NewAPITokenClient(at.config).UpdateOne(at) +func (_m *APIToken) Update() *APITokenUpdateOne { + return NewAPITokenClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the APIToken entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (at *APIToken) Unwrap() *APIToken { - _tx, ok := at.config.driver.(*txDriver) +func (_m *APIToken) Unwrap() *APIToken { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: APIToken is not a transactional entity") } - at.config.driver = _tx.drv - return at + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. 
-func (at *APIToken) String() string { +func (_m *APIToken) String() string { var builder strings.Builder builder.WriteString("APIToken(") - builder.WriteString(fmt.Sprintf("id=%v, ", at.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("name=") - builder.WriteString(at.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("description=") - builder.WriteString(at.Description) + builder.WriteString(_m.Description) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(at.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("expires_at=") - builder.WriteString(at.ExpiresAt.Format(time.ANSIC)) + builder.WriteString(_m.ExpiresAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("revoked_at=") - builder.WriteString(at.RevokedAt.Format(time.ANSIC)) + builder.WriteString(_m.RevokedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("last_used_at=") - builder.WriteString(at.LastUsedAt.Format(time.ANSIC)) + builder.WriteString(_m.LastUsedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("organization_id=") - builder.WriteString(fmt.Sprintf("%v", at.OrganizationID)) + builder.WriteString(fmt.Sprintf("%v", _m.OrganizationID)) builder.WriteString(", ") builder.WriteString("project_id=") - builder.WriteString(fmt.Sprintf("%v", at.ProjectID)) + builder.WriteString(fmt.Sprintf("%v", _m.ProjectID)) builder.WriteString(", ") builder.WriteString("policies=") - builder.WriteString(fmt.Sprintf("%v", at.Policies)) + builder.WriteString(fmt.Sprintf("%v", _m.Policies)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/apitoken_create.go b/app/controlplane/pkg/data/ent/apitoken_create.go index 39b9eebfc..28b2fdc59 100644 --- a/app/controlplane/pkg/data/ent/apitoken_create.go +++ b/app/controlplane/pkg/data/ent/apitoken_create.go @@ -28,145 +28,145 @@ type APITokenCreate struct { } // SetName sets the "name" field. -func (atc *APITokenCreate) SetName(s string) *APITokenCreate { - atc.mutation.SetName(s) - return atc +func (_c *APITokenCreate) SetName(v string) *APITokenCreate { + _c.mutation.SetName(v) + return _c } // SetDescription sets the "description" field. -func (atc *APITokenCreate) SetDescription(s string) *APITokenCreate { - atc.mutation.SetDescription(s) - return atc +func (_c *APITokenCreate) SetDescription(v string) *APITokenCreate { + _c.mutation.SetDescription(v) + return _c } // SetNillableDescription sets the "description" field if the given value is not nil. -func (atc *APITokenCreate) SetNillableDescription(s *string) *APITokenCreate { - if s != nil { - atc.SetDescription(*s) +func (_c *APITokenCreate) SetNillableDescription(v *string) *APITokenCreate { + if v != nil { + _c.SetDescription(*v) } - return atc + return _c } // SetCreatedAt sets the "created_at" field. -func (atc *APITokenCreate) SetCreatedAt(t time.Time) *APITokenCreate { - atc.mutation.SetCreatedAt(t) - return atc +func (_c *APITokenCreate) SetCreatedAt(v time.Time) *APITokenCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
-func (atc *APITokenCreate) SetNillableCreatedAt(t *time.Time) *APITokenCreate { - if t != nil { - atc.SetCreatedAt(*t) +func (_c *APITokenCreate) SetNillableCreatedAt(v *time.Time) *APITokenCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return atc + return _c } // SetExpiresAt sets the "expires_at" field. -func (atc *APITokenCreate) SetExpiresAt(t time.Time) *APITokenCreate { - atc.mutation.SetExpiresAt(t) - return atc +func (_c *APITokenCreate) SetExpiresAt(v time.Time) *APITokenCreate { + _c.mutation.SetExpiresAt(v) + return _c } // SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. -func (atc *APITokenCreate) SetNillableExpiresAt(t *time.Time) *APITokenCreate { - if t != nil { - atc.SetExpiresAt(*t) +func (_c *APITokenCreate) SetNillableExpiresAt(v *time.Time) *APITokenCreate { + if v != nil { + _c.SetExpiresAt(*v) } - return atc + return _c } // SetRevokedAt sets the "revoked_at" field. -func (atc *APITokenCreate) SetRevokedAt(t time.Time) *APITokenCreate { - atc.mutation.SetRevokedAt(t) - return atc +func (_c *APITokenCreate) SetRevokedAt(v time.Time) *APITokenCreate { + _c.mutation.SetRevokedAt(v) + return _c } // SetNillableRevokedAt sets the "revoked_at" field if the given value is not nil. -func (atc *APITokenCreate) SetNillableRevokedAt(t *time.Time) *APITokenCreate { - if t != nil { - atc.SetRevokedAt(*t) +func (_c *APITokenCreate) SetNillableRevokedAt(v *time.Time) *APITokenCreate { + if v != nil { + _c.SetRevokedAt(*v) } - return atc + return _c } // SetLastUsedAt sets the "last_used_at" field. -func (atc *APITokenCreate) SetLastUsedAt(t time.Time) *APITokenCreate { - atc.mutation.SetLastUsedAt(t) - return atc +func (_c *APITokenCreate) SetLastUsedAt(v time.Time) *APITokenCreate { + _c.mutation.SetLastUsedAt(v) + return _c } // SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. -func (atc *APITokenCreate) SetNillableLastUsedAt(t *time.Time) *APITokenCreate { - if t != nil { - atc.SetLastUsedAt(*t) +func (_c *APITokenCreate) SetNillableLastUsedAt(v *time.Time) *APITokenCreate { + if v != nil { + _c.SetLastUsedAt(*v) } - return atc + return _c } // SetOrganizationID sets the "organization_id" field. -func (atc *APITokenCreate) SetOrganizationID(u uuid.UUID) *APITokenCreate { - atc.mutation.SetOrganizationID(u) - return atc +func (_c *APITokenCreate) SetOrganizationID(v uuid.UUID) *APITokenCreate { + _c.mutation.SetOrganizationID(v) + return _c } // SetProjectID sets the "project_id" field. -func (atc *APITokenCreate) SetProjectID(u uuid.UUID) *APITokenCreate { - atc.mutation.SetProjectID(u) - return atc +func (_c *APITokenCreate) SetProjectID(v uuid.UUID) *APITokenCreate { + _c.mutation.SetProjectID(v) + return _c } // SetNillableProjectID sets the "project_id" field if the given value is not nil. -func (atc *APITokenCreate) SetNillableProjectID(u *uuid.UUID) *APITokenCreate { - if u != nil { - atc.SetProjectID(*u) +func (_c *APITokenCreate) SetNillableProjectID(v *uuid.UUID) *APITokenCreate { + if v != nil { + _c.SetProjectID(*v) } - return atc + return _c } // SetPolicies sets the "policies" field. -func (atc *APITokenCreate) SetPolicies(a []*authz.Policy) *APITokenCreate { - atc.mutation.SetPolicies(a) - return atc +func (_c *APITokenCreate) SetPolicies(v []*authz.Policy) *APITokenCreate { + _c.mutation.SetPolicies(v) + return _c } // SetID sets the "id" field. 
-func (atc *APITokenCreate) SetID(u uuid.UUID) *APITokenCreate { - atc.mutation.SetID(u) - return atc +func (_c *APITokenCreate) SetID(v uuid.UUID) *APITokenCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (atc *APITokenCreate) SetNillableID(u *uuid.UUID) *APITokenCreate { - if u != nil { - atc.SetID(*u) +func (_c *APITokenCreate) SetNillableID(v *uuid.UUID) *APITokenCreate { + if v != nil { + _c.SetID(*v) } - return atc + return _c } // SetOrganization sets the "organization" edge to the Organization entity. -func (atc *APITokenCreate) SetOrganization(o *Organization) *APITokenCreate { - return atc.SetOrganizationID(o.ID) +func (_c *APITokenCreate) SetOrganization(v *Organization) *APITokenCreate { + return _c.SetOrganizationID(v.ID) } // SetProject sets the "project" edge to the Project entity. -func (atc *APITokenCreate) SetProject(p *Project) *APITokenCreate { - return atc.SetProjectID(p.ID) +func (_c *APITokenCreate) SetProject(v *Project) *APITokenCreate { + return _c.SetProjectID(v.ID) } // Mutation returns the APITokenMutation object of the builder. -func (atc *APITokenCreate) Mutation() *APITokenMutation { - return atc.mutation +func (_c *APITokenCreate) Mutation() *APITokenMutation { + return _c.mutation } // Save creates the APIToken in the database. -func (atc *APITokenCreate) Save(ctx context.Context) (*APIToken, error) { - atc.defaults() - return withHooks(ctx, atc.sqlSave, atc.mutation, atc.hooks) +func (_c *APITokenCreate) Save(ctx context.Context) (*APIToken, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (atc *APITokenCreate) SaveX(ctx context.Context) *APIToken { - v, err := atc.Save(ctx) +func (_c *APITokenCreate) SaveX(ctx context.Context) *APIToken { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -174,53 +174,53 @@ func (atc *APITokenCreate) SaveX(ctx context.Context) *APIToken { } // Exec executes the query. -func (atc *APITokenCreate) Exec(ctx context.Context) error { - _, err := atc.Save(ctx) +func (_c *APITokenCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (atc *APITokenCreate) ExecX(ctx context.Context) { - if err := atc.Exec(ctx); err != nil { +func (_c *APITokenCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (atc *APITokenCreate) defaults() { - if _, ok := atc.mutation.CreatedAt(); !ok { +func (_c *APITokenCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := apitoken.DefaultCreatedAt() - atc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := atc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := apitoken.DefaultID() - atc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (atc *APITokenCreate) check() error { - if _, ok := atc.mutation.Name(); !ok { +func (_c *APITokenCreate) check() error { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "APIToken.name"`)} } - if _, ok := atc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "APIToken.created_at"`)} } - if _, ok := atc.mutation.OrganizationID(); !ok { + if _, ok := _c.mutation.OrganizationID(); !ok { return &ValidationError{Name: "organization_id", err: errors.New(`ent: missing required field "APIToken.organization_id"`)} } - if len(atc.mutation.OrganizationIDs()) == 0 { + if len(_c.mutation.OrganizationIDs()) == 0 { return &ValidationError{Name: "organization", err: errors.New(`ent: missing required edge "APIToken.organization"`)} } return nil } -func (atc *APITokenCreate) sqlSave(ctx context.Context) (*APIToken, error) { - if err := atc.check(); err != nil { +func (_c *APITokenCreate) sqlSave(ctx context.Context) (*APIToken, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := atc.createSpec() - if err := sqlgraph.CreateNode(ctx, atc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -233,50 +233,50 @@ func (atc *APITokenCreate) sqlSave(ctx context.Context) (*APIToken, error) { return nil, err } } - atc.mutation.id = &_node.ID - atc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (atc *APITokenCreate) createSpec() (*APIToken, *sqlgraph.CreateSpec) { +func (_c *APITokenCreate) createSpec() (*APIToken, *sqlgraph.CreateSpec) { var ( - _node = &APIToken{config: atc.config} + _node = &APIToken{config: _c.config} _spec = sqlgraph.NewCreateSpec(apitoken.Table, sqlgraph.NewFieldSpec(apitoken.FieldID, field.TypeUUID)) ) - _spec.OnConflict = atc.conflict - if id, ok := atc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := atc.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { _spec.SetField(apitoken.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := atc.mutation.Description(); ok { + if value, ok := _c.mutation.Description(); ok { _spec.SetField(apitoken.FieldDescription, field.TypeString, value) _node.Description = value } - if value, ok := atc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(apitoken.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := atc.mutation.ExpiresAt(); ok { + if value, ok := _c.mutation.ExpiresAt(); ok { _spec.SetField(apitoken.FieldExpiresAt, field.TypeTime, value) _node.ExpiresAt = value } - if value, ok := atc.mutation.RevokedAt(); ok { + if value, ok := _c.mutation.RevokedAt(); ok { _spec.SetField(apitoken.FieldRevokedAt, field.TypeTime, value) _node.RevokedAt = value } - if value, ok := atc.mutation.LastUsedAt(); ok { + if value, ok := _c.mutation.LastUsedAt(); ok { _spec.SetField(apitoken.FieldLastUsedAt, field.TypeTime, value) _node.LastUsedAt = value } - if value, ok := atc.mutation.Policies(); ok { + if value, ok := _c.mutation.Policies(); ok { _spec.SetField(apitoken.FieldPolicies, field.TypeJSON, value) _node.Policies = value } - if 
nodes := atc.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -293,7 +293,7 @@ func (atc *APITokenCreate) createSpec() (*APIToken, *sqlgraph.CreateSpec) { _node.OrganizationID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := atc.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -329,10 +329,10 @@ func (atc *APITokenCreate) createSpec() (*APIToken, *sqlgraph.CreateSpec) { // SetName(v+v). // }). // Exec(ctx) -func (atc *APITokenCreate) OnConflict(opts ...sql.ConflictOption) *APITokenUpsertOne { - atc.conflict = opts +func (_c *APITokenCreate) OnConflict(opts ...sql.ConflictOption) *APITokenUpsertOne { + _c.conflict = opts return &APITokenUpsertOne{ - create: atc, + create: _c, } } @@ -342,10 +342,10 @@ func (atc *APITokenCreate) OnConflict(opts ...sql.ConflictOption) *APITokenUpser // client.APIToken.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (atc *APITokenCreate) OnConflictColumns(columns ...string) *APITokenUpsertOne { - atc.conflict = append(atc.conflict, sql.ConflictColumns(columns...)) +func (_c *APITokenCreate) OnConflictColumns(columns ...string) *APITokenUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &APITokenUpsertOne{ - create: atc, + create: _c, } } @@ -723,16 +723,16 @@ type APITokenCreateBulk struct { } // Save creates the APIToken entities in the database. -func (atcb *APITokenCreateBulk) Save(ctx context.Context) ([]*APIToken, error) { - if atcb.err != nil { - return nil, atcb.err +func (_c *APITokenCreateBulk) Save(ctx context.Context) ([]*APIToken, error) { + if _c.err != nil { + return nil, _c.err } - specs := make([]*sqlgraph.CreateSpec, len(atcb.builders)) - nodes := make([]*APIToken, len(atcb.builders)) - mutators := make([]Mutator, len(atcb.builders)) - for i := range atcb.builders { + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*APIToken, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := atcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*APITokenMutation) @@ -746,12 +746,12 @@ func (atcb *APITokenCreateBulk) Save(ctx context.Context) ([]*APIToken, error) { var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, atcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = atcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, atcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -771,7 +771,7 @@ func (atcb *APITokenCreateBulk) Save(ctx context.Context) ([]*APIToken, error) { }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, atcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -779,8 +779,8 @@ func (atcb *APITokenCreateBulk) Save(ctx context.Context) ([]*APIToken, error) { } // SaveX is like Save, but panics if an error occurs. -func (atcb *APITokenCreateBulk) SaveX(ctx context.Context) []*APIToken { - v, err := atcb.Save(ctx) +func (_c *APITokenCreateBulk) SaveX(ctx context.Context) []*APIToken { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -788,14 +788,14 @@ func (atcb *APITokenCreateBulk) SaveX(ctx context.Context) []*APIToken { } // Exec executes the query. -func (atcb *APITokenCreateBulk) Exec(ctx context.Context) error { - _, err := atcb.Save(ctx) +func (_c *APITokenCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (atcb *APITokenCreateBulk) ExecX(ctx context.Context) { - if err := atcb.Exec(ctx); err != nil { +func (_c *APITokenCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -815,10 +815,10 @@ func (atcb *APITokenCreateBulk) ExecX(ctx context.Context) { // SetName(v+v). // }). // Exec(ctx) -func (atcb *APITokenCreateBulk) OnConflict(opts ...sql.ConflictOption) *APITokenUpsertBulk { - atcb.conflict = opts +func (_c *APITokenCreateBulk) OnConflict(opts ...sql.ConflictOption) *APITokenUpsertBulk { + _c.conflict = opts return &APITokenUpsertBulk{ - create: atcb, + create: _c, } } @@ -828,10 +828,10 @@ func (atcb *APITokenCreateBulk) OnConflict(opts ...sql.ConflictOption) *APIToken // client.APIToken.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (atcb *APITokenCreateBulk) OnConflictColumns(columns ...string) *APITokenUpsertBulk { - atcb.conflict = append(atcb.conflict, sql.ConflictColumns(columns...)) +func (_c *APITokenCreateBulk) OnConflictColumns(columns ...string) *APITokenUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &APITokenUpsertBulk{ - create: atcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/apitoken_delete.go b/app/controlplane/pkg/data/ent/apitoken_delete.go index 6e7e7e9bb..bdad89646 100644 --- a/app/controlplane/pkg/data/ent/apitoken_delete.go +++ b/app/controlplane/pkg/data/ent/apitoken_delete.go @@ -20,56 +20,56 @@ type APITokenDelete struct { } // Where appends a list predicates to the APITokenDelete builder. -func (atd *APITokenDelete) Where(ps ...predicate.APIToken) *APITokenDelete { - atd.mutation.Where(ps...) - return atd +func (_d *APITokenDelete) Where(ps ...predicate.APIToken) *APITokenDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (atd *APITokenDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, atd.sqlExec, atd.mutation, atd.hooks) +func (_d *APITokenDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. 
-func (atd *APITokenDelete) ExecX(ctx context.Context) int { - n, err := atd.Exec(ctx) +func (_d *APITokenDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (atd *APITokenDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *APITokenDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(apitoken.Table, sqlgraph.NewFieldSpec(apitoken.FieldID, field.TypeUUID)) - if ps := atd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, atd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - atd.mutation.done = true + _d.mutation.done = true return affected, err } // APITokenDeleteOne is the builder for deleting a single APIToken entity. type APITokenDeleteOne struct { - atd *APITokenDelete + _d *APITokenDelete } // Where appends a list predicates to the APITokenDelete builder. -func (atdo *APITokenDeleteOne) Where(ps ...predicate.APIToken) *APITokenDeleteOne { - atdo.atd.mutation.Where(ps...) - return atdo +func (_d *APITokenDeleteOne) Where(ps ...predicate.APIToken) *APITokenDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (atdo *APITokenDeleteOne) Exec(ctx context.Context) error { - n, err := atdo.atd.Exec(ctx) +func (_d *APITokenDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (atdo *APITokenDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (atdo *APITokenDeleteOne) ExecX(ctx context.Context) { - if err := atdo.Exec(ctx); err != nil { +func (_d *APITokenDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/apitoken_query.go b/app/controlplane/pkg/data/ent/apitoken_query.go index 779152434..d07f6bf1d 100644 --- a/app/controlplane/pkg/data/ent/apitoken_query.go +++ b/app/controlplane/pkg/data/ent/apitoken_query.go @@ -35,44 +35,44 @@ type APITokenQuery struct { } // Where adds a new predicate for the APITokenQuery builder. -func (atq *APITokenQuery) Where(ps ...predicate.APIToken) *APITokenQuery { - atq.predicates = append(atq.predicates, ps...) - return atq +func (_q *APITokenQuery) Where(ps ...predicate.APIToken) *APITokenQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (atq *APITokenQuery) Limit(limit int) *APITokenQuery { - atq.ctx.Limit = &limit - return atq +func (_q *APITokenQuery) Limit(limit int) *APITokenQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (atq *APITokenQuery) Offset(offset int) *APITokenQuery { - atq.ctx.Offset = &offset - return atq +func (_q *APITokenQuery) Offset(offset int) *APITokenQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
-func (atq *APITokenQuery) Unique(unique bool) *APITokenQuery { - atq.ctx.Unique = &unique - return atq +func (_q *APITokenQuery) Unique(unique bool) *APITokenQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (atq *APITokenQuery) Order(o ...apitoken.OrderOption) *APITokenQuery { - atq.order = append(atq.order, o...) - return atq +func (_q *APITokenQuery) Order(o ...apitoken.OrderOption) *APITokenQuery { + _q.order = append(_q.order, o...) + return _q } // QueryOrganization chains the current query on the "organization" edge. -func (atq *APITokenQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: atq.config}).Query() +func (_q *APITokenQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := atq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := atq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -81,20 +81,20 @@ func (atq *APITokenQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, apitoken.OrganizationTable, apitoken.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(atq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryProject chains the current query on the "project" edge. -func (atq *APITokenQuery) QueryProject() *ProjectQuery { - query := (&ProjectClient{config: atq.config}).Query() +func (_q *APITokenQuery) QueryProject() *ProjectQuery { + query := (&ProjectClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := atq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := atq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -103,7 +103,7 @@ func (atq *APITokenQuery) QueryProject() *ProjectQuery { sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, apitoken.ProjectTable, apitoken.ProjectColumn), ) - fromU = sqlgraph.SetNeighbors(atq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -111,8 +111,8 @@ func (atq *APITokenQuery) QueryProject() *ProjectQuery { // First returns the first APIToken entity from the query. // Returns a *NotFoundError when no APIToken was found. -func (atq *APITokenQuery) First(ctx context.Context) (*APIToken, error) { - nodes, err := atq.Limit(1).All(setContextOp(ctx, atq.ctx, ent.OpQueryFirst)) +func (_q *APITokenQuery) First(ctx context.Context) (*APIToken, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -123,8 +123,8 @@ func (atq *APITokenQuery) First(ctx context.Context) (*APIToken, error) { } // FirstX is like First, but panics if an error occurs. 
-func (atq *APITokenQuery) FirstX(ctx context.Context) *APIToken { - node, err := atq.First(ctx) +func (_q *APITokenQuery) FirstX(ctx context.Context) *APIToken { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -133,9 +133,9 @@ func (atq *APITokenQuery) FirstX(ctx context.Context) *APIToken { // FirstID returns the first APIToken ID from the query. // Returns a *NotFoundError when no APIToken ID was found. -func (atq *APITokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *APITokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = atq.Limit(1).IDs(setContextOp(ctx, atq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -146,8 +146,8 @@ func (atq *APITokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) } // FirstIDX is like FirstID, but panics if an error occurs. -func (atq *APITokenQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := atq.FirstID(ctx) +func (_q *APITokenQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -157,8 +157,8 @@ func (atq *APITokenQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single APIToken entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one APIToken entity is found. // Returns a *NotFoundError when no APIToken entities are found. -func (atq *APITokenQuery) Only(ctx context.Context) (*APIToken, error) { - nodes, err := atq.Limit(2).All(setContextOp(ctx, atq.ctx, ent.OpQueryOnly)) +func (_q *APITokenQuery) Only(ctx context.Context) (*APIToken, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -173,8 +173,8 @@ func (atq *APITokenQuery) Only(ctx context.Context) (*APIToken, error) { } // OnlyX is like Only, but panics if an error occurs. -func (atq *APITokenQuery) OnlyX(ctx context.Context) *APIToken { - node, err := atq.Only(ctx) +func (_q *APITokenQuery) OnlyX(ctx context.Context) *APIToken { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -184,9 +184,9 @@ func (atq *APITokenQuery) OnlyX(ctx context.Context) *APIToken { // OnlyID is like Only, but returns the only APIToken ID in the query. // Returns a *NotSingularError when more than one APIToken ID is found. // Returns a *NotFoundError when no entities are found. -func (atq *APITokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *APITokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = atq.Limit(2).IDs(setContextOp(ctx, atq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -201,8 +201,8 @@ func (atq *APITokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (atq *APITokenQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := atq.OnlyID(ctx) +func (_q *APITokenQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -210,18 +210,18 @@ func (atq *APITokenQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of APITokens. 
-func (atq *APITokenQuery) All(ctx context.Context) ([]*APIToken, error) { - ctx = setContextOp(ctx, atq.ctx, ent.OpQueryAll) - if err := atq.prepareQuery(ctx); err != nil { +func (_q *APITokenQuery) All(ctx context.Context) ([]*APIToken, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*APIToken, *APITokenQuery]() - return withInterceptors[[]*APIToken](ctx, atq, qr, atq.inters) + return withInterceptors[[]*APIToken](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (atq *APITokenQuery) AllX(ctx context.Context) []*APIToken { - nodes, err := atq.All(ctx) +func (_q *APITokenQuery) AllX(ctx context.Context) []*APIToken { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -229,20 +229,20 @@ func (atq *APITokenQuery) AllX(ctx context.Context) []*APIToken { } // IDs executes the query and returns a list of APIToken IDs. -func (atq *APITokenQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if atq.ctx.Unique == nil && atq.path != nil { - atq.Unique(true) +func (_q *APITokenQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, atq.ctx, ent.OpQueryIDs) - if err = atq.Select(apitoken.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(apitoken.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (atq *APITokenQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := atq.IDs(ctx) +func (_q *APITokenQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -250,17 +250,17 @@ func (atq *APITokenQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (atq *APITokenQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, atq.ctx, ent.OpQueryCount) - if err := atq.prepareQuery(ctx); err != nil { +func (_q *APITokenQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, atq, querierCount[*APITokenQuery](), atq.inters) + return withInterceptors[int](ctx, _q, querierCount[*APITokenQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (atq *APITokenQuery) CountX(ctx context.Context) int { - count, err := atq.Count(ctx) +func (_q *APITokenQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -268,9 +268,9 @@ func (atq *APITokenQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (atq *APITokenQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, atq.ctx, ent.OpQueryExist) - switch _, err := atq.FirstID(ctx); { +func (_q *APITokenQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -281,8 +281,8 @@ func (atq *APITokenQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. 
-func (atq *APITokenQuery) ExistX(ctx context.Context) bool { - exist, err := atq.Exist(ctx) +func (_q *APITokenQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -291,45 +291,45 @@ func (atq *APITokenQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the APITokenQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (atq *APITokenQuery) Clone() *APITokenQuery { - if atq == nil { +func (_q *APITokenQuery) Clone() *APITokenQuery { + if _q == nil { return nil } return &APITokenQuery{ - config: atq.config, - ctx: atq.ctx.Clone(), - order: append([]apitoken.OrderOption{}, atq.order...), - inters: append([]Interceptor{}, atq.inters...), - predicates: append([]predicate.APIToken{}, atq.predicates...), - withOrganization: atq.withOrganization.Clone(), - withProject: atq.withProject.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]apitoken.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.APIToken{}, _q.predicates...), + withOrganization: _q.withOrganization.Clone(), + withProject: _q.withProject.Clone(), // clone intermediate query. - sql: atq.sql.Clone(), - path: atq.path, - modifiers: append([]func(*sql.Selector){}, atq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (atq *APITokenQuery) WithOrganization(opts ...func(*OrganizationQuery)) *APITokenQuery { - query := (&OrganizationClient{config: atq.config}).Query() +func (_q *APITokenQuery) WithOrganization(opts ...func(*OrganizationQuery)) *APITokenQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - atq.withOrganization = query - return atq + _q.withOrganization = query + return _q } // WithProject tells the query-builder to eager-load the nodes that are connected to // the "project" edge. The optional arguments are used to configure the query builder of the edge. -func (atq *APITokenQuery) WithProject(opts ...func(*ProjectQuery)) *APITokenQuery { - query := (&ProjectClient{config: atq.config}).Query() +func (_q *APITokenQuery) WithProject(opts ...func(*ProjectQuery)) *APITokenQuery { + query := (&ProjectClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - atq.withProject = query - return atq + _q.withProject = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -346,10 +346,10 @@ func (atq *APITokenQuery) WithProject(opts ...func(*ProjectQuery)) *APITokenQuer // GroupBy(apitoken.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (atq *APITokenQuery) GroupBy(field string, fields ...string) *APITokenGroupBy { - atq.ctx.Fields = append([]string{field}, fields...) - grbuild := &APITokenGroupBy{build: atq} - grbuild.flds = &atq.ctx.Fields +func (_q *APITokenQuery) GroupBy(field string, fields ...string) *APITokenGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &APITokenGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = apitoken.Label grbuild.scan = grbuild.Scan return grbuild @@ -367,83 +367,83 @@ func (atq *APITokenQuery) GroupBy(field string, fields ...string) *APITokenGroup // client.APIToken.Query(). // Select(apitoken.FieldName). // Scan(ctx, &v) -func (atq *APITokenQuery) Select(fields ...string) *APITokenSelect { - atq.ctx.Fields = append(atq.ctx.Fields, fields...) - sbuild := &APITokenSelect{APITokenQuery: atq} +func (_q *APITokenQuery) Select(fields ...string) *APITokenSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &APITokenSelect{APITokenQuery: _q} sbuild.label = apitoken.Label - sbuild.flds, sbuild.scan = &atq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a APITokenSelect configured with the given aggregations. -func (atq *APITokenQuery) Aggregate(fns ...AggregateFunc) *APITokenSelect { - return atq.Select().Aggregate(fns...) +func (_q *APITokenQuery) Aggregate(fns ...AggregateFunc) *APITokenSelect { + return _q.Select().Aggregate(fns...) } -func (atq *APITokenQuery) prepareQuery(ctx context.Context) error { - for _, inter := range atq.inters { +func (_q *APITokenQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, atq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range atq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !apitoken.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if atq.path != nil { - prev, err := atq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - atq.sql = prev + _q.sql = prev } return nil } -func (atq *APITokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*APIToken, error) { +func (_q *APITokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*APIToken, error) { var ( nodes = []*APIToken{} - _spec = atq.querySpec() + _spec = _q.querySpec() loadedTypes = [2]bool{ - atq.withOrganization != nil, - atq.withProject != nil, + _q.withOrganization != nil, + _q.withProject != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*APIToken).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &APIToken{config: atq.config} + node := &APIToken{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(atq.modifiers) > 0 { - _spec.Modifiers = atq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, atq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := atq.withOrganization; query != nil { - if err := atq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *APIToken, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } } - if query := atq.withProject; query != nil { - if err := atq.loadProject(ctx, query, nodes, nil, + if query := 
_q.withProject; query != nil { + if err := _q.loadProject(ctx, query, nodes, nil, func(n *APIToken, e *Project) { n.Edges.Project = e }); err != nil { return nil, err } @@ -451,7 +451,7 @@ func (atq *APITokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AP return nodes, nil } -func (atq *APITokenQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*APIToken, init func(*APIToken), assign func(*APIToken, *Organization)) error { +func (_q *APITokenQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*APIToken, init func(*APIToken), assign func(*APIToken, *Organization)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*APIToken) for i := range nodes { @@ -480,7 +480,7 @@ func (atq *APITokenQuery) loadOrganization(ctx context.Context, query *Organizat } return nil } -func (atq *APITokenQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*APIToken, init func(*APIToken), assign func(*APIToken, *Project)) error { +func (_q *APITokenQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*APIToken, init func(*APIToken), assign func(*APIToken, *Project)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*APIToken) for i := range nodes { @@ -510,27 +510,27 @@ func (atq *APITokenQuery) loadProject(ctx context.Context, query *ProjectQuery, return nil } -func (atq *APITokenQuery) sqlCount(ctx context.Context) (int, error) { - _spec := atq.querySpec() - if len(atq.modifiers) > 0 { - _spec.Modifiers = atq.modifiers +func (_q *APITokenQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = atq.ctx.Fields - if len(atq.ctx.Fields) > 0 { - _spec.Unique = atq.ctx.Unique != nil && *atq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, atq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (atq *APITokenQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *APITokenQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(apitoken.Table, apitoken.Columns, sqlgraph.NewFieldSpec(apitoken.FieldID, field.TypeUUID)) - _spec.From = atq.sql - if unique := atq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if atq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := atq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, apitoken.FieldID) for i := range fields { @@ -538,27 +538,27 @@ func (atq *APITokenQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if atq.withOrganization != nil { + if _q.withOrganization != nil { _spec.Node.AddColumnOnce(apitoken.FieldOrganizationID) } - if atq.withProject != nil { + if _q.withProject != nil { _spec.Node.AddColumnOnce(apitoken.FieldProjectID) } } - if ps := atq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := atq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := atq.ctx.Offset; offset != nil { + if offset := 
_q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := atq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -568,36 +568,36 @@ func (atq *APITokenQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (atq *APITokenQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(atq.driver.Dialect()) +func (_q *APITokenQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(apitoken.Table) - columns := atq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = apitoken.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if atq.sql != nil { - selector = atq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if atq.ctx.Unique != nil && *atq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range atq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range atq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range atq.order { + for _, p := range _q.order { p(selector) } - if offset := atq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := atq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -606,33 +606,33 @@ func (atq *APITokenQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (atq *APITokenQuery) ForUpdate(opts ...sql.LockOption) *APITokenQuery { - if atq.driver.Dialect() == dialect.Postgres { - atq.Unique(false) +func (_q *APITokenQuery) ForUpdate(opts ...sql.LockOption) *APITokenQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - atq.modifiers = append(atq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return atq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (atq *APITokenQuery) ForShare(opts ...sql.LockOption) *APITokenQuery { - if atq.driver.Dialect() == dialect.Postgres { - atq.Unique(false) +func (_q *APITokenQuery) ForShare(opts ...sql.LockOption) *APITokenQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - atq.modifiers = append(atq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return atq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (atq *APITokenQuery) Modify(modifiers ...func(s *sql.Selector)) *APITokenSelect { - atq.modifiers = append(atq.modifiers, modifiers...) - return atq.Select() +func (_q *APITokenQuery) Modify(modifiers ...func(s *sql.Selector)) *APITokenSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // APITokenGroupBy is the group-by builder for APIToken entities. 
@@ -642,41 +642,41 @@ type APITokenGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (atgb *APITokenGroupBy) Aggregate(fns ...AggregateFunc) *APITokenGroupBy { - atgb.fns = append(atgb.fns, fns...) - return atgb +func (_g *APITokenGroupBy) Aggregate(fns ...AggregateFunc) *APITokenGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (atgb *APITokenGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, atgb.build.ctx, ent.OpQueryGroupBy) - if err := atgb.build.prepareQuery(ctx); err != nil { +func (_g *APITokenGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*APITokenQuery, *APITokenGroupBy](ctx, atgb.build, atgb, atgb.build.inters, v) + return scanWithInterceptors[*APITokenQuery, *APITokenGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (atgb *APITokenGroupBy) sqlScan(ctx context.Context, root *APITokenQuery, v any) error { +func (_g *APITokenGroupBy) sqlScan(ctx context.Context, root *APITokenQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(atgb.fns)) - for _, fn := range atgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*atgb.flds)+len(atgb.fns)) - for _, f := range *atgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*atgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := atgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -690,27 +690,27 @@ type APITokenSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (ats *APITokenSelect) Aggregate(fns ...AggregateFunc) *APITokenSelect { - ats.fns = append(ats.fns, fns...) - return ats +func (_s *APITokenSelect) Aggregate(fns ...AggregateFunc) *APITokenSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. 
-func (ats *APITokenSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ats.ctx, ent.OpQuerySelect) - if err := ats.prepareQuery(ctx); err != nil { +func (_s *APITokenSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*APITokenQuery, *APITokenSelect](ctx, ats.APITokenQuery, ats, ats.inters, v) + return scanWithInterceptors[*APITokenQuery, *APITokenSelect](ctx, _s.APITokenQuery, _s, _s.inters, v) } -func (ats *APITokenSelect) sqlScan(ctx context.Context, root *APITokenQuery, v any) error { +func (_s *APITokenSelect) sqlScan(ctx context.Context, root *APITokenQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ats.fns)) - for _, fn := range ats.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*ats.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -718,7 +718,7 @@ func (ats *APITokenSelect) sqlScan(ctx context.Context, root *APITokenQuery, v a } rows := &sql.Rows{} query, args := selector.Query() - if err := ats.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -726,7 +726,7 @@ func (ats *APITokenSelect) sqlScan(ctx context.Context, root *APITokenQuery, v a } // Modify adds a query modifier for attaching custom logic to queries. -func (ats *APITokenSelect) Modify(modifiers ...func(s *sql.Selector)) *APITokenSelect { - ats.modifiers = append(ats.modifiers, modifiers...) - return ats +func (_s *APITokenSelect) Modify(modifiers ...func(s *sql.Selector)) *APITokenSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/apitoken_update.go b/app/controlplane/pkg/data/ent/apitoken_update.go index 36065597f..f378f60b8 100644 --- a/app/controlplane/pkg/data/ent/apitoken_update.go +++ b/app/controlplane/pkg/data/ent/apitoken_update.go @@ -29,178 +29,178 @@ type APITokenUpdate struct { } // Where appends a list predicates to the APITokenUpdate builder. -func (atu *APITokenUpdate) Where(ps ...predicate.APIToken) *APITokenUpdate { - atu.mutation.Where(ps...) - return atu +func (_u *APITokenUpdate) Where(ps ...predicate.APIToken) *APITokenUpdate { + _u.mutation.Where(ps...) + return _u } // SetDescription sets the "description" field. -func (atu *APITokenUpdate) SetDescription(s string) *APITokenUpdate { - atu.mutation.SetDescription(s) - return atu +func (_u *APITokenUpdate) SetDescription(v string) *APITokenUpdate { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (atu *APITokenUpdate) SetNillableDescription(s *string) *APITokenUpdate { - if s != nil { - atu.SetDescription(*s) +func (_u *APITokenUpdate) SetNillableDescription(v *string) *APITokenUpdate { + if v != nil { + _u.SetDescription(*v) } - return atu + return _u } // ClearDescription clears the value of the "description" field. 
-func (atu *APITokenUpdate) ClearDescription() *APITokenUpdate { - atu.mutation.ClearDescription() - return atu +func (_u *APITokenUpdate) ClearDescription() *APITokenUpdate { + _u.mutation.ClearDescription() + return _u } // SetExpiresAt sets the "expires_at" field. -func (atu *APITokenUpdate) SetExpiresAt(t time.Time) *APITokenUpdate { - atu.mutation.SetExpiresAt(t) - return atu +func (_u *APITokenUpdate) SetExpiresAt(v time.Time) *APITokenUpdate { + _u.mutation.SetExpiresAt(v) + return _u } // SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. -func (atu *APITokenUpdate) SetNillableExpiresAt(t *time.Time) *APITokenUpdate { - if t != nil { - atu.SetExpiresAt(*t) +func (_u *APITokenUpdate) SetNillableExpiresAt(v *time.Time) *APITokenUpdate { + if v != nil { + _u.SetExpiresAt(*v) } - return atu + return _u } // ClearExpiresAt clears the value of the "expires_at" field. -func (atu *APITokenUpdate) ClearExpiresAt() *APITokenUpdate { - atu.mutation.ClearExpiresAt() - return atu +func (_u *APITokenUpdate) ClearExpiresAt() *APITokenUpdate { + _u.mutation.ClearExpiresAt() + return _u } // SetRevokedAt sets the "revoked_at" field. -func (atu *APITokenUpdate) SetRevokedAt(t time.Time) *APITokenUpdate { - atu.mutation.SetRevokedAt(t) - return atu +func (_u *APITokenUpdate) SetRevokedAt(v time.Time) *APITokenUpdate { + _u.mutation.SetRevokedAt(v) + return _u } // SetNillableRevokedAt sets the "revoked_at" field if the given value is not nil. -func (atu *APITokenUpdate) SetNillableRevokedAt(t *time.Time) *APITokenUpdate { - if t != nil { - atu.SetRevokedAt(*t) +func (_u *APITokenUpdate) SetNillableRevokedAt(v *time.Time) *APITokenUpdate { + if v != nil { + _u.SetRevokedAt(*v) } - return atu + return _u } // ClearRevokedAt clears the value of the "revoked_at" field. -func (atu *APITokenUpdate) ClearRevokedAt() *APITokenUpdate { - atu.mutation.ClearRevokedAt() - return atu +func (_u *APITokenUpdate) ClearRevokedAt() *APITokenUpdate { + _u.mutation.ClearRevokedAt() + return _u } // SetLastUsedAt sets the "last_used_at" field. -func (atu *APITokenUpdate) SetLastUsedAt(t time.Time) *APITokenUpdate { - atu.mutation.SetLastUsedAt(t) - return atu +func (_u *APITokenUpdate) SetLastUsedAt(v time.Time) *APITokenUpdate { + _u.mutation.SetLastUsedAt(v) + return _u } // SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. -func (atu *APITokenUpdate) SetNillableLastUsedAt(t *time.Time) *APITokenUpdate { - if t != nil { - atu.SetLastUsedAt(*t) +func (_u *APITokenUpdate) SetNillableLastUsedAt(v *time.Time) *APITokenUpdate { + if v != nil { + _u.SetLastUsedAt(*v) } - return atu + return _u } // ClearLastUsedAt clears the value of the "last_used_at" field. -func (atu *APITokenUpdate) ClearLastUsedAt() *APITokenUpdate { - atu.mutation.ClearLastUsedAt() - return atu +func (_u *APITokenUpdate) ClearLastUsedAt() *APITokenUpdate { + _u.mutation.ClearLastUsedAt() + return _u } // SetOrganizationID sets the "organization_id" field. -func (atu *APITokenUpdate) SetOrganizationID(u uuid.UUID) *APITokenUpdate { - atu.mutation.SetOrganizationID(u) - return atu +func (_u *APITokenUpdate) SetOrganizationID(v uuid.UUID) *APITokenUpdate { + _u.mutation.SetOrganizationID(v) + return _u } // SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. 
-func (atu *APITokenUpdate) SetNillableOrganizationID(u *uuid.UUID) *APITokenUpdate { - if u != nil { - atu.SetOrganizationID(*u) +func (_u *APITokenUpdate) SetNillableOrganizationID(v *uuid.UUID) *APITokenUpdate { + if v != nil { + _u.SetOrganizationID(*v) } - return atu + return _u } // SetProjectID sets the "project_id" field. -func (atu *APITokenUpdate) SetProjectID(u uuid.UUID) *APITokenUpdate { - atu.mutation.SetProjectID(u) - return atu +func (_u *APITokenUpdate) SetProjectID(v uuid.UUID) *APITokenUpdate { + _u.mutation.SetProjectID(v) + return _u } // SetNillableProjectID sets the "project_id" field if the given value is not nil. -func (atu *APITokenUpdate) SetNillableProjectID(u *uuid.UUID) *APITokenUpdate { - if u != nil { - atu.SetProjectID(*u) +func (_u *APITokenUpdate) SetNillableProjectID(v *uuid.UUID) *APITokenUpdate { + if v != nil { + _u.SetProjectID(*v) } - return atu + return _u } // ClearProjectID clears the value of the "project_id" field. -func (atu *APITokenUpdate) ClearProjectID() *APITokenUpdate { - atu.mutation.ClearProjectID() - return atu +func (_u *APITokenUpdate) ClearProjectID() *APITokenUpdate { + _u.mutation.ClearProjectID() + return _u } // SetPolicies sets the "policies" field. -func (atu *APITokenUpdate) SetPolicies(a []*authz.Policy) *APITokenUpdate { - atu.mutation.SetPolicies(a) - return atu +func (_u *APITokenUpdate) SetPolicies(v []*authz.Policy) *APITokenUpdate { + _u.mutation.SetPolicies(v) + return _u } -// AppendPolicies appends a to the "policies" field. -func (atu *APITokenUpdate) AppendPolicies(a []*authz.Policy) *APITokenUpdate { - atu.mutation.AppendPolicies(a) - return atu +// AppendPolicies appends value to the "policies" field. +func (_u *APITokenUpdate) AppendPolicies(v []*authz.Policy) *APITokenUpdate { + _u.mutation.AppendPolicies(v) + return _u } // ClearPolicies clears the value of the "policies" field. -func (atu *APITokenUpdate) ClearPolicies() *APITokenUpdate { - atu.mutation.ClearPolicies() - return atu +func (_u *APITokenUpdate) ClearPolicies() *APITokenUpdate { + _u.mutation.ClearPolicies() + return _u } // SetOrganization sets the "organization" edge to the Organization entity. -func (atu *APITokenUpdate) SetOrganization(o *Organization) *APITokenUpdate { - return atu.SetOrganizationID(o.ID) +func (_u *APITokenUpdate) SetOrganization(v *Organization) *APITokenUpdate { + return _u.SetOrganizationID(v.ID) } // SetProject sets the "project" edge to the Project entity. -func (atu *APITokenUpdate) SetProject(p *Project) *APITokenUpdate { - return atu.SetProjectID(p.ID) +func (_u *APITokenUpdate) SetProject(v *Project) *APITokenUpdate { + return _u.SetProjectID(v.ID) } // Mutation returns the APITokenMutation object of the builder. -func (atu *APITokenUpdate) Mutation() *APITokenMutation { - return atu.mutation +func (_u *APITokenUpdate) Mutation() *APITokenMutation { + return _u.mutation } // ClearOrganization clears the "organization" edge to the Organization entity. -func (atu *APITokenUpdate) ClearOrganization() *APITokenUpdate { - atu.mutation.ClearOrganization() - return atu +func (_u *APITokenUpdate) ClearOrganization() *APITokenUpdate { + _u.mutation.ClearOrganization() + return _u } // ClearProject clears the "project" edge to the Project entity. 
-func (atu *APITokenUpdate) ClearProject() *APITokenUpdate { - atu.mutation.ClearProject() - return atu +func (_u *APITokenUpdate) ClearProject() *APITokenUpdate { + _u.mutation.ClearProject() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. -func (atu *APITokenUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, atu.sqlSave, atu.mutation, atu.hooks) +func (_u *APITokenUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (atu *APITokenUpdate) SaveX(ctx context.Context) int { - affected, err := atu.Save(ctx) +func (_u *APITokenUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -208,80 +208,80 @@ func (atu *APITokenUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (atu *APITokenUpdate) Exec(ctx context.Context) error { - _, err := atu.Save(ctx) +func (_u *APITokenUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (atu *APITokenUpdate) ExecX(ctx context.Context) { - if err := atu.Exec(ctx); err != nil { +func (_u *APITokenUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (atu *APITokenUpdate) check() error { - if atu.mutation.OrganizationCleared() && len(atu.mutation.OrganizationIDs()) > 0 { +func (_u *APITokenUpdate) check() error { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "APIToken.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (atu *APITokenUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *APITokenUpdate { - atu.modifiers = append(atu.modifiers, modifiers...) - return atu +func (_u *APITokenUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *APITokenUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (atu *APITokenUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := atu.check(); err != nil { - return n, err +func (_u *APITokenUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(apitoken.Table, apitoken.Columns, sqlgraph.NewFieldSpec(apitoken.FieldID, field.TypeUUID)) - if ps := atu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := atu.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(apitoken.FieldDescription, field.TypeString, value) } - if atu.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(apitoken.FieldDescription, field.TypeString) } - if value, ok := atu.mutation.ExpiresAt(); ok { + if value, ok := _u.mutation.ExpiresAt(); ok { _spec.SetField(apitoken.FieldExpiresAt, field.TypeTime, value) } - if atu.mutation.ExpiresAtCleared() { + if _u.mutation.ExpiresAtCleared() { _spec.ClearField(apitoken.FieldExpiresAt, field.TypeTime) } - if value, ok := atu.mutation.RevokedAt(); ok { + if value, ok := _u.mutation.RevokedAt(); ok { _spec.SetField(apitoken.FieldRevokedAt, field.TypeTime, value) } - if atu.mutation.RevokedAtCleared() { + if _u.mutation.RevokedAtCleared() { _spec.ClearField(apitoken.FieldRevokedAt, field.TypeTime) } - if value, ok := atu.mutation.LastUsedAt(); ok { + if value, ok := _u.mutation.LastUsedAt(); ok { _spec.SetField(apitoken.FieldLastUsedAt, field.TypeTime, value) } - if atu.mutation.LastUsedAtCleared() { + if _u.mutation.LastUsedAtCleared() { _spec.ClearField(apitoken.FieldLastUsedAt, field.TypeTime) } - if value, ok := atu.mutation.Policies(); ok { + if value, ok := _u.mutation.Policies(); ok { _spec.SetField(apitoken.FieldPolicies, field.TypeJSON, value) } - if value, ok := atu.mutation.AppendedPolicies(); ok { + if value, ok := _u.mutation.AppendedPolicies(); ok { _spec.AddModifier(func(u *sql.UpdateBuilder) { sqljson.Append(u, apitoken.FieldPolicies, value) }) } - if atu.mutation.PoliciesCleared() { + if _u.mutation.PoliciesCleared() { _spec.ClearField(apitoken.FieldPolicies, field.TypeJSON) } - if atu.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -294,7 +294,7 @@ func (atu *APITokenUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := atu.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -310,7 +310,7 @@ func (atu *APITokenUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if atu.mutation.ProjectCleared() { + if _u.mutation.ProjectCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -323,7 +323,7 @@ func (atu *APITokenUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := atu.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -339,8 +339,8 @@ func (atu *APITokenUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = 
append(_spec.Edges.Add, edge) } - _spec.AddModifiers(atu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, atu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{apitoken.Label} } else if sqlgraph.IsConstraintError(err) { @@ -348,8 +348,8 @@ func (atu *APITokenUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - atu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // APITokenUpdateOne is the builder for updating a single APIToken entity. @@ -362,185 +362,185 @@ type APITokenUpdateOne struct { } // SetDescription sets the "description" field. -func (atuo *APITokenUpdateOne) SetDescription(s string) *APITokenUpdateOne { - atuo.mutation.SetDescription(s) - return atuo +func (_u *APITokenUpdateOne) SetDescription(v string) *APITokenUpdateOne { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (atuo *APITokenUpdateOne) SetNillableDescription(s *string) *APITokenUpdateOne { - if s != nil { - atuo.SetDescription(*s) +func (_u *APITokenUpdateOne) SetNillableDescription(v *string) *APITokenUpdateOne { + if v != nil { + _u.SetDescription(*v) } - return atuo + return _u } // ClearDescription clears the value of the "description" field. -func (atuo *APITokenUpdateOne) ClearDescription() *APITokenUpdateOne { - atuo.mutation.ClearDescription() - return atuo +func (_u *APITokenUpdateOne) ClearDescription() *APITokenUpdateOne { + _u.mutation.ClearDescription() + return _u } // SetExpiresAt sets the "expires_at" field. -func (atuo *APITokenUpdateOne) SetExpiresAt(t time.Time) *APITokenUpdateOne { - atuo.mutation.SetExpiresAt(t) - return atuo +func (_u *APITokenUpdateOne) SetExpiresAt(v time.Time) *APITokenUpdateOne { + _u.mutation.SetExpiresAt(v) + return _u } // SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. -func (atuo *APITokenUpdateOne) SetNillableExpiresAt(t *time.Time) *APITokenUpdateOne { - if t != nil { - atuo.SetExpiresAt(*t) +func (_u *APITokenUpdateOne) SetNillableExpiresAt(v *time.Time) *APITokenUpdateOne { + if v != nil { + _u.SetExpiresAt(*v) } - return atuo + return _u } // ClearExpiresAt clears the value of the "expires_at" field. -func (atuo *APITokenUpdateOne) ClearExpiresAt() *APITokenUpdateOne { - atuo.mutation.ClearExpiresAt() - return atuo +func (_u *APITokenUpdateOne) ClearExpiresAt() *APITokenUpdateOne { + _u.mutation.ClearExpiresAt() + return _u } // SetRevokedAt sets the "revoked_at" field. -func (atuo *APITokenUpdateOne) SetRevokedAt(t time.Time) *APITokenUpdateOne { - atuo.mutation.SetRevokedAt(t) - return atuo +func (_u *APITokenUpdateOne) SetRevokedAt(v time.Time) *APITokenUpdateOne { + _u.mutation.SetRevokedAt(v) + return _u } // SetNillableRevokedAt sets the "revoked_at" field if the given value is not nil. -func (atuo *APITokenUpdateOne) SetNillableRevokedAt(t *time.Time) *APITokenUpdateOne { - if t != nil { - atuo.SetRevokedAt(*t) +func (_u *APITokenUpdateOne) SetNillableRevokedAt(v *time.Time) *APITokenUpdateOne { + if v != nil { + _u.SetRevokedAt(*v) } - return atuo + return _u } // ClearRevokedAt clears the value of the "revoked_at" field. 
-func (atuo *APITokenUpdateOne) ClearRevokedAt() *APITokenUpdateOne { - atuo.mutation.ClearRevokedAt() - return atuo +func (_u *APITokenUpdateOne) ClearRevokedAt() *APITokenUpdateOne { + _u.mutation.ClearRevokedAt() + return _u } // SetLastUsedAt sets the "last_used_at" field. -func (atuo *APITokenUpdateOne) SetLastUsedAt(t time.Time) *APITokenUpdateOne { - atuo.mutation.SetLastUsedAt(t) - return atuo +func (_u *APITokenUpdateOne) SetLastUsedAt(v time.Time) *APITokenUpdateOne { + _u.mutation.SetLastUsedAt(v) + return _u } // SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. -func (atuo *APITokenUpdateOne) SetNillableLastUsedAt(t *time.Time) *APITokenUpdateOne { - if t != nil { - atuo.SetLastUsedAt(*t) +func (_u *APITokenUpdateOne) SetNillableLastUsedAt(v *time.Time) *APITokenUpdateOne { + if v != nil { + _u.SetLastUsedAt(*v) } - return atuo + return _u } // ClearLastUsedAt clears the value of the "last_used_at" field. -func (atuo *APITokenUpdateOne) ClearLastUsedAt() *APITokenUpdateOne { - atuo.mutation.ClearLastUsedAt() - return atuo +func (_u *APITokenUpdateOne) ClearLastUsedAt() *APITokenUpdateOne { + _u.mutation.ClearLastUsedAt() + return _u } // SetOrganizationID sets the "organization_id" field. -func (atuo *APITokenUpdateOne) SetOrganizationID(u uuid.UUID) *APITokenUpdateOne { - atuo.mutation.SetOrganizationID(u) - return atuo +func (_u *APITokenUpdateOne) SetOrganizationID(v uuid.UUID) *APITokenUpdateOne { + _u.mutation.SetOrganizationID(v) + return _u } // SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. -func (atuo *APITokenUpdateOne) SetNillableOrganizationID(u *uuid.UUID) *APITokenUpdateOne { - if u != nil { - atuo.SetOrganizationID(*u) +func (_u *APITokenUpdateOne) SetNillableOrganizationID(v *uuid.UUID) *APITokenUpdateOne { + if v != nil { + _u.SetOrganizationID(*v) } - return atuo + return _u } // SetProjectID sets the "project_id" field. -func (atuo *APITokenUpdateOne) SetProjectID(u uuid.UUID) *APITokenUpdateOne { - atuo.mutation.SetProjectID(u) - return atuo +func (_u *APITokenUpdateOne) SetProjectID(v uuid.UUID) *APITokenUpdateOne { + _u.mutation.SetProjectID(v) + return _u } // SetNillableProjectID sets the "project_id" field if the given value is not nil. -func (atuo *APITokenUpdateOne) SetNillableProjectID(u *uuid.UUID) *APITokenUpdateOne { - if u != nil { - atuo.SetProjectID(*u) +func (_u *APITokenUpdateOne) SetNillableProjectID(v *uuid.UUID) *APITokenUpdateOne { + if v != nil { + _u.SetProjectID(*v) } - return atuo + return _u } // ClearProjectID clears the value of the "project_id" field. -func (atuo *APITokenUpdateOne) ClearProjectID() *APITokenUpdateOne { - atuo.mutation.ClearProjectID() - return atuo +func (_u *APITokenUpdateOne) ClearProjectID() *APITokenUpdateOne { + _u.mutation.ClearProjectID() + return _u } // SetPolicies sets the "policies" field. -func (atuo *APITokenUpdateOne) SetPolicies(a []*authz.Policy) *APITokenUpdateOne { - atuo.mutation.SetPolicies(a) - return atuo +func (_u *APITokenUpdateOne) SetPolicies(v []*authz.Policy) *APITokenUpdateOne { + _u.mutation.SetPolicies(v) + return _u } -// AppendPolicies appends a to the "policies" field. -func (atuo *APITokenUpdateOne) AppendPolicies(a []*authz.Policy) *APITokenUpdateOne { - atuo.mutation.AppendPolicies(a) - return atuo +// AppendPolicies appends value to the "policies" field. 
+func (_u *APITokenUpdateOne) AppendPolicies(v []*authz.Policy) *APITokenUpdateOne { + _u.mutation.AppendPolicies(v) + return _u } // ClearPolicies clears the value of the "policies" field. -func (atuo *APITokenUpdateOne) ClearPolicies() *APITokenUpdateOne { - atuo.mutation.ClearPolicies() - return atuo +func (_u *APITokenUpdateOne) ClearPolicies() *APITokenUpdateOne { + _u.mutation.ClearPolicies() + return _u } // SetOrganization sets the "organization" edge to the Organization entity. -func (atuo *APITokenUpdateOne) SetOrganization(o *Organization) *APITokenUpdateOne { - return atuo.SetOrganizationID(o.ID) +func (_u *APITokenUpdateOne) SetOrganization(v *Organization) *APITokenUpdateOne { + return _u.SetOrganizationID(v.ID) } // SetProject sets the "project" edge to the Project entity. -func (atuo *APITokenUpdateOne) SetProject(p *Project) *APITokenUpdateOne { - return atuo.SetProjectID(p.ID) +func (_u *APITokenUpdateOne) SetProject(v *Project) *APITokenUpdateOne { + return _u.SetProjectID(v.ID) } // Mutation returns the APITokenMutation object of the builder. -func (atuo *APITokenUpdateOne) Mutation() *APITokenMutation { - return atuo.mutation +func (_u *APITokenUpdateOne) Mutation() *APITokenMutation { + return _u.mutation } // ClearOrganization clears the "organization" edge to the Organization entity. -func (atuo *APITokenUpdateOne) ClearOrganization() *APITokenUpdateOne { - atuo.mutation.ClearOrganization() - return atuo +func (_u *APITokenUpdateOne) ClearOrganization() *APITokenUpdateOne { + _u.mutation.ClearOrganization() + return _u } // ClearProject clears the "project" edge to the Project entity. -func (atuo *APITokenUpdateOne) ClearProject() *APITokenUpdateOne { - atuo.mutation.ClearProject() - return atuo +func (_u *APITokenUpdateOne) ClearProject() *APITokenUpdateOne { + _u.mutation.ClearProject() + return _u } // Where appends a list predicates to the APITokenUpdate builder. -func (atuo *APITokenUpdateOne) Where(ps ...predicate.APIToken) *APITokenUpdateOne { - atuo.mutation.Where(ps...) - return atuo +func (_u *APITokenUpdateOne) Where(ps ...predicate.APIToken) *APITokenUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (atuo *APITokenUpdateOne) Select(field string, fields ...string) *APITokenUpdateOne { - atuo.fields = append([]string{field}, fields...) - return atuo +func (_u *APITokenUpdateOne) Select(field string, fields ...string) *APITokenUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated APIToken entity. -func (atuo *APITokenUpdateOne) Save(ctx context.Context) (*APIToken, error) { - return withHooks(ctx, atuo.sqlSave, atuo.mutation, atuo.hooks) +func (_u *APITokenUpdateOne) Save(ctx context.Context) (*APIToken, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (atuo *APITokenUpdateOne) SaveX(ctx context.Context) *APIToken { - node, err := atuo.Save(ctx) +func (_u *APITokenUpdateOne) SaveX(ctx context.Context) *APIToken { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -548,43 +548,43 @@ func (atuo *APITokenUpdateOne) SaveX(ctx context.Context) *APIToken { } // Exec executes the query on the entity. 
-func (atuo *APITokenUpdateOne) Exec(ctx context.Context) error { - _, err := atuo.Save(ctx) +func (_u *APITokenUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (atuo *APITokenUpdateOne) ExecX(ctx context.Context) { - if err := atuo.Exec(ctx); err != nil { +func (_u *APITokenUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (atuo *APITokenUpdateOne) check() error { - if atuo.mutation.OrganizationCleared() && len(atuo.mutation.OrganizationIDs()) > 0 { +func (_u *APITokenUpdateOne) check() error { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "APIToken.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (atuo *APITokenUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *APITokenUpdateOne { - atuo.modifiers = append(atuo.modifiers, modifiers...) - return atuo +func (_u *APITokenUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *APITokenUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (atuo *APITokenUpdateOne) sqlSave(ctx context.Context) (_node *APIToken, err error) { - if err := atuo.check(); err != nil { +func (_u *APITokenUpdateOne) sqlSave(ctx context.Context) (_node *APIToken, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(apitoken.Table, apitoken.Columns, sqlgraph.NewFieldSpec(apitoken.FieldID, field.TypeUUID)) - id, ok := atuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "APIToken.id" for update`)} } _spec.Node.ID.Value = id - if fields := atuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, apitoken.FieldID) for _, f := range fields { @@ -596,49 +596,49 @@ func (atuo *APITokenUpdateOne) sqlSave(ctx context.Context) (_node *APIToken, er } } } - if ps := atuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := atuo.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(apitoken.FieldDescription, field.TypeString, value) } - if atuo.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(apitoken.FieldDescription, field.TypeString) } - if value, ok := atuo.mutation.ExpiresAt(); ok { + if value, ok := _u.mutation.ExpiresAt(); ok { _spec.SetField(apitoken.FieldExpiresAt, field.TypeTime, value) } - if atuo.mutation.ExpiresAtCleared() { + if _u.mutation.ExpiresAtCleared() { _spec.ClearField(apitoken.FieldExpiresAt, field.TypeTime) } - if value, ok := atuo.mutation.RevokedAt(); ok { + if value, ok := _u.mutation.RevokedAt(); ok { _spec.SetField(apitoken.FieldRevokedAt, field.TypeTime, value) } - if atuo.mutation.RevokedAtCleared() { + if _u.mutation.RevokedAtCleared() { _spec.ClearField(apitoken.FieldRevokedAt, field.TypeTime) } - if value, ok := atuo.mutation.LastUsedAt(); ok { + if value, ok := _u.mutation.LastUsedAt(); ok { _spec.SetField(apitoken.FieldLastUsedAt, field.TypeTime, value) } - if 
atuo.mutation.LastUsedAtCleared() { + if _u.mutation.LastUsedAtCleared() { _spec.ClearField(apitoken.FieldLastUsedAt, field.TypeTime) } - if value, ok := atuo.mutation.Policies(); ok { + if value, ok := _u.mutation.Policies(); ok { _spec.SetField(apitoken.FieldPolicies, field.TypeJSON, value) } - if value, ok := atuo.mutation.AppendedPolicies(); ok { + if value, ok := _u.mutation.AppendedPolicies(); ok { _spec.AddModifier(func(u *sql.UpdateBuilder) { sqljson.Append(u, apitoken.FieldPolicies, value) }) } - if atuo.mutation.PoliciesCleared() { + if _u.mutation.PoliciesCleared() { _spec.ClearField(apitoken.FieldPolicies, field.TypeJSON) } - if atuo.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -651,7 +651,7 @@ func (atuo *APITokenUpdateOne) sqlSave(ctx context.Context) (_node *APIToken, er } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := atuo.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -667,7 +667,7 @@ func (atuo *APITokenUpdateOne) sqlSave(ctx context.Context) (_node *APIToken, er } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if atuo.mutation.ProjectCleared() { + if _u.mutation.ProjectCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -680,7 +680,7 @@ func (atuo *APITokenUpdateOne) sqlSave(ctx context.Context) (_node *APIToken, er } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := atuo.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -696,11 +696,11 @@ func (atuo *APITokenUpdateOne) sqlSave(ctx context.Context) (_node *APIToken, er } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(atuo.modifiers...) - _node = &APIToken{config: atuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &APIToken{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, atuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{apitoken.Label} } else if sqlgraph.IsConstraintError(err) { @@ -708,6 +708,6 @@ func (atuo *APITokenUpdateOne) sqlSave(ctx context.Context) (_node *APIToken, er } return nil, err } - atuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/attestation.go b/app/controlplane/pkg/data/ent/attestation.go index 79b23558f..08e5dbc55 100644 --- a/app/controlplane/pkg/data/ent/attestation.go +++ b/app/controlplane/pkg/data/ent/attestation.go @@ -71,7 +71,7 @@ func (*Attestation) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Attestation fields. 
-func (a *Attestation) assignValues(columns []string, values []any) error { +func (_m *Attestation) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -81,28 +81,28 @@ func (a *Attestation) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - a.ID = *value + _m.ID = *value } case attestation.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - a.CreatedAt = value.Time + _m.CreatedAt = value.Time } case attestation.FieldBundle: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field bundle", values[i]) } else if value != nil { - a.Bundle = *value + _m.Bundle = *value } case attestation.FieldWorkflowrunID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field workflowrun_id", values[i]) } else if value != nil { - a.WorkflowrunID = *value + _m.WorkflowrunID = *value } default: - a.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -110,46 +110,46 @@ func (a *Attestation) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the Attestation. // This includes values selected through modifiers, order, etc. -func (a *Attestation) Value(name string) (ent.Value, error) { - return a.selectValues.Get(name) +func (_m *Attestation) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryWorkflowrun queries the "workflowrun" edge of the Attestation entity. -func (a *Attestation) QueryWorkflowrun() *WorkflowRunQuery { - return NewAttestationClient(a.config).QueryWorkflowrun(a) +func (_m *Attestation) QueryWorkflowrun() *WorkflowRunQuery { + return NewAttestationClient(_m.config).QueryWorkflowrun(_m) } // Update returns a builder for updating this Attestation. // Note that you need to call Attestation.Unwrap() before calling this method if this Attestation // was returned from a transaction, and the transaction was committed or rolled back. -func (a *Attestation) Update() *AttestationUpdateOne { - return NewAttestationClient(a.config).UpdateOne(a) +func (_m *Attestation) Update() *AttestationUpdateOne { + return NewAttestationClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the Attestation entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (a *Attestation) Unwrap() *Attestation { - _tx, ok := a.config.driver.(*txDriver) +func (_m *Attestation) Unwrap() *Attestation { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: Attestation is not a transactional entity") } - a.config.driver = _tx.drv - return a + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. 
-func (a *Attestation) String() string { +func (_m *Attestation) String() string { var builder strings.Builder builder.WriteString("Attestation(") - builder.WriteString(fmt.Sprintf("id=%v, ", a.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("created_at=") - builder.WriteString(a.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("bundle=") - builder.WriteString(fmt.Sprintf("%v", a.Bundle)) + builder.WriteString(fmt.Sprintf("%v", _m.Bundle)) builder.WriteString(", ") builder.WriteString("workflowrun_id=") - builder.WriteString(fmt.Sprintf("%v", a.WorkflowrunID)) + builder.WriteString(fmt.Sprintf("%v", _m.WorkflowrunID)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/attestation_create.go b/app/controlplane/pkg/data/ent/attestation_create.go index 300171d11..5e077c8eb 100644 --- a/app/controlplane/pkg/data/ent/attestation_create.go +++ b/app/controlplane/pkg/data/ent/attestation_create.go @@ -26,64 +26,64 @@ type AttestationCreate struct { } // SetCreatedAt sets the "created_at" field. -func (ac *AttestationCreate) SetCreatedAt(t time.Time) *AttestationCreate { - ac.mutation.SetCreatedAt(t) - return ac +func (_c *AttestationCreate) SetCreatedAt(v time.Time) *AttestationCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (ac *AttestationCreate) SetNillableCreatedAt(t *time.Time) *AttestationCreate { - if t != nil { - ac.SetCreatedAt(*t) +func (_c *AttestationCreate) SetNillableCreatedAt(v *time.Time) *AttestationCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return ac + return _c } // SetBundle sets the "bundle" field. -func (ac *AttestationCreate) SetBundle(b []byte) *AttestationCreate { - ac.mutation.SetBundle(b) - return ac +func (_c *AttestationCreate) SetBundle(v []byte) *AttestationCreate { + _c.mutation.SetBundle(v) + return _c } // SetWorkflowrunID sets the "workflowrun_id" field. -func (ac *AttestationCreate) SetWorkflowrunID(u uuid.UUID) *AttestationCreate { - ac.mutation.SetWorkflowrunID(u) - return ac +func (_c *AttestationCreate) SetWorkflowrunID(v uuid.UUID) *AttestationCreate { + _c.mutation.SetWorkflowrunID(v) + return _c } // SetID sets the "id" field. -func (ac *AttestationCreate) SetID(u uuid.UUID) *AttestationCreate { - ac.mutation.SetID(u) - return ac +func (_c *AttestationCreate) SetID(v uuid.UUID) *AttestationCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (ac *AttestationCreate) SetNillableID(u *uuid.UUID) *AttestationCreate { - if u != nil { - ac.SetID(*u) +func (_c *AttestationCreate) SetNillableID(v *uuid.UUID) *AttestationCreate { + if v != nil { + _c.SetID(*v) } - return ac + return _c } // SetWorkflowrun sets the "workflowrun" edge to the WorkflowRun entity. -func (ac *AttestationCreate) SetWorkflowrun(w *WorkflowRun) *AttestationCreate { - return ac.SetWorkflowrunID(w.ID) +func (_c *AttestationCreate) SetWorkflowrun(v *WorkflowRun) *AttestationCreate { + return _c.SetWorkflowrunID(v.ID) } // Mutation returns the AttestationMutation object of the builder. -func (ac *AttestationCreate) Mutation() *AttestationMutation { - return ac.mutation +func (_c *AttestationCreate) Mutation() *AttestationMutation { + return _c.mutation } // Save creates the Attestation in the database. 
-func (ac *AttestationCreate) Save(ctx context.Context) (*Attestation, error) { - ac.defaults() - return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks) +func (_c *AttestationCreate) Save(ctx context.Context) (*Attestation, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (ac *AttestationCreate) SaveX(ctx context.Context) *Attestation { - v, err := ac.Save(ctx) +func (_c *AttestationCreate) SaveX(ctx context.Context) *Attestation { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -91,58 +91,58 @@ func (ac *AttestationCreate) SaveX(ctx context.Context) *Attestation { } // Exec executes the query. -func (ac *AttestationCreate) Exec(ctx context.Context) error { - _, err := ac.Save(ctx) +func (_c *AttestationCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (ac *AttestationCreate) ExecX(ctx context.Context) { - if err := ac.Exec(ctx); err != nil { +func (_c *AttestationCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (ac *AttestationCreate) defaults() { - if _, ok := ac.mutation.CreatedAt(); !ok { +func (_c *AttestationCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := attestation.DefaultCreatedAt() - ac.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := ac.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := attestation.DefaultID() - ac.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. -func (ac *AttestationCreate) check() error { - if _, ok := ac.mutation.CreatedAt(); !ok { +func (_c *AttestationCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Attestation.created_at"`)} } - if _, ok := ac.mutation.Bundle(); !ok { + if _, ok := _c.mutation.Bundle(); !ok { return &ValidationError{Name: "bundle", err: errors.New(`ent: missing required field "Attestation.bundle"`)} } - if v, ok := ac.mutation.Bundle(); ok { + if v, ok := _c.mutation.Bundle(); ok { if err := attestation.BundleValidator(v); err != nil { return &ValidationError{Name: "bundle", err: fmt.Errorf(`ent: validator failed for field "Attestation.bundle": %w`, err)} } } - if _, ok := ac.mutation.WorkflowrunID(); !ok { + if _, ok := _c.mutation.WorkflowrunID(); !ok { return &ValidationError{Name: "workflowrun_id", err: errors.New(`ent: missing required field "Attestation.workflowrun_id"`)} } - if len(ac.mutation.WorkflowrunIDs()) == 0 { + if len(_c.mutation.WorkflowrunIDs()) == 0 { return &ValidationError{Name: "workflowrun", err: errors.New(`ent: missing required edge "Attestation.workflowrun"`)} } return nil } -func (ac *AttestationCreate) sqlSave(ctx context.Context) (*Attestation, error) { - if err := ac.check(); err != nil { +func (_c *AttestationCreate) sqlSave(ctx context.Context) (*Attestation, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := ac.createSpec() - if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -155,30 +155,30 @@ func (ac *AttestationCreate) 
sqlSave(ctx context.Context) (*Attestation, error) return nil, err } } - ac.mutation.id = &_node.ID - ac.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (ac *AttestationCreate) createSpec() (*Attestation, *sqlgraph.CreateSpec) { +func (_c *AttestationCreate) createSpec() (*Attestation, *sqlgraph.CreateSpec) { var ( - _node = &Attestation{config: ac.config} + _node = &Attestation{config: _c.config} _spec = sqlgraph.NewCreateSpec(attestation.Table, sqlgraph.NewFieldSpec(attestation.FieldID, field.TypeUUID)) ) - _spec.OnConflict = ac.conflict - if id, ok := ac.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := ac.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(attestation.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := ac.mutation.Bundle(); ok { + if value, ok := _c.mutation.Bundle(); ok { _spec.SetField(attestation.FieldBundle, field.TypeBytes, value) _node.Bundle = value } - if nodes := ac.mutation.WorkflowrunIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowrunIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2O, Inverse: true, @@ -214,10 +214,10 @@ func (ac *AttestationCreate) createSpec() (*Attestation, *sqlgraph.CreateSpec) { // SetCreatedAt(v+v). // }). // Exec(ctx) -func (ac *AttestationCreate) OnConflict(opts ...sql.ConflictOption) *AttestationUpsertOne { - ac.conflict = opts +func (_c *AttestationCreate) OnConflict(opts ...sql.ConflictOption) *AttestationUpsertOne { + _c.conflict = opts return &AttestationUpsertOne{ - create: ac, + create: _c, } } @@ -227,10 +227,10 @@ func (ac *AttestationCreate) OnConflict(opts ...sql.ConflictOption) *Attestation // client.Attestation.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (ac *AttestationCreate) OnConflictColumns(columns ...string) *AttestationUpsertOne { - ac.conflict = append(ac.conflict, sql.ConflictColumns(columns...)) +func (_c *AttestationCreate) OnConflictColumns(columns ...string) *AttestationUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &AttestationUpsertOne{ - create: ac, + create: _c, } } @@ -351,16 +351,16 @@ type AttestationCreateBulk struct { } // Save creates the Attestation entities in the database. 
-func (acb *AttestationCreateBulk) Save(ctx context.Context) ([]*Attestation, error) { - if acb.err != nil { - return nil, acb.err - } - specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) - nodes := make([]*Attestation, len(acb.builders)) - mutators := make([]Mutator, len(acb.builders)) - for i := range acb.builders { +func (_c *AttestationCreateBulk) Save(ctx context.Context) ([]*Attestation, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Attestation, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := acb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*AttestationMutation) @@ -374,12 +374,12 @@ func (acb *AttestationCreateBulk) Save(ctx context.Context) ([]*Attestation, err var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = acb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, acb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -399,7 +399,7 @@ func (acb *AttestationCreateBulk) Save(ctx context.Context) ([]*Attestation, err }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, acb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -407,8 +407,8 @@ func (acb *AttestationCreateBulk) Save(ctx context.Context) ([]*Attestation, err } // SaveX is like Save, but panics if an error occurs. -func (acb *AttestationCreateBulk) SaveX(ctx context.Context) []*Attestation { - v, err := acb.Save(ctx) +func (_c *AttestationCreateBulk) SaveX(ctx context.Context) []*Attestation { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -416,14 +416,14 @@ func (acb *AttestationCreateBulk) SaveX(ctx context.Context) []*Attestation { } // Exec executes the query. -func (acb *AttestationCreateBulk) Exec(ctx context.Context) error { - _, err := acb.Save(ctx) +func (_c *AttestationCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (acb *AttestationCreateBulk) ExecX(ctx context.Context) { - if err := acb.Exec(ctx); err != nil { +func (_c *AttestationCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -443,10 +443,10 @@ func (acb *AttestationCreateBulk) ExecX(ctx context.Context) { // SetCreatedAt(v+v). // }). // Exec(ctx) -func (acb *AttestationCreateBulk) OnConflict(opts ...sql.ConflictOption) *AttestationUpsertBulk { - acb.conflict = opts +func (_c *AttestationCreateBulk) OnConflict(opts ...sql.ConflictOption) *AttestationUpsertBulk { + _c.conflict = opts return &AttestationUpsertBulk{ - create: acb, + create: _c, } } @@ -456,10 +456,10 @@ func (acb *AttestationCreateBulk) OnConflict(opts ...sql.ConflictOption) *Attest // client.Attestation.Create(). // OnConflict(sql.ConflictColumns(columns...)). 
// Exec(ctx) -func (acb *AttestationCreateBulk) OnConflictColumns(columns ...string) *AttestationUpsertBulk { - acb.conflict = append(acb.conflict, sql.ConflictColumns(columns...)) +func (_c *AttestationCreateBulk) OnConflictColumns(columns ...string) *AttestationUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &AttestationUpsertBulk{ - create: acb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/attestation_delete.go b/app/controlplane/pkg/data/ent/attestation_delete.go index 55a2cf871..ff7105678 100644 --- a/app/controlplane/pkg/data/ent/attestation_delete.go +++ b/app/controlplane/pkg/data/ent/attestation_delete.go @@ -20,56 +20,56 @@ type AttestationDelete struct { } // Where appends a list predicates to the AttestationDelete builder. -func (ad *AttestationDelete) Where(ps ...predicate.Attestation) *AttestationDelete { - ad.mutation.Where(ps...) - return ad +func (_d *AttestationDelete) Where(ps ...predicate.Attestation) *AttestationDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (ad *AttestationDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks) +func (_d *AttestationDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (ad *AttestationDelete) ExecX(ctx context.Context) int { - n, err := ad.Exec(ctx) +func (_d *AttestationDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (ad *AttestationDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *AttestationDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(attestation.Table, sqlgraph.NewFieldSpec(attestation.FieldID, field.TypeUUID)) - if ps := ad.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, ad.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - ad.mutation.done = true + _d.mutation.done = true return affected, err } // AttestationDeleteOne is the builder for deleting a single Attestation entity. type AttestationDeleteOne struct { - ad *AttestationDelete + _d *AttestationDelete } // Where appends a list predicates to the AttestationDelete builder. -func (ado *AttestationDeleteOne) Where(ps ...predicate.Attestation) *AttestationDeleteOne { - ado.ad.mutation.Where(ps...) - return ado +func (_d *AttestationDeleteOne) Where(ps ...predicate.Attestation) *AttestationDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (ado *AttestationDeleteOne) Exec(ctx context.Context) error { - n, err := ado.ad.Exec(ctx) +func (_d *AttestationDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (ado *AttestationDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. 
-func (ado *AttestationDeleteOne) ExecX(ctx context.Context) { - if err := ado.Exec(ctx); err != nil { +func (_d *AttestationDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/attestation_query.go b/app/controlplane/pkg/data/ent/attestation_query.go index 535415935..851a97c7c 100644 --- a/app/controlplane/pkg/data/ent/attestation_query.go +++ b/app/controlplane/pkg/data/ent/attestation_query.go @@ -33,44 +33,44 @@ type AttestationQuery struct { } // Where adds a new predicate for the AttestationQuery builder. -func (aq *AttestationQuery) Where(ps ...predicate.Attestation) *AttestationQuery { - aq.predicates = append(aq.predicates, ps...) - return aq +func (_q *AttestationQuery) Where(ps ...predicate.Attestation) *AttestationQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (aq *AttestationQuery) Limit(limit int) *AttestationQuery { - aq.ctx.Limit = &limit - return aq +func (_q *AttestationQuery) Limit(limit int) *AttestationQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (aq *AttestationQuery) Offset(offset int) *AttestationQuery { - aq.ctx.Offset = &offset - return aq +func (_q *AttestationQuery) Offset(offset int) *AttestationQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (aq *AttestationQuery) Unique(unique bool) *AttestationQuery { - aq.ctx.Unique = &unique - return aq +func (_q *AttestationQuery) Unique(unique bool) *AttestationQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (aq *AttestationQuery) Order(o ...attestation.OrderOption) *AttestationQuery { - aq.order = append(aq.order, o...) - return aq +func (_q *AttestationQuery) Order(o ...attestation.OrderOption) *AttestationQuery { + _q.order = append(_q.order, o...) + return _q } // QueryWorkflowrun chains the current query on the "workflowrun" edge. -func (aq *AttestationQuery) QueryWorkflowrun() *WorkflowRunQuery { - query := (&WorkflowRunClient{config: aq.config}).Query() +func (_q *AttestationQuery) QueryWorkflowrun() *WorkflowRunQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := aq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := aq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -79,7 +79,7 @@ func (aq *AttestationQuery) QueryWorkflowrun() *WorkflowRunQuery { sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.O2O, true, attestation.WorkflowrunTable, attestation.WorkflowrunColumn), ) - fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -87,8 +87,8 @@ func (aq *AttestationQuery) QueryWorkflowrun() *WorkflowRunQuery { // First returns the first Attestation entity from the query. // Returns a *NotFoundError when no Attestation was found. 
-func (aq *AttestationQuery) First(ctx context.Context) (*Attestation, error) { - nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, ent.OpQueryFirst)) +func (_q *AttestationQuery) First(ctx context.Context) (*Attestation, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -99,8 +99,8 @@ func (aq *AttestationQuery) First(ctx context.Context) (*Attestation, error) { } // FirstX is like First, but panics if an error occurs. -func (aq *AttestationQuery) FirstX(ctx context.Context) *Attestation { - node, err := aq.First(ctx) +func (_q *AttestationQuery) FirstX(ctx context.Context) *Attestation { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -109,9 +109,9 @@ func (aq *AttestationQuery) FirstX(ctx context.Context) *Attestation { // FirstID returns the first Attestation ID from the query. // Returns a *NotFoundError when no Attestation ID was found. -func (aq *AttestationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *AttestationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -122,8 +122,8 @@ func (aq *AttestationQuery) FirstID(ctx context.Context) (id uuid.UUID, err erro } // FirstIDX is like FirstID, but panics if an error occurs. -func (aq *AttestationQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := aq.FirstID(ctx) +func (_q *AttestationQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -133,8 +133,8 @@ func (aq *AttestationQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single Attestation entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one Attestation entity is found. // Returns a *NotFoundError when no Attestation entities are found. -func (aq *AttestationQuery) Only(ctx context.Context) (*Attestation, error) { - nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, ent.OpQueryOnly)) +func (_q *AttestationQuery) Only(ctx context.Context) (*Attestation, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -149,8 +149,8 @@ func (aq *AttestationQuery) Only(ctx context.Context) (*Attestation, error) { } // OnlyX is like Only, but panics if an error occurs. -func (aq *AttestationQuery) OnlyX(ctx context.Context) *Attestation { - node, err := aq.Only(ctx) +func (_q *AttestationQuery) OnlyX(ctx context.Context) *Attestation { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -160,9 +160,9 @@ func (aq *AttestationQuery) OnlyX(ctx context.Context) *Attestation { // OnlyID is like Only, but returns the only Attestation ID in the query. // Returns a *NotSingularError when more than one Attestation ID is found. // Returns a *NotFoundError when no entities are found. 
-func (aq *AttestationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *AttestationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -177,8 +177,8 @@ func (aq *AttestationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (aq *AttestationQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := aq.OnlyID(ctx) +func (_q *AttestationQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -186,18 +186,18 @@ func (aq *AttestationQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Attestations. -func (aq *AttestationQuery) All(ctx context.Context) ([]*Attestation, error) { - ctx = setContextOp(ctx, aq.ctx, ent.OpQueryAll) - if err := aq.prepareQuery(ctx); err != nil { +func (_q *AttestationQuery) All(ctx context.Context) ([]*Attestation, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*Attestation, *AttestationQuery]() - return withInterceptors[[]*Attestation](ctx, aq, qr, aq.inters) + return withInterceptors[[]*Attestation](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (aq *AttestationQuery) AllX(ctx context.Context) []*Attestation { - nodes, err := aq.All(ctx) +func (_q *AttestationQuery) AllX(ctx context.Context) []*Attestation { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -205,20 +205,20 @@ func (aq *AttestationQuery) AllX(ctx context.Context) []*Attestation { } // IDs executes the query and returns a list of Attestation IDs. -func (aq *AttestationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if aq.ctx.Unique == nil && aq.path != nil { - aq.Unique(true) +func (_q *AttestationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, aq.ctx, ent.OpQueryIDs) - if err = aq.Select(attestation.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(attestation.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (aq *AttestationQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := aq.IDs(ctx) +func (_q *AttestationQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -226,17 +226,17 @@ func (aq *AttestationQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (aq *AttestationQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, aq.ctx, ent.OpQueryCount) - if err := aq.prepareQuery(ctx); err != nil { +func (_q *AttestationQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, aq, querierCount[*AttestationQuery](), aq.inters) + return withInterceptors[int](ctx, _q, querierCount[*AttestationQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. 
-func (aq *AttestationQuery) CountX(ctx context.Context) int { - count, err := aq.Count(ctx) +func (_q *AttestationQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -244,9 +244,9 @@ func (aq *AttestationQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (aq *AttestationQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, aq.ctx, ent.OpQueryExist) - switch _, err := aq.FirstID(ctx); { +func (_q *AttestationQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -257,8 +257,8 @@ func (aq *AttestationQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (aq *AttestationQuery) ExistX(ctx context.Context) bool { - exist, err := aq.Exist(ctx) +func (_q *AttestationQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -267,33 +267,33 @@ func (aq *AttestationQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the AttestationQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (aq *AttestationQuery) Clone() *AttestationQuery { - if aq == nil { +func (_q *AttestationQuery) Clone() *AttestationQuery { + if _q == nil { return nil } return &AttestationQuery{ - config: aq.config, - ctx: aq.ctx.Clone(), - order: append([]attestation.OrderOption{}, aq.order...), - inters: append([]Interceptor{}, aq.inters...), - predicates: append([]predicate.Attestation{}, aq.predicates...), - withWorkflowrun: aq.withWorkflowrun.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]attestation.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Attestation{}, _q.predicates...), + withWorkflowrun: _q.withWorkflowrun.Clone(), // clone intermediate query. - sql: aq.sql.Clone(), - path: aq.path, - modifiers: append([]func(*sql.Selector){}, aq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithWorkflowrun tells the query-builder to eager-load the nodes that are connected to // the "workflowrun" edge. The optional arguments are used to configure the query builder of the edge. -func (aq *AttestationQuery) WithWorkflowrun(opts ...func(*WorkflowRunQuery)) *AttestationQuery { - query := (&WorkflowRunClient{config: aq.config}).Query() +func (_q *AttestationQuery) WithWorkflowrun(opts ...func(*WorkflowRunQuery)) *AttestationQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - aq.withWorkflowrun = query - return aq + _q.withWorkflowrun = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -310,10 +310,10 @@ func (aq *AttestationQuery) WithWorkflowrun(opts ...func(*WorkflowRunQuery)) *At // GroupBy(attestation.FieldCreatedAt). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (aq *AttestationQuery) GroupBy(field string, fields ...string) *AttestationGroupBy { - aq.ctx.Fields = append([]string{field}, fields...) 
- grbuild := &AttestationGroupBy{build: aq} - grbuild.flds = &aq.ctx.Fields +func (_q *AttestationQuery) GroupBy(field string, fields ...string) *AttestationGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &AttestationGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = attestation.Label grbuild.scan = grbuild.Scan return grbuild @@ -331,76 +331,76 @@ func (aq *AttestationQuery) GroupBy(field string, fields ...string) *Attestation // client.Attestation.Query(). // Select(attestation.FieldCreatedAt). // Scan(ctx, &v) -func (aq *AttestationQuery) Select(fields ...string) *AttestationSelect { - aq.ctx.Fields = append(aq.ctx.Fields, fields...) - sbuild := &AttestationSelect{AttestationQuery: aq} +func (_q *AttestationQuery) Select(fields ...string) *AttestationSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &AttestationSelect{AttestationQuery: _q} sbuild.label = attestation.Label - sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a AttestationSelect configured with the given aggregations. -func (aq *AttestationQuery) Aggregate(fns ...AggregateFunc) *AttestationSelect { - return aq.Select().Aggregate(fns...) +func (_q *AttestationQuery) Aggregate(fns ...AggregateFunc) *AttestationSelect { + return _q.Select().Aggregate(fns...) } -func (aq *AttestationQuery) prepareQuery(ctx context.Context) error { - for _, inter := range aq.inters { +func (_q *AttestationQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, aq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range aq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !attestation.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if aq.path != nil { - prev, err := aq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - aq.sql = prev + _q.sql = prev } return nil } -func (aq *AttestationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Attestation, error) { +func (_q *AttestationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Attestation, error) { var ( nodes = []*Attestation{} - _spec = aq.querySpec() + _spec = _q.querySpec() loadedTypes = [1]bool{ - aq.withWorkflowrun != nil, + _q.withWorkflowrun != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*Attestation).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &Attestation{config: aq.config} + node := &Attestation{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(aq.modifiers) > 0 { - _spec.Modifiers = aq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, aq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := aq.withWorkflowrun; query != nil { - if err := aq.loadWorkflowrun(ctx, query, nodes, nil, + if query := _q.withWorkflowrun; query != nil { + if err := _q.loadWorkflowrun(ctx, query, 
nodes, nil, func(n *Attestation, e *WorkflowRun) { n.Edges.Workflowrun = e }); err != nil { return nil, err } @@ -408,7 +408,7 @@ func (aq *AttestationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* return nodes, nil } -func (aq *AttestationQuery) loadWorkflowrun(ctx context.Context, query *WorkflowRunQuery, nodes []*Attestation, init func(*Attestation), assign func(*Attestation, *WorkflowRun)) error { +func (_q *AttestationQuery) loadWorkflowrun(ctx context.Context, query *WorkflowRunQuery, nodes []*Attestation, init func(*Attestation), assign func(*Attestation, *WorkflowRun)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Attestation) for i := range nodes { @@ -438,27 +438,27 @@ func (aq *AttestationQuery) loadWorkflowrun(ctx context.Context, query *Workflow return nil } -func (aq *AttestationQuery) sqlCount(ctx context.Context) (int, error) { - _spec := aq.querySpec() - if len(aq.modifiers) > 0 { - _spec.Modifiers = aq.modifiers +func (_q *AttestationQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = aq.ctx.Fields - if len(aq.ctx.Fields) > 0 { - _spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, aq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (aq *AttestationQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *AttestationQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(attestation.Table, attestation.Columns, sqlgraph.NewFieldSpec(attestation.FieldID, field.TypeUUID)) - _spec.From = aq.sql - if unique := aq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if aq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := aq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, attestation.FieldID) for i := range fields { @@ -466,24 +466,24 @@ func (aq *AttestationQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if aq.withWorkflowrun != nil { + if _q.withWorkflowrun != nil { _spec.Node.AddColumnOnce(attestation.FieldWorkflowrunID) } } - if ps := aq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := aq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := aq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := aq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -493,36 +493,36 @@ func (aq *AttestationQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (aq *AttestationQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(aq.driver.Dialect()) +func (_q *AttestationQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(attestation.Table) - columns := aq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = 
attestation.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if aq.sql != nil { - selector = aq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if aq.ctx.Unique != nil && *aq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range aq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range aq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range aq.order { + for _, p := range _q.order { p(selector) } - if offset := aq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := aq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -531,33 +531,33 @@ func (aq *AttestationQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (aq *AttestationQuery) ForUpdate(opts ...sql.LockOption) *AttestationQuery { - if aq.driver.Dialect() == dialect.Postgres { - aq.Unique(false) +func (_q *AttestationQuery) ForUpdate(opts ...sql.LockOption) *AttestationQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - aq.modifiers = append(aq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return aq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (aq *AttestationQuery) ForShare(opts ...sql.LockOption) *AttestationQuery { - if aq.driver.Dialect() == dialect.Postgres { - aq.Unique(false) +func (_q *AttestationQuery) ForShare(opts ...sql.LockOption) *AttestationQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - aq.modifiers = append(aq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return aq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (aq *AttestationQuery) Modify(modifiers ...func(s *sql.Selector)) *AttestationSelect { - aq.modifiers = append(aq.modifiers, modifiers...) - return aq.Select() +func (_q *AttestationQuery) Modify(modifiers ...func(s *sql.Selector)) *AttestationSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // AttestationGroupBy is the group-by builder for Attestation entities. @@ -567,41 +567,41 @@ type AttestationGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (agb *AttestationGroupBy) Aggregate(fns ...AggregateFunc) *AttestationGroupBy { - agb.fns = append(agb.fns, fns...) - return agb +func (_g *AttestationGroupBy) Aggregate(fns ...AggregateFunc) *AttestationGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (agb *AttestationGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, agb.build.ctx, ent.OpQueryGroupBy) - if err := agb.build.prepareQuery(ctx); err != nil { +func (_g *AttestationGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*AttestationQuery, *AttestationGroupBy](ctx, agb.build, agb, agb.build.inters, v) + return scanWithInterceptors[*AttestationQuery, *AttestationGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (agb *AttestationGroupBy) sqlScan(ctx context.Context, root *AttestationQuery, v any) error { +func (_g *AttestationGroupBy) sqlScan(ctx context.Context, root *AttestationQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(agb.fns)) - for _, fn := range agb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*agb.flds)+len(agb.fns)) - for _, f := range *agb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*agb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := agb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -615,27 +615,27 @@ type AttestationSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (as *AttestationSelect) Aggregate(fns ...AggregateFunc) *AttestationSelect { - as.fns = append(as.fns, fns...) - return as +func (_s *AttestationSelect) Aggregate(fns ...AggregateFunc) *AttestationSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (as *AttestationSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, as.ctx, ent.OpQuerySelect) - if err := as.prepareQuery(ctx); err != nil { +func (_s *AttestationSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*AttestationQuery, *AttestationSelect](ctx, as.AttestationQuery, as, as.inters, v) + return scanWithInterceptors[*AttestationQuery, *AttestationSelect](ctx, _s.AttestationQuery, _s, _s.inters, v) } -func (as *AttestationSelect) sqlScan(ctx context.Context, root *AttestationQuery, v any) error { +func (_s *AttestationSelect) sqlScan(ctx context.Context, root *AttestationQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(as.fns)) - for _, fn := range as.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*as.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -643,7 +643,7 @@ func (as *AttestationSelect) sqlScan(ctx context.Context, root *AttestationQuery } rows := &sql.Rows{} query, args := selector.Query() - if err := as.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -651,7 +651,7 @@ func (as *AttestationSelect) sqlScan(ctx context.Context, root *AttestationQuery } // Modify adds a query modifier for attaching custom logic to queries. -func (as *AttestationSelect) Modify(modifiers ...func(s *sql.Selector)) *AttestationSelect { - as.modifiers = append(as.modifiers, modifiers...) - return as +func (_s *AttestationSelect) Modify(modifiers ...func(s *sql.Selector)) *AttestationSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/attestation_update.go b/app/controlplane/pkg/data/ent/attestation_update.go index 911db8f8b..5ada79322 100644 --- a/app/controlplane/pkg/data/ent/attestation_update.go +++ b/app/controlplane/pkg/data/ent/attestation_update.go @@ -23,24 +23,24 @@ type AttestationUpdate struct { } // Where appends a list predicates to the AttestationUpdate builder. -func (au *AttestationUpdate) Where(ps ...predicate.Attestation) *AttestationUpdate { - au.mutation.Where(ps...) - return au +func (_u *AttestationUpdate) Where(ps ...predicate.Attestation) *AttestationUpdate { + _u.mutation.Where(ps...) + return _u } // Mutation returns the AttestationMutation object of the builder. -func (au *AttestationUpdate) Mutation() *AttestationMutation { - return au.mutation +func (_u *AttestationUpdate) Mutation() *AttestationMutation { + return _u.mutation } // Save executes the query and returns the number of nodes affected by the update operation. -func (au *AttestationUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, au.sqlSave, au.mutation, au.hooks) +func (_u *AttestationUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (au *AttestationUpdate) SaveX(ctx context.Context) int { - affected, err := au.Save(ctx) +func (_u *AttestationUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -48,46 +48,46 @@ func (au *AttestationUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (au *AttestationUpdate) Exec(ctx context.Context) error { - _, err := au.Save(ctx) +func (_u *AttestationUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (au *AttestationUpdate) ExecX(ctx context.Context) { - if err := au.Exec(ctx); err != nil { +func (_u *AttestationUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (au *AttestationUpdate) check() error { - if au.mutation.WorkflowrunCleared() && len(au.mutation.WorkflowrunIDs()) > 0 { +func (_u *AttestationUpdate) check() error { + if _u.mutation.WorkflowrunCleared() && len(_u.mutation.WorkflowrunIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Attestation.workflowrun"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. 
-func (au *AttestationUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *AttestationUpdate { - au.modifiers = append(au.modifiers, modifiers...) - return au +func (_u *AttestationUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *AttestationUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (au *AttestationUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := au.check(); err != nil { - return n, err +func (_u *AttestationUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(attestation.Table, attestation.Columns, sqlgraph.NewFieldSpec(attestation.FieldID, field.TypeUUID)) - if ps := au.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - _spec.AddModifiers(au.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, au.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{attestation.Label} } else if sqlgraph.IsConstraintError(err) { @@ -95,8 +95,8 @@ func (au *AttestationUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - au.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // AttestationUpdateOne is the builder for updating a single Attestation entity. @@ -109,31 +109,31 @@ type AttestationUpdateOne struct { } // Mutation returns the AttestationMutation object of the builder. -func (auo *AttestationUpdateOne) Mutation() *AttestationMutation { - return auo.mutation +func (_u *AttestationUpdateOne) Mutation() *AttestationMutation { + return _u.mutation } // Where appends a list predicates to the AttestationUpdate builder. -func (auo *AttestationUpdateOne) Where(ps ...predicate.Attestation) *AttestationUpdateOne { - auo.mutation.Where(ps...) - return auo +func (_u *AttestationUpdateOne) Where(ps ...predicate.Attestation) *AttestationUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (auo *AttestationUpdateOne) Select(field string, fields ...string) *AttestationUpdateOne { - auo.fields = append([]string{field}, fields...) - return auo +func (_u *AttestationUpdateOne) Select(field string, fields ...string) *AttestationUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated Attestation entity. -func (auo *AttestationUpdateOne) Save(ctx context.Context) (*Attestation, error) { - return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks) +func (_u *AttestationUpdateOne) Save(ctx context.Context) (*Attestation, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (auo *AttestationUpdateOne) SaveX(ctx context.Context) *Attestation { - node, err := auo.Save(ctx) +func (_u *AttestationUpdateOne) SaveX(ctx context.Context) *Attestation { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -141,43 +141,43 @@ func (auo *AttestationUpdateOne) SaveX(ctx context.Context) *Attestation { } // Exec executes the query on the entity. 
-func (auo *AttestationUpdateOne) Exec(ctx context.Context) error { - _, err := auo.Save(ctx) +func (_u *AttestationUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (auo *AttestationUpdateOne) ExecX(ctx context.Context) { - if err := auo.Exec(ctx); err != nil { +func (_u *AttestationUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (auo *AttestationUpdateOne) check() error { - if auo.mutation.WorkflowrunCleared() && len(auo.mutation.WorkflowrunIDs()) > 0 { +func (_u *AttestationUpdateOne) check() error { + if _u.mutation.WorkflowrunCleared() && len(_u.mutation.WorkflowrunIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Attestation.workflowrun"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (auo *AttestationUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *AttestationUpdateOne { - auo.modifiers = append(auo.modifiers, modifiers...) - return auo +func (_u *AttestationUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *AttestationUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (auo *AttestationUpdateOne) sqlSave(ctx context.Context) (_node *Attestation, err error) { - if err := auo.check(); err != nil { +func (_u *AttestationUpdateOne) sqlSave(ctx context.Context) (_node *Attestation, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(attestation.Table, attestation.Columns, sqlgraph.NewFieldSpec(attestation.FieldID, field.TypeUUID)) - id, ok := auo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Attestation.id" for update`)} } _spec.Node.ID.Value = id - if fields := auo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, attestation.FieldID) for _, f := range fields { @@ -189,18 +189,18 @@ func (auo *AttestationUpdateOne) sqlSave(ctx context.Context) (_node *Attestatio } } } - if ps := auo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - _spec.AddModifiers(auo.modifiers...) - _node = &Attestation{config: auo.config} + _spec.AddModifiers(_u.modifiers...) 
+ _node = &Attestation{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, auo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{attestation.Label} } else if sqlgraph.IsConstraintError(err) { @@ -208,6 +208,6 @@ func (auo *AttestationUpdateOne) sqlSave(ctx context.Context) (_node *Attestatio } return nil, err } - auo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/casbackend.go b/app/controlplane/pkg/data/ent/casbackend.go index bc1225166..7e87697b1 100644 --- a/app/controlplane/pkg/data/ent/casbackend.go +++ b/app/controlplane/pkg/data/ent/casbackend.go @@ -112,7 +112,7 @@ func (*CASBackend) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the CASBackend fields. -func (cb *CASBackend) assignValues(columns []string, values []any) error { +func (_m *CASBackend) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -122,101 +122,101 @@ func (cb *CASBackend) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - cb.ID = *value + _m.ID = *value } case casbackend.FieldLocation: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field location", values[i]) } else if value.Valid { - cb.Location = value.String + _m.Location = value.String } case casbackend.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - cb.Name = value.String + _m.Name = value.String } case casbackend.FieldProvider: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field provider", values[i]) } else if value.Valid { - cb.Provider = biz.CASBackendProvider(value.String) + _m.Provider = biz.CASBackendProvider(value.String) } case casbackend.FieldDescription: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field description", values[i]) } else if value.Valid { - cb.Description = value.String + _m.Description = value.String } case casbackend.FieldSecretName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field secret_name", values[i]) } else if value.Valid { - cb.SecretName = value.String + _m.SecretName = value.String } case casbackend.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - cb.CreatedAt = value.Time + _m.CreatedAt = value.Time } case casbackend.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - cb.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case casbackend.FieldValidationStatus: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field validation_status", values[i]) } else if value.Valid { - cb.ValidationStatus = biz.CASBackendValidationStatus(value.String) + _m.ValidationStatus = 
biz.CASBackendValidationStatus(value.String) } case casbackend.FieldValidationError: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field validation_error", values[i]) } else if value.Valid { - cb.ValidationError = value.String + _m.ValidationError = value.String } case casbackend.FieldValidatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field validated_at", values[i]) } else if value.Valid { - cb.ValidatedAt = value.Time + _m.ValidatedAt = value.Time } case casbackend.FieldDefault: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field default", values[i]) } else if value.Valid { - cb.Default = value.Bool + _m.Default = value.Bool } case casbackend.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - cb.DeletedAt = value.Time + _m.DeletedAt = value.Time } case casbackend.FieldFallback: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field fallback", values[i]) } else if value.Valid { - cb.Fallback = value.Bool + _m.Fallback = value.Bool } case casbackend.FieldMaxBlobSizeBytes: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field max_blob_size_bytes", values[i]) } else if value.Valid { - cb.MaxBlobSizeBytes = value.Int64 + _m.MaxBlobSizeBytes = value.Int64 } case casbackend.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field organization_cas_backends", values[i]) } else if value.Valid { - cb.organization_cas_backends = new(uuid.UUID) - *cb.organization_cas_backends = *value.S.(*uuid.UUID) + _m.organization_cas_backends = new(uuid.UUID) + *_m.organization_cas_backends = *value.S.(*uuid.UUID) } default: - cb.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -224,84 +224,84 @@ func (cb *CASBackend) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the CASBackend. // This includes values selected through modifiers, order, etc. -func (cb *CASBackend) Value(name string) (ent.Value, error) { - return cb.selectValues.Get(name) +func (_m *CASBackend) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryOrganization queries the "organization" edge of the CASBackend entity. -func (cb *CASBackend) QueryOrganization() *OrganizationQuery { - return NewCASBackendClient(cb.config).QueryOrganization(cb) +func (_m *CASBackend) QueryOrganization() *OrganizationQuery { + return NewCASBackendClient(_m.config).QueryOrganization(_m) } // QueryWorkflowRun queries the "workflow_run" edge of the CASBackend entity. -func (cb *CASBackend) QueryWorkflowRun() *WorkflowRunQuery { - return NewCASBackendClient(cb.config).QueryWorkflowRun(cb) +func (_m *CASBackend) QueryWorkflowRun() *WorkflowRunQuery { + return NewCASBackendClient(_m.config).QueryWorkflowRun(_m) } // Update returns a builder for updating this CASBackend. // Note that you need to call CASBackend.Unwrap() before calling this method if this CASBackend // was returned from a transaction, and the transaction was committed or rolled back. 
-func (cb *CASBackend) Update() *CASBackendUpdateOne { - return NewCASBackendClient(cb.config).UpdateOne(cb) +func (_m *CASBackend) Update() *CASBackendUpdateOne { + return NewCASBackendClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the CASBackend entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (cb *CASBackend) Unwrap() *CASBackend { - _tx, ok := cb.config.driver.(*txDriver) +func (_m *CASBackend) Unwrap() *CASBackend { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: CASBackend is not a transactional entity") } - cb.config.driver = _tx.drv - return cb + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (cb *CASBackend) String() string { +func (_m *CASBackend) String() string { var builder strings.Builder builder.WriteString("CASBackend(") - builder.WriteString(fmt.Sprintf("id=%v, ", cb.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("location=") - builder.WriteString(cb.Location) + builder.WriteString(_m.Location) builder.WriteString(", ") builder.WriteString("name=") - builder.WriteString(cb.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("provider=") - builder.WriteString(fmt.Sprintf("%v", cb.Provider)) + builder.WriteString(fmt.Sprintf("%v", _m.Provider)) builder.WriteString(", ") builder.WriteString("description=") - builder.WriteString(cb.Description) + builder.WriteString(_m.Description) builder.WriteString(", ") builder.WriteString("secret_name=") - builder.WriteString(cb.SecretName) + builder.WriteString(_m.SecretName) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(cb.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(cb.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("validation_status=") - builder.WriteString(fmt.Sprintf("%v", cb.ValidationStatus)) + builder.WriteString(fmt.Sprintf("%v", _m.ValidationStatus)) builder.WriteString(", ") builder.WriteString("validation_error=") - builder.WriteString(cb.ValidationError) + builder.WriteString(_m.ValidationError) builder.WriteString(", ") builder.WriteString("validated_at=") - builder.WriteString(cb.ValidatedAt.Format(time.ANSIC)) + builder.WriteString(_m.ValidatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("default=") - builder.WriteString(fmt.Sprintf("%v", cb.Default)) + builder.WriteString(fmt.Sprintf("%v", _m.Default)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(cb.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("fallback=") - builder.WriteString(fmt.Sprintf("%v", cb.Fallback)) + builder.WriteString(fmt.Sprintf("%v", _m.Fallback)) builder.WriteString(", ") builder.WriteString("max_blob_size_bytes=") - builder.WriteString(fmt.Sprintf("%v", cb.MaxBlobSizeBytes)) + builder.WriteString(fmt.Sprintf("%v", _m.MaxBlobSizeBytes)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/casbackend_create.go b/app/controlplane/pkg/data/ent/casbackend_create.go index 2cda3258d..afdf4fae6 100644 --- a/app/controlplane/pkg/data/ent/casbackend_create.go +++ 
b/app/controlplane/pkg/data/ent/casbackend_create.go @@ -28,215 +28,215 @@ type CASBackendCreate struct { } // SetLocation sets the "location" field. -func (cbc *CASBackendCreate) SetLocation(s string) *CASBackendCreate { - cbc.mutation.SetLocation(s) - return cbc +func (_c *CASBackendCreate) SetLocation(v string) *CASBackendCreate { + _c.mutation.SetLocation(v) + return _c } // SetName sets the "name" field. -func (cbc *CASBackendCreate) SetName(s string) *CASBackendCreate { - cbc.mutation.SetName(s) - return cbc +func (_c *CASBackendCreate) SetName(v string) *CASBackendCreate { + _c.mutation.SetName(v) + return _c } // SetProvider sets the "provider" field. -func (cbc *CASBackendCreate) SetProvider(bbp biz.CASBackendProvider) *CASBackendCreate { - cbc.mutation.SetProvider(bbp) - return cbc +func (_c *CASBackendCreate) SetProvider(v biz.CASBackendProvider) *CASBackendCreate { + _c.mutation.SetProvider(v) + return _c } // SetDescription sets the "description" field. -func (cbc *CASBackendCreate) SetDescription(s string) *CASBackendCreate { - cbc.mutation.SetDescription(s) - return cbc +func (_c *CASBackendCreate) SetDescription(v string) *CASBackendCreate { + _c.mutation.SetDescription(v) + return _c } // SetNillableDescription sets the "description" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableDescription(s *string) *CASBackendCreate { - if s != nil { - cbc.SetDescription(*s) +func (_c *CASBackendCreate) SetNillableDescription(v *string) *CASBackendCreate { + if v != nil { + _c.SetDescription(*v) } - return cbc + return _c } // SetSecretName sets the "secret_name" field. -func (cbc *CASBackendCreate) SetSecretName(s string) *CASBackendCreate { - cbc.mutation.SetSecretName(s) - return cbc +func (_c *CASBackendCreate) SetSecretName(v string) *CASBackendCreate { + _c.mutation.SetSecretName(v) + return _c } // SetCreatedAt sets the "created_at" field. -func (cbc *CASBackendCreate) SetCreatedAt(t time.Time) *CASBackendCreate { - cbc.mutation.SetCreatedAt(t) - return cbc +func (_c *CASBackendCreate) SetCreatedAt(v time.Time) *CASBackendCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableCreatedAt(t *time.Time) *CASBackendCreate { - if t != nil { - cbc.SetCreatedAt(*t) +func (_c *CASBackendCreate) SetNillableCreatedAt(v *time.Time) *CASBackendCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return cbc + return _c } // SetUpdatedAt sets the "updated_at" field. -func (cbc *CASBackendCreate) SetUpdatedAt(t time.Time) *CASBackendCreate { - cbc.mutation.SetUpdatedAt(t) - return cbc +func (_c *CASBackendCreate) SetUpdatedAt(v time.Time) *CASBackendCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableUpdatedAt(t *time.Time) *CASBackendCreate { - if t != nil { - cbc.SetUpdatedAt(*t) +func (_c *CASBackendCreate) SetNillableUpdatedAt(v *time.Time) *CASBackendCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return cbc + return _c } // SetValidationStatus sets the "validation_status" field. 
-func (cbc *CASBackendCreate) SetValidationStatus(bbvs biz.CASBackendValidationStatus) *CASBackendCreate { - cbc.mutation.SetValidationStatus(bbvs) - return cbc +func (_c *CASBackendCreate) SetValidationStatus(v biz.CASBackendValidationStatus) *CASBackendCreate { + _c.mutation.SetValidationStatus(v) + return _c } // SetNillableValidationStatus sets the "validation_status" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableValidationStatus(bbvs *biz.CASBackendValidationStatus) *CASBackendCreate { - if bbvs != nil { - cbc.SetValidationStatus(*bbvs) +func (_c *CASBackendCreate) SetNillableValidationStatus(v *biz.CASBackendValidationStatus) *CASBackendCreate { + if v != nil { + _c.SetValidationStatus(*v) } - return cbc + return _c } // SetValidationError sets the "validation_error" field. -func (cbc *CASBackendCreate) SetValidationError(s string) *CASBackendCreate { - cbc.mutation.SetValidationError(s) - return cbc +func (_c *CASBackendCreate) SetValidationError(v string) *CASBackendCreate { + _c.mutation.SetValidationError(v) + return _c } // SetNillableValidationError sets the "validation_error" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableValidationError(s *string) *CASBackendCreate { - if s != nil { - cbc.SetValidationError(*s) +func (_c *CASBackendCreate) SetNillableValidationError(v *string) *CASBackendCreate { + if v != nil { + _c.SetValidationError(*v) } - return cbc + return _c } // SetValidatedAt sets the "validated_at" field. -func (cbc *CASBackendCreate) SetValidatedAt(t time.Time) *CASBackendCreate { - cbc.mutation.SetValidatedAt(t) - return cbc +func (_c *CASBackendCreate) SetValidatedAt(v time.Time) *CASBackendCreate { + _c.mutation.SetValidatedAt(v) + return _c } // SetNillableValidatedAt sets the "validated_at" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableValidatedAt(t *time.Time) *CASBackendCreate { - if t != nil { - cbc.SetValidatedAt(*t) +func (_c *CASBackendCreate) SetNillableValidatedAt(v *time.Time) *CASBackendCreate { + if v != nil { + _c.SetValidatedAt(*v) } - return cbc + return _c } // SetDefault sets the "default" field. -func (cbc *CASBackendCreate) SetDefault(b bool) *CASBackendCreate { - cbc.mutation.SetDefault(b) - return cbc +func (_c *CASBackendCreate) SetDefault(v bool) *CASBackendCreate { + _c.mutation.SetDefault(v) + return _c } // SetNillableDefault sets the "default" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableDefault(b *bool) *CASBackendCreate { - if b != nil { - cbc.SetDefault(*b) +func (_c *CASBackendCreate) SetNillableDefault(v *bool) *CASBackendCreate { + if v != nil { + _c.SetDefault(*v) } - return cbc + return _c } // SetDeletedAt sets the "deleted_at" field. -func (cbc *CASBackendCreate) SetDeletedAt(t time.Time) *CASBackendCreate { - cbc.mutation.SetDeletedAt(t) - return cbc +func (_c *CASBackendCreate) SetDeletedAt(v time.Time) *CASBackendCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableDeletedAt(t *time.Time) *CASBackendCreate { - if t != nil { - cbc.SetDeletedAt(*t) +func (_c *CASBackendCreate) SetNillableDeletedAt(v *time.Time) *CASBackendCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return cbc + return _c } // SetFallback sets the "fallback" field. 
-func (cbc *CASBackendCreate) SetFallback(b bool) *CASBackendCreate { - cbc.mutation.SetFallback(b) - return cbc +func (_c *CASBackendCreate) SetFallback(v bool) *CASBackendCreate { + _c.mutation.SetFallback(v) + return _c } // SetNillableFallback sets the "fallback" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableFallback(b *bool) *CASBackendCreate { - if b != nil { - cbc.SetFallback(*b) +func (_c *CASBackendCreate) SetNillableFallback(v *bool) *CASBackendCreate { + if v != nil { + _c.SetFallback(*v) } - return cbc + return _c } // SetMaxBlobSizeBytes sets the "max_blob_size_bytes" field. -func (cbc *CASBackendCreate) SetMaxBlobSizeBytes(i int64) *CASBackendCreate { - cbc.mutation.SetMaxBlobSizeBytes(i) - return cbc +func (_c *CASBackendCreate) SetMaxBlobSizeBytes(v int64) *CASBackendCreate { + _c.mutation.SetMaxBlobSizeBytes(v) + return _c } // SetID sets the "id" field. -func (cbc *CASBackendCreate) SetID(u uuid.UUID) *CASBackendCreate { - cbc.mutation.SetID(u) - return cbc +func (_c *CASBackendCreate) SetID(v uuid.UUID) *CASBackendCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (cbc *CASBackendCreate) SetNillableID(u *uuid.UUID) *CASBackendCreate { - if u != nil { - cbc.SetID(*u) +func (_c *CASBackendCreate) SetNillableID(v *uuid.UUID) *CASBackendCreate { + if v != nil { + _c.SetID(*v) } - return cbc + return _c } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (cbc *CASBackendCreate) SetOrganizationID(id uuid.UUID) *CASBackendCreate { - cbc.mutation.SetOrganizationID(id) - return cbc +func (_c *CASBackendCreate) SetOrganizationID(id uuid.UUID) *CASBackendCreate { + _c.mutation.SetOrganizationID(id) + return _c } // SetOrganization sets the "organization" edge to the Organization entity. -func (cbc *CASBackendCreate) SetOrganization(o *Organization) *CASBackendCreate { - return cbc.SetOrganizationID(o.ID) +func (_c *CASBackendCreate) SetOrganization(v *Organization) *CASBackendCreate { + return _c.SetOrganizationID(v.ID) } // AddWorkflowRunIDs adds the "workflow_run" edge to the WorkflowRun entity by IDs. -func (cbc *CASBackendCreate) AddWorkflowRunIDs(ids ...uuid.UUID) *CASBackendCreate { - cbc.mutation.AddWorkflowRunIDs(ids...) - return cbc +func (_c *CASBackendCreate) AddWorkflowRunIDs(ids ...uuid.UUID) *CASBackendCreate { + _c.mutation.AddWorkflowRunIDs(ids...) + return _c } // AddWorkflowRun adds the "workflow_run" edges to the WorkflowRun entity. -func (cbc *CASBackendCreate) AddWorkflowRun(w ...*WorkflowRun) *CASBackendCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *CASBackendCreate) AddWorkflowRun(v ...*WorkflowRun) *CASBackendCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return cbc.AddWorkflowRunIDs(ids...) + return _c.AddWorkflowRunIDs(ids...) } // Mutation returns the CASBackendMutation object of the builder. -func (cbc *CASBackendCreate) Mutation() *CASBackendMutation { - return cbc.mutation +func (_c *CASBackendCreate) Mutation() *CASBackendMutation { + return _c.mutation } // Save creates the CASBackend in the database. 
-func (cbc *CASBackendCreate) Save(ctx context.Context) (*CASBackend, error) { - cbc.defaults() - return withHooks(ctx, cbc.sqlSave, cbc.mutation, cbc.hooks) +func (_c *CASBackendCreate) Save(ctx context.Context) (*CASBackend, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (cbc *CASBackendCreate) SaveX(ctx context.Context) *CASBackend { - v, err := cbc.Save(ctx) +func (_c *CASBackendCreate) SaveX(ctx context.Context) *CASBackend { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -244,107 +244,107 @@ func (cbc *CASBackendCreate) SaveX(ctx context.Context) *CASBackend { } // Exec executes the query. -func (cbc *CASBackendCreate) Exec(ctx context.Context) error { - _, err := cbc.Save(ctx) +func (_c *CASBackendCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (cbc *CASBackendCreate) ExecX(ctx context.Context) { - if err := cbc.Exec(ctx); err != nil { +func (_c *CASBackendCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (cbc *CASBackendCreate) defaults() { - if _, ok := cbc.mutation.CreatedAt(); !ok { +func (_c *CASBackendCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := casbackend.DefaultCreatedAt() - cbc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := cbc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := casbackend.DefaultUpdatedAt() - cbc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := cbc.mutation.ValidationStatus(); !ok { + if _, ok := _c.mutation.ValidationStatus(); !ok { v := casbackend.DefaultValidationStatus - cbc.mutation.SetValidationStatus(v) + _c.mutation.SetValidationStatus(v) } - if _, ok := cbc.mutation.ValidatedAt(); !ok { + if _, ok := _c.mutation.ValidatedAt(); !ok { v := casbackend.DefaultValidatedAt() - cbc.mutation.SetValidatedAt(v) + _c.mutation.SetValidatedAt(v) } - if _, ok := cbc.mutation.Default(); !ok { + if _, ok := _c.mutation.Default(); !ok { v := casbackend.DefaultDefault - cbc.mutation.SetDefault(v) + _c.mutation.SetDefault(v) } - if _, ok := cbc.mutation.Fallback(); !ok { + if _, ok := _c.mutation.Fallback(); !ok { v := casbackend.DefaultFallback - cbc.mutation.SetFallback(v) + _c.mutation.SetFallback(v) } - if _, ok := cbc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := casbackend.DefaultID() - cbc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (cbc *CASBackendCreate) check() error { - if _, ok := cbc.mutation.Location(); !ok { +func (_c *CASBackendCreate) check() error { + if _, ok := _c.mutation.Location(); !ok { return &ValidationError{Name: "location", err: errors.New(`ent: missing required field "CASBackend.location"`)} } - if _, ok := cbc.mutation.Name(); !ok { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "CASBackend.name"`)} } - if _, ok := cbc.mutation.Provider(); !ok { + if _, ok := _c.mutation.Provider(); !ok { return &ValidationError{Name: "provider", err: errors.New(`ent: missing required field "CASBackend.provider"`)} } - if v, ok := cbc.mutation.Provider(); ok { + if v, ok := _c.mutation.Provider(); ok { if err := casbackend.ProviderValidator(v); err != nil { return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "CASBackend.provider": %w`, err)} } } - if _, ok := cbc.mutation.SecretName(); !ok { + if _, ok := _c.mutation.SecretName(); !ok { return &ValidationError{Name: "secret_name", err: errors.New(`ent: missing required field "CASBackend.secret_name"`)} } - if _, ok := cbc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "CASBackend.created_at"`)} } - if _, ok := cbc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "CASBackend.updated_at"`)} } - if _, ok := cbc.mutation.ValidationStatus(); !ok { + if _, ok := _c.mutation.ValidationStatus(); !ok { return &ValidationError{Name: "validation_status", err: errors.New(`ent: missing required field "CASBackend.validation_status"`)} } - if v, ok := cbc.mutation.ValidationStatus(); ok { + if v, ok := _c.mutation.ValidationStatus(); ok { if err := casbackend.ValidationStatusValidator(v); err != nil { return &ValidationError{Name: "validation_status", err: fmt.Errorf(`ent: validator failed for field "CASBackend.validation_status": %w`, err)} } } - if _, ok := cbc.mutation.ValidatedAt(); !ok { + if _, ok := _c.mutation.ValidatedAt(); !ok { return &ValidationError{Name: "validated_at", err: errors.New(`ent: missing required field "CASBackend.validated_at"`)} } - if _, ok := cbc.mutation.Default(); !ok { + if _, ok := _c.mutation.Default(); !ok { return &ValidationError{Name: "default", err: errors.New(`ent: missing required field "CASBackend.default"`)} } - if _, ok := cbc.mutation.Fallback(); !ok { + if _, ok := _c.mutation.Fallback(); !ok { return &ValidationError{Name: "fallback", err: errors.New(`ent: missing required field "CASBackend.fallback"`)} } - if _, ok := cbc.mutation.MaxBlobSizeBytes(); !ok { + if _, ok := _c.mutation.MaxBlobSizeBytes(); !ok { return &ValidationError{Name: "max_blob_size_bytes", err: errors.New(`ent: missing required field "CASBackend.max_blob_size_bytes"`)} } - if len(cbc.mutation.OrganizationIDs()) == 0 { + if len(_c.mutation.OrganizationIDs()) == 0 { return &ValidationError{Name: "organization", err: errors.New(`ent: missing required edge "CASBackend.organization"`)} } return nil } -func (cbc *CASBackendCreate) sqlSave(ctx context.Context) (*CASBackend, error) { - if err := cbc.check(); err != nil { +func (_c *CASBackendCreate) sqlSave(ctx context.Context) (*CASBackend, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := cbc.createSpec() - if err := sqlgraph.CreateNode(ctx, 
cbc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -357,78 +357,78 @@ func (cbc *CASBackendCreate) sqlSave(ctx context.Context) (*CASBackend, error) { return nil, err } } - cbc.mutation.id = &_node.ID - cbc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (cbc *CASBackendCreate) createSpec() (*CASBackend, *sqlgraph.CreateSpec) { +func (_c *CASBackendCreate) createSpec() (*CASBackend, *sqlgraph.CreateSpec) { var ( - _node = &CASBackend{config: cbc.config} + _node = &CASBackend{config: _c.config} _spec = sqlgraph.NewCreateSpec(casbackend.Table, sqlgraph.NewFieldSpec(casbackend.FieldID, field.TypeUUID)) ) - _spec.OnConflict = cbc.conflict - if id, ok := cbc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := cbc.mutation.Location(); ok { + if value, ok := _c.mutation.Location(); ok { _spec.SetField(casbackend.FieldLocation, field.TypeString, value) _node.Location = value } - if value, ok := cbc.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { _spec.SetField(casbackend.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := cbc.mutation.Provider(); ok { + if value, ok := _c.mutation.Provider(); ok { _spec.SetField(casbackend.FieldProvider, field.TypeEnum, value) _node.Provider = value } - if value, ok := cbc.mutation.Description(); ok { + if value, ok := _c.mutation.Description(); ok { _spec.SetField(casbackend.FieldDescription, field.TypeString, value) _node.Description = value } - if value, ok := cbc.mutation.SecretName(); ok { + if value, ok := _c.mutation.SecretName(); ok { _spec.SetField(casbackend.FieldSecretName, field.TypeString, value) _node.SecretName = value } - if value, ok := cbc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(casbackend.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := cbc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(casbackend.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := cbc.mutation.ValidationStatus(); ok { + if value, ok := _c.mutation.ValidationStatus(); ok { _spec.SetField(casbackend.FieldValidationStatus, field.TypeEnum, value) _node.ValidationStatus = value } - if value, ok := cbc.mutation.ValidationError(); ok { + if value, ok := _c.mutation.ValidationError(); ok { _spec.SetField(casbackend.FieldValidationError, field.TypeString, value) _node.ValidationError = value } - if value, ok := cbc.mutation.ValidatedAt(); ok { + if value, ok := _c.mutation.ValidatedAt(); ok { _spec.SetField(casbackend.FieldValidatedAt, field.TypeTime, value) _node.ValidatedAt = value } - if value, ok := cbc.mutation.Default(); ok { + if value, ok := _c.mutation.Default(); ok { _spec.SetField(casbackend.FieldDefault, field.TypeBool, value) _node.Default = value } - if value, ok := cbc.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(casbackend.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if value, ok := cbc.mutation.Fallback(); ok { + if value, ok := _c.mutation.Fallback(); ok { _spec.SetField(casbackend.FieldFallback, field.TypeBool, value) _node.Fallback = value } - if value, ok := 
cbc.mutation.MaxBlobSizeBytes(); ok { + if value, ok := _c.mutation.MaxBlobSizeBytes(); ok { _spec.SetField(casbackend.FieldMaxBlobSizeBytes, field.TypeInt64, value) _node.MaxBlobSizeBytes = value } - if nodes := cbc.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -445,7 +445,7 @@ func (cbc *CASBackendCreate) createSpec() (*CASBackend, *sqlgraph.CreateSpec) { _node.organization_cas_backends = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := cbc.mutation.WorkflowRunIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowRunIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -480,10 +480,10 @@ func (cbc *CASBackendCreate) createSpec() (*CASBackend, *sqlgraph.CreateSpec) { // SetLocation(v+v). // }). // Exec(ctx) -func (cbc *CASBackendCreate) OnConflict(opts ...sql.ConflictOption) *CASBackendUpsertOne { - cbc.conflict = opts +func (_c *CASBackendCreate) OnConflict(opts ...sql.ConflictOption) *CASBackendUpsertOne { + _c.conflict = opts return &CASBackendUpsertOne{ - create: cbc, + create: _c, } } @@ -493,10 +493,10 @@ func (cbc *CASBackendCreate) OnConflict(opts ...sql.ConflictOption) *CASBackendU // client.CASBackend.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (cbc *CASBackendCreate) OnConflictColumns(columns ...string) *CASBackendUpsertOne { - cbc.conflict = append(cbc.conflict, sql.ConflictColumns(columns...)) +func (_c *CASBackendCreate) OnConflictColumns(columns ...string) *CASBackendUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &CASBackendUpsertOne{ - create: cbc, + create: _c, } } @@ -909,16 +909,16 @@ type CASBackendCreateBulk struct { } // Save creates the CASBackend entities in the database. -func (cbcb *CASBackendCreateBulk) Save(ctx context.Context) ([]*CASBackend, error) { - if cbcb.err != nil { - return nil, cbcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(cbcb.builders)) - nodes := make([]*CASBackend, len(cbcb.builders)) - mutators := make([]Mutator, len(cbcb.builders)) - for i := range cbcb.builders { +func (_c *CASBackendCreateBulk) Save(ctx context.Context) ([]*CASBackend, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*CASBackend, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := cbcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*CASBackendMutation) @@ -932,12 +932,12 @@ func (cbcb *CASBackendCreateBulk) Save(ctx context.Context) ([]*CASBackend, erro var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, cbcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = cbcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, cbcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -957,7 +957,7 @@ func (cbcb *CASBackendCreateBulk) Save(ctx context.Context) ([]*CASBackend, erro }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, cbcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -965,8 +965,8 @@ func (cbcb *CASBackendCreateBulk) Save(ctx context.Context) ([]*CASBackend, erro } // SaveX is like Save, but panics if an error occurs. -func (cbcb *CASBackendCreateBulk) SaveX(ctx context.Context) []*CASBackend { - v, err := cbcb.Save(ctx) +func (_c *CASBackendCreateBulk) SaveX(ctx context.Context) []*CASBackend { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -974,14 +974,14 @@ func (cbcb *CASBackendCreateBulk) SaveX(ctx context.Context) []*CASBackend { } // Exec executes the query. -func (cbcb *CASBackendCreateBulk) Exec(ctx context.Context) error { - _, err := cbcb.Save(ctx) +func (_c *CASBackendCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (cbcb *CASBackendCreateBulk) ExecX(ctx context.Context) { - if err := cbcb.Exec(ctx); err != nil { +func (_c *CASBackendCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -1001,10 +1001,10 @@ func (cbcb *CASBackendCreateBulk) ExecX(ctx context.Context) { // SetLocation(v+v). // }). // Exec(ctx) -func (cbcb *CASBackendCreateBulk) OnConflict(opts ...sql.ConflictOption) *CASBackendUpsertBulk { - cbcb.conflict = opts +func (_c *CASBackendCreateBulk) OnConflict(opts ...sql.ConflictOption) *CASBackendUpsertBulk { + _c.conflict = opts return &CASBackendUpsertBulk{ - create: cbcb, + create: _c, } } @@ -1014,10 +1014,10 @@ func (cbcb *CASBackendCreateBulk) OnConflict(opts ...sql.ConflictOption) *CASBac // client.CASBackend.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (cbcb *CASBackendCreateBulk) OnConflictColumns(columns ...string) *CASBackendUpsertBulk { - cbcb.conflict = append(cbcb.conflict, sql.ConflictColumns(columns...)) +func (_c *CASBackendCreateBulk) OnConflictColumns(columns ...string) *CASBackendUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &CASBackendUpsertBulk{ - create: cbcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/casbackend_delete.go b/app/controlplane/pkg/data/ent/casbackend_delete.go index 57cc8a0b6..f598519aa 100644 --- a/app/controlplane/pkg/data/ent/casbackend_delete.go +++ b/app/controlplane/pkg/data/ent/casbackend_delete.go @@ -20,56 +20,56 @@ type CASBackendDelete struct { } // Where appends a list predicates to the CASBackendDelete builder. -func (cbd *CASBackendDelete) Where(ps ...predicate.CASBackend) *CASBackendDelete { - cbd.mutation.Where(ps...) - return cbd +func (_d *CASBackendDelete) Where(ps ...predicate.CASBackend) *CASBackendDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. 
-func (cbd *CASBackendDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, cbd.sqlExec, cbd.mutation, cbd.hooks) +func (_d *CASBackendDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (cbd *CASBackendDelete) ExecX(ctx context.Context) int { - n, err := cbd.Exec(ctx) +func (_d *CASBackendDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (cbd *CASBackendDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *CASBackendDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(casbackend.Table, sqlgraph.NewFieldSpec(casbackend.FieldID, field.TypeUUID)) - if ps := cbd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, cbd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - cbd.mutation.done = true + _d.mutation.done = true return affected, err } // CASBackendDeleteOne is the builder for deleting a single CASBackend entity. type CASBackendDeleteOne struct { - cbd *CASBackendDelete + _d *CASBackendDelete } // Where appends a list predicates to the CASBackendDelete builder. -func (cbdo *CASBackendDeleteOne) Where(ps ...predicate.CASBackend) *CASBackendDeleteOne { - cbdo.cbd.mutation.Where(ps...) - return cbdo +func (_d *CASBackendDeleteOne) Where(ps ...predicate.CASBackend) *CASBackendDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (cbdo *CASBackendDeleteOne) Exec(ctx context.Context) error { - n, err := cbdo.cbd.Exec(ctx) +func (_d *CASBackendDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (cbdo *CASBackendDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (cbdo *CASBackendDeleteOne) ExecX(ctx context.Context) { - if err := cbdo.Exec(ctx); err != nil { +func (_d *CASBackendDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/casbackend_query.go b/app/controlplane/pkg/data/ent/casbackend_query.go index ca73a8b16..ab18dab53 100644 --- a/app/controlplane/pkg/data/ent/casbackend_query.go +++ b/app/controlplane/pkg/data/ent/casbackend_query.go @@ -37,44 +37,44 @@ type CASBackendQuery struct { } // Where adds a new predicate for the CASBackendQuery builder. -func (cbq *CASBackendQuery) Where(ps ...predicate.CASBackend) *CASBackendQuery { - cbq.predicates = append(cbq.predicates, ps...) - return cbq +func (_q *CASBackendQuery) Where(ps ...predicate.CASBackend) *CASBackendQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (cbq *CASBackendQuery) Limit(limit int) *CASBackendQuery { - cbq.ctx.Limit = &limit - return cbq +func (_q *CASBackendQuery) Limit(limit int) *CASBackendQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. 
-func (cbq *CASBackendQuery) Offset(offset int) *CASBackendQuery { - cbq.ctx.Offset = &offset - return cbq +func (_q *CASBackendQuery) Offset(offset int) *CASBackendQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (cbq *CASBackendQuery) Unique(unique bool) *CASBackendQuery { - cbq.ctx.Unique = &unique - return cbq +func (_q *CASBackendQuery) Unique(unique bool) *CASBackendQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (cbq *CASBackendQuery) Order(o ...casbackend.OrderOption) *CASBackendQuery { - cbq.order = append(cbq.order, o...) - return cbq +func (_q *CASBackendQuery) Order(o ...casbackend.OrderOption) *CASBackendQuery { + _q.order = append(_q.order, o...) + return _q } // QueryOrganization chains the current query on the "organization" edge. -func (cbq *CASBackendQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: cbq.config}).Query() +func (_q *CASBackendQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := cbq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := cbq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -83,20 +83,20 @@ func (cbq *CASBackendQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, casbackend.OrganizationTable, casbackend.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(cbq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryWorkflowRun chains the current query on the "workflow_run" edge. -func (cbq *CASBackendQuery) QueryWorkflowRun() *WorkflowRunQuery { - query := (&WorkflowRunClient{config: cbq.config}).Query() +func (_q *CASBackendQuery) QueryWorkflowRun() *WorkflowRunQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := cbq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := cbq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -105,7 +105,7 @@ func (cbq *CASBackendQuery) QueryWorkflowRun() *WorkflowRunQuery { sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.M2M, true, casbackend.WorkflowRunTable, casbackend.WorkflowRunPrimaryKey...), ) - fromU = sqlgraph.SetNeighbors(cbq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -113,8 +113,8 @@ func (cbq *CASBackendQuery) QueryWorkflowRun() *WorkflowRunQuery { // First returns the first CASBackend entity from the query. // Returns a *NotFoundError when no CASBackend was found. 
-func (cbq *CASBackendQuery) First(ctx context.Context) (*CASBackend, error) { - nodes, err := cbq.Limit(1).All(setContextOp(ctx, cbq.ctx, ent.OpQueryFirst)) +func (_q *CASBackendQuery) First(ctx context.Context) (*CASBackend, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -125,8 +125,8 @@ func (cbq *CASBackendQuery) First(ctx context.Context) (*CASBackend, error) { } // FirstX is like First, but panics if an error occurs. -func (cbq *CASBackendQuery) FirstX(ctx context.Context) *CASBackend { - node, err := cbq.First(ctx) +func (_q *CASBackendQuery) FirstX(ctx context.Context) *CASBackend { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -135,9 +135,9 @@ func (cbq *CASBackendQuery) FirstX(ctx context.Context) *CASBackend { // FirstID returns the first CASBackend ID from the query. // Returns a *NotFoundError when no CASBackend ID was found. -func (cbq *CASBackendQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *CASBackendQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = cbq.Limit(1).IDs(setContextOp(ctx, cbq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -148,8 +148,8 @@ func (cbq *CASBackendQuery) FirstID(ctx context.Context) (id uuid.UUID, err erro } // FirstIDX is like FirstID, but panics if an error occurs. -func (cbq *CASBackendQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := cbq.FirstID(ctx) +func (_q *CASBackendQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -159,8 +159,8 @@ func (cbq *CASBackendQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single CASBackend entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one CASBackend entity is found. // Returns a *NotFoundError when no CASBackend entities are found. -func (cbq *CASBackendQuery) Only(ctx context.Context) (*CASBackend, error) { - nodes, err := cbq.Limit(2).All(setContextOp(ctx, cbq.ctx, ent.OpQueryOnly)) +func (_q *CASBackendQuery) Only(ctx context.Context) (*CASBackend, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -175,8 +175,8 @@ func (cbq *CASBackendQuery) Only(ctx context.Context) (*CASBackend, error) { } // OnlyX is like Only, but panics if an error occurs. -func (cbq *CASBackendQuery) OnlyX(ctx context.Context) *CASBackend { - node, err := cbq.Only(ctx) +func (_q *CASBackendQuery) OnlyX(ctx context.Context) *CASBackend { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -186,9 +186,9 @@ func (cbq *CASBackendQuery) OnlyX(ctx context.Context) *CASBackend { // OnlyID is like Only, but returns the only CASBackend ID in the query. // Returns a *NotSingularError when more than one CASBackend ID is found. // Returns a *NotFoundError when no entities are found. 
-func (cbq *CASBackendQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *CASBackendQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = cbq.Limit(2).IDs(setContextOp(ctx, cbq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -203,8 +203,8 @@ func (cbq *CASBackendQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (cbq *CASBackendQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := cbq.OnlyID(ctx) +func (_q *CASBackendQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -212,18 +212,18 @@ func (cbq *CASBackendQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of CASBackends. -func (cbq *CASBackendQuery) All(ctx context.Context) ([]*CASBackend, error) { - ctx = setContextOp(ctx, cbq.ctx, ent.OpQueryAll) - if err := cbq.prepareQuery(ctx); err != nil { +func (_q *CASBackendQuery) All(ctx context.Context) ([]*CASBackend, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*CASBackend, *CASBackendQuery]() - return withInterceptors[[]*CASBackend](ctx, cbq, qr, cbq.inters) + return withInterceptors[[]*CASBackend](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (cbq *CASBackendQuery) AllX(ctx context.Context) []*CASBackend { - nodes, err := cbq.All(ctx) +func (_q *CASBackendQuery) AllX(ctx context.Context) []*CASBackend { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -231,20 +231,20 @@ func (cbq *CASBackendQuery) AllX(ctx context.Context) []*CASBackend { } // IDs executes the query and returns a list of CASBackend IDs. -func (cbq *CASBackendQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if cbq.ctx.Unique == nil && cbq.path != nil { - cbq.Unique(true) +func (_q *CASBackendQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, cbq.ctx, ent.OpQueryIDs) - if err = cbq.Select(casbackend.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(casbackend.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (cbq *CASBackendQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := cbq.IDs(ctx) +func (_q *CASBackendQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -252,17 +252,17 @@ func (cbq *CASBackendQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (cbq *CASBackendQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, cbq.ctx, ent.OpQueryCount) - if err := cbq.prepareQuery(ctx); err != nil { +func (_q *CASBackendQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, cbq, querierCount[*CASBackendQuery](), cbq.inters) + return withInterceptors[int](ctx, _q, querierCount[*CASBackendQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. 
-func (cbq *CASBackendQuery) CountX(ctx context.Context) int { - count, err := cbq.Count(ctx) +func (_q *CASBackendQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -270,9 +270,9 @@ func (cbq *CASBackendQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (cbq *CASBackendQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, cbq.ctx, ent.OpQueryExist) - switch _, err := cbq.FirstID(ctx); { +func (_q *CASBackendQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -283,8 +283,8 @@ func (cbq *CASBackendQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (cbq *CASBackendQuery) ExistX(ctx context.Context) bool { - exist, err := cbq.Exist(ctx) +func (_q *CASBackendQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -293,45 +293,45 @@ func (cbq *CASBackendQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the CASBackendQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (cbq *CASBackendQuery) Clone() *CASBackendQuery { - if cbq == nil { +func (_q *CASBackendQuery) Clone() *CASBackendQuery { + if _q == nil { return nil } return &CASBackendQuery{ - config: cbq.config, - ctx: cbq.ctx.Clone(), - order: append([]casbackend.OrderOption{}, cbq.order...), - inters: append([]Interceptor{}, cbq.inters...), - predicates: append([]predicate.CASBackend{}, cbq.predicates...), - withOrganization: cbq.withOrganization.Clone(), - withWorkflowRun: cbq.withWorkflowRun.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]casbackend.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.CASBackend{}, _q.predicates...), + withOrganization: _q.withOrganization.Clone(), + withWorkflowRun: _q.withWorkflowRun.Clone(), // clone intermediate query. - sql: cbq.sql.Clone(), - path: cbq.path, - modifiers: append([]func(*sql.Selector){}, cbq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (cbq *CASBackendQuery) WithOrganization(opts ...func(*OrganizationQuery)) *CASBackendQuery { - query := (&OrganizationClient{config: cbq.config}).Query() +func (_q *CASBackendQuery) WithOrganization(opts ...func(*OrganizationQuery)) *CASBackendQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - cbq.withOrganization = query - return cbq + _q.withOrganization = query + return _q } // WithWorkflowRun tells the query-builder to eager-load the nodes that are connected to // the "workflow_run" edge. The optional arguments are used to configure the query builder of the edge. 
-func (cbq *CASBackendQuery) WithWorkflowRun(opts ...func(*WorkflowRunQuery)) *CASBackendQuery { - query := (&WorkflowRunClient{config: cbq.config}).Query() +func (_q *CASBackendQuery) WithWorkflowRun(opts ...func(*WorkflowRunQuery)) *CASBackendQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - cbq.withWorkflowRun = query - return cbq + _q.withWorkflowRun = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -348,10 +348,10 @@ func (cbq *CASBackendQuery) WithWorkflowRun(opts ...func(*WorkflowRunQuery)) *CA // GroupBy(casbackend.FieldLocation). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (cbq *CASBackendQuery) GroupBy(field string, fields ...string) *CASBackendGroupBy { - cbq.ctx.Fields = append([]string{field}, fields...) - grbuild := &CASBackendGroupBy{build: cbq} - grbuild.flds = &cbq.ctx.Fields +func (_q *CASBackendQuery) GroupBy(field string, fields ...string) *CASBackendGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &CASBackendGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = casbackend.Label grbuild.scan = grbuild.Scan return grbuild @@ -369,56 +369,56 @@ func (cbq *CASBackendQuery) GroupBy(field string, fields ...string) *CASBackendG // client.CASBackend.Query(). // Select(casbackend.FieldLocation). // Scan(ctx, &v) -func (cbq *CASBackendQuery) Select(fields ...string) *CASBackendSelect { - cbq.ctx.Fields = append(cbq.ctx.Fields, fields...) - sbuild := &CASBackendSelect{CASBackendQuery: cbq} +func (_q *CASBackendQuery) Select(fields ...string) *CASBackendSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &CASBackendSelect{CASBackendQuery: _q} sbuild.label = casbackend.Label - sbuild.flds, sbuild.scan = &cbq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a CASBackendSelect configured with the given aggregations. -func (cbq *CASBackendQuery) Aggregate(fns ...AggregateFunc) *CASBackendSelect { - return cbq.Select().Aggregate(fns...) +func (_q *CASBackendQuery) Aggregate(fns ...AggregateFunc) *CASBackendSelect { + return _q.Select().Aggregate(fns...) 
} -func (cbq *CASBackendQuery) prepareQuery(ctx context.Context) error { - for _, inter := range cbq.inters { +func (_q *CASBackendQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, cbq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range cbq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !casbackend.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if cbq.path != nil { - prev, err := cbq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - cbq.sql = prev + _q.sql = prev } return nil } -func (cbq *CASBackendQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*CASBackend, error) { +func (_q *CASBackendQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*CASBackend, error) { var ( nodes = []*CASBackend{} - withFKs = cbq.withFKs - _spec = cbq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [2]bool{ - cbq.withOrganization != nil, - cbq.withWorkflowRun != nil, + _q.withOrganization != nil, + _q.withWorkflowRun != nil, } ) - if cbq.withOrganization != nil { + if _q.withOrganization != nil { withFKs = true } if withFKs { @@ -428,31 +428,31 @@ func (cbq *CASBackendQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* return (*CASBackend).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &CASBackend{config: cbq.config} + node := &CASBackend{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(cbq.modifiers) > 0 { - _spec.Modifiers = cbq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, cbq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := cbq.withOrganization; query != nil { - if err := cbq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *CASBackend, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } } - if query := cbq.withWorkflowRun; query != nil { - if err := cbq.loadWorkflowRun(ctx, query, nodes, + if query := _q.withWorkflowRun; query != nil { + if err := _q.loadWorkflowRun(ctx, query, nodes, func(n *CASBackend) { n.Edges.WorkflowRun = []*WorkflowRun{} }, func(n *CASBackend, e *WorkflowRun) { n.Edges.WorkflowRun = append(n.Edges.WorkflowRun, e) }); err != nil { return nil, err @@ -461,7 +461,7 @@ func (cbq *CASBackendQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* return nodes, nil } -func (cbq *CASBackendQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*CASBackend, init func(*CASBackend), assign func(*CASBackend, *Organization)) error { +func (_q *CASBackendQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*CASBackend, init func(*CASBackend), assign func(*CASBackend, *Organization)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*CASBackend) for i := range nodes { @@ -493,7 +493,7 @@ func (cbq *CASBackendQuery) 
loadOrganization(ctx context.Context, query *Organiz } return nil } -func (cbq *CASBackendQuery) loadWorkflowRun(ctx context.Context, query *WorkflowRunQuery, nodes []*CASBackend, init func(*CASBackend), assign func(*CASBackend, *WorkflowRun)) error { +func (_q *CASBackendQuery) loadWorkflowRun(ctx context.Context, query *WorkflowRunQuery, nodes []*CASBackend, init func(*CASBackend), assign func(*CASBackend, *WorkflowRun)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[uuid.UUID]*CASBackend) nids := make(map[uuid.UUID]map[*CASBackend]struct{}) @@ -555,27 +555,27 @@ func (cbq *CASBackendQuery) loadWorkflowRun(ctx context.Context, query *Workflow return nil } -func (cbq *CASBackendQuery) sqlCount(ctx context.Context) (int, error) { - _spec := cbq.querySpec() - if len(cbq.modifiers) > 0 { - _spec.Modifiers = cbq.modifiers +func (_q *CASBackendQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = cbq.ctx.Fields - if len(cbq.ctx.Fields) > 0 { - _spec.Unique = cbq.ctx.Unique != nil && *cbq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, cbq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (cbq *CASBackendQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *CASBackendQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(casbackend.Table, casbackend.Columns, sqlgraph.NewFieldSpec(casbackend.FieldID, field.TypeUUID)) - _spec.From = cbq.sql - if unique := cbq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if cbq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := cbq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, casbackend.FieldID) for i := range fields { @@ -584,20 +584,20 @@ func (cbq *CASBackendQuery) querySpec() *sqlgraph.QuerySpec { } } } - if ps := cbq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := cbq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := cbq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := cbq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -607,36 +607,36 @@ func (cbq *CASBackendQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (cbq *CASBackendQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(cbq.driver.Dialect()) +func (_q *CASBackendQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(casbackend.Table) - columns := cbq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = casbackend.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if cbq.sql != nil { - selector = cbq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) 
} - if cbq.ctx.Unique != nil && *cbq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range cbq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range cbq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range cbq.order { + for _, p := range _q.order { p(selector) } - if offset := cbq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := cbq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -645,33 +645,33 @@ func (cbq *CASBackendQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (cbq *CASBackendQuery) ForUpdate(opts ...sql.LockOption) *CASBackendQuery { - if cbq.driver.Dialect() == dialect.Postgres { - cbq.Unique(false) +func (_q *CASBackendQuery) ForUpdate(opts ...sql.LockOption) *CASBackendQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - cbq.modifiers = append(cbq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return cbq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (cbq *CASBackendQuery) ForShare(opts ...sql.LockOption) *CASBackendQuery { - if cbq.driver.Dialect() == dialect.Postgres { - cbq.Unique(false) +func (_q *CASBackendQuery) ForShare(opts ...sql.LockOption) *CASBackendQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - cbq.modifiers = append(cbq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return cbq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (cbq *CASBackendQuery) Modify(modifiers ...func(s *sql.Selector)) *CASBackendSelect { - cbq.modifiers = append(cbq.modifiers, modifiers...) - return cbq.Select() +func (_q *CASBackendQuery) Modify(modifiers ...func(s *sql.Selector)) *CASBackendSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // CASBackendGroupBy is the group-by builder for CASBackend entities. @@ -681,41 +681,41 @@ type CASBackendGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (cbgb *CASBackendGroupBy) Aggregate(fns ...AggregateFunc) *CASBackendGroupBy { - cbgb.fns = append(cbgb.fns, fns...) - return cbgb +func (_g *CASBackendGroupBy) Aggregate(fns ...AggregateFunc) *CASBackendGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (cbgb *CASBackendGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, cbgb.build.ctx, ent.OpQueryGroupBy) - if err := cbgb.build.prepareQuery(ctx); err != nil { +func (_g *CASBackendGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*CASBackendQuery, *CASBackendGroupBy](ctx, cbgb.build, cbgb, cbgb.build.inters, v) + return scanWithInterceptors[*CASBackendQuery, *CASBackendGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (cbgb *CASBackendGroupBy) sqlScan(ctx context.Context, root *CASBackendQuery, v any) error { +func (_g *CASBackendGroupBy) sqlScan(ctx context.Context, root *CASBackendQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(cbgb.fns)) - for _, fn := range cbgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*cbgb.flds)+len(cbgb.fns)) - for _, f := range *cbgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*cbgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := cbgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -729,27 +729,27 @@ type CASBackendSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (cbs *CASBackendSelect) Aggregate(fns ...AggregateFunc) *CASBackendSelect { - cbs.fns = append(cbs.fns, fns...) - return cbs +func (_s *CASBackendSelect) Aggregate(fns ...AggregateFunc) *CASBackendSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (cbs *CASBackendSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, cbs.ctx, ent.OpQuerySelect) - if err := cbs.prepareQuery(ctx); err != nil { +func (_s *CASBackendSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*CASBackendQuery, *CASBackendSelect](ctx, cbs.CASBackendQuery, cbs, cbs.inters, v) + return scanWithInterceptors[*CASBackendQuery, *CASBackendSelect](ctx, _s.CASBackendQuery, _s, _s.inters, v) } -func (cbs *CASBackendSelect) sqlScan(ctx context.Context, root *CASBackendQuery, v any) error { +func (_s *CASBackendSelect) sqlScan(ctx context.Context, root *CASBackendQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(cbs.fns)) - for _, fn := range cbs.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*cbs.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -757,7 +757,7 @@ func (cbs *CASBackendSelect) sqlScan(ctx context.Context, root *CASBackendQuery, } rows := &sql.Rows{} query, args := selector.Query() - if err := cbs.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -765,7 +765,7 @@ func (cbs *CASBackendSelect) sqlScan(ctx context.Context, root *CASBackendQuery, } // Modify adds a query modifier for attaching custom logic to queries. -func (cbs *CASBackendSelect) Modify(modifiers ...func(s *sql.Selector)) *CASBackendSelect { - cbs.modifiers = append(cbs.modifiers, modifiers...) - return cbs +func (_s *CASBackendSelect) Modify(modifiers ...func(s *sql.Selector)) *CASBackendSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/casbackend_update.go b/app/controlplane/pkg/data/ent/casbackend_update.go index c39097819..c5d35ab00 100644 --- a/app/controlplane/pkg/data/ent/casbackend_update.go +++ b/app/controlplane/pkg/data/ent/casbackend_update.go @@ -28,221 +28,221 @@ type CASBackendUpdate struct { } // Where appends a list predicates to the CASBackendUpdate builder. -func (cbu *CASBackendUpdate) Where(ps ...predicate.CASBackend) *CASBackendUpdate { - cbu.mutation.Where(ps...) - return cbu +func (_u *CASBackendUpdate) Where(ps ...predicate.CASBackend) *CASBackendUpdate { + _u.mutation.Where(ps...) + return _u } // SetDescription sets the "description" field. -func (cbu *CASBackendUpdate) SetDescription(s string) *CASBackendUpdate { - cbu.mutation.SetDescription(s) - return cbu +func (_u *CASBackendUpdate) SetDescription(v string) *CASBackendUpdate { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (cbu *CASBackendUpdate) SetNillableDescription(s *string) *CASBackendUpdate { - if s != nil { - cbu.SetDescription(*s) +func (_u *CASBackendUpdate) SetNillableDescription(v *string) *CASBackendUpdate { + if v != nil { + _u.SetDescription(*v) } - return cbu + return _u } // ClearDescription clears the value of the "description" field. -func (cbu *CASBackendUpdate) ClearDescription() *CASBackendUpdate { - cbu.mutation.ClearDescription() - return cbu +func (_u *CASBackendUpdate) ClearDescription() *CASBackendUpdate { + _u.mutation.ClearDescription() + return _u } // SetSecretName sets the "secret_name" field. -func (cbu *CASBackendUpdate) SetSecretName(s string) *CASBackendUpdate { - cbu.mutation.SetSecretName(s) - return cbu +func (_u *CASBackendUpdate) SetSecretName(v string) *CASBackendUpdate { + _u.mutation.SetSecretName(v) + return _u } // SetNillableSecretName sets the "secret_name" field if the given value is not nil. -func (cbu *CASBackendUpdate) SetNillableSecretName(s *string) *CASBackendUpdate { - if s != nil { - cbu.SetSecretName(*s) +func (_u *CASBackendUpdate) SetNillableSecretName(v *string) *CASBackendUpdate { + if v != nil { + _u.SetSecretName(*v) } - return cbu + return _u } // SetUpdatedAt sets the "updated_at" field. -func (cbu *CASBackendUpdate) SetUpdatedAt(t time.Time) *CASBackendUpdate { - cbu.mutation.SetUpdatedAt(t) - return cbu +func (_u *CASBackendUpdate) SetUpdatedAt(v time.Time) *CASBackendUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetValidationStatus sets the "validation_status" field. 
-func (cbu *CASBackendUpdate) SetValidationStatus(bbvs biz.CASBackendValidationStatus) *CASBackendUpdate { - cbu.mutation.SetValidationStatus(bbvs) - return cbu +func (_u *CASBackendUpdate) SetValidationStatus(v biz.CASBackendValidationStatus) *CASBackendUpdate { + _u.mutation.SetValidationStatus(v) + return _u } // SetNillableValidationStatus sets the "validation_status" field if the given value is not nil. -func (cbu *CASBackendUpdate) SetNillableValidationStatus(bbvs *biz.CASBackendValidationStatus) *CASBackendUpdate { - if bbvs != nil { - cbu.SetValidationStatus(*bbvs) +func (_u *CASBackendUpdate) SetNillableValidationStatus(v *biz.CASBackendValidationStatus) *CASBackendUpdate { + if v != nil { + _u.SetValidationStatus(*v) } - return cbu + return _u } // SetValidationError sets the "validation_error" field. -func (cbu *CASBackendUpdate) SetValidationError(s string) *CASBackendUpdate { - cbu.mutation.SetValidationError(s) - return cbu +func (_u *CASBackendUpdate) SetValidationError(v string) *CASBackendUpdate { + _u.mutation.SetValidationError(v) + return _u } // SetNillableValidationError sets the "validation_error" field if the given value is not nil. -func (cbu *CASBackendUpdate) SetNillableValidationError(s *string) *CASBackendUpdate { - if s != nil { - cbu.SetValidationError(*s) +func (_u *CASBackendUpdate) SetNillableValidationError(v *string) *CASBackendUpdate { + if v != nil { + _u.SetValidationError(*v) } - return cbu + return _u } // ClearValidationError clears the value of the "validation_error" field. -func (cbu *CASBackendUpdate) ClearValidationError() *CASBackendUpdate { - cbu.mutation.ClearValidationError() - return cbu +func (_u *CASBackendUpdate) ClearValidationError() *CASBackendUpdate { + _u.mutation.ClearValidationError() + return _u } // SetValidatedAt sets the "validated_at" field. -func (cbu *CASBackendUpdate) SetValidatedAt(t time.Time) *CASBackendUpdate { - cbu.mutation.SetValidatedAt(t) - return cbu +func (_u *CASBackendUpdate) SetValidatedAt(v time.Time) *CASBackendUpdate { + _u.mutation.SetValidatedAt(v) + return _u } // SetNillableValidatedAt sets the "validated_at" field if the given value is not nil. -func (cbu *CASBackendUpdate) SetNillableValidatedAt(t *time.Time) *CASBackendUpdate { - if t != nil { - cbu.SetValidatedAt(*t) +func (_u *CASBackendUpdate) SetNillableValidatedAt(v *time.Time) *CASBackendUpdate { + if v != nil { + _u.SetValidatedAt(*v) } - return cbu + return _u } // SetDefault sets the "default" field. -func (cbu *CASBackendUpdate) SetDefault(b bool) *CASBackendUpdate { - cbu.mutation.SetDefault(b) - return cbu +func (_u *CASBackendUpdate) SetDefault(v bool) *CASBackendUpdate { + _u.mutation.SetDefault(v) + return _u } // SetNillableDefault sets the "default" field if the given value is not nil. -func (cbu *CASBackendUpdate) SetNillableDefault(b *bool) *CASBackendUpdate { - if b != nil { - cbu.SetDefault(*b) +func (_u *CASBackendUpdate) SetNillableDefault(v *bool) *CASBackendUpdate { + if v != nil { + _u.SetDefault(*v) } - return cbu + return _u } // SetDeletedAt sets the "deleted_at" field. -func (cbu *CASBackendUpdate) SetDeletedAt(t time.Time) *CASBackendUpdate { - cbu.mutation.SetDeletedAt(t) - return cbu +func (_u *CASBackendUpdate) SetDeletedAt(v time.Time) *CASBackendUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. 
-func (cbu *CASBackendUpdate) SetNillableDeletedAt(t *time.Time) *CASBackendUpdate { - if t != nil { - cbu.SetDeletedAt(*t) +func (_u *CASBackendUpdate) SetNillableDeletedAt(v *time.Time) *CASBackendUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return cbu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (cbu *CASBackendUpdate) ClearDeletedAt() *CASBackendUpdate { - cbu.mutation.ClearDeletedAt() - return cbu +func (_u *CASBackendUpdate) ClearDeletedAt() *CASBackendUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetMaxBlobSizeBytes sets the "max_blob_size_bytes" field. -func (cbu *CASBackendUpdate) SetMaxBlobSizeBytes(i int64) *CASBackendUpdate { - cbu.mutation.ResetMaxBlobSizeBytes() - cbu.mutation.SetMaxBlobSizeBytes(i) - return cbu +func (_u *CASBackendUpdate) SetMaxBlobSizeBytes(v int64) *CASBackendUpdate { + _u.mutation.ResetMaxBlobSizeBytes() + _u.mutation.SetMaxBlobSizeBytes(v) + return _u } // SetNillableMaxBlobSizeBytes sets the "max_blob_size_bytes" field if the given value is not nil. -func (cbu *CASBackendUpdate) SetNillableMaxBlobSizeBytes(i *int64) *CASBackendUpdate { - if i != nil { - cbu.SetMaxBlobSizeBytes(*i) +func (_u *CASBackendUpdate) SetNillableMaxBlobSizeBytes(v *int64) *CASBackendUpdate { + if v != nil { + _u.SetMaxBlobSizeBytes(*v) } - return cbu + return _u } -// AddMaxBlobSizeBytes adds i to the "max_blob_size_bytes" field. -func (cbu *CASBackendUpdate) AddMaxBlobSizeBytes(i int64) *CASBackendUpdate { - cbu.mutation.AddMaxBlobSizeBytes(i) - return cbu +// AddMaxBlobSizeBytes adds value to the "max_blob_size_bytes" field. +func (_u *CASBackendUpdate) AddMaxBlobSizeBytes(v int64) *CASBackendUpdate { + _u.mutation.AddMaxBlobSizeBytes(v) + return _u } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (cbu *CASBackendUpdate) SetOrganizationID(id uuid.UUID) *CASBackendUpdate { - cbu.mutation.SetOrganizationID(id) - return cbu +func (_u *CASBackendUpdate) SetOrganizationID(id uuid.UUID) *CASBackendUpdate { + _u.mutation.SetOrganizationID(id) + return _u } // SetOrganization sets the "organization" edge to the Organization entity. -func (cbu *CASBackendUpdate) SetOrganization(o *Organization) *CASBackendUpdate { - return cbu.SetOrganizationID(o.ID) +func (_u *CASBackendUpdate) SetOrganization(v *Organization) *CASBackendUpdate { + return _u.SetOrganizationID(v.ID) } // AddWorkflowRunIDs adds the "workflow_run" edge to the WorkflowRun entity by IDs. -func (cbu *CASBackendUpdate) AddWorkflowRunIDs(ids ...uuid.UUID) *CASBackendUpdate { - cbu.mutation.AddWorkflowRunIDs(ids...) - return cbu +func (_u *CASBackendUpdate) AddWorkflowRunIDs(ids ...uuid.UUID) *CASBackendUpdate { + _u.mutation.AddWorkflowRunIDs(ids...) + return _u } // AddWorkflowRun adds the "workflow_run" edges to the WorkflowRun entity. -func (cbu *CASBackendUpdate) AddWorkflowRun(w ...*WorkflowRun) *CASBackendUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *CASBackendUpdate) AddWorkflowRun(v ...*WorkflowRun) *CASBackendUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return cbu.AddWorkflowRunIDs(ids...) + return _u.AddWorkflowRunIDs(ids...) } // Mutation returns the CASBackendMutation object of the builder. 
-func (cbu *CASBackendUpdate) Mutation() *CASBackendMutation { - return cbu.mutation +func (_u *CASBackendUpdate) Mutation() *CASBackendMutation { + return _u.mutation } // ClearOrganization clears the "organization" edge to the Organization entity. -func (cbu *CASBackendUpdate) ClearOrganization() *CASBackendUpdate { - cbu.mutation.ClearOrganization() - return cbu +func (_u *CASBackendUpdate) ClearOrganization() *CASBackendUpdate { + _u.mutation.ClearOrganization() + return _u } // ClearWorkflowRun clears all "workflow_run" edges to the WorkflowRun entity. -func (cbu *CASBackendUpdate) ClearWorkflowRun() *CASBackendUpdate { - cbu.mutation.ClearWorkflowRun() - return cbu +func (_u *CASBackendUpdate) ClearWorkflowRun() *CASBackendUpdate { + _u.mutation.ClearWorkflowRun() + return _u } // RemoveWorkflowRunIDs removes the "workflow_run" edge to WorkflowRun entities by IDs. -func (cbu *CASBackendUpdate) RemoveWorkflowRunIDs(ids ...uuid.UUID) *CASBackendUpdate { - cbu.mutation.RemoveWorkflowRunIDs(ids...) - return cbu +func (_u *CASBackendUpdate) RemoveWorkflowRunIDs(ids ...uuid.UUID) *CASBackendUpdate { + _u.mutation.RemoveWorkflowRunIDs(ids...) + return _u } // RemoveWorkflowRun removes "workflow_run" edges to WorkflowRun entities. -func (cbu *CASBackendUpdate) RemoveWorkflowRun(w ...*WorkflowRun) *CASBackendUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *CASBackendUpdate) RemoveWorkflowRun(v ...*WorkflowRun) *CASBackendUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return cbu.RemoveWorkflowRunIDs(ids...) + return _u.RemoveWorkflowRunIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (cbu *CASBackendUpdate) Save(ctx context.Context) (int, error) { - cbu.defaults() - return withHooks(ctx, cbu.sqlSave, cbu.mutation, cbu.hooks) +func (_u *CASBackendUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (cbu *CASBackendUpdate) SaveX(ctx context.Context) int { - affected, err := cbu.Save(ctx) +func (_u *CASBackendUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -250,97 +250,97 @@ func (cbu *CASBackendUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (cbu *CASBackendUpdate) Exec(ctx context.Context) error { - _, err := cbu.Save(ctx) +func (_u *CASBackendUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (cbu *CASBackendUpdate) ExecX(ctx context.Context) { - if err := cbu.Exec(ctx); err != nil { +func (_u *CASBackendUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (cbu *CASBackendUpdate) defaults() { - if _, ok := cbu.mutation.UpdatedAt(); !ok { +func (_u *CASBackendUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { v := casbackend.UpdateDefaultUpdatedAt() - cbu.mutation.SetUpdatedAt(v) + _u.mutation.SetUpdatedAt(v) } } // check runs all checks and user-defined validators on the builder. 
-func (cbu *CASBackendUpdate) check() error { - if v, ok := cbu.mutation.ValidationStatus(); ok { +func (_u *CASBackendUpdate) check() error { + if v, ok := _u.mutation.ValidationStatus(); ok { if err := casbackend.ValidationStatusValidator(v); err != nil { return &ValidationError{Name: "validation_status", err: fmt.Errorf(`ent: validator failed for field "CASBackend.validation_status": %w`, err)} } } - if cbu.mutation.OrganizationCleared() && len(cbu.mutation.OrganizationIDs()) > 0 { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "CASBackend.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (cbu *CASBackendUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *CASBackendUpdate { - cbu.modifiers = append(cbu.modifiers, modifiers...) - return cbu +func (_u *CASBackendUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *CASBackendUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (cbu *CASBackendUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := cbu.check(); err != nil { - return n, err +func (_u *CASBackendUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(casbackend.Table, casbackend.Columns, sqlgraph.NewFieldSpec(casbackend.FieldID, field.TypeUUID)) - if ps := cbu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := cbu.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(casbackend.FieldDescription, field.TypeString, value) } - if cbu.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(casbackend.FieldDescription, field.TypeString) } - if value, ok := cbu.mutation.SecretName(); ok { + if value, ok := _u.mutation.SecretName(); ok { _spec.SetField(casbackend.FieldSecretName, field.TypeString, value) } - if value, ok := cbu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(casbackend.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := cbu.mutation.ValidationStatus(); ok { + if value, ok := _u.mutation.ValidationStatus(); ok { _spec.SetField(casbackend.FieldValidationStatus, field.TypeEnum, value) } - if value, ok := cbu.mutation.ValidationError(); ok { + if value, ok := _u.mutation.ValidationError(); ok { _spec.SetField(casbackend.FieldValidationError, field.TypeString, value) } - if cbu.mutation.ValidationErrorCleared() { + if _u.mutation.ValidationErrorCleared() { _spec.ClearField(casbackend.FieldValidationError, field.TypeString) } - if value, ok := cbu.mutation.ValidatedAt(); ok { + if value, ok := _u.mutation.ValidatedAt(); ok { _spec.SetField(casbackend.FieldValidatedAt, field.TypeTime, value) } - if value, ok := cbu.mutation.Default(); ok { + if value, ok := _u.mutation.Default(); ok { _spec.SetField(casbackend.FieldDefault, field.TypeBool, value) } - if value, ok := cbu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(casbackend.FieldDeletedAt, field.TypeTime, value) } - if cbu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(casbackend.FieldDeletedAt, field.TypeTime) } - if value, ok := cbu.mutation.MaxBlobSizeBytes(); ok { + if 
value, ok := _u.mutation.MaxBlobSizeBytes(); ok { _spec.SetField(casbackend.FieldMaxBlobSizeBytes, field.TypeInt64, value) } - if value, ok := cbu.mutation.AddedMaxBlobSizeBytes(); ok { + if value, ok := _u.mutation.AddedMaxBlobSizeBytes(); ok { _spec.AddField(casbackend.FieldMaxBlobSizeBytes, field.TypeInt64, value) } - if cbu.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -353,7 +353,7 @@ func (cbu *CASBackendUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := cbu.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -369,7 +369,7 @@ func (cbu *CASBackendUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if cbu.mutation.WorkflowRunCleared() { + if _u.mutation.WorkflowRunCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -382,7 +382,7 @@ func (cbu *CASBackendUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := cbu.mutation.RemovedWorkflowRunIDs(); len(nodes) > 0 && !cbu.mutation.WorkflowRunCleared() { + if nodes := _u.mutation.RemovedWorkflowRunIDs(); len(nodes) > 0 && !_u.mutation.WorkflowRunCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -398,7 +398,7 @@ func (cbu *CASBackendUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := cbu.mutation.WorkflowRunIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowRunIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -414,8 +414,8 @@ func (cbu *CASBackendUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(cbu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, cbu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{casbackend.Label} } else if sqlgraph.IsConstraintError(err) { @@ -423,8 +423,8 @@ func (cbu *CASBackendUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - cbu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // CASBackendUpdateOne is the builder for updating a single CASBackend entity. @@ -437,228 +437,228 @@ type CASBackendUpdateOne struct { } // SetDescription sets the "description" field. -func (cbuo *CASBackendUpdateOne) SetDescription(s string) *CASBackendUpdateOne { - cbuo.mutation.SetDescription(s) - return cbuo +func (_u *CASBackendUpdateOne) SetDescription(v string) *CASBackendUpdateOne { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (cbuo *CASBackendUpdateOne) SetNillableDescription(s *string) *CASBackendUpdateOne { - if s != nil { - cbuo.SetDescription(*s) +func (_u *CASBackendUpdateOne) SetNillableDescription(v *string) *CASBackendUpdateOne { + if v != nil { + _u.SetDescription(*v) } - return cbuo + return _u } // ClearDescription clears the value of the "description" field. 
-func (cbuo *CASBackendUpdateOne) ClearDescription() *CASBackendUpdateOne { - cbuo.mutation.ClearDescription() - return cbuo +func (_u *CASBackendUpdateOne) ClearDescription() *CASBackendUpdateOne { + _u.mutation.ClearDescription() + return _u } // SetSecretName sets the "secret_name" field. -func (cbuo *CASBackendUpdateOne) SetSecretName(s string) *CASBackendUpdateOne { - cbuo.mutation.SetSecretName(s) - return cbuo +func (_u *CASBackendUpdateOne) SetSecretName(v string) *CASBackendUpdateOne { + _u.mutation.SetSecretName(v) + return _u } // SetNillableSecretName sets the "secret_name" field if the given value is not nil. -func (cbuo *CASBackendUpdateOne) SetNillableSecretName(s *string) *CASBackendUpdateOne { - if s != nil { - cbuo.SetSecretName(*s) +func (_u *CASBackendUpdateOne) SetNillableSecretName(v *string) *CASBackendUpdateOne { + if v != nil { + _u.SetSecretName(*v) } - return cbuo + return _u } // SetUpdatedAt sets the "updated_at" field. -func (cbuo *CASBackendUpdateOne) SetUpdatedAt(t time.Time) *CASBackendUpdateOne { - cbuo.mutation.SetUpdatedAt(t) - return cbuo +func (_u *CASBackendUpdateOne) SetUpdatedAt(v time.Time) *CASBackendUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetValidationStatus sets the "validation_status" field. -func (cbuo *CASBackendUpdateOne) SetValidationStatus(bbvs biz.CASBackendValidationStatus) *CASBackendUpdateOne { - cbuo.mutation.SetValidationStatus(bbvs) - return cbuo +func (_u *CASBackendUpdateOne) SetValidationStatus(v biz.CASBackendValidationStatus) *CASBackendUpdateOne { + _u.mutation.SetValidationStatus(v) + return _u } // SetNillableValidationStatus sets the "validation_status" field if the given value is not nil. -func (cbuo *CASBackendUpdateOne) SetNillableValidationStatus(bbvs *biz.CASBackendValidationStatus) *CASBackendUpdateOne { - if bbvs != nil { - cbuo.SetValidationStatus(*bbvs) +func (_u *CASBackendUpdateOne) SetNillableValidationStatus(v *biz.CASBackendValidationStatus) *CASBackendUpdateOne { + if v != nil { + _u.SetValidationStatus(*v) } - return cbuo + return _u } // SetValidationError sets the "validation_error" field. -func (cbuo *CASBackendUpdateOne) SetValidationError(s string) *CASBackendUpdateOne { - cbuo.mutation.SetValidationError(s) - return cbuo +func (_u *CASBackendUpdateOne) SetValidationError(v string) *CASBackendUpdateOne { + _u.mutation.SetValidationError(v) + return _u } // SetNillableValidationError sets the "validation_error" field if the given value is not nil. -func (cbuo *CASBackendUpdateOne) SetNillableValidationError(s *string) *CASBackendUpdateOne { - if s != nil { - cbuo.SetValidationError(*s) +func (_u *CASBackendUpdateOne) SetNillableValidationError(v *string) *CASBackendUpdateOne { + if v != nil { + _u.SetValidationError(*v) } - return cbuo + return _u } // ClearValidationError clears the value of the "validation_error" field. -func (cbuo *CASBackendUpdateOne) ClearValidationError() *CASBackendUpdateOne { - cbuo.mutation.ClearValidationError() - return cbuo +func (_u *CASBackendUpdateOne) ClearValidationError() *CASBackendUpdateOne { + _u.mutation.ClearValidationError() + return _u } // SetValidatedAt sets the "validated_at" field. -func (cbuo *CASBackendUpdateOne) SetValidatedAt(t time.Time) *CASBackendUpdateOne { - cbuo.mutation.SetValidatedAt(t) - return cbuo +func (_u *CASBackendUpdateOne) SetValidatedAt(v time.Time) *CASBackendUpdateOne { + _u.mutation.SetValidatedAt(v) + return _u } // SetNillableValidatedAt sets the "validated_at" field if the given value is not nil. 
-func (cbuo *CASBackendUpdateOne) SetNillableValidatedAt(t *time.Time) *CASBackendUpdateOne { - if t != nil { - cbuo.SetValidatedAt(*t) +func (_u *CASBackendUpdateOne) SetNillableValidatedAt(v *time.Time) *CASBackendUpdateOne { + if v != nil { + _u.SetValidatedAt(*v) } - return cbuo + return _u } // SetDefault sets the "default" field. -func (cbuo *CASBackendUpdateOne) SetDefault(b bool) *CASBackendUpdateOne { - cbuo.mutation.SetDefault(b) - return cbuo +func (_u *CASBackendUpdateOne) SetDefault(v bool) *CASBackendUpdateOne { + _u.mutation.SetDefault(v) + return _u } // SetNillableDefault sets the "default" field if the given value is not nil. -func (cbuo *CASBackendUpdateOne) SetNillableDefault(b *bool) *CASBackendUpdateOne { - if b != nil { - cbuo.SetDefault(*b) +func (_u *CASBackendUpdateOne) SetNillableDefault(v *bool) *CASBackendUpdateOne { + if v != nil { + _u.SetDefault(*v) } - return cbuo + return _u } // SetDeletedAt sets the "deleted_at" field. -func (cbuo *CASBackendUpdateOne) SetDeletedAt(t time.Time) *CASBackendUpdateOne { - cbuo.mutation.SetDeletedAt(t) - return cbuo +func (_u *CASBackendUpdateOne) SetDeletedAt(v time.Time) *CASBackendUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (cbuo *CASBackendUpdateOne) SetNillableDeletedAt(t *time.Time) *CASBackendUpdateOne { - if t != nil { - cbuo.SetDeletedAt(*t) +func (_u *CASBackendUpdateOne) SetNillableDeletedAt(v *time.Time) *CASBackendUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return cbuo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (cbuo *CASBackendUpdateOne) ClearDeletedAt() *CASBackendUpdateOne { - cbuo.mutation.ClearDeletedAt() - return cbuo +func (_u *CASBackendUpdateOne) ClearDeletedAt() *CASBackendUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetMaxBlobSizeBytes sets the "max_blob_size_bytes" field. -func (cbuo *CASBackendUpdateOne) SetMaxBlobSizeBytes(i int64) *CASBackendUpdateOne { - cbuo.mutation.ResetMaxBlobSizeBytes() - cbuo.mutation.SetMaxBlobSizeBytes(i) - return cbuo +func (_u *CASBackendUpdateOne) SetMaxBlobSizeBytes(v int64) *CASBackendUpdateOne { + _u.mutation.ResetMaxBlobSizeBytes() + _u.mutation.SetMaxBlobSizeBytes(v) + return _u } // SetNillableMaxBlobSizeBytes sets the "max_blob_size_bytes" field if the given value is not nil. -func (cbuo *CASBackendUpdateOne) SetNillableMaxBlobSizeBytes(i *int64) *CASBackendUpdateOne { - if i != nil { - cbuo.SetMaxBlobSizeBytes(*i) +func (_u *CASBackendUpdateOne) SetNillableMaxBlobSizeBytes(v *int64) *CASBackendUpdateOne { + if v != nil { + _u.SetMaxBlobSizeBytes(*v) } - return cbuo + return _u } -// AddMaxBlobSizeBytes adds i to the "max_blob_size_bytes" field. -func (cbuo *CASBackendUpdateOne) AddMaxBlobSizeBytes(i int64) *CASBackendUpdateOne { - cbuo.mutation.AddMaxBlobSizeBytes(i) - return cbuo +// AddMaxBlobSizeBytes adds value to the "max_blob_size_bytes" field. +func (_u *CASBackendUpdateOne) AddMaxBlobSizeBytes(v int64) *CASBackendUpdateOne { + _u.mutation.AddMaxBlobSizeBytes(v) + return _u } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. 
-func (cbuo *CASBackendUpdateOne) SetOrganizationID(id uuid.UUID) *CASBackendUpdateOne { - cbuo.mutation.SetOrganizationID(id) - return cbuo +func (_u *CASBackendUpdateOne) SetOrganizationID(id uuid.UUID) *CASBackendUpdateOne { + _u.mutation.SetOrganizationID(id) + return _u } // SetOrganization sets the "organization" edge to the Organization entity. -func (cbuo *CASBackendUpdateOne) SetOrganization(o *Organization) *CASBackendUpdateOne { - return cbuo.SetOrganizationID(o.ID) +func (_u *CASBackendUpdateOne) SetOrganization(v *Organization) *CASBackendUpdateOne { + return _u.SetOrganizationID(v.ID) } // AddWorkflowRunIDs adds the "workflow_run" edge to the WorkflowRun entity by IDs. -func (cbuo *CASBackendUpdateOne) AddWorkflowRunIDs(ids ...uuid.UUID) *CASBackendUpdateOne { - cbuo.mutation.AddWorkflowRunIDs(ids...) - return cbuo +func (_u *CASBackendUpdateOne) AddWorkflowRunIDs(ids ...uuid.UUID) *CASBackendUpdateOne { + _u.mutation.AddWorkflowRunIDs(ids...) + return _u } // AddWorkflowRun adds the "workflow_run" edges to the WorkflowRun entity. -func (cbuo *CASBackendUpdateOne) AddWorkflowRun(w ...*WorkflowRun) *CASBackendUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *CASBackendUpdateOne) AddWorkflowRun(v ...*WorkflowRun) *CASBackendUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return cbuo.AddWorkflowRunIDs(ids...) + return _u.AddWorkflowRunIDs(ids...) } // Mutation returns the CASBackendMutation object of the builder. -func (cbuo *CASBackendUpdateOne) Mutation() *CASBackendMutation { - return cbuo.mutation +func (_u *CASBackendUpdateOne) Mutation() *CASBackendMutation { + return _u.mutation } // ClearOrganization clears the "organization" edge to the Organization entity. -func (cbuo *CASBackendUpdateOne) ClearOrganization() *CASBackendUpdateOne { - cbuo.mutation.ClearOrganization() - return cbuo +func (_u *CASBackendUpdateOne) ClearOrganization() *CASBackendUpdateOne { + _u.mutation.ClearOrganization() + return _u } // ClearWorkflowRun clears all "workflow_run" edges to the WorkflowRun entity. -func (cbuo *CASBackendUpdateOne) ClearWorkflowRun() *CASBackendUpdateOne { - cbuo.mutation.ClearWorkflowRun() - return cbuo +func (_u *CASBackendUpdateOne) ClearWorkflowRun() *CASBackendUpdateOne { + _u.mutation.ClearWorkflowRun() + return _u } // RemoveWorkflowRunIDs removes the "workflow_run" edge to WorkflowRun entities by IDs. -func (cbuo *CASBackendUpdateOne) RemoveWorkflowRunIDs(ids ...uuid.UUID) *CASBackendUpdateOne { - cbuo.mutation.RemoveWorkflowRunIDs(ids...) - return cbuo +func (_u *CASBackendUpdateOne) RemoveWorkflowRunIDs(ids ...uuid.UUID) *CASBackendUpdateOne { + _u.mutation.RemoveWorkflowRunIDs(ids...) + return _u } // RemoveWorkflowRun removes "workflow_run" edges to WorkflowRun entities. -func (cbuo *CASBackendUpdateOne) RemoveWorkflowRun(w ...*WorkflowRun) *CASBackendUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *CASBackendUpdateOne) RemoveWorkflowRun(v ...*WorkflowRun) *CASBackendUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return cbuo.RemoveWorkflowRunIDs(ids...) + return _u.RemoveWorkflowRunIDs(ids...) } // Where appends a list predicates to the CASBackendUpdate builder. -func (cbuo *CASBackendUpdateOne) Where(ps ...predicate.CASBackend) *CASBackendUpdateOne { - cbuo.mutation.Where(ps...) 
- return cbuo +func (_u *CASBackendUpdateOne) Where(ps ...predicate.CASBackend) *CASBackendUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (cbuo *CASBackendUpdateOne) Select(field string, fields ...string) *CASBackendUpdateOne { - cbuo.fields = append([]string{field}, fields...) - return cbuo +func (_u *CASBackendUpdateOne) Select(field string, fields ...string) *CASBackendUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated CASBackend entity. -func (cbuo *CASBackendUpdateOne) Save(ctx context.Context) (*CASBackend, error) { - cbuo.defaults() - return withHooks(ctx, cbuo.sqlSave, cbuo.mutation, cbuo.hooks) +func (_u *CASBackendUpdateOne) Save(ctx context.Context) (*CASBackend, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (cbuo *CASBackendUpdateOne) SaveX(ctx context.Context) *CASBackend { - node, err := cbuo.Save(ctx) +func (_u *CASBackendUpdateOne) SaveX(ctx context.Context) *CASBackend { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -666,56 +666,56 @@ func (cbuo *CASBackendUpdateOne) SaveX(ctx context.Context) *CASBackend { } // Exec executes the query on the entity. -func (cbuo *CASBackendUpdateOne) Exec(ctx context.Context) error { - _, err := cbuo.Save(ctx) +func (_u *CASBackendUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (cbuo *CASBackendUpdateOne) ExecX(ctx context.Context) { - if err := cbuo.Exec(ctx); err != nil { +func (_u *CASBackendUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (cbuo *CASBackendUpdateOne) defaults() { - if _, ok := cbuo.mutation.UpdatedAt(); !ok { +func (_u *CASBackendUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { v := casbackend.UpdateDefaultUpdatedAt() - cbuo.mutation.SetUpdatedAt(v) + _u.mutation.SetUpdatedAt(v) } } // check runs all checks and user-defined validators on the builder. -func (cbuo *CASBackendUpdateOne) check() error { - if v, ok := cbuo.mutation.ValidationStatus(); ok { +func (_u *CASBackendUpdateOne) check() error { + if v, ok := _u.mutation.ValidationStatus(); ok { if err := casbackend.ValidationStatusValidator(v); err != nil { return &ValidationError{Name: "validation_status", err: fmt.Errorf(`ent: validator failed for field "CASBackend.validation_status": %w`, err)} } } - if cbuo.mutation.OrganizationCleared() && len(cbuo.mutation.OrganizationIDs()) > 0 { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "CASBackend.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (cbuo *CASBackendUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *CASBackendUpdateOne { - cbuo.modifiers = append(cbuo.modifiers, modifiers...) - return cbuo +func (_u *CASBackendUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *CASBackendUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (cbuo *CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend, err error) { - if err := cbuo.check(); err != nil { +func (_u *CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(casbackend.Table, casbackend.Columns, sqlgraph.NewFieldSpec(casbackend.FieldID, field.TypeUUID)) - id, ok := cbuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "CASBackend.id" for update`)} } _spec.Node.ID.Value = id - if fields := cbuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, casbackend.FieldID) for _, f := range fields { @@ -727,53 +727,53 @@ func (cbuo *CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend } } } - if ps := cbuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := cbuo.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(casbackend.FieldDescription, field.TypeString, value) } - if cbuo.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(casbackend.FieldDescription, field.TypeString) } - if value, ok := cbuo.mutation.SecretName(); ok { + if value, ok := _u.mutation.SecretName(); ok { _spec.SetField(casbackend.FieldSecretName, field.TypeString, value) } - if value, ok := cbuo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(casbackend.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := cbuo.mutation.ValidationStatus(); ok { + if value, ok := _u.mutation.ValidationStatus(); ok { _spec.SetField(casbackend.FieldValidationStatus, field.TypeEnum, value) } - if value, ok := cbuo.mutation.ValidationError(); ok { + if value, ok := _u.mutation.ValidationError(); ok { _spec.SetField(casbackend.FieldValidationError, field.TypeString, value) } - if cbuo.mutation.ValidationErrorCleared() { + if _u.mutation.ValidationErrorCleared() { _spec.ClearField(casbackend.FieldValidationError, field.TypeString) } - if value, ok := cbuo.mutation.ValidatedAt(); ok { + if value, ok := _u.mutation.ValidatedAt(); ok { _spec.SetField(casbackend.FieldValidatedAt, field.TypeTime, value) } - if value, ok := cbuo.mutation.Default(); ok { + if value, ok := _u.mutation.Default(); ok { _spec.SetField(casbackend.FieldDefault, field.TypeBool, value) } - if value, ok := cbuo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(casbackend.FieldDeletedAt, field.TypeTime, value) } - if cbuo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(casbackend.FieldDeletedAt, field.TypeTime) } - if value, ok := cbuo.mutation.MaxBlobSizeBytes(); ok { + if value, ok := _u.mutation.MaxBlobSizeBytes(); ok { _spec.SetField(casbackend.FieldMaxBlobSizeBytes, field.TypeInt64, value) } - if value, ok := cbuo.mutation.AddedMaxBlobSizeBytes(); ok { + if value, ok := _u.mutation.AddedMaxBlobSizeBytes(); ok { _spec.AddField(casbackend.FieldMaxBlobSizeBytes, field.TypeInt64, value) } - if cbuo.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -786,7 +786,7 @@ func (cbuo 
*CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := cbuo.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -802,7 +802,7 @@ func (cbuo *CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if cbuo.mutation.WorkflowRunCleared() { + if _u.mutation.WorkflowRunCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -815,7 +815,7 @@ func (cbuo *CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := cbuo.mutation.RemovedWorkflowRunIDs(); len(nodes) > 0 && !cbuo.mutation.WorkflowRunCleared() { + if nodes := _u.mutation.RemovedWorkflowRunIDs(); len(nodes) > 0 && !_u.mutation.WorkflowRunCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -831,7 +831,7 @@ func (cbuo *CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := cbuo.mutation.WorkflowRunIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowRunIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -847,11 +847,11 @@ func (cbuo *CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(cbuo.modifiers...) - _node = &CASBackend{config: cbuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &CASBackend{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, cbuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{casbackend.Label} } else if sqlgraph.IsConstraintError(err) { @@ -859,6 +859,6 @@ func (cbuo *CASBackendUpdateOne) sqlSave(ctx context.Context) (_node *CASBackend } return nil, err } - cbuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/casmapping.go b/app/controlplane/pkg/data/ent/casmapping.go index 34beed33b..cc5b2e253 100644 --- a/app/controlplane/pkg/data/ent/casmapping.go +++ b/app/controlplane/pkg/data/ent/casmapping.go @@ -106,7 +106,7 @@ func (*CASMapping) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the CASMapping fields. 
-func (cm *CASMapping) assignValues(columns []string, values []any) error { +func (_m *CASMapping) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -116,47 +116,47 @@ func (cm *CASMapping) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - cm.ID = *value + _m.ID = *value } case casmapping.FieldDigest: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field digest", values[i]) } else if value.Valid { - cm.Digest = value.String + _m.Digest = value.String } case casmapping.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - cm.CreatedAt = value.Time + _m.CreatedAt = value.Time } case casmapping.FieldWorkflowRunID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field workflow_run_id", values[i]) } else if value != nil { - cm.WorkflowRunID = *value + _m.WorkflowRunID = *value } case casmapping.FieldOrganizationID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field organization_id", values[i]) } else if value != nil { - cm.OrganizationID = *value + _m.OrganizationID = *value } case casmapping.FieldProjectID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field project_id", values[i]) } else if value != nil { - cm.ProjectID = *value + _m.ProjectID = *value } case casmapping.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field cas_mapping_cas_backend", values[i]) } else if value.Valid { - cm.cas_mapping_cas_backend = new(uuid.UUID) - *cm.cas_mapping_cas_backend = *value.S.(*uuid.UUID) + _m.cas_mapping_cas_backend = new(uuid.UUID) + *_m.cas_mapping_cas_backend = *value.S.(*uuid.UUID) } default: - cm.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -164,62 +164,62 @@ func (cm *CASMapping) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the CASMapping. // This includes values selected through modifiers, order, etc. -func (cm *CASMapping) Value(name string) (ent.Value, error) { - return cm.selectValues.Get(name) +func (_m *CASMapping) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryCasBackend queries the "cas_backend" edge of the CASMapping entity. -func (cm *CASMapping) QueryCasBackend() *CASBackendQuery { - return NewCASMappingClient(cm.config).QueryCasBackend(cm) +func (_m *CASMapping) QueryCasBackend() *CASBackendQuery { + return NewCASMappingClient(_m.config).QueryCasBackend(_m) } // QueryOrganization queries the "organization" edge of the CASMapping entity. -func (cm *CASMapping) QueryOrganization() *OrganizationQuery { - return NewCASMappingClient(cm.config).QueryOrganization(cm) +func (_m *CASMapping) QueryOrganization() *OrganizationQuery { + return NewCASMappingClient(_m.config).QueryOrganization(_m) } // QueryProject queries the "project" edge of the CASMapping entity. 
-func (cm *CASMapping) QueryProject() *ProjectQuery { - return NewCASMappingClient(cm.config).QueryProject(cm) +func (_m *CASMapping) QueryProject() *ProjectQuery { + return NewCASMappingClient(_m.config).QueryProject(_m) } // Update returns a builder for updating this CASMapping. // Note that you need to call CASMapping.Unwrap() before calling this method if this CASMapping // was returned from a transaction, and the transaction was committed or rolled back. -func (cm *CASMapping) Update() *CASMappingUpdateOne { - return NewCASMappingClient(cm.config).UpdateOne(cm) +func (_m *CASMapping) Update() *CASMappingUpdateOne { + return NewCASMappingClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the CASMapping entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (cm *CASMapping) Unwrap() *CASMapping { - _tx, ok := cm.config.driver.(*txDriver) +func (_m *CASMapping) Unwrap() *CASMapping { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: CASMapping is not a transactional entity") } - cm.config.driver = _tx.drv - return cm + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (cm *CASMapping) String() string { +func (_m *CASMapping) String() string { var builder strings.Builder builder.WriteString("CASMapping(") - builder.WriteString(fmt.Sprintf("id=%v, ", cm.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("digest=") - builder.WriteString(cm.Digest) + builder.WriteString(_m.Digest) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(cm.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("workflow_run_id=") - builder.WriteString(fmt.Sprintf("%v", cm.WorkflowRunID)) + builder.WriteString(fmt.Sprintf("%v", _m.WorkflowRunID)) builder.WriteString(", ") builder.WriteString("organization_id=") - builder.WriteString(fmt.Sprintf("%v", cm.OrganizationID)) + builder.WriteString(fmt.Sprintf("%v", _m.OrganizationID)) builder.WriteString(", ") builder.WriteString("project_id=") - builder.WriteString(fmt.Sprintf("%v", cm.ProjectID)) + builder.WriteString(fmt.Sprintf("%v", _m.ProjectID)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/casmapping_create.go b/app/controlplane/pkg/data/ent/casmapping_create.go index 87a35815b..f58c68657 100644 --- a/app/controlplane/pkg/data/ent/casmapping_create.go +++ b/app/controlplane/pkg/data/ent/casmapping_create.go @@ -28,108 +28,108 @@ type CASMappingCreate struct { } // SetDigest sets the "digest" field. -func (cmc *CASMappingCreate) SetDigest(s string) *CASMappingCreate { - cmc.mutation.SetDigest(s) - return cmc +func (_c *CASMappingCreate) SetDigest(v string) *CASMappingCreate { + _c.mutation.SetDigest(v) + return _c } // SetCreatedAt sets the "created_at" field. -func (cmc *CASMappingCreate) SetCreatedAt(t time.Time) *CASMappingCreate { - cmc.mutation.SetCreatedAt(t) - return cmc +func (_c *CASMappingCreate) SetCreatedAt(v time.Time) *CASMappingCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
-func (cmc *CASMappingCreate) SetNillableCreatedAt(t *time.Time) *CASMappingCreate { - if t != nil { - cmc.SetCreatedAt(*t) +func (_c *CASMappingCreate) SetNillableCreatedAt(v *time.Time) *CASMappingCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return cmc + return _c } // SetWorkflowRunID sets the "workflow_run_id" field. -func (cmc *CASMappingCreate) SetWorkflowRunID(u uuid.UUID) *CASMappingCreate { - cmc.mutation.SetWorkflowRunID(u) - return cmc +func (_c *CASMappingCreate) SetWorkflowRunID(v uuid.UUID) *CASMappingCreate { + _c.mutation.SetWorkflowRunID(v) + return _c } // SetNillableWorkflowRunID sets the "workflow_run_id" field if the given value is not nil. -func (cmc *CASMappingCreate) SetNillableWorkflowRunID(u *uuid.UUID) *CASMappingCreate { - if u != nil { - cmc.SetWorkflowRunID(*u) +func (_c *CASMappingCreate) SetNillableWorkflowRunID(v *uuid.UUID) *CASMappingCreate { + if v != nil { + _c.SetWorkflowRunID(*v) } - return cmc + return _c } // SetOrganizationID sets the "organization_id" field. -func (cmc *CASMappingCreate) SetOrganizationID(u uuid.UUID) *CASMappingCreate { - cmc.mutation.SetOrganizationID(u) - return cmc +func (_c *CASMappingCreate) SetOrganizationID(v uuid.UUID) *CASMappingCreate { + _c.mutation.SetOrganizationID(v) + return _c } // SetProjectID sets the "project_id" field. -func (cmc *CASMappingCreate) SetProjectID(u uuid.UUID) *CASMappingCreate { - cmc.mutation.SetProjectID(u) - return cmc +func (_c *CASMappingCreate) SetProjectID(v uuid.UUID) *CASMappingCreate { + _c.mutation.SetProjectID(v) + return _c } // SetNillableProjectID sets the "project_id" field if the given value is not nil. -func (cmc *CASMappingCreate) SetNillableProjectID(u *uuid.UUID) *CASMappingCreate { - if u != nil { - cmc.SetProjectID(*u) +func (_c *CASMappingCreate) SetNillableProjectID(v *uuid.UUID) *CASMappingCreate { + if v != nil { + _c.SetProjectID(*v) } - return cmc + return _c } // SetID sets the "id" field. -func (cmc *CASMappingCreate) SetID(u uuid.UUID) *CASMappingCreate { - cmc.mutation.SetID(u) - return cmc +func (_c *CASMappingCreate) SetID(v uuid.UUID) *CASMappingCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (cmc *CASMappingCreate) SetNillableID(u *uuid.UUID) *CASMappingCreate { - if u != nil { - cmc.SetID(*u) +func (_c *CASMappingCreate) SetNillableID(v *uuid.UUID) *CASMappingCreate { + if v != nil { + _c.SetID(*v) } - return cmc + return _c } // SetCasBackendID sets the "cas_backend" edge to the CASBackend entity by ID. -func (cmc *CASMappingCreate) SetCasBackendID(id uuid.UUID) *CASMappingCreate { - cmc.mutation.SetCasBackendID(id) - return cmc +func (_c *CASMappingCreate) SetCasBackendID(id uuid.UUID) *CASMappingCreate { + _c.mutation.SetCasBackendID(id) + return _c } // SetCasBackend sets the "cas_backend" edge to the CASBackend entity. -func (cmc *CASMappingCreate) SetCasBackend(c *CASBackend) *CASMappingCreate { - return cmc.SetCasBackendID(c.ID) +func (_c *CASMappingCreate) SetCasBackend(v *CASBackend) *CASMappingCreate { + return _c.SetCasBackendID(v.ID) } // SetOrganization sets the "organization" edge to the Organization entity. -func (cmc *CASMappingCreate) SetOrganization(o *Organization) *CASMappingCreate { - return cmc.SetOrganizationID(o.ID) +func (_c *CASMappingCreate) SetOrganization(v *Organization) *CASMappingCreate { + return _c.SetOrganizationID(v.ID) } // SetProject sets the "project" edge to the Project entity. 
-func (cmc *CASMappingCreate) SetProject(p *Project) *CASMappingCreate { - return cmc.SetProjectID(p.ID) +func (_c *CASMappingCreate) SetProject(v *Project) *CASMappingCreate { + return _c.SetProjectID(v.ID) } // Mutation returns the CASMappingMutation object of the builder. -func (cmc *CASMappingCreate) Mutation() *CASMappingMutation { - return cmc.mutation +func (_c *CASMappingCreate) Mutation() *CASMappingMutation { + return _c.mutation } // Save creates the CASMapping in the database. -func (cmc *CASMappingCreate) Save(ctx context.Context) (*CASMapping, error) { - cmc.defaults() - return withHooks(ctx, cmc.sqlSave, cmc.mutation, cmc.hooks) +func (_c *CASMappingCreate) Save(ctx context.Context) (*CASMapping, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (cmc *CASMappingCreate) SaveX(ctx context.Context) *CASMapping { - v, err := cmc.Save(ctx) +func (_c *CASMappingCreate) SaveX(ctx context.Context) *CASMapping { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -137,56 +137,56 @@ func (cmc *CASMappingCreate) SaveX(ctx context.Context) *CASMapping { } // Exec executes the query. -func (cmc *CASMappingCreate) Exec(ctx context.Context) error { - _, err := cmc.Save(ctx) +func (_c *CASMappingCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (cmc *CASMappingCreate) ExecX(ctx context.Context) { - if err := cmc.Exec(ctx); err != nil { +func (_c *CASMappingCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (cmc *CASMappingCreate) defaults() { - if _, ok := cmc.mutation.CreatedAt(); !ok { +func (_c *CASMappingCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := casmapping.DefaultCreatedAt() - cmc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := cmc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := casmapping.DefaultID() - cmc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (cmc *CASMappingCreate) check() error { - if _, ok := cmc.mutation.Digest(); !ok { +func (_c *CASMappingCreate) check() error { + if _, ok := _c.mutation.Digest(); !ok { return &ValidationError{Name: "digest", err: errors.New(`ent: missing required field "CASMapping.digest"`)} } - if _, ok := cmc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "CASMapping.created_at"`)} } - if _, ok := cmc.mutation.OrganizationID(); !ok { + if _, ok := _c.mutation.OrganizationID(); !ok { return &ValidationError{Name: "organization_id", err: errors.New(`ent: missing required field "CASMapping.organization_id"`)} } - if len(cmc.mutation.CasBackendIDs()) == 0 { + if len(_c.mutation.CasBackendIDs()) == 0 { return &ValidationError{Name: "cas_backend", err: errors.New(`ent: missing required edge "CASMapping.cas_backend"`)} } - if len(cmc.mutation.OrganizationIDs()) == 0 { + if len(_c.mutation.OrganizationIDs()) == 0 { return &ValidationError{Name: "organization", err: errors.New(`ent: missing required edge "CASMapping.organization"`)} } return nil } -func (cmc *CASMappingCreate) sqlSave(ctx context.Context) (*CASMapping, error) { - if err := cmc.check(); err != nil { +func (_c *CASMappingCreate) sqlSave(ctx context.Context) (*CASMapping, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := cmc.createSpec() - if err := sqlgraph.CreateNode(ctx, cmc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -199,34 +199,34 @@ func (cmc *CASMappingCreate) sqlSave(ctx context.Context) (*CASMapping, error) { return nil, err } } - cmc.mutation.id = &_node.ID - cmc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (cmc *CASMappingCreate) createSpec() (*CASMapping, *sqlgraph.CreateSpec) { +func (_c *CASMappingCreate) createSpec() (*CASMapping, *sqlgraph.CreateSpec) { var ( - _node = &CASMapping{config: cmc.config} + _node = &CASMapping{config: _c.config} _spec = sqlgraph.NewCreateSpec(casmapping.Table, sqlgraph.NewFieldSpec(casmapping.FieldID, field.TypeUUID)) ) - _spec.OnConflict = cmc.conflict - if id, ok := cmc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := cmc.mutation.Digest(); ok { + if value, ok := _c.mutation.Digest(); ok { _spec.SetField(casmapping.FieldDigest, field.TypeString, value) _node.Digest = value } - if value, ok := cmc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(casmapping.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := cmc.mutation.WorkflowRunID(); ok { + if value, ok := _c.mutation.WorkflowRunID(); ok { _spec.SetField(casmapping.FieldWorkflowRunID, field.TypeUUID, value) _node.WorkflowRunID = value } - if nodes := cmc.mutation.CasBackendIDs(); len(nodes) > 0 { + if nodes := _c.mutation.CasBackendIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -243,7 +243,7 @@ func (cmc *CASMappingCreate) createSpec() (*CASMapping, *sqlgraph.CreateSpec) { _node.cas_mapping_cas_backend = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := cmc.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := 
_c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -260,7 +260,7 @@ func (cmc *CASMappingCreate) createSpec() (*CASMapping, *sqlgraph.CreateSpec) { _node.OrganizationID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := cmc.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -296,10 +296,10 @@ func (cmc *CASMappingCreate) createSpec() (*CASMapping, *sqlgraph.CreateSpec) { // SetDigest(v+v). // }). // Exec(ctx) -func (cmc *CASMappingCreate) OnConflict(opts ...sql.ConflictOption) *CASMappingUpsertOne { - cmc.conflict = opts +func (_c *CASMappingCreate) OnConflict(opts ...sql.ConflictOption) *CASMappingUpsertOne { + _c.conflict = opts return &CASMappingUpsertOne{ - create: cmc, + create: _c, } } @@ -309,10 +309,10 @@ func (cmc *CASMappingCreate) OnConflict(opts ...sql.ConflictOption) *CASMappingU // client.CASMapping.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (cmc *CASMappingCreate) OnConflictColumns(columns ...string) *CASMappingUpsertOne { - cmc.conflict = append(cmc.conflict, sql.ConflictColumns(columns...)) +func (_c *CASMappingCreate) OnConflictColumns(columns ...string) *CASMappingUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &CASMappingUpsertOne{ - create: cmc, + create: _c, } } @@ -439,16 +439,16 @@ type CASMappingCreateBulk struct { } // Save creates the CASMapping entities in the database. -func (cmcb *CASMappingCreateBulk) Save(ctx context.Context) ([]*CASMapping, error) { - if cmcb.err != nil { - return nil, cmcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(cmcb.builders)) - nodes := make([]*CASMapping, len(cmcb.builders)) - mutators := make([]Mutator, len(cmcb.builders)) - for i := range cmcb.builders { +func (_c *CASMappingCreateBulk) Save(ctx context.Context) ([]*CASMapping, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*CASMapping, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := cmcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*CASMappingMutation) @@ -462,12 +462,12 @@ func (cmcb *CASMappingCreateBulk) Save(ctx context.Context) ([]*CASMapping, erro var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, cmcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = cmcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, cmcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -487,7 +487,7 @@ func (cmcb *CASMappingCreateBulk) Save(ctx context.Context) ([]*CASMapping, erro }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, cmcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -495,8 +495,8 @@ func (cmcb *CASMappingCreateBulk) Save(ctx context.Context) ([]*CASMapping, erro } // SaveX is like Save, but panics if an error occurs. -func (cmcb *CASMappingCreateBulk) SaveX(ctx context.Context) []*CASMapping { - v, err := cmcb.Save(ctx) +func (_c *CASMappingCreateBulk) SaveX(ctx context.Context) []*CASMapping { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -504,14 +504,14 @@ func (cmcb *CASMappingCreateBulk) SaveX(ctx context.Context) []*CASMapping { } // Exec executes the query. -func (cmcb *CASMappingCreateBulk) Exec(ctx context.Context) error { - _, err := cmcb.Save(ctx) +func (_c *CASMappingCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (cmcb *CASMappingCreateBulk) ExecX(ctx context.Context) { - if err := cmcb.Exec(ctx); err != nil { +func (_c *CASMappingCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -531,10 +531,10 @@ func (cmcb *CASMappingCreateBulk) ExecX(ctx context.Context) { // SetDigest(v+v). // }). // Exec(ctx) -func (cmcb *CASMappingCreateBulk) OnConflict(opts ...sql.ConflictOption) *CASMappingUpsertBulk { - cmcb.conflict = opts +func (_c *CASMappingCreateBulk) OnConflict(opts ...sql.ConflictOption) *CASMappingUpsertBulk { + _c.conflict = opts return &CASMappingUpsertBulk{ - create: cmcb, + create: _c, } } @@ -544,10 +544,10 @@ func (cmcb *CASMappingCreateBulk) OnConflict(opts ...sql.ConflictOption) *CASMap // client.CASMapping.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (cmcb *CASMappingCreateBulk) OnConflictColumns(columns ...string) *CASMappingUpsertBulk { - cmcb.conflict = append(cmcb.conflict, sql.ConflictColumns(columns...)) +func (_c *CASMappingCreateBulk) OnConflictColumns(columns ...string) *CASMappingUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &CASMappingUpsertBulk{ - create: cmcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/casmapping_delete.go b/app/controlplane/pkg/data/ent/casmapping_delete.go index 4424a4449..443e4e1f6 100644 --- a/app/controlplane/pkg/data/ent/casmapping_delete.go +++ b/app/controlplane/pkg/data/ent/casmapping_delete.go @@ -20,56 +20,56 @@ type CASMappingDelete struct { } // Where appends a list predicates to the CASMappingDelete builder. -func (cmd *CASMappingDelete) Where(ps ...predicate.CASMapping) *CASMappingDelete { - cmd.mutation.Where(ps...) - return cmd +func (_d *CASMappingDelete) Where(ps ...predicate.CASMapping) *CASMappingDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. 
-func (cmd *CASMappingDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, cmd.sqlExec, cmd.mutation, cmd.hooks) +func (_d *CASMappingDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (cmd *CASMappingDelete) ExecX(ctx context.Context) int { - n, err := cmd.Exec(ctx) +func (_d *CASMappingDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (cmd *CASMappingDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *CASMappingDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(casmapping.Table, sqlgraph.NewFieldSpec(casmapping.FieldID, field.TypeUUID)) - if ps := cmd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, cmd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - cmd.mutation.done = true + _d.mutation.done = true return affected, err } // CASMappingDeleteOne is the builder for deleting a single CASMapping entity. type CASMappingDeleteOne struct { - cmd *CASMappingDelete + _d *CASMappingDelete } // Where appends a list predicates to the CASMappingDelete builder. -func (cmdo *CASMappingDeleteOne) Where(ps ...predicate.CASMapping) *CASMappingDeleteOne { - cmdo.cmd.mutation.Where(ps...) - return cmdo +func (_d *CASMappingDeleteOne) Where(ps ...predicate.CASMapping) *CASMappingDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (cmdo *CASMappingDeleteOne) Exec(ctx context.Context) error { - n, err := cmdo.cmd.Exec(ctx) +func (_d *CASMappingDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (cmdo *CASMappingDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (cmdo *CASMappingDeleteOne) ExecX(ctx context.Context) { - if err := cmdo.Exec(ctx); err != nil { +func (_d *CASMappingDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/casmapping_query.go b/app/controlplane/pkg/data/ent/casmapping_query.go index 8660d4bcb..b3c5b7357 100644 --- a/app/controlplane/pkg/data/ent/casmapping_query.go +++ b/app/controlplane/pkg/data/ent/casmapping_query.go @@ -38,44 +38,44 @@ type CASMappingQuery struct { } // Where adds a new predicate for the CASMappingQuery builder. -func (cmq *CASMappingQuery) Where(ps ...predicate.CASMapping) *CASMappingQuery { - cmq.predicates = append(cmq.predicates, ps...) - return cmq +func (_q *CASMappingQuery) Where(ps ...predicate.CASMapping) *CASMappingQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (cmq *CASMappingQuery) Limit(limit int) *CASMappingQuery { - cmq.ctx.Limit = &limit - return cmq +func (_q *CASMappingQuery) Limit(limit int) *CASMappingQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. 
-func (cmq *CASMappingQuery) Offset(offset int) *CASMappingQuery { - cmq.ctx.Offset = &offset - return cmq +func (_q *CASMappingQuery) Offset(offset int) *CASMappingQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (cmq *CASMappingQuery) Unique(unique bool) *CASMappingQuery { - cmq.ctx.Unique = &unique - return cmq +func (_q *CASMappingQuery) Unique(unique bool) *CASMappingQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (cmq *CASMappingQuery) Order(o ...casmapping.OrderOption) *CASMappingQuery { - cmq.order = append(cmq.order, o...) - return cmq +func (_q *CASMappingQuery) Order(o ...casmapping.OrderOption) *CASMappingQuery { + _q.order = append(_q.order, o...) + return _q } // QueryCasBackend chains the current query on the "cas_backend" edge. -func (cmq *CASMappingQuery) QueryCasBackend() *CASBackendQuery { - query := (&CASBackendClient{config: cmq.config}).Query() +func (_q *CASMappingQuery) QueryCasBackend() *CASBackendQuery { + query := (&CASBackendClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := cmq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := cmq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -84,20 +84,20 @@ func (cmq *CASMappingQuery) QueryCasBackend() *CASBackendQuery { sqlgraph.To(casbackend.Table, casbackend.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, casmapping.CasBackendTable, casmapping.CasBackendColumn), ) - fromU = sqlgraph.SetNeighbors(cmq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryOrganization chains the current query on the "organization" edge. -func (cmq *CASMappingQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: cmq.config}).Query() +func (_q *CASMappingQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := cmq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := cmq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -106,20 +106,20 @@ func (cmq *CASMappingQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, casmapping.OrganizationTable, casmapping.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(cmq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryProject chains the current query on the "project" edge. 
-func (cmq *CASMappingQuery) QueryProject() *ProjectQuery { - query := (&ProjectClient{config: cmq.config}).Query() +func (_q *CASMappingQuery) QueryProject() *ProjectQuery { + query := (&ProjectClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := cmq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := cmq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -128,7 +128,7 @@ func (cmq *CASMappingQuery) QueryProject() *ProjectQuery { sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, casmapping.ProjectTable, casmapping.ProjectColumn), ) - fromU = sqlgraph.SetNeighbors(cmq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -136,8 +136,8 @@ func (cmq *CASMappingQuery) QueryProject() *ProjectQuery { // First returns the first CASMapping entity from the query. // Returns a *NotFoundError when no CASMapping was found. -func (cmq *CASMappingQuery) First(ctx context.Context) (*CASMapping, error) { - nodes, err := cmq.Limit(1).All(setContextOp(ctx, cmq.ctx, ent.OpQueryFirst)) +func (_q *CASMappingQuery) First(ctx context.Context) (*CASMapping, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -148,8 +148,8 @@ func (cmq *CASMappingQuery) First(ctx context.Context) (*CASMapping, error) { } // FirstX is like First, but panics if an error occurs. -func (cmq *CASMappingQuery) FirstX(ctx context.Context) *CASMapping { - node, err := cmq.First(ctx) +func (_q *CASMappingQuery) FirstX(ctx context.Context) *CASMapping { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -158,9 +158,9 @@ func (cmq *CASMappingQuery) FirstX(ctx context.Context) *CASMapping { // FirstID returns the first CASMapping ID from the query. // Returns a *NotFoundError when no CASMapping ID was found. -func (cmq *CASMappingQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *CASMappingQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = cmq.Limit(1).IDs(setContextOp(ctx, cmq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -171,8 +171,8 @@ func (cmq *CASMappingQuery) FirstID(ctx context.Context) (id uuid.UUID, err erro } // FirstIDX is like FirstID, but panics if an error occurs. -func (cmq *CASMappingQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := cmq.FirstID(ctx) +func (_q *CASMappingQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -182,8 +182,8 @@ func (cmq *CASMappingQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single CASMapping entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one CASMapping entity is found. // Returns a *NotFoundError when no CASMapping entities are found. 
-func (cmq *CASMappingQuery) Only(ctx context.Context) (*CASMapping, error) { - nodes, err := cmq.Limit(2).All(setContextOp(ctx, cmq.ctx, ent.OpQueryOnly)) +func (_q *CASMappingQuery) Only(ctx context.Context) (*CASMapping, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -198,8 +198,8 @@ func (cmq *CASMappingQuery) Only(ctx context.Context) (*CASMapping, error) { } // OnlyX is like Only, but panics if an error occurs. -func (cmq *CASMappingQuery) OnlyX(ctx context.Context) *CASMapping { - node, err := cmq.Only(ctx) +func (_q *CASMappingQuery) OnlyX(ctx context.Context) *CASMapping { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -209,9 +209,9 @@ func (cmq *CASMappingQuery) OnlyX(ctx context.Context) *CASMapping { // OnlyID is like Only, but returns the only CASMapping ID in the query. // Returns a *NotSingularError when more than one CASMapping ID is found. // Returns a *NotFoundError when no entities are found. -func (cmq *CASMappingQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *CASMappingQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = cmq.Limit(2).IDs(setContextOp(ctx, cmq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -226,8 +226,8 @@ func (cmq *CASMappingQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (cmq *CASMappingQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := cmq.OnlyID(ctx) +func (_q *CASMappingQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -235,18 +235,18 @@ func (cmq *CASMappingQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of CASMappings. -func (cmq *CASMappingQuery) All(ctx context.Context) ([]*CASMapping, error) { - ctx = setContextOp(ctx, cmq.ctx, ent.OpQueryAll) - if err := cmq.prepareQuery(ctx); err != nil { +func (_q *CASMappingQuery) All(ctx context.Context) ([]*CASMapping, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*CASMapping, *CASMappingQuery]() - return withInterceptors[[]*CASMapping](ctx, cmq, qr, cmq.inters) + return withInterceptors[[]*CASMapping](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (cmq *CASMappingQuery) AllX(ctx context.Context) []*CASMapping { - nodes, err := cmq.All(ctx) +func (_q *CASMappingQuery) AllX(ctx context.Context) []*CASMapping { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -254,20 +254,20 @@ func (cmq *CASMappingQuery) AllX(ctx context.Context) []*CASMapping { } // IDs executes the query and returns a list of CASMapping IDs. 
-func (cmq *CASMappingQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if cmq.ctx.Unique == nil && cmq.path != nil { - cmq.Unique(true) +func (_q *CASMappingQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, cmq.ctx, ent.OpQueryIDs) - if err = cmq.Select(casmapping.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(casmapping.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (cmq *CASMappingQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := cmq.IDs(ctx) +func (_q *CASMappingQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -275,17 +275,17 @@ func (cmq *CASMappingQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (cmq *CASMappingQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, cmq.ctx, ent.OpQueryCount) - if err := cmq.prepareQuery(ctx); err != nil { +func (_q *CASMappingQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, cmq, querierCount[*CASMappingQuery](), cmq.inters) + return withInterceptors[int](ctx, _q, querierCount[*CASMappingQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (cmq *CASMappingQuery) CountX(ctx context.Context) int { - count, err := cmq.Count(ctx) +func (_q *CASMappingQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -293,9 +293,9 @@ func (cmq *CASMappingQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (cmq *CASMappingQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, cmq.ctx, ent.OpQueryExist) - switch _, err := cmq.FirstID(ctx); { +func (_q *CASMappingQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -306,8 +306,8 @@ func (cmq *CASMappingQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (cmq *CASMappingQuery) ExistX(ctx context.Context) bool { - exist, err := cmq.Exist(ctx) +func (_q *CASMappingQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -316,57 +316,57 @@ func (cmq *CASMappingQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the CASMappingQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
-func (cmq *CASMappingQuery) Clone() *CASMappingQuery { - if cmq == nil { +func (_q *CASMappingQuery) Clone() *CASMappingQuery { + if _q == nil { return nil } return &CASMappingQuery{ - config: cmq.config, - ctx: cmq.ctx.Clone(), - order: append([]casmapping.OrderOption{}, cmq.order...), - inters: append([]Interceptor{}, cmq.inters...), - predicates: append([]predicate.CASMapping{}, cmq.predicates...), - withCasBackend: cmq.withCasBackend.Clone(), - withOrganization: cmq.withOrganization.Clone(), - withProject: cmq.withProject.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]casmapping.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.CASMapping{}, _q.predicates...), + withCasBackend: _q.withCasBackend.Clone(), + withOrganization: _q.withOrganization.Clone(), + withProject: _q.withProject.Clone(), // clone intermediate query. - sql: cmq.sql.Clone(), - path: cmq.path, - modifiers: append([]func(*sql.Selector){}, cmq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithCasBackend tells the query-builder to eager-load the nodes that are connected to // the "cas_backend" edge. The optional arguments are used to configure the query builder of the edge. -func (cmq *CASMappingQuery) WithCasBackend(opts ...func(*CASBackendQuery)) *CASMappingQuery { - query := (&CASBackendClient{config: cmq.config}).Query() +func (_q *CASMappingQuery) WithCasBackend(opts ...func(*CASBackendQuery)) *CASMappingQuery { + query := (&CASBackendClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - cmq.withCasBackend = query - return cmq + _q.withCasBackend = query + return _q } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (cmq *CASMappingQuery) WithOrganization(opts ...func(*OrganizationQuery)) *CASMappingQuery { - query := (&OrganizationClient{config: cmq.config}).Query() +func (_q *CASMappingQuery) WithOrganization(opts ...func(*OrganizationQuery)) *CASMappingQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - cmq.withOrganization = query - return cmq + _q.withOrganization = query + return _q } // WithProject tells the query-builder to eager-load the nodes that are connected to // the "project" edge. The optional arguments are used to configure the query builder of the edge. -func (cmq *CASMappingQuery) WithProject(opts ...func(*ProjectQuery)) *CASMappingQuery { - query := (&ProjectClient{config: cmq.config}).Query() +func (_q *CASMappingQuery) WithProject(opts ...func(*ProjectQuery)) *CASMappingQuery { + query := (&ProjectClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - cmq.withProject = query - return cmq + _q.withProject = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -383,10 +383,10 @@ func (cmq *CASMappingQuery) WithProject(opts ...func(*ProjectQuery)) *CASMapping // GroupBy(casmapping.FieldDigest). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (cmq *CASMappingQuery) GroupBy(field string, fields ...string) *CASMappingGroupBy { - cmq.ctx.Fields = append([]string{field}, fields...) 
- grbuild := &CASMappingGroupBy{build: cmq} - grbuild.flds = &cmq.ctx.Fields +func (_q *CASMappingQuery) GroupBy(field string, fields ...string) *CASMappingGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &CASMappingGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = casmapping.Label grbuild.scan = grbuild.Scan return grbuild @@ -404,57 +404,57 @@ func (cmq *CASMappingQuery) GroupBy(field string, fields ...string) *CASMappingG // client.CASMapping.Query(). // Select(casmapping.FieldDigest). // Scan(ctx, &v) -func (cmq *CASMappingQuery) Select(fields ...string) *CASMappingSelect { - cmq.ctx.Fields = append(cmq.ctx.Fields, fields...) - sbuild := &CASMappingSelect{CASMappingQuery: cmq} +func (_q *CASMappingQuery) Select(fields ...string) *CASMappingSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &CASMappingSelect{CASMappingQuery: _q} sbuild.label = casmapping.Label - sbuild.flds, sbuild.scan = &cmq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a CASMappingSelect configured with the given aggregations. -func (cmq *CASMappingQuery) Aggregate(fns ...AggregateFunc) *CASMappingSelect { - return cmq.Select().Aggregate(fns...) +func (_q *CASMappingQuery) Aggregate(fns ...AggregateFunc) *CASMappingSelect { + return _q.Select().Aggregate(fns...) } -func (cmq *CASMappingQuery) prepareQuery(ctx context.Context) error { - for _, inter := range cmq.inters { +func (_q *CASMappingQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, cmq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range cmq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !casmapping.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if cmq.path != nil { - prev, err := cmq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - cmq.sql = prev + _q.sql = prev } return nil } -func (cmq *CASMappingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*CASMapping, error) { +func (_q *CASMappingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*CASMapping, error) { var ( nodes = []*CASMapping{} - withFKs = cmq.withFKs - _spec = cmq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [3]bool{ - cmq.withCasBackend != nil, - cmq.withOrganization != nil, - cmq.withProject != nil, + _q.withCasBackend != nil, + _q.withOrganization != nil, + _q.withProject != nil, } ) - if cmq.withCasBackend != nil { + if _q.withCasBackend != nil { withFKs = true } if withFKs { @@ -464,37 +464,37 @@ func (cmq *CASMappingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* return (*CASMapping).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &CASMapping{config: cmq.config} + node := &CASMapping{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(cmq.modifiers) > 0 { - _spec.Modifiers = cmq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, cmq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, 
_q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := cmq.withCasBackend; query != nil { - if err := cmq.loadCasBackend(ctx, query, nodes, nil, + if query := _q.withCasBackend; query != nil { + if err := _q.loadCasBackend(ctx, query, nodes, nil, func(n *CASMapping, e *CASBackend) { n.Edges.CasBackend = e }); err != nil { return nil, err } } - if query := cmq.withOrganization; query != nil { - if err := cmq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *CASMapping, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } } - if query := cmq.withProject; query != nil { - if err := cmq.loadProject(ctx, query, nodes, nil, + if query := _q.withProject; query != nil { + if err := _q.loadProject(ctx, query, nodes, nil, func(n *CASMapping, e *Project) { n.Edges.Project = e }); err != nil { return nil, err } @@ -502,7 +502,7 @@ func (cmq *CASMappingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* return nodes, nil } -func (cmq *CASMappingQuery) loadCasBackend(ctx context.Context, query *CASBackendQuery, nodes []*CASMapping, init func(*CASMapping), assign func(*CASMapping, *CASBackend)) error { +func (_q *CASMappingQuery) loadCasBackend(ctx context.Context, query *CASBackendQuery, nodes []*CASMapping, init func(*CASMapping), assign func(*CASMapping, *CASBackend)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*CASMapping) for i := range nodes { @@ -534,7 +534,7 @@ func (cmq *CASMappingQuery) loadCasBackend(ctx context.Context, query *CASBacken } return nil } -func (cmq *CASMappingQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*CASMapping, init func(*CASMapping), assign func(*CASMapping, *Organization)) error { +func (_q *CASMappingQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*CASMapping, init func(*CASMapping), assign func(*CASMapping, *Organization)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*CASMapping) for i := range nodes { @@ -563,7 +563,7 @@ func (cmq *CASMappingQuery) loadOrganization(ctx context.Context, query *Organiz } return nil } -func (cmq *CASMappingQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*CASMapping, init func(*CASMapping), assign func(*CASMapping, *Project)) error { +func (_q *CASMappingQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*CASMapping, init func(*CASMapping), assign func(*CASMapping, *Project)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*CASMapping) for i := range nodes { @@ -593,27 +593,27 @@ func (cmq *CASMappingQuery) loadProject(ctx context.Context, query *ProjectQuery return nil } -func (cmq *CASMappingQuery) sqlCount(ctx context.Context) (int, error) { - _spec := cmq.querySpec() - if len(cmq.modifiers) > 0 { - _spec.Modifiers = cmq.modifiers +func (_q *CASMappingQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = cmq.ctx.Fields - if len(cmq.ctx.Fields) > 0 { - _spec.Unique = cmq.ctx.Unique != nil && *cmq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, cmq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func 
(cmq *CASMappingQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *CASMappingQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(casmapping.Table, casmapping.Columns, sqlgraph.NewFieldSpec(casmapping.FieldID, field.TypeUUID)) - _spec.From = cmq.sql - if unique := cmq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if cmq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := cmq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, casmapping.FieldID) for i := range fields { @@ -621,27 +621,27 @@ func (cmq *CASMappingQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if cmq.withOrganization != nil { + if _q.withOrganization != nil { _spec.Node.AddColumnOnce(casmapping.FieldOrganizationID) } - if cmq.withProject != nil { + if _q.withProject != nil { _spec.Node.AddColumnOnce(casmapping.FieldProjectID) } } - if ps := cmq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := cmq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := cmq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := cmq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -651,36 +651,36 @@ func (cmq *CASMappingQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (cmq *CASMappingQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(cmq.driver.Dialect()) +func (_q *CASMappingQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(casmapping.Table) - columns := cmq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = casmapping.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if cmq.sql != nil { - selector = cmq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if cmq.ctx.Unique != nil && *cmq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range cmq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range cmq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range cmq.order { + for _, p := range _q.order { p(selector) } - if offset := cmq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := cmq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -689,33 +689,33 @@ func (cmq *CASMappingQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. 
-func (cmq *CASMappingQuery) ForUpdate(opts ...sql.LockOption) *CASMappingQuery { - if cmq.driver.Dialect() == dialect.Postgres { - cmq.Unique(false) +func (_q *CASMappingQuery) ForUpdate(opts ...sql.LockOption) *CASMappingQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - cmq.modifiers = append(cmq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return cmq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (cmq *CASMappingQuery) ForShare(opts ...sql.LockOption) *CASMappingQuery { - if cmq.driver.Dialect() == dialect.Postgres { - cmq.Unique(false) +func (_q *CASMappingQuery) ForShare(opts ...sql.LockOption) *CASMappingQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - cmq.modifiers = append(cmq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return cmq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (cmq *CASMappingQuery) Modify(modifiers ...func(s *sql.Selector)) *CASMappingSelect { - cmq.modifiers = append(cmq.modifiers, modifiers...) - return cmq.Select() +func (_q *CASMappingQuery) Modify(modifiers ...func(s *sql.Selector)) *CASMappingSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // CASMappingGroupBy is the group-by builder for CASMapping entities. @@ -725,41 +725,41 @@ type CASMappingGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (cmgb *CASMappingGroupBy) Aggregate(fns ...AggregateFunc) *CASMappingGroupBy { - cmgb.fns = append(cmgb.fns, fns...) - return cmgb +func (_g *CASMappingGroupBy) Aggregate(fns ...AggregateFunc) *CASMappingGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (cmgb *CASMappingGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, cmgb.build.ctx, ent.OpQueryGroupBy) - if err := cmgb.build.prepareQuery(ctx); err != nil { +func (_g *CASMappingGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*CASMappingQuery, *CASMappingGroupBy](ctx, cmgb.build, cmgb, cmgb.build.inters, v) + return scanWithInterceptors[*CASMappingQuery, *CASMappingGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (cmgb *CASMappingGroupBy) sqlScan(ctx context.Context, root *CASMappingQuery, v any) error { +func (_g *CASMappingGroupBy) sqlScan(ctx context.Context, root *CASMappingQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(cmgb.fns)) - for _, fn := range cmgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*cmgb.flds)+len(cmgb.fns)) - for _, f := range *cmgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) 
} - selector.GroupBy(selector.Columns(*cmgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := cmgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -773,27 +773,27 @@ type CASMappingSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (cms *CASMappingSelect) Aggregate(fns ...AggregateFunc) *CASMappingSelect { - cms.fns = append(cms.fns, fns...) - return cms +func (_s *CASMappingSelect) Aggregate(fns ...AggregateFunc) *CASMappingSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (cms *CASMappingSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, cms.ctx, ent.OpQuerySelect) - if err := cms.prepareQuery(ctx); err != nil { +func (_s *CASMappingSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*CASMappingQuery, *CASMappingSelect](ctx, cms.CASMappingQuery, cms, cms.inters, v) + return scanWithInterceptors[*CASMappingQuery, *CASMappingSelect](ctx, _s.CASMappingQuery, _s, _s.inters, v) } -func (cms *CASMappingSelect) sqlScan(ctx context.Context, root *CASMappingQuery, v any) error { +func (_s *CASMappingSelect) sqlScan(ctx context.Context, root *CASMappingQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(cms.fns)) - for _, fn := range cms.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*cms.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -801,7 +801,7 @@ func (cms *CASMappingSelect) sqlScan(ctx context.Context, root *CASMappingQuery, } rows := &sql.Rows{} query, args := selector.Query() - if err := cms.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -809,7 +809,7 @@ func (cms *CASMappingSelect) sqlScan(ctx context.Context, root *CASMappingQuery, } // Modify adds a query modifier for attaching custom logic to queries. -func (cms *CASMappingSelect) Modify(modifiers ...func(s *sql.Selector)) *CASMappingSelect { - cms.modifiers = append(cms.modifiers, modifiers...) - return cms +func (_s *CASMappingSelect) Modify(modifiers ...func(s *sql.Selector)) *CASMappingSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/casmapping_update.go b/app/controlplane/pkg/data/ent/casmapping_update.go index 74ca792c3..5d0e59a97 100644 --- a/app/controlplane/pkg/data/ent/casmapping_update.go +++ b/app/controlplane/pkg/data/ent/casmapping_update.go @@ -23,24 +23,24 @@ type CASMappingUpdate struct { } // Where appends a list predicates to the CASMappingUpdate builder. -func (cmu *CASMappingUpdate) Where(ps ...predicate.CASMapping) *CASMappingUpdate { - cmu.mutation.Where(ps...) - return cmu +func (_u *CASMappingUpdate) Where(ps ...predicate.CASMapping) *CASMappingUpdate { + _u.mutation.Where(ps...) 
+ return _u } // Mutation returns the CASMappingMutation object of the builder. -func (cmu *CASMappingUpdate) Mutation() *CASMappingMutation { - return cmu.mutation +func (_u *CASMappingUpdate) Mutation() *CASMappingMutation { + return _u.mutation } // Save executes the query and returns the number of nodes affected by the update operation. -func (cmu *CASMappingUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, cmu.sqlSave, cmu.mutation, cmu.hooks) +func (_u *CASMappingUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (cmu *CASMappingUpdate) SaveX(ctx context.Context) int { - affected, err := cmu.Save(ctx) +func (_u *CASMappingUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -48,52 +48,52 @@ func (cmu *CASMappingUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (cmu *CASMappingUpdate) Exec(ctx context.Context) error { - _, err := cmu.Save(ctx) +func (_u *CASMappingUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (cmu *CASMappingUpdate) ExecX(ctx context.Context) { - if err := cmu.Exec(ctx); err != nil { +func (_u *CASMappingUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (cmu *CASMappingUpdate) check() error { - if cmu.mutation.CasBackendCleared() && len(cmu.mutation.CasBackendIDs()) > 0 { +func (_u *CASMappingUpdate) check() error { + if _u.mutation.CasBackendCleared() && len(_u.mutation.CasBackendIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "CASMapping.cas_backend"`) } - if cmu.mutation.OrganizationCleared() && len(cmu.mutation.OrganizationIDs()) > 0 { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "CASMapping.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (cmu *CASMappingUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *CASMappingUpdate { - cmu.modifiers = append(cmu.modifiers, modifiers...) - return cmu +func (_u *CASMappingUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *CASMappingUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (cmu *CASMappingUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := cmu.check(); err != nil { - return n, err +func (_u *CASMappingUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(casmapping.Table, casmapping.Columns, sqlgraph.NewFieldSpec(casmapping.FieldID, field.TypeUUID)) - if ps := cmu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if cmu.mutation.WorkflowRunIDCleared() { + if _u.mutation.WorkflowRunIDCleared() { _spec.ClearField(casmapping.FieldWorkflowRunID, field.TypeUUID) } - _spec.AddModifiers(cmu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, cmu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) 
+ if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{casmapping.Label} } else if sqlgraph.IsConstraintError(err) { @@ -101,8 +101,8 @@ func (cmu *CASMappingUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - cmu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // CASMappingUpdateOne is the builder for updating a single CASMapping entity. @@ -115,31 +115,31 @@ type CASMappingUpdateOne struct { } // Mutation returns the CASMappingMutation object of the builder. -func (cmuo *CASMappingUpdateOne) Mutation() *CASMappingMutation { - return cmuo.mutation +func (_u *CASMappingUpdateOne) Mutation() *CASMappingMutation { + return _u.mutation } // Where appends a list predicates to the CASMappingUpdate builder. -func (cmuo *CASMappingUpdateOne) Where(ps ...predicate.CASMapping) *CASMappingUpdateOne { - cmuo.mutation.Where(ps...) - return cmuo +func (_u *CASMappingUpdateOne) Where(ps ...predicate.CASMapping) *CASMappingUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (cmuo *CASMappingUpdateOne) Select(field string, fields ...string) *CASMappingUpdateOne { - cmuo.fields = append([]string{field}, fields...) - return cmuo +func (_u *CASMappingUpdateOne) Select(field string, fields ...string) *CASMappingUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated CASMapping entity. -func (cmuo *CASMappingUpdateOne) Save(ctx context.Context) (*CASMapping, error) { - return withHooks(ctx, cmuo.sqlSave, cmuo.mutation, cmuo.hooks) +func (_u *CASMappingUpdateOne) Save(ctx context.Context) (*CASMapping, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (cmuo *CASMappingUpdateOne) SaveX(ctx context.Context) *CASMapping { - node, err := cmuo.Save(ctx) +func (_u *CASMappingUpdateOne) SaveX(ctx context.Context) *CASMapping { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -147,46 +147,46 @@ func (cmuo *CASMappingUpdateOne) SaveX(ctx context.Context) *CASMapping { } // Exec executes the query on the entity. -func (cmuo *CASMappingUpdateOne) Exec(ctx context.Context) error { - _, err := cmuo.Save(ctx) +func (_u *CASMappingUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (cmuo *CASMappingUpdateOne) ExecX(ctx context.Context) { - if err := cmuo.Exec(ctx); err != nil { +func (_u *CASMappingUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. 
-func (cmuo *CASMappingUpdateOne) check() error { - if cmuo.mutation.CasBackendCleared() && len(cmuo.mutation.CasBackendIDs()) > 0 { +func (_u *CASMappingUpdateOne) check() error { + if _u.mutation.CasBackendCleared() && len(_u.mutation.CasBackendIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "CASMapping.cas_backend"`) } - if cmuo.mutation.OrganizationCleared() && len(cmuo.mutation.OrganizationIDs()) > 0 { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "CASMapping.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (cmuo *CASMappingUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *CASMappingUpdateOne { - cmuo.modifiers = append(cmuo.modifiers, modifiers...) - return cmuo +func (_u *CASMappingUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *CASMappingUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (cmuo *CASMappingUpdateOne) sqlSave(ctx context.Context) (_node *CASMapping, err error) { - if err := cmuo.check(); err != nil { +func (_u *CASMappingUpdateOne) sqlSave(ctx context.Context) (_node *CASMapping, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(casmapping.Table, casmapping.Columns, sqlgraph.NewFieldSpec(casmapping.FieldID, field.TypeUUID)) - id, ok := cmuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "CASMapping.id" for update`)} } _spec.Node.ID.Value = id - if fields := cmuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, casmapping.FieldID) for _, f := range fields { @@ -198,21 +198,21 @@ func (cmuo *CASMappingUpdateOne) sqlSave(ctx context.Context) (_node *CASMapping } } } - if ps := cmuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if cmuo.mutation.WorkflowRunIDCleared() { + if _u.mutation.WorkflowRunIDCleared() { _spec.ClearField(casmapping.FieldWorkflowRunID, field.TypeUUID) } - _spec.AddModifiers(cmuo.modifiers...) - _node = &CASMapping{config: cmuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &CASMapping{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, cmuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{casmapping.Label} } else if sqlgraph.IsConstraintError(err) { @@ -220,6 +220,6 @@ func (cmuo *CASMappingUpdateOne) sqlSave(ctx context.Context) (_node *CASMapping } return nil, err } - cmuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/client.go b/app/controlplane/pkg/data/ent/client.go index a5d7652ed..6999ee81a 100644 --- a/app/controlplane/pkg/data/ent/client.go +++ b/app/controlplane/pkg/data/ent/client.go @@ -421,8 +421,8 @@ func (c *APITokenClient) Update() *APITokenUpdate { } // UpdateOne returns an update builder for the given entity. 
-func (c *APITokenClient) UpdateOne(at *APIToken) *APITokenUpdateOne { - mutation := newAPITokenMutation(c.config, OpUpdateOne, withAPIToken(at)) +func (c *APITokenClient) UpdateOne(_m *APIToken) *APITokenUpdateOne { + mutation := newAPITokenMutation(c.config, OpUpdateOne, withAPIToken(_m)) return &APITokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -439,8 +439,8 @@ func (c *APITokenClient) Delete() *APITokenDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *APITokenClient) DeleteOne(at *APIToken) *APITokenDeleteOne { - return c.DeleteOneID(at.ID) +func (c *APITokenClient) DeleteOne(_m *APIToken) *APITokenDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -475,32 +475,32 @@ func (c *APITokenClient) GetX(ctx context.Context, id uuid.UUID) *APIToken { } // QueryOrganization queries the organization edge of a APIToken. -func (c *APITokenClient) QueryOrganization(at *APIToken) *OrganizationQuery { +func (c *APITokenClient) QueryOrganization(_m *APIToken) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := at.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(apitoken.Table, apitoken.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, apitoken.OrganizationTable, apitoken.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(at.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryProject queries the project edge of a APIToken. -func (c *APITokenClient) QueryProject(at *APIToken) *ProjectQuery { +func (c *APITokenClient) QueryProject(_m *APIToken) *ProjectQuery { query := (&ProjectClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := at.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(apitoken.Table, apitoken.FieldID, id), sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, apitoken.ProjectTable, apitoken.ProjectColumn), ) - fromV = sqlgraph.Neighbors(at.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -586,8 +586,8 @@ func (c *AttestationClient) Update() *AttestationUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *AttestationClient) UpdateOne(a *Attestation) *AttestationUpdateOne { - mutation := newAttestationMutation(c.config, OpUpdateOne, withAttestation(a)) +func (c *AttestationClient) UpdateOne(_m *Attestation) *AttestationUpdateOne { + mutation := newAttestationMutation(c.config, OpUpdateOne, withAttestation(_m)) return &AttestationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -604,8 +604,8 @@ func (c *AttestationClient) Delete() *AttestationDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *AttestationClient) DeleteOne(a *Attestation) *AttestationDeleteOne { - return c.DeleteOneID(a.ID) +func (c *AttestationClient) DeleteOne(_m *Attestation) *AttestationDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -640,16 +640,16 @@ func (c *AttestationClient) GetX(ctx context.Context, id uuid.UUID) *Attestation } // QueryWorkflowrun queries the workflowrun edge of a Attestation. 
-func (c *AttestationClient) QueryWorkflowrun(a *Attestation) *WorkflowRunQuery { +func (c *AttestationClient) QueryWorkflowrun(_m *Attestation) *WorkflowRunQuery { query := (&WorkflowRunClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := a.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(attestation.Table, attestation.FieldID, id), sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.O2O, true, attestation.WorkflowrunTable, attestation.WorkflowrunColumn), ) - fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -735,8 +735,8 @@ func (c *CASBackendClient) Update() *CASBackendUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *CASBackendClient) UpdateOne(cb *CASBackend) *CASBackendUpdateOne { - mutation := newCASBackendMutation(c.config, OpUpdateOne, withCASBackend(cb)) +func (c *CASBackendClient) UpdateOne(_m *CASBackend) *CASBackendUpdateOne { + mutation := newCASBackendMutation(c.config, OpUpdateOne, withCASBackend(_m)) return &CASBackendUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -753,8 +753,8 @@ func (c *CASBackendClient) Delete() *CASBackendDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *CASBackendClient) DeleteOne(cb *CASBackend) *CASBackendDeleteOne { - return c.DeleteOneID(cb.ID) +func (c *CASBackendClient) DeleteOne(_m *CASBackend) *CASBackendDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -789,32 +789,32 @@ func (c *CASBackendClient) GetX(ctx context.Context, id uuid.UUID) *CASBackend { } // QueryOrganization queries the organization edge of a CASBackend. -func (c *CASBackendClient) QueryOrganization(cb *CASBackend) *OrganizationQuery { +func (c *CASBackendClient) QueryOrganization(_m *CASBackend) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := cb.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(casbackend.Table, casbackend.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, casbackend.OrganizationTable, casbackend.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(cb.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryWorkflowRun queries the workflow_run edge of a CASBackend. -func (c *CASBackendClient) QueryWorkflowRun(cb *CASBackend) *WorkflowRunQuery { +func (c *CASBackendClient) QueryWorkflowRun(_m *CASBackend) *WorkflowRunQuery { query := (&WorkflowRunClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := cb.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(casbackend.Table, casbackend.FieldID, id), sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.M2M, true, casbackend.WorkflowRunTable, casbackend.WorkflowRunPrimaryKey...), ) - fromV = sqlgraph.Neighbors(cb.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -900,8 +900,8 @@ func (c *CASMappingClient) Update() *CASMappingUpdate { } // UpdateOne returns an update builder for the given entity. 
-func (c *CASMappingClient) UpdateOne(cm *CASMapping) *CASMappingUpdateOne { - mutation := newCASMappingMutation(c.config, OpUpdateOne, withCASMapping(cm)) +func (c *CASMappingClient) UpdateOne(_m *CASMapping) *CASMappingUpdateOne { + mutation := newCASMappingMutation(c.config, OpUpdateOne, withCASMapping(_m)) return &CASMappingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -918,8 +918,8 @@ func (c *CASMappingClient) Delete() *CASMappingDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *CASMappingClient) DeleteOne(cm *CASMapping) *CASMappingDeleteOne { - return c.DeleteOneID(cm.ID) +func (c *CASMappingClient) DeleteOne(_m *CASMapping) *CASMappingDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -954,48 +954,48 @@ func (c *CASMappingClient) GetX(ctx context.Context, id uuid.UUID) *CASMapping { } // QueryCasBackend queries the cas_backend edge of a CASMapping. -func (c *CASMappingClient) QueryCasBackend(cm *CASMapping) *CASBackendQuery { +func (c *CASMappingClient) QueryCasBackend(_m *CASMapping) *CASBackendQuery { query := (&CASBackendClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := cm.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(casmapping.Table, casmapping.FieldID, id), sqlgraph.To(casbackend.Table, casbackend.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, casmapping.CasBackendTable, casmapping.CasBackendColumn), ) - fromV = sqlgraph.Neighbors(cm.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryOrganization queries the organization edge of a CASMapping. -func (c *CASMappingClient) QueryOrganization(cm *CASMapping) *OrganizationQuery { +func (c *CASMappingClient) QueryOrganization(_m *CASMapping) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := cm.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(casmapping.Table, casmapping.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, casmapping.OrganizationTable, casmapping.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(cm.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryProject queries the project edge of a CASMapping. -func (c *CASMappingClient) QueryProject(cm *CASMapping) *ProjectQuery { +func (c *CASMappingClient) QueryProject(_m *CASMapping) *ProjectQuery { query := (&ProjectClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := cm.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(casmapping.Table, casmapping.FieldID, id), sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, casmapping.ProjectTable, casmapping.ProjectColumn), ) - fromV = sqlgraph.Neighbors(cm.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -1081,8 +1081,8 @@ func (c *GroupClient) Update() *GroupUpdate { } // UpdateOne returns an update builder for the given entity. 
-func (c *GroupClient) UpdateOne(gr *Group) *GroupUpdateOne { - mutation := newGroupMutation(c.config, OpUpdateOne, withGroup(gr)) +func (c *GroupClient) UpdateOne(_m *Group) *GroupUpdateOne { + mutation := newGroupMutation(c.config, OpUpdateOne, withGroup(_m)) return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -1099,8 +1099,8 @@ func (c *GroupClient) Delete() *GroupDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *GroupClient) DeleteOne(gr *Group) *GroupDeleteOne { - return c.DeleteOneID(gr.ID) +func (c *GroupClient) DeleteOne(_m *Group) *GroupDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -1135,32 +1135,32 @@ func (c *GroupClient) GetX(ctx context.Context, id uuid.UUID) *Group { } // QueryGroupMemberships queries the group_memberships edge of a Group. -func (c *GroupClient) QueryGroupMemberships(gr *Group) *GroupMembershipQuery { +func (c *GroupClient) QueryGroupMemberships(_m *Group) *GroupMembershipQuery { query := (&GroupMembershipClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := gr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(group.Table, group.FieldID, id), sqlgraph.To(groupmembership.Table, groupmembership.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, group.GroupMembershipsTable, group.GroupMembershipsColumn), ) - fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryOrganization queries the organization edge of a Group. -func (c *GroupClient) QueryOrganization(gr *Group) *OrganizationQuery { +func (c *GroupClient) QueryOrganization(_m *Group) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := gr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(group.Table, group.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, group.OrganizationTable, group.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -1246,8 +1246,8 @@ func (c *GroupMembershipClient) Update() *GroupMembershipUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *GroupMembershipClient) UpdateOne(gm *GroupMembership) *GroupMembershipUpdateOne { - mutation := newGroupMembershipMutation(c.config, OpUpdateOne, withGroupMembership(gm)) +func (c *GroupMembershipClient) UpdateOne(_m *GroupMembership) *GroupMembershipUpdateOne { + mutation := newGroupMembershipMutation(c.config, OpUpdateOne, withGroupMembership(_m)) return &GroupMembershipUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -1264,8 +1264,8 @@ func (c *GroupMembershipClient) Delete() *GroupMembershipDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *GroupMembershipClient) DeleteOne(gm *GroupMembership) *GroupMembershipDeleteOne { - return c.DeleteOneID(gm.ID) +func (c *GroupMembershipClient) DeleteOne(_m *GroupMembership) *GroupMembershipDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. 
@@ -1300,32 +1300,32 @@ func (c *GroupMembershipClient) GetX(ctx context.Context, id uuid.UUID) *GroupMe } // QueryGroup queries the group edge of a GroupMembership. -func (c *GroupMembershipClient) QueryGroup(gm *GroupMembership) *GroupQuery { +func (c *GroupMembershipClient) QueryGroup(_m *GroupMembership) *GroupQuery { query := (&GroupClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := gm.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(groupmembership.Table, groupmembership.FieldID, id), sqlgraph.To(group.Table, group.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, groupmembership.GroupTable, groupmembership.GroupColumn), ) - fromV = sqlgraph.Neighbors(gm.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryUser queries the user edge of a GroupMembership. -func (c *GroupMembershipClient) QueryUser(gm *GroupMembership) *UserQuery { +func (c *GroupMembershipClient) QueryUser(_m *GroupMembership) *UserQuery { query := (&UserClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := gm.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(groupmembership.Table, groupmembership.FieldID, id), sqlgraph.To(user.Table, user.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, groupmembership.UserTable, groupmembership.UserColumn), ) - fromV = sqlgraph.Neighbors(gm.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -1411,8 +1411,8 @@ func (c *IntegrationClient) Update() *IntegrationUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *IntegrationClient) UpdateOne(i *Integration) *IntegrationUpdateOne { - mutation := newIntegrationMutation(c.config, OpUpdateOne, withIntegration(i)) +func (c *IntegrationClient) UpdateOne(_m *Integration) *IntegrationUpdateOne { + mutation := newIntegrationMutation(c.config, OpUpdateOne, withIntegration(_m)) return &IntegrationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -1429,8 +1429,8 @@ func (c *IntegrationClient) Delete() *IntegrationDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *IntegrationClient) DeleteOne(i *Integration) *IntegrationDeleteOne { - return c.DeleteOneID(i.ID) +func (c *IntegrationClient) DeleteOne(_m *Integration) *IntegrationDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -1465,32 +1465,32 @@ func (c *IntegrationClient) GetX(ctx context.Context, id uuid.UUID) *Integration } // QueryAttachments queries the attachments edge of a Integration. -func (c *IntegrationClient) QueryAttachments(i *Integration) *IntegrationAttachmentQuery { +func (c *IntegrationClient) QueryAttachments(_m *Integration) *IntegrationAttachmentQuery { query := (&IntegrationAttachmentClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := i.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(integration.Table, integration.FieldID, id), sqlgraph.To(integrationattachment.Table, integrationattachment.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, integration.AttachmentsTable, integration.AttachmentsColumn), ) - fromV = sqlgraph.Neighbors(i.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryOrganization queries the organization edge of a Integration. 
-func (c *IntegrationClient) QueryOrganization(i *Integration) *OrganizationQuery { +func (c *IntegrationClient) QueryOrganization(_m *Integration) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := i.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(integration.Table, integration.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, integration.OrganizationTable, integration.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(i.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -1576,8 +1576,8 @@ func (c *IntegrationAttachmentClient) Update() *IntegrationAttachmentUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *IntegrationAttachmentClient) UpdateOne(ia *IntegrationAttachment) *IntegrationAttachmentUpdateOne { - mutation := newIntegrationAttachmentMutation(c.config, OpUpdateOne, withIntegrationAttachment(ia)) +func (c *IntegrationAttachmentClient) UpdateOne(_m *IntegrationAttachment) *IntegrationAttachmentUpdateOne { + mutation := newIntegrationAttachmentMutation(c.config, OpUpdateOne, withIntegrationAttachment(_m)) return &IntegrationAttachmentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -1594,8 +1594,8 @@ func (c *IntegrationAttachmentClient) Delete() *IntegrationAttachmentDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *IntegrationAttachmentClient) DeleteOne(ia *IntegrationAttachment) *IntegrationAttachmentDeleteOne { - return c.DeleteOneID(ia.ID) +func (c *IntegrationAttachmentClient) DeleteOne(_m *IntegrationAttachment) *IntegrationAttachmentDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -1630,32 +1630,32 @@ func (c *IntegrationAttachmentClient) GetX(ctx context.Context, id uuid.UUID) *I } // QueryIntegration queries the integration edge of a IntegrationAttachment. -func (c *IntegrationAttachmentClient) QueryIntegration(ia *IntegrationAttachment) *IntegrationQuery { +func (c *IntegrationAttachmentClient) QueryIntegration(_m *IntegrationAttachment) *IntegrationQuery { query := (&IntegrationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := ia.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(integrationattachment.Table, integrationattachment.FieldID, id), sqlgraph.To(integration.Table, integration.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, integrationattachment.IntegrationTable, integrationattachment.IntegrationColumn), ) - fromV = sqlgraph.Neighbors(ia.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryWorkflow queries the workflow edge of a IntegrationAttachment. 
-func (c *IntegrationAttachmentClient) QueryWorkflow(ia *IntegrationAttachment) *WorkflowQuery { +func (c *IntegrationAttachmentClient) QueryWorkflow(_m *IntegrationAttachment) *WorkflowQuery { query := (&WorkflowClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := ia.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(integrationattachment.Table, integrationattachment.FieldID, id), sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, integrationattachment.WorkflowTable, integrationattachment.WorkflowColumn), ) - fromV = sqlgraph.Neighbors(ia.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -1741,8 +1741,8 @@ func (c *MembershipClient) Update() *MembershipUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *MembershipClient) UpdateOne(m *Membership) *MembershipUpdateOne { - mutation := newMembershipMutation(c.config, OpUpdateOne, withMembership(m)) +func (c *MembershipClient) UpdateOne(_m *Membership) *MembershipUpdateOne { + mutation := newMembershipMutation(c.config, OpUpdateOne, withMembership(_m)) return &MembershipUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -1759,8 +1759,8 @@ func (c *MembershipClient) Delete() *MembershipDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *MembershipClient) DeleteOne(m *Membership) *MembershipDeleteOne { - return c.DeleteOneID(m.ID) +func (c *MembershipClient) DeleteOne(_m *Membership) *MembershipDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -1795,64 +1795,64 @@ func (c *MembershipClient) GetX(ctx context.Context, id uuid.UUID) *Membership { } // QueryOrganization queries the organization edge of a Membership. -func (c *MembershipClient) QueryOrganization(m *Membership) *OrganizationQuery { +func (c *MembershipClient) QueryOrganization(_m *Membership) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := m.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(membership.Table, membership.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, membership.OrganizationTable, membership.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(m.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryUser queries the user edge of a Membership. -func (c *MembershipClient) QueryUser(m *Membership) *UserQuery { +func (c *MembershipClient) QueryUser(_m *Membership) *UserQuery { query := (&UserClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := m.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(membership.Table, membership.FieldID, id), sqlgraph.To(user.Table, user.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, membership.UserTable, membership.UserColumn), ) - fromV = sqlgraph.Neighbors(m.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryParent queries the parent edge of a Membership. 
-func (c *MembershipClient) QueryParent(m *Membership) *MembershipQuery { +func (c *MembershipClient) QueryParent(_m *Membership) *MembershipQuery { query := (&MembershipClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := m.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(membership.Table, membership.FieldID, id), sqlgraph.To(membership.Table, membership.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, membership.ParentTable, membership.ParentColumn), ) - fromV = sqlgraph.Neighbors(m.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryChildren queries the children edge of a Membership. -func (c *MembershipClient) QueryChildren(m *Membership) *MembershipQuery { +func (c *MembershipClient) QueryChildren(_m *Membership) *MembershipQuery { query := (&MembershipClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := m.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(membership.Table, membership.FieldID, id), sqlgraph.To(membership.Table, membership.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, membership.ChildrenTable, membership.ChildrenColumn), ) - fromV = sqlgraph.Neighbors(m.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -1938,8 +1938,8 @@ func (c *OrgInvitationClient) Update() *OrgInvitationUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *OrgInvitationClient) UpdateOne(oi *OrgInvitation) *OrgInvitationUpdateOne { - mutation := newOrgInvitationMutation(c.config, OpUpdateOne, withOrgInvitation(oi)) +func (c *OrgInvitationClient) UpdateOne(_m *OrgInvitation) *OrgInvitationUpdateOne { + mutation := newOrgInvitationMutation(c.config, OpUpdateOne, withOrgInvitation(_m)) return &OrgInvitationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -1956,8 +1956,8 @@ func (c *OrgInvitationClient) Delete() *OrgInvitationDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *OrgInvitationClient) DeleteOne(oi *OrgInvitation) *OrgInvitationDeleteOne { - return c.DeleteOneID(oi.ID) +func (c *OrgInvitationClient) DeleteOne(_m *OrgInvitation) *OrgInvitationDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -1992,32 +1992,32 @@ func (c *OrgInvitationClient) GetX(ctx context.Context, id uuid.UUID) *OrgInvita } // QueryOrganization queries the organization edge of a OrgInvitation. -func (c *OrgInvitationClient) QueryOrganization(oi *OrgInvitation) *OrganizationQuery { +func (c *OrgInvitationClient) QueryOrganization(_m *OrgInvitation) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := oi.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(orginvitation.Table, orginvitation.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, orginvitation.OrganizationTable, orginvitation.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(oi.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QuerySender queries the sender edge of a OrgInvitation. 
-func (c *OrgInvitationClient) QuerySender(oi *OrgInvitation) *UserQuery { +func (c *OrgInvitationClient) QuerySender(_m *OrgInvitation) *UserQuery { query := (&UserClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := oi.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(orginvitation.Table, orginvitation.FieldID, id), sqlgraph.To(user.Table, user.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, orginvitation.SenderTable, orginvitation.SenderColumn), ) - fromV = sqlgraph.Neighbors(oi.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -2103,8 +2103,8 @@ func (c *OrganizationClient) Update() *OrganizationUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *OrganizationClient) UpdateOne(o *Organization) *OrganizationUpdateOne { - mutation := newOrganizationMutation(c.config, OpUpdateOne, withOrganization(o)) +func (c *OrganizationClient) UpdateOne(_m *Organization) *OrganizationUpdateOne { + mutation := newOrganizationMutation(c.config, OpUpdateOne, withOrganization(_m)) return &OrganizationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -2121,8 +2121,8 @@ func (c *OrganizationClient) Delete() *OrganizationDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *OrganizationClient) DeleteOne(o *Organization) *OrganizationDeleteOne { - return c.DeleteOneID(o.ID) +func (c *OrganizationClient) DeleteOne(_m *Organization) *OrganizationDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -2157,128 +2157,128 @@ func (c *OrganizationClient) GetX(ctx context.Context, id uuid.UUID) *Organizati } // QueryMemberships queries the memberships edge of a Organization. -func (c *OrganizationClient) QueryMemberships(o *Organization) *MembershipQuery { +func (c *OrganizationClient) QueryMemberships(_m *Organization) *MembershipQuery { query := (&MembershipClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := o.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(organization.Table, organization.FieldID, id), sqlgraph.To(membership.Table, membership.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.MembershipsTable, organization.MembershipsColumn), ) - fromV = sqlgraph.Neighbors(o.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryWorkflowContracts queries the workflow_contracts edge of a Organization. -func (c *OrganizationClient) QueryWorkflowContracts(o *Organization) *WorkflowContractQuery { +func (c *OrganizationClient) QueryWorkflowContracts(_m *Organization) *WorkflowContractQuery { query := (&WorkflowContractClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := o.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(organization.Table, organization.FieldID, id), sqlgraph.To(workflowcontract.Table, workflowcontract.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.WorkflowContractsTable, organization.WorkflowContractsColumn), ) - fromV = sqlgraph.Neighbors(o.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryWorkflows queries the workflows edge of a Organization. 
-func (c *OrganizationClient) QueryWorkflows(o *Organization) *WorkflowQuery { +func (c *OrganizationClient) QueryWorkflows(_m *Organization) *WorkflowQuery { query := (&WorkflowClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := o.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(organization.Table, organization.FieldID, id), sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.WorkflowsTable, organization.WorkflowsColumn), ) - fromV = sqlgraph.Neighbors(o.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryCasBackends queries the cas_backends edge of a Organization. -func (c *OrganizationClient) QueryCasBackends(o *Organization) *CASBackendQuery { +func (c *OrganizationClient) QueryCasBackends(_m *Organization) *CASBackendQuery { query := (&CASBackendClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := o.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(organization.Table, organization.FieldID, id), sqlgraph.To(casbackend.Table, casbackend.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.CasBackendsTable, organization.CasBackendsColumn), ) - fromV = sqlgraph.Neighbors(o.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryIntegrations queries the integrations edge of a Organization. -func (c *OrganizationClient) QueryIntegrations(o *Organization) *IntegrationQuery { +func (c *OrganizationClient) QueryIntegrations(_m *Organization) *IntegrationQuery { query := (&IntegrationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := o.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(organization.Table, organization.FieldID, id), sqlgraph.To(integration.Table, integration.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.IntegrationsTable, organization.IntegrationsColumn), ) - fromV = sqlgraph.Neighbors(o.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryAPITokens queries the api_tokens edge of a Organization. -func (c *OrganizationClient) QueryAPITokens(o *Organization) *APITokenQuery { +func (c *OrganizationClient) QueryAPITokens(_m *Organization) *APITokenQuery { query := (&APITokenClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := o.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(organization.Table, organization.FieldID, id), sqlgraph.To(apitoken.Table, apitoken.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.APITokensTable, organization.APITokensColumn), ) - fromV = sqlgraph.Neighbors(o.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryProjects queries the projects edge of a Organization. 
-func (c *OrganizationClient) QueryProjects(o *Organization) *ProjectQuery { +func (c *OrganizationClient) QueryProjects(_m *Organization) *ProjectQuery { query := (&ProjectClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := o.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(organization.Table, organization.FieldID, id), sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.ProjectsTable, organization.ProjectsColumn), ) - fromV = sqlgraph.Neighbors(o.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryGroups queries the groups edge of a Organization. -func (c *OrganizationClient) QueryGroups(o *Organization) *GroupQuery { +func (c *OrganizationClient) QueryGroups(_m *Organization) *GroupQuery { query := (&GroupClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := o.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(organization.Table, organization.FieldID, id), sqlgraph.To(group.Table, group.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.GroupsTable, organization.GroupsColumn), ) - fromV = sqlgraph.Neighbors(o.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -2364,8 +2364,8 @@ func (c *ProjectClient) Update() *ProjectUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *ProjectClient) UpdateOne(pr *Project) *ProjectUpdateOne { - mutation := newProjectMutation(c.config, OpUpdateOne, withProject(pr)) +func (c *ProjectClient) UpdateOne(_m *Project) *ProjectUpdateOne { + mutation := newProjectMutation(c.config, OpUpdateOne, withProject(_m)) return &ProjectUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -2382,8 +2382,8 @@ func (c *ProjectClient) Delete() *ProjectDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *ProjectClient) DeleteOne(pr *Project) *ProjectDeleteOne { - return c.DeleteOneID(pr.ID) +func (c *ProjectClient) DeleteOne(_m *Project) *ProjectDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -2418,48 +2418,48 @@ func (c *ProjectClient) GetX(ctx context.Context, id uuid.UUID) *Project { } // QueryOrganization queries the organization edge of a Project. -func (c *ProjectClient) QueryOrganization(pr *Project) *OrganizationQuery { +func (c *ProjectClient) QueryOrganization(_m *Project) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := pr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(project.Table, project.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, project.OrganizationTable, project.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(pr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryWorkflows queries the workflows edge of a Project. 
-func (c *ProjectClient) QueryWorkflows(pr *Project) *WorkflowQuery { +func (c *ProjectClient) QueryWorkflows(_m *Project) *WorkflowQuery { query := (&WorkflowClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := pr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(project.Table, project.FieldID, id), sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, project.WorkflowsTable, project.WorkflowsColumn), ) - fromV = sqlgraph.Neighbors(pr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryVersions queries the versions edge of a Project. -func (c *ProjectClient) QueryVersions(pr *Project) *ProjectVersionQuery { +func (c *ProjectClient) QueryVersions(_m *Project) *ProjectVersionQuery { query := (&ProjectVersionClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := pr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(project.Table, project.FieldID, id), sqlgraph.To(projectversion.Table, projectversion.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, project.VersionsTable, project.VersionsColumn), ) - fromV = sqlgraph.Neighbors(pr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -2545,8 +2545,8 @@ func (c *ProjectVersionClient) Update() *ProjectVersionUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *ProjectVersionClient) UpdateOne(pv *ProjectVersion) *ProjectVersionUpdateOne { - mutation := newProjectVersionMutation(c.config, OpUpdateOne, withProjectVersion(pv)) +func (c *ProjectVersionClient) UpdateOne(_m *ProjectVersion) *ProjectVersionUpdateOne { + mutation := newProjectVersionMutation(c.config, OpUpdateOne, withProjectVersion(_m)) return &ProjectVersionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -2563,8 +2563,8 @@ func (c *ProjectVersionClient) Delete() *ProjectVersionDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *ProjectVersionClient) DeleteOne(pv *ProjectVersion) *ProjectVersionDeleteOne { - return c.DeleteOneID(pv.ID) +func (c *ProjectVersionClient) DeleteOne(_m *ProjectVersion) *ProjectVersionDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -2599,32 +2599,32 @@ func (c *ProjectVersionClient) GetX(ctx context.Context, id uuid.UUID) *ProjectV } // QueryProject queries the project edge of a ProjectVersion. -func (c *ProjectVersionClient) QueryProject(pv *ProjectVersion) *ProjectQuery { +func (c *ProjectVersionClient) QueryProject(_m *ProjectVersion) *ProjectQuery { query := (&ProjectClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := pv.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(projectversion.Table, projectversion.FieldID, id), sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, projectversion.ProjectTable, projectversion.ProjectColumn), ) - fromV = sqlgraph.Neighbors(pv.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryRuns queries the runs edge of a ProjectVersion. 
-func (c *ProjectVersionClient) QueryRuns(pv *ProjectVersion) *WorkflowRunQuery { +func (c *ProjectVersionClient) QueryRuns(_m *ProjectVersion) *WorkflowRunQuery { query := (&WorkflowRunClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := pv.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(projectversion.Table, projectversion.FieldID, id), sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, projectversion.RunsTable, projectversion.RunsColumn), ) - fromV = sqlgraph.Neighbors(pv.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -2710,8 +2710,8 @@ func (c *ReferrerClient) Update() *ReferrerUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *ReferrerClient) UpdateOne(r *Referrer) *ReferrerUpdateOne { - mutation := newReferrerMutation(c.config, OpUpdateOne, withReferrer(r)) +func (c *ReferrerClient) UpdateOne(_m *Referrer) *ReferrerUpdateOne { + mutation := newReferrerMutation(c.config, OpUpdateOne, withReferrer(_m)) return &ReferrerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -2728,8 +2728,8 @@ func (c *ReferrerClient) Delete() *ReferrerDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *ReferrerClient) DeleteOne(r *Referrer) *ReferrerDeleteOne { - return c.DeleteOneID(r.ID) +func (c *ReferrerClient) DeleteOne(_m *Referrer) *ReferrerDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -2764,48 +2764,48 @@ func (c *ReferrerClient) GetX(ctx context.Context, id uuid.UUID) *Referrer { } // QueryReferredBy queries the referred_by edge of a Referrer. -func (c *ReferrerClient) QueryReferredBy(r *Referrer) *ReferrerQuery { +func (c *ReferrerClient) QueryReferredBy(_m *Referrer) *ReferrerQuery { query := (&ReferrerClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := r.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(referrer.Table, referrer.FieldID, id), sqlgraph.To(referrer.Table, referrer.FieldID), sqlgraph.Edge(sqlgraph.M2M, true, referrer.ReferredByTable, referrer.ReferredByPrimaryKey...), ) - fromV = sqlgraph.Neighbors(r.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryReferences queries the references edge of a Referrer. -func (c *ReferrerClient) QueryReferences(r *Referrer) *ReferrerQuery { +func (c *ReferrerClient) QueryReferences(_m *Referrer) *ReferrerQuery { query := (&ReferrerClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := r.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(referrer.Table, referrer.FieldID, id), sqlgraph.To(referrer.Table, referrer.FieldID), sqlgraph.Edge(sqlgraph.M2M, false, referrer.ReferencesTable, referrer.ReferencesPrimaryKey...), ) - fromV = sqlgraph.Neighbors(r.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryWorkflows queries the workflows edge of a Referrer. 
-func (c *ReferrerClient) QueryWorkflows(r *Referrer) *WorkflowQuery { +func (c *ReferrerClient) QueryWorkflows(_m *Referrer) *WorkflowQuery { query := (&WorkflowClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := r.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(referrer.Table, referrer.FieldID, id), sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.M2M, false, referrer.WorkflowsTable, referrer.WorkflowsPrimaryKey...), ) - fromV = sqlgraph.Neighbors(r.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -2891,8 +2891,8 @@ func (c *RobotAccountClient) Update() *RobotAccountUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *RobotAccountClient) UpdateOne(ra *RobotAccount) *RobotAccountUpdateOne { - mutation := newRobotAccountMutation(c.config, OpUpdateOne, withRobotAccount(ra)) +func (c *RobotAccountClient) UpdateOne(_m *RobotAccount) *RobotAccountUpdateOne { + mutation := newRobotAccountMutation(c.config, OpUpdateOne, withRobotAccount(_m)) return &RobotAccountUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -2909,8 +2909,8 @@ func (c *RobotAccountClient) Delete() *RobotAccountDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *RobotAccountClient) DeleteOne(ra *RobotAccount) *RobotAccountDeleteOne { - return c.DeleteOneID(ra.ID) +func (c *RobotAccountClient) DeleteOne(_m *RobotAccount) *RobotAccountDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -2945,16 +2945,16 @@ func (c *RobotAccountClient) GetX(ctx context.Context, id uuid.UUID) *RobotAccou } // QueryWorkflow queries the workflow edge of a RobotAccount. -func (c *RobotAccountClient) QueryWorkflow(ra *RobotAccount) *WorkflowQuery { +func (c *RobotAccountClient) QueryWorkflow(_m *RobotAccount) *WorkflowQuery { query := (&WorkflowClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := ra.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(robotaccount.Table, robotaccount.FieldID, id), sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, robotaccount.WorkflowTable, robotaccount.WorkflowColumn), ) - fromV = sqlgraph.Neighbors(ra.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -3040,8 +3040,8 @@ func (c *UserClient) Update() *UserUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *UserClient) UpdateOne(u *User) *UserUpdateOne { - mutation := newUserMutation(c.config, OpUpdateOne, withUser(u)) +func (c *UserClient) UpdateOne(_m *User) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUser(_m)) return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -3058,8 +3058,8 @@ func (c *UserClient) Delete() *UserDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *UserClient) DeleteOne(u *User) *UserDeleteOne { - return c.DeleteOneID(u.ID) +func (c *UserClient) DeleteOne(_m *User) *UserDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -3094,32 +3094,32 @@ func (c *UserClient) GetX(ctx context.Context, id uuid.UUID) *User { } // QueryMemberships queries the memberships edge of a User. 
-func (c *UserClient) QueryMemberships(u *User) *MembershipQuery { +func (c *UserClient) QueryMemberships(_m *User) *MembershipQuery { query := (&MembershipClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := u.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(user.Table, user.FieldID, id), sqlgraph.To(membership.Table, membership.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, user.MembershipsTable, user.MembershipsColumn), ) - fromV = sqlgraph.Neighbors(u.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryGroupMemberships queries the group_memberships edge of a User. -func (c *UserClient) QueryGroupMemberships(u *User) *GroupMembershipQuery { +func (c *UserClient) QueryGroupMemberships(_m *User) *GroupMembershipQuery { query := (&GroupMembershipClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := u.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(user.Table, user.FieldID, id), sqlgraph.To(groupmembership.Table, groupmembership.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, user.GroupMembershipsTable, user.GroupMembershipsColumn), ) - fromV = sqlgraph.Neighbors(u.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -3205,8 +3205,8 @@ func (c *WorkflowClient) Update() *WorkflowUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *WorkflowClient) UpdateOne(w *Workflow) *WorkflowUpdateOne { - mutation := newWorkflowMutation(c.config, OpUpdateOne, withWorkflow(w)) +func (c *WorkflowClient) UpdateOne(_m *Workflow) *WorkflowUpdateOne { + mutation := newWorkflowMutation(c.config, OpUpdateOne, withWorkflow(_m)) return &WorkflowUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -3223,8 +3223,8 @@ func (c *WorkflowClient) Delete() *WorkflowDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *WorkflowClient) DeleteOne(w *Workflow) *WorkflowDeleteOne { - return c.DeleteOneID(w.ID) +func (c *WorkflowClient) DeleteOne(_m *Workflow) *WorkflowDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -3259,128 +3259,128 @@ func (c *WorkflowClient) GetX(ctx context.Context, id uuid.UUID) *Workflow { } // QueryRobotaccounts queries the robotaccounts edge of a Workflow. -func (c *WorkflowClient) QueryRobotaccounts(w *Workflow) *RobotAccountQuery { +func (c *WorkflowClient) QueryRobotaccounts(_m *Workflow) *RobotAccountQuery { query := (&RobotAccountClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := w.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflow.Table, workflow.FieldID, id), sqlgraph.To(robotaccount.Table, robotaccount.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, workflow.RobotaccountsTable, workflow.RobotaccountsColumn), ) - fromV = sqlgraph.Neighbors(w.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryWorkflowruns queries the workflowruns edge of a Workflow. 
-func (c *WorkflowClient) QueryWorkflowruns(w *Workflow) *WorkflowRunQuery { +func (c *WorkflowClient) QueryWorkflowruns(_m *Workflow) *WorkflowRunQuery { query := (&WorkflowRunClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := w.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflow.Table, workflow.FieldID, id), sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, workflow.WorkflowrunsTable, workflow.WorkflowrunsColumn), ) - fromV = sqlgraph.Neighbors(w.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryOrganization queries the organization edge of a Workflow. -func (c *WorkflowClient) QueryOrganization(w *Workflow) *OrganizationQuery { +func (c *WorkflowClient) QueryOrganization(_m *Workflow) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := w.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflow.Table, workflow.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflow.OrganizationTable, workflow.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(w.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryContract queries the contract edge of a Workflow. -func (c *WorkflowClient) QueryContract(w *Workflow) *WorkflowContractQuery { +func (c *WorkflowClient) QueryContract(_m *Workflow) *WorkflowContractQuery { query := (&WorkflowContractClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := w.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflow.Table, workflow.FieldID, id), sqlgraph.To(workflowcontract.Table, workflowcontract.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, workflow.ContractTable, workflow.ContractColumn), ) - fromV = sqlgraph.Neighbors(w.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryIntegrationAttachments queries the integration_attachments edge of a Workflow. -func (c *WorkflowClient) QueryIntegrationAttachments(w *Workflow) *IntegrationAttachmentQuery { +func (c *WorkflowClient) QueryIntegrationAttachments(_m *Workflow) *IntegrationAttachmentQuery { query := (&IntegrationAttachmentClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := w.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflow.Table, workflow.FieldID, id), sqlgraph.To(integrationattachment.Table, integrationattachment.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, workflow.IntegrationAttachmentsTable, workflow.IntegrationAttachmentsColumn), ) - fromV = sqlgraph.Neighbors(w.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryProject queries the project edge of a Workflow. 
-func (c *WorkflowClient) QueryProject(w *Workflow) *ProjectQuery { +func (c *WorkflowClient) QueryProject(_m *Workflow) *ProjectQuery { query := (&ProjectClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := w.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflow.Table, workflow.FieldID, id), sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflow.ProjectTable, workflow.ProjectColumn), ) - fromV = sqlgraph.Neighbors(w.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryLatestWorkflowRun queries the latest_workflow_run edge of a Workflow. -func (c *WorkflowClient) QueryLatestWorkflowRun(w *Workflow) *WorkflowRunQuery { +func (c *WorkflowClient) QueryLatestWorkflowRun(_m *Workflow) *WorkflowRunQuery { query := (&WorkflowRunClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := w.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflow.Table, workflow.FieldID, id), sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, workflow.LatestWorkflowRunTable, workflow.LatestWorkflowRunColumn), ) - fromV = sqlgraph.Neighbors(w.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryReferrers queries the referrers edge of a Workflow. -func (c *WorkflowClient) QueryReferrers(w *Workflow) *ReferrerQuery { +func (c *WorkflowClient) QueryReferrers(_m *Workflow) *ReferrerQuery { query := (&ReferrerClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := w.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflow.Table, workflow.FieldID, id), sqlgraph.To(referrer.Table, referrer.FieldID), sqlgraph.Edge(sqlgraph.M2M, true, workflow.ReferrersTable, workflow.ReferrersPrimaryKey...), ) - fromV = sqlgraph.Neighbors(w.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -3466,8 +3466,8 @@ func (c *WorkflowContractClient) Update() *WorkflowContractUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *WorkflowContractClient) UpdateOne(wc *WorkflowContract) *WorkflowContractUpdateOne { - mutation := newWorkflowContractMutation(c.config, OpUpdateOne, withWorkflowContract(wc)) +func (c *WorkflowContractClient) UpdateOne(_m *WorkflowContract) *WorkflowContractUpdateOne { + mutation := newWorkflowContractMutation(c.config, OpUpdateOne, withWorkflowContract(_m)) return &WorkflowContractUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -3484,8 +3484,8 @@ func (c *WorkflowContractClient) Delete() *WorkflowContractDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *WorkflowContractClient) DeleteOne(wc *WorkflowContract) *WorkflowContractDeleteOne { - return c.DeleteOneID(wc.ID) +func (c *WorkflowContractClient) DeleteOne(_m *WorkflowContract) *WorkflowContractDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -3520,48 +3520,48 @@ func (c *WorkflowContractClient) GetX(ctx context.Context, id uuid.UUID) *Workfl } // QueryVersions queries the versions edge of a WorkflowContract. 
-func (c *WorkflowContractClient) QueryVersions(wc *WorkflowContract) *WorkflowContractVersionQuery { +func (c *WorkflowContractClient) QueryVersions(_m *WorkflowContract) *WorkflowContractVersionQuery { query := (&WorkflowContractVersionClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wc.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowcontract.Table, workflowcontract.FieldID, id), sqlgraph.To(workflowcontractversion.Table, workflowcontractversion.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, workflowcontract.VersionsTable, workflowcontract.VersionsColumn), ) - fromV = sqlgraph.Neighbors(wc.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryOrganization queries the organization edge of a WorkflowContract. -func (c *WorkflowContractClient) QueryOrganization(wc *WorkflowContract) *OrganizationQuery { +func (c *WorkflowContractClient) QueryOrganization(_m *WorkflowContract) *OrganizationQuery { query := (&OrganizationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wc.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowcontract.Table, workflowcontract.FieldID, id), sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflowcontract.OrganizationTable, workflowcontract.OrganizationColumn), ) - fromV = sqlgraph.Neighbors(wc.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryWorkflows queries the workflows edge of a WorkflowContract. -func (c *WorkflowContractClient) QueryWorkflows(wc *WorkflowContract) *WorkflowQuery { +func (c *WorkflowContractClient) QueryWorkflows(_m *WorkflowContract) *WorkflowQuery { query := (&WorkflowClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wc.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowcontract.Table, workflowcontract.FieldID, id), sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, workflowcontract.WorkflowsTable, workflowcontract.WorkflowsColumn), ) - fromV = sqlgraph.Neighbors(wc.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -3647,8 +3647,8 @@ func (c *WorkflowContractVersionClient) Update() *WorkflowContractVersionUpdate } // UpdateOne returns an update builder for the given entity. -func (c *WorkflowContractVersionClient) UpdateOne(wcv *WorkflowContractVersion) *WorkflowContractVersionUpdateOne { - mutation := newWorkflowContractVersionMutation(c.config, OpUpdateOne, withWorkflowContractVersion(wcv)) +func (c *WorkflowContractVersionClient) UpdateOne(_m *WorkflowContractVersion) *WorkflowContractVersionUpdateOne { + mutation := newWorkflowContractVersionMutation(c.config, OpUpdateOne, withWorkflowContractVersion(_m)) return &WorkflowContractVersionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -3665,8 +3665,8 @@ func (c *WorkflowContractVersionClient) Delete() *WorkflowContractVersionDelete } // DeleteOne returns a builder for deleting the given entity. 
-func (c *WorkflowContractVersionClient) DeleteOne(wcv *WorkflowContractVersion) *WorkflowContractVersionDeleteOne { - return c.DeleteOneID(wcv.ID) +func (c *WorkflowContractVersionClient) DeleteOne(_m *WorkflowContractVersion) *WorkflowContractVersionDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -3701,16 +3701,16 @@ func (c *WorkflowContractVersionClient) GetX(ctx context.Context, id uuid.UUID) } // QueryContract queries the contract edge of a WorkflowContractVersion. -func (c *WorkflowContractVersionClient) QueryContract(wcv *WorkflowContractVersion) *WorkflowContractQuery { +func (c *WorkflowContractVersionClient) QueryContract(_m *WorkflowContractVersion) *WorkflowContractQuery { query := (&WorkflowContractClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wcv.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowcontractversion.Table, workflowcontractversion.FieldID, id), sqlgraph.To(workflowcontract.Table, workflowcontract.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflowcontractversion.ContractTable, workflowcontractversion.ContractColumn), ) - fromV = sqlgraph.Neighbors(wcv.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query @@ -3796,8 +3796,8 @@ func (c *WorkflowRunClient) Update() *WorkflowRunUpdate { } // UpdateOne returns an update builder for the given entity. -func (c *WorkflowRunClient) UpdateOne(wr *WorkflowRun) *WorkflowRunUpdateOne { - mutation := newWorkflowRunMutation(c.config, OpUpdateOne, withWorkflowRun(wr)) +func (c *WorkflowRunClient) UpdateOne(_m *WorkflowRun) *WorkflowRunUpdateOne { + mutation := newWorkflowRunMutation(c.config, OpUpdateOne, withWorkflowRun(_m)) return &WorkflowRunUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -3814,8 +3814,8 @@ func (c *WorkflowRunClient) Delete() *WorkflowRunDelete { } // DeleteOne returns a builder for deleting the given entity. -func (c *WorkflowRunClient) DeleteOne(wr *WorkflowRun) *WorkflowRunDeleteOne { - return c.DeleteOneID(wr.ID) +func (c *WorkflowRunClient) DeleteOne(_m *WorkflowRun) *WorkflowRunDeleteOne { + return c.DeleteOneID(_m.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. @@ -3850,80 +3850,80 @@ func (c *WorkflowRunClient) GetX(ctx context.Context, id uuid.UUID) *WorkflowRun } // QueryWorkflow queries the workflow edge of a WorkflowRun. -func (c *WorkflowRunClient) QueryWorkflow(wr *WorkflowRun) *WorkflowQuery { +func (c *WorkflowRunClient) QueryWorkflow(_m *WorkflowRun) *WorkflowQuery { query := (&WorkflowClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowrun.Table, workflowrun.FieldID, id), sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflowrun.WorkflowTable, workflowrun.WorkflowColumn), ) - fromV = sqlgraph.Neighbors(wr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryContractVersion queries the contract_version edge of a WorkflowRun. 
-func (c *WorkflowRunClient) QueryContractVersion(wr *WorkflowRun) *WorkflowContractVersionQuery { +func (c *WorkflowRunClient) QueryContractVersion(_m *WorkflowRun) *WorkflowContractVersionQuery { query := (&WorkflowContractVersionClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowrun.Table, workflowrun.FieldID, id), sqlgraph.To(workflowcontractversion.Table, workflowcontractversion.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, workflowrun.ContractVersionTable, workflowrun.ContractVersionColumn), ) - fromV = sqlgraph.Neighbors(wr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryCasBackends queries the cas_backends edge of a WorkflowRun. -func (c *WorkflowRunClient) QueryCasBackends(wr *WorkflowRun) *CASBackendQuery { +func (c *WorkflowRunClient) QueryCasBackends(_m *WorkflowRun) *CASBackendQuery { query := (&CASBackendClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowrun.Table, workflowrun.FieldID, id), sqlgraph.To(casbackend.Table, casbackend.FieldID), sqlgraph.Edge(sqlgraph.M2M, false, workflowrun.CasBackendsTable, workflowrun.CasBackendsPrimaryKey...), ) - fromV = sqlgraph.Neighbors(wr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryVersion queries the version edge of a WorkflowRun. -func (c *WorkflowRunClient) QueryVersion(wr *WorkflowRun) *ProjectVersionQuery { +func (c *WorkflowRunClient) QueryVersion(_m *WorkflowRun) *ProjectVersionQuery { query := (&ProjectVersionClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowrun.Table, workflowrun.FieldID, id), sqlgraph.To(projectversion.Table, projectversion.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflowrun.VersionTable, workflowrun.VersionColumn), ) - fromV = sqlgraph.Neighbors(wr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query } // QueryAttestationBundle queries the attestation_bundle edge of a WorkflowRun. -func (c *WorkflowRunClient) QueryAttestationBundle(wr *WorkflowRun) *AttestationQuery { +func (c *WorkflowRunClient) QueryAttestationBundle(_m *WorkflowRun) *AttestationQuery { query := (&AttestationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := wr.ID + id := _m.ID step := sqlgraph.NewStep( sqlgraph.From(workflowrun.Table, workflowrun.FieldID, id), sqlgraph.To(attestation.Table, attestation.FieldID), sqlgraph.Edge(sqlgraph.O2O, false, workflowrun.AttestationBundleTable, workflowrun.AttestationBundleColumn), ) - fromV = sqlgraph.Neighbors(wr.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) return fromV, nil } return query diff --git a/app/controlplane/pkg/data/ent/ent.go b/app/controlplane/pkg/data/ent/ent.go index 2ce1290cf..fc1fe487a 100644 --- a/app/controlplane/pkg/data/ent/ent.go +++ b/app/controlplane/pkg/data/ent/ent.go @@ -89,7 +89,7 @@ var ( ) // checkColumn checks if the column exists in the given table. 
-func checkColumn(table, column string) error { +func checkColumn(t, c string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ apitoken.Table: apitoken.ValidColumn, @@ -114,7 +114,7 @@ func checkColumn(table, column string) error { workflowrun.Table: workflowrun.ValidColumn, }) }) - return columnCheck(table, column) + return columnCheck(t, c) } // Asc applies the given fields in ASC order. diff --git a/app/controlplane/pkg/data/ent/group.go b/app/controlplane/pkg/data/ent/group.go index 5f67fea4d..8dea7c6fe 100644 --- a/app/controlplane/pkg/data/ent/group.go +++ b/app/controlplane/pkg/data/ent/group.go @@ -92,7 +92,7 @@ func (*Group) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Group fields. -func (gr *Group) assignValues(columns []string, values []any) error { +func (_m *Group) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -102,52 +102,52 @@ func (gr *Group) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - gr.ID = *value + _m.ID = *value } case group.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - gr.Name = value.String + _m.Name = value.String } case group.FieldDescription: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field description", values[i]) } else if value.Valid { - gr.Description = value.String + _m.Description = value.String } case group.FieldOrganizationID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field organization_id", values[i]) } else if value != nil { - gr.OrganizationID = *value + _m.OrganizationID = *value } case group.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - gr.CreatedAt = value.Time + _m.CreatedAt = value.Time } case group.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - gr.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case group.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - gr.DeletedAt = value.Time + _m.DeletedAt = value.Time } case group.FieldMemberCount: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field member_count", values[i]) } else if value.Valid { - gr.MemberCount = int(value.Int64) + _m.MemberCount = int(value.Int64) } default: - gr.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -155,63 +155,63 @@ func (gr *Group) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the Group. // This includes values selected through modifiers, order, etc. 
-func (gr *Group) Value(name string) (ent.Value, error) { - return gr.selectValues.Get(name) +func (_m *Group) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryGroupMemberships queries the "group_memberships" edge of the Group entity. -func (gr *Group) QueryGroupMemberships() *GroupMembershipQuery { - return NewGroupClient(gr.config).QueryGroupMemberships(gr) +func (_m *Group) QueryGroupMemberships() *GroupMembershipQuery { + return NewGroupClient(_m.config).QueryGroupMemberships(_m) } // QueryOrganization queries the "organization" edge of the Group entity. -func (gr *Group) QueryOrganization() *OrganizationQuery { - return NewGroupClient(gr.config).QueryOrganization(gr) +func (_m *Group) QueryOrganization() *OrganizationQuery { + return NewGroupClient(_m.config).QueryOrganization(_m) } // Update returns a builder for updating this Group. // Note that you need to call Group.Unwrap() before calling this method if this Group // was returned from a transaction, and the transaction was committed or rolled back. -func (gr *Group) Update() *GroupUpdateOne { - return NewGroupClient(gr.config).UpdateOne(gr) +func (_m *Group) Update() *GroupUpdateOne { + return NewGroupClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the Group entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (gr *Group) Unwrap() *Group { - _tx, ok := gr.config.driver.(*txDriver) +func (_m *Group) Unwrap() *Group { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: Group is not a transactional entity") } - gr.config.driver = _tx.drv - return gr + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (gr *Group) String() string { +func (_m *Group) String() string { var builder strings.Builder builder.WriteString("Group(") - builder.WriteString(fmt.Sprintf("id=%v, ", gr.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("name=") - builder.WriteString(gr.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("description=") - builder.WriteString(gr.Description) + builder.WriteString(_m.Description) builder.WriteString(", ") builder.WriteString("organization_id=") - builder.WriteString(fmt.Sprintf("%v", gr.OrganizationID)) + builder.WriteString(fmt.Sprintf("%v", _m.OrganizationID)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(gr.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(gr.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(gr.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("member_count=") - builder.WriteString(fmt.Sprintf("%v", gr.MemberCount)) + builder.WriteString(fmt.Sprintf("%v", _m.MemberCount)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/group_create.go b/app/controlplane/pkg/data/ent/group_create.go index f0d0d1647..dca4f1fc0 100644 --- a/app/controlplane/pkg/data/ent/group_create.go +++ b/app/controlplane/pkg/data/ent/group_create.go @@ -27,135 +27,135 @@ type GroupCreate struct { } // SetName sets the "name" field. 
-func (gc *GroupCreate) SetName(s string) *GroupCreate { - gc.mutation.SetName(s) - return gc +func (_c *GroupCreate) SetName(v string) *GroupCreate { + _c.mutation.SetName(v) + return _c } // SetDescription sets the "description" field. -func (gc *GroupCreate) SetDescription(s string) *GroupCreate { - gc.mutation.SetDescription(s) - return gc +func (_c *GroupCreate) SetDescription(v string) *GroupCreate { + _c.mutation.SetDescription(v) + return _c } // SetNillableDescription sets the "description" field if the given value is not nil. -func (gc *GroupCreate) SetNillableDescription(s *string) *GroupCreate { - if s != nil { - gc.SetDescription(*s) +func (_c *GroupCreate) SetNillableDescription(v *string) *GroupCreate { + if v != nil { + _c.SetDescription(*v) } - return gc + return _c } // SetOrganizationID sets the "organization_id" field. -func (gc *GroupCreate) SetOrganizationID(u uuid.UUID) *GroupCreate { - gc.mutation.SetOrganizationID(u) - return gc +func (_c *GroupCreate) SetOrganizationID(v uuid.UUID) *GroupCreate { + _c.mutation.SetOrganizationID(v) + return _c } // SetCreatedAt sets the "created_at" field. -func (gc *GroupCreate) SetCreatedAt(t time.Time) *GroupCreate { - gc.mutation.SetCreatedAt(t) - return gc +func (_c *GroupCreate) SetCreatedAt(v time.Time) *GroupCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (gc *GroupCreate) SetNillableCreatedAt(t *time.Time) *GroupCreate { - if t != nil { - gc.SetCreatedAt(*t) +func (_c *GroupCreate) SetNillableCreatedAt(v *time.Time) *GroupCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return gc + return _c } // SetUpdatedAt sets the "updated_at" field. -func (gc *GroupCreate) SetUpdatedAt(t time.Time) *GroupCreate { - gc.mutation.SetUpdatedAt(t) - return gc +func (_c *GroupCreate) SetUpdatedAt(v time.Time) *GroupCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (gc *GroupCreate) SetNillableUpdatedAt(t *time.Time) *GroupCreate { - if t != nil { - gc.SetUpdatedAt(*t) +func (_c *GroupCreate) SetNillableUpdatedAt(v *time.Time) *GroupCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return gc + return _c } // SetDeletedAt sets the "deleted_at" field. -func (gc *GroupCreate) SetDeletedAt(t time.Time) *GroupCreate { - gc.mutation.SetDeletedAt(t) - return gc +func (_c *GroupCreate) SetDeletedAt(v time.Time) *GroupCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (gc *GroupCreate) SetNillableDeletedAt(t *time.Time) *GroupCreate { - if t != nil { - gc.SetDeletedAt(*t) +func (_c *GroupCreate) SetNillableDeletedAt(v *time.Time) *GroupCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return gc + return _c } // SetMemberCount sets the "member_count" field. -func (gc *GroupCreate) SetMemberCount(i int) *GroupCreate { - gc.mutation.SetMemberCount(i) - return gc +func (_c *GroupCreate) SetMemberCount(v int) *GroupCreate { + _c.mutation.SetMemberCount(v) + return _c } // SetNillableMemberCount sets the "member_count" field if the given value is not nil. -func (gc *GroupCreate) SetNillableMemberCount(i *int) *GroupCreate { - if i != nil { - gc.SetMemberCount(*i) +func (_c *GroupCreate) SetNillableMemberCount(v *int) *GroupCreate { + if v != nil { + _c.SetMemberCount(*v) } - return gc + return _c } // SetID sets the "id" field. 
-func (gc *GroupCreate) SetID(u uuid.UUID) *GroupCreate { - gc.mutation.SetID(u) - return gc +func (_c *GroupCreate) SetID(v uuid.UUID) *GroupCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (gc *GroupCreate) SetNillableID(u *uuid.UUID) *GroupCreate { - if u != nil { - gc.SetID(*u) +func (_c *GroupCreate) SetNillableID(v *uuid.UUID) *GroupCreate { + if v != nil { + _c.SetID(*v) } - return gc + return _c } // AddGroupMembershipIDs adds the "group_memberships" edge to the GroupMembership entity by IDs. -func (gc *GroupCreate) AddGroupMembershipIDs(ids ...uuid.UUID) *GroupCreate { - gc.mutation.AddGroupMembershipIDs(ids...) - return gc +func (_c *GroupCreate) AddGroupMembershipIDs(ids ...uuid.UUID) *GroupCreate { + _c.mutation.AddGroupMembershipIDs(ids...) + return _c } // AddGroupMemberships adds the "group_memberships" edges to the GroupMembership entity. -func (gc *GroupCreate) AddGroupMemberships(g ...*GroupMembership) *GroupCreate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_c *GroupCreate) AddGroupMemberships(v ...*GroupMembership) *GroupCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return gc.AddGroupMembershipIDs(ids...) + return _c.AddGroupMembershipIDs(ids...) } // SetOrganization sets the "organization" edge to the Organization entity. -func (gc *GroupCreate) SetOrganization(o *Organization) *GroupCreate { - return gc.SetOrganizationID(o.ID) +func (_c *GroupCreate) SetOrganization(v *Organization) *GroupCreate { + return _c.SetOrganizationID(v.ID) } // Mutation returns the GroupMutation object of the builder. -func (gc *GroupCreate) Mutation() *GroupMutation { - return gc.mutation +func (_c *GroupCreate) Mutation() *GroupMutation { + return _c.mutation } // Save creates the Group in the database. -func (gc *GroupCreate) Save(ctx context.Context) (*Group, error) { - gc.defaults() - return withHooks(ctx, gc.sqlSave, gc.mutation, gc.hooks) +func (_c *GroupCreate) Save(ctx context.Context) (*Group, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (gc *GroupCreate) SaveX(ctx context.Context) *Group { - v, err := gc.Save(ctx) +func (_c *GroupCreate) SaveX(ctx context.Context) *Group { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -163,72 +163,72 @@ func (gc *GroupCreate) SaveX(ctx context.Context) *Group { } // Exec executes the query. -func (gc *GroupCreate) Exec(ctx context.Context) error { - _, err := gc.Save(ctx) +func (_c *GroupCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (gc *GroupCreate) ExecX(ctx context.Context) { - if err := gc.Exec(ctx); err != nil { +func (_c *GroupCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. 
-func (gc *GroupCreate) defaults() { - if _, ok := gc.mutation.CreatedAt(); !ok { +func (_c *GroupCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := group.DefaultCreatedAt() - gc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := gc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := group.DefaultUpdatedAt() - gc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := gc.mutation.MemberCount(); !ok { + if _, ok := _c.mutation.MemberCount(); !ok { v := group.DefaultMemberCount - gc.mutation.SetMemberCount(v) + _c.mutation.SetMemberCount(v) } - if _, ok := gc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := group.DefaultID() - gc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. -func (gc *GroupCreate) check() error { - if _, ok := gc.mutation.Name(); !ok { +func (_c *GroupCreate) check() error { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Group.name"`)} } - if v, ok := gc.mutation.Name(); ok { + if v, ok := _c.mutation.Name(); ok { if err := group.NameValidator(v); err != nil { return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} } } - if _, ok := gc.mutation.OrganizationID(); !ok { + if _, ok := _c.mutation.OrganizationID(); !ok { return &ValidationError{Name: "organization_id", err: errors.New(`ent: missing required field "Group.organization_id"`)} } - if _, ok := gc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Group.created_at"`)} } - if _, ok := gc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Group.updated_at"`)} } - if _, ok := gc.mutation.MemberCount(); !ok { + if _, ok := _c.mutation.MemberCount(); !ok { return &ValidationError{Name: "member_count", err: errors.New(`ent: missing required field "Group.member_count"`)} } - if len(gc.mutation.OrganizationIDs()) == 0 { + if len(_c.mutation.OrganizationIDs()) == 0 { return &ValidationError{Name: "organization", err: errors.New(`ent: missing required edge "Group.organization"`)} } return nil } -func (gc *GroupCreate) sqlSave(ctx context.Context) (*Group, error) { - if err := gc.check(); err != nil { +func (_c *GroupCreate) sqlSave(ctx context.Context) (*Group, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := gc.createSpec() - if err := sqlgraph.CreateNode(ctx, gc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -241,46 +241,46 @@ func (gc *GroupCreate) sqlSave(ctx context.Context) (*Group, error) { return nil, err } } - gc.mutation.id = &_node.ID - gc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { +func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { var ( - _node = &Group{config: gc.config} + _node = &Group{config: _c.config} _spec = sqlgraph.NewCreateSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID)) ) - _spec.OnConflict = gc.conflict - if id, ok := 
gc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := gc.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { _spec.SetField(group.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := gc.mutation.Description(); ok { + if value, ok := _c.mutation.Description(); ok { _spec.SetField(group.FieldDescription, field.TypeString, value) _node.Description = value } - if value, ok := gc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(group.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := gc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := gc.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if value, ok := gc.mutation.MemberCount(); ok { + if value, ok := _c.mutation.MemberCount(); ok { _spec.SetField(group.FieldMemberCount, field.TypeInt, value) _node.MemberCount = value } - if nodes := gc.mutation.GroupMembershipsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.GroupMembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -296,7 +296,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := gc.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -332,10 +332,10 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { // SetName(v+v). // }). // Exec(ctx) -func (gc *GroupCreate) OnConflict(opts ...sql.ConflictOption) *GroupUpsertOne { - gc.conflict = opts +func (_c *GroupCreate) OnConflict(opts ...sql.ConflictOption) *GroupUpsertOne { + _c.conflict = opts return &GroupUpsertOne{ - create: gc, + create: _c, } } @@ -345,10 +345,10 @@ func (gc *GroupCreate) OnConflict(opts ...sql.ConflictOption) *GroupUpsertOne { // client.Group.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (gc *GroupCreate) OnConflictColumns(columns ...string) *GroupUpsertOne { - gc.conflict = append(gc.conflict, sql.ConflictColumns(columns...)) +func (_c *GroupCreate) OnConflictColumns(columns ...string) *GroupUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &GroupUpsertOne{ - create: gc, + create: _c, } } @@ -658,16 +658,16 @@ type GroupCreateBulk struct { } // Save creates the Group entities in the database. 
-func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { - if gcb.err != nil { - return nil, gcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(gcb.builders)) - nodes := make([]*Group, len(gcb.builders)) - mutators := make([]Mutator, len(gcb.builders)) - for i := range gcb.builders { +func (_c *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Group, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := gcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*GroupMutation) @@ -681,12 +681,12 @@ func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, gcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = gcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, gcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -706,7 +706,7 @@ func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, gcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -714,8 +714,8 @@ func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { } // SaveX is like Save, but panics if an error occurs. -func (gcb *GroupCreateBulk) SaveX(ctx context.Context) []*Group { - v, err := gcb.Save(ctx) +func (_c *GroupCreateBulk) SaveX(ctx context.Context) []*Group { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -723,14 +723,14 @@ func (gcb *GroupCreateBulk) SaveX(ctx context.Context) []*Group { } // Exec executes the query. -func (gcb *GroupCreateBulk) Exec(ctx context.Context) error { - _, err := gcb.Save(ctx) +func (_c *GroupCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (gcb *GroupCreateBulk) ExecX(ctx context.Context) { - if err := gcb.Exec(ctx); err != nil { +func (_c *GroupCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -750,10 +750,10 @@ func (gcb *GroupCreateBulk) ExecX(ctx context.Context) { // SetName(v+v). // }). // Exec(ctx) -func (gcb *GroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *GroupUpsertBulk { - gcb.conflict = opts +func (_c *GroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *GroupUpsertBulk { + _c.conflict = opts return &GroupUpsertBulk{ - create: gcb, + create: _c, } } @@ -763,10 +763,10 @@ func (gcb *GroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *GroupUpsertB // client.Group.Create(). // OnConflict(sql.ConflictColumns(columns...)). 
 //		Exec(ctx)
-func (gcb *GroupCreateBulk) OnConflictColumns(columns ...string) *GroupUpsertBulk {
-	gcb.conflict = append(gcb.conflict, sql.ConflictColumns(columns...))
+func (_c *GroupCreateBulk) OnConflictColumns(columns ...string) *GroupUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
 	return &GroupUpsertBulk{
-		create: gcb,
+		create: _c,
 	}
 }
diff --git a/app/controlplane/pkg/data/ent/group_delete.go b/app/controlplane/pkg/data/ent/group_delete.go
index 67f4594b4..d0c1ea5e9 100644
--- a/app/controlplane/pkg/data/ent/group_delete.go
+++ b/app/controlplane/pkg/data/ent/group_delete.go
@@ -20,56 +20,56 @@ type GroupDelete struct {
 }

 // Where appends a list predicates to the GroupDelete builder.
-func (gd *GroupDelete) Where(ps ...predicate.Group) *GroupDelete {
-	gd.mutation.Where(ps...)
-	return gd
+func (_d *GroupDelete) Where(ps ...predicate.Group) *GroupDelete {
+	_d.mutation.Where(ps...)
+	return _d
 }

 // Exec executes the deletion query and returns how many vertices were deleted.
-func (gd *GroupDelete) Exec(ctx context.Context) (int, error) {
-	return withHooks(ctx, gd.sqlExec, gd.mutation, gd.hooks)
+func (_d *GroupDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
 }

 // ExecX is like Exec, but panics if an error occurs.
-func (gd *GroupDelete) ExecX(ctx context.Context) int {
-	n, err := gd.Exec(ctx)
+func (_d *GroupDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
 	if err != nil {
 		panic(err)
 	}
 	return n
 }

-func (gd *GroupDelete) sqlExec(ctx context.Context) (int, error) {
+func (_d *GroupDelete) sqlExec(ctx context.Context) (int, error) {
 	_spec := sqlgraph.NewDeleteSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID))
-	if ps := gd.mutation.predicates; len(ps) > 0 {
+	if ps := _d.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {
 				ps[i](selector)
 			}
 		}
 	}
-	affected, err := sqlgraph.DeleteNodes(ctx, gd.driver, _spec)
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
 	if err != nil && sqlgraph.IsConstraintError(err) {
 		err = &ConstraintError{msg: err.Error(), wrap: err}
 	}
-	gd.mutation.done = true
+	_d.mutation.done = true
 	return affected, err
 }

 // GroupDeleteOne is the builder for deleting a single Group entity.
 type GroupDeleteOne struct {
-	gd *GroupDelete
+	_d *GroupDelete
 }

 // Where appends a list predicates to the GroupDelete builder.
-func (gdo *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne {
-	gdo.gd.mutation.Where(ps...)
-	return gdo
+func (_d *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne {
+	_d._d.mutation.Where(ps...)
+	return _d
 }

 // Exec executes the deletion query.
-func (gdo *GroupDeleteOne) Exec(ctx context.Context) error {
-	n, err := gdo.gd.Exec(ctx)
+func (_d *GroupDeleteOne) Exec(ctx context.Context) error {
+	n, err := _d._d.Exec(ctx)
 	switch {
 	case err != nil:
 		return err
@@ -81,8 +81,8 @@ func (gdo *GroupDeleteOne) Exec(ctx context.Context) error {
 }

 // ExecX is like Exec, but panics if an error occurs.
-func (gdo *GroupDeleteOne) ExecX(ctx context.Context) {
-	if err := gdo.Exec(ctx); err != nil {
+func (_d *GroupDeleteOne) ExecX(ctx context.Context) {
+	if err := _d.Exec(ctx); err != nil {
 		panic(err)
 	}
 }
diff --git a/app/controlplane/pkg/data/ent/group_query.go b/app/controlplane/pkg/data/ent/group_query.go
index 968116fb2..2d9098f57 100644
--- a/app/controlplane/pkg/data/ent/group_query.go
+++ b/app/controlplane/pkg/data/ent/group_query.go
@@ -36,44 +36,44 @@ type GroupQuery struct {
 }

 // Where adds a new predicate for the GroupQuery builder.
-func (gq *GroupQuery) Where(ps ...predicate.Group) *GroupQuery {
-	gq.predicates = append(gq.predicates, ps...)
-	return gq
+func (_q *GroupQuery) Where(ps ...predicate.Group) *GroupQuery {
+	_q.predicates = append(_q.predicates, ps...)
+	return _q
 }

 // Limit the number of records to be returned by this query.
-func (gq *GroupQuery) Limit(limit int) *GroupQuery {
-	gq.ctx.Limit = &limit
-	return gq
+func (_q *GroupQuery) Limit(limit int) *GroupQuery {
+	_q.ctx.Limit = &limit
+	return _q
 }

 // Offset to start from.
-func (gq *GroupQuery) Offset(offset int) *GroupQuery {
-	gq.ctx.Offset = &offset
-	return gq
+func (_q *GroupQuery) Offset(offset int) *GroupQuery {
+	_q.ctx.Offset = &offset
+	return _q
 }

 // Unique configures the query builder to filter duplicate records on query.
 // By default, unique is set to true, and can be disabled using this method.
-func (gq *GroupQuery) Unique(unique bool) *GroupQuery {
-	gq.ctx.Unique = &unique
-	return gq
+func (_q *GroupQuery) Unique(unique bool) *GroupQuery {
+	_q.ctx.Unique = &unique
+	return _q
 }

 // Order specifies how the records should be ordered.
-func (gq *GroupQuery) Order(o ...group.OrderOption) *GroupQuery {
-	gq.order = append(gq.order, o...)
-	return gq
+func (_q *GroupQuery) Order(o ...group.OrderOption) *GroupQuery {
+	_q.order = append(_q.order, o...)
+	return _q
 }

 // QueryGroupMemberships chains the current query on the "group_memberships" edge.
-func (gq *GroupQuery) QueryGroupMemberships() *GroupMembershipQuery {
-	query := (&GroupMembershipClient{config: gq.config}).Query()
+func (_q *GroupQuery) QueryGroupMemberships() *GroupMembershipQuery {
+	query := (&GroupMembershipClient{config: _q.config}).Query()
 	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
-		if err := gq.prepareQuery(ctx); err != nil {
+		if err := _q.prepareQuery(ctx); err != nil {
 			return nil, err
 		}
-		selector := gq.sqlQuery(ctx)
+		selector := _q.sqlQuery(ctx)
 		if err := selector.Err(); err != nil {
 			return nil, err
 		}
@@ -82,20 +82,20 @@ func (gq *GroupQuery) QueryGroupMemberships() *GroupMembershipQuery {
 			sqlgraph.To(groupmembership.Table, groupmembership.FieldID),
 			sqlgraph.Edge(sqlgraph.O2M, true, group.GroupMembershipsTable, group.GroupMembershipsColumn),
 		)
-		fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step)
+		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
 		return fromU, nil
 	}
 	return query
 }

 // QueryOrganization chains the current query on the "organization" edge.
-func (gq *GroupQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: gq.config}).Query() +func (_q *GroupQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := gq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := gq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -104,7 +104,7 @@ func (gq *GroupQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, group.OrganizationTable, group.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -112,8 +112,8 @@ func (gq *GroupQuery) QueryOrganization() *OrganizationQuery { // First returns the first Group entity from the query. // Returns a *NotFoundError when no Group was found. -func (gq *GroupQuery) First(ctx context.Context) (*Group, error) { - nodes, err := gq.Limit(1).All(setContextOp(ctx, gq.ctx, ent.OpQueryFirst)) +func (_q *GroupQuery) First(ctx context.Context) (*Group, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -124,8 +124,8 @@ func (gq *GroupQuery) First(ctx context.Context) (*Group, error) { } // FirstX is like First, but panics if an error occurs. -func (gq *GroupQuery) FirstX(ctx context.Context) *Group { - node, err := gq.First(ctx) +func (_q *GroupQuery) FirstX(ctx context.Context) *Group { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -134,9 +134,9 @@ func (gq *GroupQuery) FirstX(ctx context.Context) *Group { // FirstID returns the first Group ID from the query. // Returns a *NotFoundError when no Group ID was found. -func (gq *GroupQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *GroupQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = gq.Limit(1).IDs(setContextOp(ctx, gq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -147,8 +147,8 @@ func (gq *GroupQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { } // FirstIDX is like FirstID, but panics if an error occurs. -func (gq *GroupQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := gq.FirstID(ctx) +func (_q *GroupQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -158,8 +158,8 @@ func (gq *GroupQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single Group entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one Group entity is found. // Returns a *NotFoundError when no Group entities are found. 
-func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) { - nodes, err := gq.Limit(2).All(setContextOp(ctx, gq.ctx, ent.OpQueryOnly)) +func (_q *GroupQuery) Only(ctx context.Context) (*Group, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -174,8 +174,8 @@ func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) { } // OnlyX is like Only, but panics if an error occurs. -func (gq *GroupQuery) OnlyX(ctx context.Context) *Group { - node, err := gq.Only(ctx) +func (_q *GroupQuery) OnlyX(ctx context.Context) *Group { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -185,9 +185,9 @@ func (gq *GroupQuery) OnlyX(ctx context.Context) *Group { // OnlyID is like Only, but returns the only Group ID in the query. // Returns a *NotSingularError when more than one Group ID is found. // Returns a *NotFoundError when no entities are found. -func (gq *GroupQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *GroupQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = gq.Limit(2).IDs(setContextOp(ctx, gq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -202,8 +202,8 @@ func (gq *GroupQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (gq *GroupQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := gq.OnlyID(ctx) +func (_q *GroupQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -211,18 +211,18 @@ func (gq *GroupQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Groups. -func (gq *GroupQuery) All(ctx context.Context) ([]*Group, error) { - ctx = setContextOp(ctx, gq.ctx, ent.OpQueryAll) - if err := gq.prepareQuery(ctx); err != nil { +func (_q *GroupQuery) All(ctx context.Context) ([]*Group, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*Group, *GroupQuery]() - return withInterceptors[[]*Group](ctx, gq, qr, gq.inters) + return withInterceptors[[]*Group](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (gq *GroupQuery) AllX(ctx context.Context) []*Group { - nodes, err := gq.All(ctx) +func (_q *GroupQuery) AllX(ctx context.Context) []*Group { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -230,20 +230,20 @@ func (gq *GroupQuery) AllX(ctx context.Context) []*Group { } // IDs executes the query and returns a list of Group IDs. -func (gq *GroupQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if gq.ctx.Unique == nil && gq.path != nil { - gq.Unique(true) +func (_q *GroupQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, gq.ctx, ent.OpQueryIDs) - if err = gq.Select(group.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(group.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. 
-func (gq *GroupQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := gq.IDs(ctx) +func (_q *GroupQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -251,17 +251,17 @@ func (gq *GroupQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (gq *GroupQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, gq.ctx, ent.OpQueryCount) - if err := gq.prepareQuery(ctx); err != nil { +func (_q *GroupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, gq, querierCount[*GroupQuery](), gq.inters) + return withInterceptors[int](ctx, _q, querierCount[*GroupQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (gq *GroupQuery) CountX(ctx context.Context) int { - count, err := gq.Count(ctx) +func (_q *GroupQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -269,9 +269,9 @@ func (gq *GroupQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, gq.ctx, ent.OpQueryExist) - switch _, err := gq.FirstID(ctx); { +func (_q *GroupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -282,8 +282,8 @@ func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (gq *GroupQuery) ExistX(ctx context.Context) bool { - exist, err := gq.Exist(ctx) +func (_q *GroupQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -292,45 +292,45 @@ func (gq *GroupQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the GroupQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (gq *GroupQuery) Clone() *GroupQuery { - if gq == nil { +func (_q *GroupQuery) Clone() *GroupQuery { + if _q == nil { return nil } return &GroupQuery{ - config: gq.config, - ctx: gq.ctx.Clone(), - order: append([]group.OrderOption{}, gq.order...), - inters: append([]Interceptor{}, gq.inters...), - predicates: append([]predicate.Group{}, gq.predicates...), - withGroupMemberships: gq.withGroupMemberships.Clone(), - withOrganization: gq.withOrganization.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]group.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Group{}, _q.predicates...), + withGroupMemberships: _q.withGroupMemberships.Clone(), + withOrganization: _q.withOrganization.Clone(), // clone intermediate query. - sql: gq.sql.Clone(), - path: gq.path, - modifiers: append([]func(*sql.Selector){}, gq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithGroupMemberships tells the query-builder to eager-load the nodes that are connected to // the "group_memberships" edge. The optional arguments are used to configure the query builder of the edge. 
-func (gq *GroupQuery) WithGroupMemberships(opts ...func(*GroupMembershipQuery)) *GroupQuery { - query := (&GroupMembershipClient{config: gq.config}).Query() +func (_q *GroupQuery) WithGroupMemberships(opts ...func(*GroupMembershipQuery)) *GroupQuery { + query := (&GroupMembershipClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - gq.withGroupMemberships = query - return gq + _q.withGroupMemberships = query + return _q } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (gq *GroupQuery) WithOrganization(opts ...func(*OrganizationQuery)) *GroupQuery { - query := (&OrganizationClient{config: gq.config}).Query() +func (_q *GroupQuery) WithOrganization(opts ...func(*OrganizationQuery)) *GroupQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - gq.withOrganization = query - return gq + _q.withOrganization = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -347,10 +347,10 @@ func (gq *GroupQuery) WithOrganization(opts ...func(*OrganizationQuery)) *GroupQ // GroupBy(group.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy { - gq.ctx.Fields = append([]string{field}, fields...) - grbuild := &GroupGroupBy{build: gq} - grbuild.flds = &gq.ctx.Fields +func (_q *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &GroupGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = group.Label grbuild.scan = grbuild.Scan return grbuild @@ -368,84 +368,84 @@ func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy { // client.Group.Query(). // Select(group.FieldName). // Scan(ctx, &v) -func (gq *GroupQuery) Select(fields ...string) *GroupSelect { - gq.ctx.Fields = append(gq.ctx.Fields, fields...) - sbuild := &GroupSelect{GroupQuery: gq} +func (_q *GroupQuery) Select(fields ...string) *GroupSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &GroupSelect{GroupQuery: _q} sbuild.label = group.Label - sbuild.flds, sbuild.scan = &gq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a GroupSelect configured with the given aggregations. -func (gq *GroupQuery) Aggregate(fns ...AggregateFunc) *GroupSelect { - return gq.Select().Aggregate(fns...) +func (_q *GroupQuery) Aggregate(fns ...AggregateFunc) *GroupSelect { + return _q.Select().Aggregate(fns...) 
} -func (gq *GroupQuery) prepareQuery(ctx context.Context) error { - for _, inter := range gq.inters { +func (_q *GroupQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, gq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range gq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !group.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if gq.path != nil { - prev, err := gq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - gq.sql = prev + _q.sql = prev } return nil } -func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, error) { +func (_q *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, error) { var ( nodes = []*Group{} - _spec = gq.querySpec() + _spec = _q.querySpec() loadedTypes = [2]bool{ - gq.withGroupMemberships != nil, - gq.withOrganization != nil, + _q.withGroupMemberships != nil, + _q.withOrganization != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*Group).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &Group{config: gq.config} + node := &Group{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(gq.modifiers) > 0 { - _spec.Modifiers = gq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, gq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := gq.withGroupMemberships; query != nil { - if err := gq.loadGroupMemberships(ctx, query, nodes, + if query := _q.withGroupMemberships; query != nil { + if err := _q.loadGroupMemberships(ctx, query, nodes, func(n *Group) { n.Edges.GroupMemberships = []*GroupMembership{} }, func(n *Group, e *GroupMembership) { n.Edges.GroupMemberships = append(n.Edges.GroupMemberships, e) }); err != nil { return nil, err } } - if query := gq.withOrganization; query != nil { - if err := gq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *Group, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } @@ -453,7 +453,7 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, return nodes, nil } -func (gq *GroupQuery) loadGroupMemberships(ctx context.Context, query *GroupMembershipQuery, nodes []*Group, init func(*Group), assign func(*Group, *GroupMembership)) error { +func (_q *GroupQuery) loadGroupMemberships(ctx context.Context, query *GroupMembershipQuery, nodes []*Group, init func(*Group), assign func(*Group, *GroupMembership)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Group) for i := range nodes { @@ -483,7 +483,7 @@ func (gq *GroupQuery) loadGroupMemberships(ctx context.Context, query *GroupMemb } return nil } -func (gq *GroupQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Group, init func(*Group), assign func(*Group, *Organization)) error { +func (_q 
*GroupQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Group, init func(*Group), assign func(*Group, *Organization)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Group) for i := range nodes { @@ -513,27 +513,27 @@ func (gq *GroupQuery) loadOrganization(ctx context.Context, query *OrganizationQ return nil } -func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) { - _spec := gq.querySpec() - if len(gq.modifiers) > 0 { - _spec.Modifiers = gq.modifiers +func (_q *GroupQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = gq.ctx.Fields - if len(gq.ctx.Fields) > 0 { - _spec.Unique = gq.ctx.Unique != nil && *gq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, gq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *GroupQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID)) - _spec.From = gq.sql - if unique := gq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if gq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := gq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, group.FieldID) for i := range fields { @@ -541,24 +541,24 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if gq.withOrganization != nil { + if _q.withOrganization != nil { _spec.Node.AddColumnOnce(group.FieldOrganizationID) } } - if ps := gq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := gq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := gq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := gq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -568,36 +568,36 @@ func (gq *GroupQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(gq.driver.Dialect()) +func (_q *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(group.Table) - columns := gq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = group.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if gq.sql != nil { - selector = gq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) 
} - if gq.ctx.Unique != nil && *gq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range gq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range gq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range gq.order { + for _, p := range _q.order { p(selector) } - if offset := gq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := gq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -606,33 +606,33 @@ func (gq *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (gq *GroupQuery) ForUpdate(opts ...sql.LockOption) *GroupQuery { - if gq.driver.Dialect() == dialect.Postgres { - gq.Unique(false) +func (_q *GroupQuery) ForUpdate(opts ...sql.LockOption) *GroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - gq.modifiers = append(gq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return gq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (gq *GroupQuery) ForShare(opts ...sql.LockOption) *GroupQuery { - if gq.driver.Dialect() == dialect.Postgres { - gq.Unique(false) +func (_q *GroupQuery) ForShare(opts ...sql.LockOption) *GroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - gq.modifiers = append(gq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return gq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (gq *GroupQuery) Modify(modifiers ...func(s *sql.Selector)) *GroupSelect { - gq.modifiers = append(gq.modifiers, modifiers...) - return gq.Select() +func (_q *GroupQuery) Modify(modifiers ...func(s *sql.Selector)) *GroupSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // GroupGroupBy is the group-by builder for Group entities. @@ -642,41 +642,41 @@ type GroupGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (ggb *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy { - ggb.fns = append(ggb.fns, fns...) - return ggb +func (_g *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (ggb *GroupGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ggb.build.ctx, ent.OpQueryGroupBy) - if err := ggb.build.prepareQuery(ctx); err != nil { +func (_g *GroupGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*GroupQuery, *GroupGroupBy](ctx, ggb.build, ggb, ggb.build.inters, v) + return scanWithInterceptors[*GroupQuery, *GroupGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (ggb *GroupGroupBy) sqlScan(ctx context.Context, root *GroupQuery, v any) error { +func (_g *GroupGroupBy) sqlScan(ctx context.Context, root *GroupQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(ggb.fns)) - for _, fn := range ggb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*ggb.flds)+len(ggb.fns)) - for _, f := range *ggb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*ggb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := ggb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -690,27 +690,27 @@ type GroupSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (gs *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect { - gs.fns = append(gs.fns, fns...) - return gs +func (_s *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (gs *GroupSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, gs.ctx, ent.OpQuerySelect) - if err := gs.prepareQuery(ctx); err != nil { +func (_s *GroupSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*GroupQuery, *GroupSelect](ctx, gs.GroupQuery, gs, gs.inters, v) + return scanWithInterceptors[*GroupQuery, *GroupSelect](ctx, _s.GroupQuery, _s, _s.inters, v) } -func (gs *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) error { +func (_s *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(gs.fns)) - for _, fn := range gs.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*gs.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -718,7 +718,7 @@ func (gs *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) err } rows := &sql.Rows{} query, args := selector.Query() - if err := gs.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -726,7 +726,7 @@ func (gs *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) err } // Modify adds a query modifier for attaching custom logic to queries. -func (gs *GroupSelect) Modify(modifiers ...func(s *sql.Selector)) *GroupSelect { - gs.modifiers = append(gs.modifiers, modifiers...) - return gs +func (_s *GroupSelect) Modify(modifiers ...func(s *sql.Selector)) *GroupSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/group_update.go b/app/controlplane/pkg/data/ent/group_update.go index 99f088c27..f110d0f24 100644 --- a/app/controlplane/pkg/data/ent/group_update.go +++ b/app/controlplane/pkg/data/ent/group_update.go @@ -27,174 +27,174 @@ type GroupUpdate struct { } // Where appends a list predicates to the GroupUpdate builder. -func (gu *GroupUpdate) Where(ps ...predicate.Group) *GroupUpdate { - gu.mutation.Where(ps...) - return gu +func (_u *GroupUpdate) Where(ps ...predicate.Group) *GroupUpdate { + _u.mutation.Where(ps...) + return _u } // SetName sets the "name" field. -func (gu *GroupUpdate) SetName(s string) *GroupUpdate { - gu.mutation.SetName(s) - return gu +func (_u *GroupUpdate) SetName(v string) *GroupUpdate { + _u.mutation.SetName(v) + return _u } // SetNillableName sets the "name" field if the given value is not nil. -func (gu *GroupUpdate) SetNillableName(s *string) *GroupUpdate { - if s != nil { - gu.SetName(*s) +func (_u *GroupUpdate) SetNillableName(v *string) *GroupUpdate { + if v != nil { + _u.SetName(*v) } - return gu + return _u } // SetDescription sets the "description" field. -func (gu *GroupUpdate) SetDescription(s string) *GroupUpdate { - gu.mutation.SetDescription(s) - return gu +func (_u *GroupUpdate) SetDescription(v string) *GroupUpdate { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (gu *GroupUpdate) SetNillableDescription(s *string) *GroupUpdate { - if s != nil { - gu.SetDescription(*s) +func (_u *GroupUpdate) SetNillableDescription(v *string) *GroupUpdate { + if v != nil { + _u.SetDescription(*v) } - return gu + return _u } // ClearDescription clears the value of the "description" field. -func (gu *GroupUpdate) ClearDescription() *GroupUpdate { - gu.mutation.ClearDescription() - return gu +func (_u *GroupUpdate) ClearDescription() *GroupUpdate { + _u.mutation.ClearDescription() + return _u } // SetOrganizationID sets the "organization_id" field. -func (gu *GroupUpdate) SetOrganizationID(u uuid.UUID) *GroupUpdate { - gu.mutation.SetOrganizationID(u) - return gu +func (_u *GroupUpdate) SetOrganizationID(v uuid.UUID) *GroupUpdate { + _u.mutation.SetOrganizationID(v) + return _u } // SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. -func (gu *GroupUpdate) SetNillableOrganizationID(u *uuid.UUID) *GroupUpdate { - if u != nil { - gu.SetOrganizationID(*u) +func (_u *GroupUpdate) SetNillableOrganizationID(v *uuid.UUID) *GroupUpdate { + if v != nil { + _u.SetOrganizationID(*v) } - return gu + return _u } // SetUpdatedAt sets the "updated_at" field. 
-func (gu *GroupUpdate) SetUpdatedAt(t time.Time) *GroupUpdate { - gu.mutation.SetUpdatedAt(t) - return gu +func (_u *GroupUpdate) SetUpdatedAt(v time.Time) *GroupUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (gu *GroupUpdate) SetNillableUpdatedAt(t *time.Time) *GroupUpdate { - if t != nil { - gu.SetUpdatedAt(*t) +func (_u *GroupUpdate) SetNillableUpdatedAt(v *time.Time) *GroupUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return gu + return _u } // SetDeletedAt sets the "deleted_at" field. -func (gu *GroupUpdate) SetDeletedAt(t time.Time) *GroupUpdate { - gu.mutation.SetDeletedAt(t) - return gu +func (_u *GroupUpdate) SetDeletedAt(v time.Time) *GroupUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (gu *GroupUpdate) SetNillableDeletedAt(t *time.Time) *GroupUpdate { - if t != nil { - gu.SetDeletedAt(*t) +func (_u *GroupUpdate) SetNillableDeletedAt(v *time.Time) *GroupUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return gu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (gu *GroupUpdate) ClearDeletedAt() *GroupUpdate { - gu.mutation.ClearDeletedAt() - return gu +func (_u *GroupUpdate) ClearDeletedAt() *GroupUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetMemberCount sets the "member_count" field. -func (gu *GroupUpdate) SetMemberCount(i int) *GroupUpdate { - gu.mutation.ResetMemberCount() - gu.mutation.SetMemberCount(i) - return gu +func (_u *GroupUpdate) SetMemberCount(v int) *GroupUpdate { + _u.mutation.ResetMemberCount() + _u.mutation.SetMemberCount(v) + return _u } // SetNillableMemberCount sets the "member_count" field if the given value is not nil. -func (gu *GroupUpdate) SetNillableMemberCount(i *int) *GroupUpdate { - if i != nil { - gu.SetMemberCount(*i) +func (_u *GroupUpdate) SetNillableMemberCount(v *int) *GroupUpdate { + if v != nil { + _u.SetMemberCount(*v) } - return gu + return _u } -// AddMemberCount adds i to the "member_count" field. -func (gu *GroupUpdate) AddMemberCount(i int) *GroupUpdate { - gu.mutation.AddMemberCount(i) - return gu +// AddMemberCount adds value to the "member_count" field. +func (_u *GroupUpdate) AddMemberCount(v int) *GroupUpdate { + _u.mutation.AddMemberCount(v) + return _u } // AddGroupMembershipIDs adds the "group_memberships" edge to the GroupMembership entity by IDs. -func (gu *GroupUpdate) AddGroupMembershipIDs(ids ...uuid.UUID) *GroupUpdate { - gu.mutation.AddGroupMembershipIDs(ids...) - return gu +func (_u *GroupUpdate) AddGroupMembershipIDs(ids ...uuid.UUID) *GroupUpdate { + _u.mutation.AddGroupMembershipIDs(ids...) + return _u } // AddGroupMemberships adds the "group_memberships" edges to the GroupMembership entity. -func (gu *GroupUpdate) AddGroupMemberships(g ...*GroupMembership) *GroupUpdate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *GroupUpdate) AddGroupMemberships(v ...*GroupMembership) *GroupUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return gu.AddGroupMembershipIDs(ids...) + return _u.AddGroupMembershipIDs(ids...) } // SetOrganization sets the "organization" edge to the Organization entity. 
-func (gu *GroupUpdate) SetOrganization(o *Organization) *GroupUpdate { - return gu.SetOrganizationID(o.ID) +func (_u *GroupUpdate) SetOrganization(v *Organization) *GroupUpdate { + return _u.SetOrganizationID(v.ID) } // Mutation returns the GroupMutation object of the builder. -func (gu *GroupUpdate) Mutation() *GroupMutation { - return gu.mutation +func (_u *GroupUpdate) Mutation() *GroupMutation { + return _u.mutation } // ClearGroupMemberships clears all "group_memberships" edges to the GroupMembership entity. -func (gu *GroupUpdate) ClearGroupMemberships() *GroupUpdate { - gu.mutation.ClearGroupMemberships() - return gu +func (_u *GroupUpdate) ClearGroupMemberships() *GroupUpdate { + _u.mutation.ClearGroupMemberships() + return _u } // RemoveGroupMembershipIDs removes the "group_memberships" edge to GroupMembership entities by IDs. -func (gu *GroupUpdate) RemoveGroupMembershipIDs(ids ...uuid.UUID) *GroupUpdate { - gu.mutation.RemoveGroupMembershipIDs(ids...) - return gu +func (_u *GroupUpdate) RemoveGroupMembershipIDs(ids ...uuid.UUID) *GroupUpdate { + _u.mutation.RemoveGroupMembershipIDs(ids...) + return _u } // RemoveGroupMemberships removes "group_memberships" edges to GroupMembership entities. -func (gu *GroupUpdate) RemoveGroupMemberships(g ...*GroupMembership) *GroupUpdate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *GroupUpdate) RemoveGroupMemberships(v ...*GroupMembership) *GroupUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return gu.RemoveGroupMembershipIDs(ids...) + return _u.RemoveGroupMembershipIDs(ids...) } // ClearOrganization clears the "organization" edge to the Organization entity. -func (gu *GroupUpdate) ClearOrganization() *GroupUpdate { - gu.mutation.ClearOrganization() - return gu +func (_u *GroupUpdate) ClearOrganization() *GroupUpdate { + _u.mutation.ClearOrganization() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. -func (gu *GroupUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, gu.sqlSave, gu.mutation, gu.hooks) +func (_u *GroupUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (gu *GroupUpdate) SaveX(ctx context.Context) int { - affected, err := gu.Save(ctx) +func (_u *GroupUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -202,74 +202,74 @@ func (gu *GroupUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (gu *GroupUpdate) Exec(ctx context.Context) error { - _, err := gu.Save(ctx) +func (_u *GroupUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (gu *GroupUpdate) ExecX(ctx context.Context) { - if err := gu.Exec(ctx); err != nil { +func (_u *GroupUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. 
-func (gu *GroupUpdate) check() error { - if v, ok := gu.mutation.Name(); ok { +func (_u *GroupUpdate) check() error { + if v, ok := _u.mutation.Name(); ok { if err := group.NameValidator(v); err != nil { return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} } } - if gu.mutation.OrganizationCleared() && len(gu.mutation.OrganizationIDs()) > 0 { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Group.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (gu *GroupUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *GroupUpdate { - gu.modifiers = append(gu.modifiers, modifiers...) - return gu +func (_u *GroupUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *GroupUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := gu.check(); err != nil { - return n, err +func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID)) - if ps := gu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := gu.mutation.Name(); ok { + if value, ok := _u.mutation.Name(); ok { _spec.SetField(group.FieldName, field.TypeString, value) } - if value, ok := gu.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(group.FieldDescription, field.TypeString, value) } - if gu.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(group.FieldDescription, field.TypeString) } - if value, ok := gu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := gu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) } - if gu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(group.FieldDeletedAt, field.TypeTime) } - if value, ok := gu.mutation.MemberCount(); ok { + if value, ok := _u.mutation.MemberCount(); ok { _spec.SetField(group.FieldMemberCount, field.TypeInt, value) } - if value, ok := gu.mutation.AddedMemberCount(); ok { + if value, ok := _u.mutation.AddedMemberCount(); ok { _spec.AddField(group.FieldMemberCount, field.TypeInt, value) } - if gu.mutation.GroupMembershipsCleared() { + if _u.mutation.GroupMembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -282,7 +282,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := gu.mutation.RemovedGroupMembershipsIDs(); len(nodes) > 0 && !gu.mutation.GroupMembershipsCleared() { + if nodes := _u.mutation.RemovedGroupMembershipsIDs(); len(nodes) > 0 && !_u.mutation.GroupMembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -298,7 +298,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := 
gu.mutation.GroupMembershipsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.GroupMembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -314,7 +314,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if gu.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -327,7 +327,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := gu.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -343,8 +343,8 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(gu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, gu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{group.Label} } else if sqlgraph.IsConstraintError(err) { @@ -352,8 +352,8 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - gu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // GroupUpdateOne is the builder for updating a single Group entity. @@ -366,181 +366,181 @@ type GroupUpdateOne struct { } // SetName sets the "name" field. -func (guo *GroupUpdateOne) SetName(s string) *GroupUpdateOne { - guo.mutation.SetName(s) - return guo +func (_u *GroupUpdateOne) SetName(v string) *GroupUpdateOne { + _u.mutation.SetName(v) + return _u } // SetNillableName sets the "name" field if the given value is not nil. -func (guo *GroupUpdateOne) SetNillableName(s *string) *GroupUpdateOne { - if s != nil { - guo.SetName(*s) +func (_u *GroupUpdateOne) SetNillableName(v *string) *GroupUpdateOne { + if v != nil { + _u.SetName(*v) } - return guo + return _u } // SetDescription sets the "description" field. -func (guo *GroupUpdateOne) SetDescription(s string) *GroupUpdateOne { - guo.mutation.SetDescription(s) - return guo +func (_u *GroupUpdateOne) SetDescription(v string) *GroupUpdateOne { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (guo *GroupUpdateOne) SetNillableDescription(s *string) *GroupUpdateOne { - if s != nil { - guo.SetDescription(*s) +func (_u *GroupUpdateOne) SetNillableDescription(v *string) *GroupUpdateOne { + if v != nil { + _u.SetDescription(*v) } - return guo + return _u } // ClearDescription clears the value of the "description" field. -func (guo *GroupUpdateOne) ClearDescription() *GroupUpdateOne { - guo.mutation.ClearDescription() - return guo +func (_u *GroupUpdateOne) ClearDescription() *GroupUpdateOne { + _u.mutation.ClearDescription() + return _u } // SetOrganizationID sets the "organization_id" field. -func (guo *GroupUpdateOne) SetOrganizationID(u uuid.UUID) *GroupUpdateOne { - guo.mutation.SetOrganizationID(u) - return guo +func (_u *GroupUpdateOne) SetOrganizationID(v uuid.UUID) *GroupUpdateOne { + _u.mutation.SetOrganizationID(v) + return _u } // SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. 
-func (guo *GroupUpdateOne) SetNillableOrganizationID(u *uuid.UUID) *GroupUpdateOne { - if u != nil { - guo.SetOrganizationID(*u) +func (_u *GroupUpdateOne) SetNillableOrganizationID(v *uuid.UUID) *GroupUpdateOne { + if v != nil { + _u.SetOrganizationID(*v) } - return guo + return _u } // SetUpdatedAt sets the "updated_at" field. -func (guo *GroupUpdateOne) SetUpdatedAt(t time.Time) *GroupUpdateOne { - guo.mutation.SetUpdatedAt(t) - return guo +func (_u *GroupUpdateOne) SetUpdatedAt(v time.Time) *GroupUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (guo *GroupUpdateOne) SetNillableUpdatedAt(t *time.Time) *GroupUpdateOne { - if t != nil { - guo.SetUpdatedAt(*t) +func (_u *GroupUpdateOne) SetNillableUpdatedAt(v *time.Time) *GroupUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return guo + return _u } // SetDeletedAt sets the "deleted_at" field. -func (guo *GroupUpdateOne) SetDeletedAt(t time.Time) *GroupUpdateOne { - guo.mutation.SetDeletedAt(t) - return guo +func (_u *GroupUpdateOne) SetDeletedAt(v time.Time) *GroupUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (guo *GroupUpdateOne) SetNillableDeletedAt(t *time.Time) *GroupUpdateOne { - if t != nil { - guo.SetDeletedAt(*t) +func (_u *GroupUpdateOne) SetNillableDeletedAt(v *time.Time) *GroupUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return guo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (guo *GroupUpdateOne) ClearDeletedAt() *GroupUpdateOne { - guo.mutation.ClearDeletedAt() - return guo +func (_u *GroupUpdateOne) ClearDeletedAt() *GroupUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetMemberCount sets the "member_count" field. -func (guo *GroupUpdateOne) SetMemberCount(i int) *GroupUpdateOne { - guo.mutation.ResetMemberCount() - guo.mutation.SetMemberCount(i) - return guo +func (_u *GroupUpdateOne) SetMemberCount(v int) *GroupUpdateOne { + _u.mutation.ResetMemberCount() + _u.mutation.SetMemberCount(v) + return _u } // SetNillableMemberCount sets the "member_count" field if the given value is not nil. -func (guo *GroupUpdateOne) SetNillableMemberCount(i *int) *GroupUpdateOne { - if i != nil { - guo.SetMemberCount(*i) +func (_u *GroupUpdateOne) SetNillableMemberCount(v *int) *GroupUpdateOne { + if v != nil { + _u.SetMemberCount(*v) } - return guo + return _u } -// AddMemberCount adds i to the "member_count" field. -func (guo *GroupUpdateOne) AddMemberCount(i int) *GroupUpdateOne { - guo.mutation.AddMemberCount(i) - return guo +// AddMemberCount adds value to the "member_count" field. +func (_u *GroupUpdateOne) AddMemberCount(v int) *GroupUpdateOne { + _u.mutation.AddMemberCount(v) + return _u } // AddGroupMembershipIDs adds the "group_memberships" edge to the GroupMembership entity by IDs. -func (guo *GroupUpdateOne) AddGroupMembershipIDs(ids ...uuid.UUID) *GroupUpdateOne { - guo.mutation.AddGroupMembershipIDs(ids...) - return guo +func (_u *GroupUpdateOne) AddGroupMembershipIDs(ids ...uuid.UUID) *GroupUpdateOne { + _u.mutation.AddGroupMembershipIDs(ids...) + return _u } // AddGroupMemberships adds the "group_memberships" edges to the GroupMembership entity. 
-func (guo *GroupUpdateOne) AddGroupMemberships(g ...*GroupMembership) *GroupUpdateOne { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *GroupUpdateOne) AddGroupMemberships(v ...*GroupMembership) *GroupUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return guo.AddGroupMembershipIDs(ids...) + return _u.AddGroupMembershipIDs(ids...) } // SetOrganization sets the "organization" edge to the Organization entity. -func (guo *GroupUpdateOne) SetOrganization(o *Organization) *GroupUpdateOne { - return guo.SetOrganizationID(o.ID) +func (_u *GroupUpdateOne) SetOrganization(v *Organization) *GroupUpdateOne { + return _u.SetOrganizationID(v.ID) } // Mutation returns the GroupMutation object of the builder. -func (guo *GroupUpdateOne) Mutation() *GroupMutation { - return guo.mutation +func (_u *GroupUpdateOne) Mutation() *GroupMutation { + return _u.mutation } // ClearGroupMemberships clears all "group_memberships" edges to the GroupMembership entity. -func (guo *GroupUpdateOne) ClearGroupMemberships() *GroupUpdateOne { - guo.mutation.ClearGroupMemberships() - return guo +func (_u *GroupUpdateOne) ClearGroupMemberships() *GroupUpdateOne { + _u.mutation.ClearGroupMemberships() + return _u } // RemoveGroupMembershipIDs removes the "group_memberships" edge to GroupMembership entities by IDs. -func (guo *GroupUpdateOne) RemoveGroupMembershipIDs(ids ...uuid.UUID) *GroupUpdateOne { - guo.mutation.RemoveGroupMembershipIDs(ids...) - return guo +func (_u *GroupUpdateOne) RemoveGroupMembershipIDs(ids ...uuid.UUID) *GroupUpdateOne { + _u.mutation.RemoveGroupMembershipIDs(ids...) + return _u } // RemoveGroupMemberships removes "group_memberships" edges to GroupMembership entities. -func (guo *GroupUpdateOne) RemoveGroupMemberships(g ...*GroupMembership) *GroupUpdateOne { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *GroupUpdateOne) RemoveGroupMemberships(v ...*GroupMembership) *GroupUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return guo.RemoveGroupMembershipIDs(ids...) + return _u.RemoveGroupMembershipIDs(ids...) } // ClearOrganization clears the "organization" edge to the Organization entity. -func (guo *GroupUpdateOne) ClearOrganization() *GroupUpdateOne { - guo.mutation.ClearOrganization() - return guo +func (_u *GroupUpdateOne) ClearOrganization() *GroupUpdateOne { + _u.mutation.ClearOrganization() + return _u } // Where appends a list predicates to the GroupUpdate builder. -func (guo *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne { - guo.mutation.Where(ps...) - return guo +func (_u *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne { - guo.fields = append([]string{field}, fields...) - return guo +func (_u *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated Group entity. 
-func (guo *GroupUpdateOne) Save(ctx context.Context) (*Group, error) { - return withHooks(ctx, guo.sqlSave, guo.mutation, guo.hooks) +func (_u *GroupUpdateOne) Save(ctx context.Context) (*Group, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (guo *GroupUpdateOne) SaveX(ctx context.Context) *Group { - node, err := guo.Save(ctx) +func (_u *GroupUpdateOne) SaveX(ctx context.Context) *Group { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -548,48 +548,48 @@ func (guo *GroupUpdateOne) SaveX(ctx context.Context) *Group { } // Exec executes the query on the entity. -func (guo *GroupUpdateOne) Exec(ctx context.Context) error { - _, err := guo.Save(ctx) +func (_u *GroupUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (guo *GroupUpdateOne) ExecX(ctx context.Context) { - if err := guo.Exec(ctx); err != nil { +func (_u *GroupUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (guo *GroupUpdateOne) check() error { - if v, ok := guo.mutation.Name(); ok { +func (_u *GroupUpdateOne) check() error { + if v, ok := _u.mutation.Name(); ok { if err := group.NameValidator(v); err != nil { return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} } } - if guo.mutation.OrganizationCleared() && len(guo.mutation.OrganizationIDs()) > 0 { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Group.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (guo *GroupUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *GroupUpdateOne { - guo.modifiers = append(guo.modifiers, modifiers...) - return guo +func (_u *GroupUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *GroupUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) { - if err := guo.check(); err != nil { +func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID)) - id, ok := guo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Group.id" for update`)} } _spec.Node.ID.Value = id - if fields := guo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, group.FieldID) for _, f := range fields { @@ -601,38 +601,38 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } } } - if ps := guo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := guo.mutation.Name(); ok { + if value, ok := _u.mutation.Name(); ok { _spec.SetField(group.FieldName, field.TypeString, value) } - if value, ok := guo.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(group.FieldDescription, field.TypeString, value) } - if guo.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(group.FieldDescription, field.TypeString) } - if value, ok := guo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := guo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) } - if guo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(group.FieldDeletedAt, field.TypeTime) } - if value, ok := guo.mutation.MemberCount(); ok { + if value, ok := _u.mutation.MemberCount(); ok { _spec.SetField(group.FieldMemberCount, field.TypeInt, value) } - if value, ok := guo.mutation.AddedMemberCount(); ok { + if value, ok := _u.mutation.AddedMemberCount(); ok { _spec.AddField(group.FieldMemberCount, field.TypeInt, value) } - if guo.mutation.GroupMembershipsCleared() { + if _u.mutation.GroupMembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -645,7 +645,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := guo.mutation.RemovedGroupMembershipsIDs(); len(nodes) > 0 && !guo.mutation.GroupMembershipsCleared() { + if nodes := _u.mutation.RemovedGroupMembershipsIDs(); len(nodes) > 0 && !_u.mutation.GroupMembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -661,7 +661,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := guo.mutation.GroupMembershipsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.GroupMembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -677,7 +677,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if guo.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge 
:= &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -690,7 +690,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := guo.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -706,11 +706,11 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(guo.modifiers...) - _node = &Group{config: guo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &Group{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, guo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{group.Label} } else if sqlgraph.IsConstraintError(err) { @@ -718,6 +718,6 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error } return nil, err } - guo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/groupmembership.go b/app/controlplane/pkg/data/ent/groupmembership.go index f5ff6ddb9..e8d3ccbc6 100644 --- a/app/controlplane/pkg/data/ent/groupmembership.go +++ b/app/controlplane/pkg/data/ent/groupmembership.go @@ -91,7 +91,7 @@ func (*GroupMembership) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the GroupMembership fields. -func (gm *GroupMembership) assignValues(columns []string, values []any) error { +func (_m *GroupMembership) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -101,46 +101,46 @@ func (gm *GroupMembership) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - gm.ID = *value + _m.ID = *value } case groupmembership.FieldGroupID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field group_id", values[i]) } else if value != nil { - gm.GroupID = *value + _m.GroupID = *value } case groupmembership.FieldUserID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field user_id", values[i]) } else if value != nil { - gm.UserID = *value + _m.UserID = *value } case groupmembership.FieldMaintainer: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field maintainer", values[i]) } else if value.Valid { - gm.Maintainer = value.Bool + _m.Maintainer = value.Bool } case groupmembership.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - gm.CreatedAt = value.Time + _m.CreatedAt = value.Time } case groupmembership.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - gm.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case groupmembership.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field 
deleted_at", values[i]) } else if value.Valid { - gm.DeletedAt = value.Time + _m.DeletedAt = value.Time } default: - gm.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -148,60 +148,60 @@ func (gm *GroupMembership) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the GroupMembership. // This includes values selected through modifiers, order, etc. -func (gm *GroupMembership) Value(name string) (ent.Value, error) { - return gm.selectValues.Get(name) +func (_m *GroupMembership) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryGroup queries the "group" edge of the GroupMembership entity. -func (gm *GroupMembership) QueryGroup() *GroupQuery { - return NewGroupMembershipClient(gm.config).QueryGroup(gm) +func (_m *GroupMembership) QueryGroup() *GroupQuery { + return NewGroupMembershipClient(_m.config).QueryGroup(_m) } // QueryUser queries the "user" edge of the GroupMembership entity. -func (gm *GroupMembership) QueryUser() *UserQuery { - return NewGroupMembershipClient(gm.config).QueryUser(gm) +func (_m *GroupMembership) QueryUser() *UserQuery { + return NewGroupMembershipClient(_m.config).QueryUser(_m) } // Update returns a builder for updating this GroupMembership. // Note that you need to call GroupMembership.Unwrap() before calling this method if this GroupMembership // was returned from a transaction, and the transaction was committed or rolled back. -func (gm *GroupMembership) Update() *GroupMembershipUpdateOne { - return NewGroupMembershipClient(gm.config).UpdateOne(gm) +func (_m *GroupMembership) Update() *GroupMembershipUpdateOne { + return NewGroupMembershipClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the GroupMembership entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (gm *GroupMembership) Unwrap() *GroupMembership { - _tx, ok := gm.config.driver.(*txDriver) +func (_m *GroupMembership) Unwrap() *GroupMembership { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: GroupMembership is not a transactional entity") } - gm.config.driver = _tx.drv - return gm + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. 
-func (gm *GroupMembership) String() string { +func (_m *GroupMembership) String() string { var builder strings.Builder builder.WriteString("GroupMembership(") - builder.WriteString(fmt.Sprintf("id=%v, ", gm.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("group_id=") - builder.WriteString(fmt.Sprintf("%v", gm.GroupID)) + builder.WriteString(fmt.Sprintf("%v", _m.GroupID)) builder.WriteString(", ") builder.WriteString("user_id=") - builder.WriteString(fmt.Sprintf("%v", gm.UserID)) + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) builder.WriteString(", ") builder.WriteString("maintainer=") - builder.WriteString(fmt.Sprintf("%v", gm.Maintainer)) + builder.WriteString(fmt.Sprintf("%v", _m.Maintainer)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(gm.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(gm.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(gm.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/groupmembership_create.go b/app/controlplane/pkg/data/ent/groupmembership_create.go index 385ccb917..f2ff0756a 100644 --- a/app/controlplane/pkg/data/ent/groupmembership_create.go +++ b/app/controlplane/pkg/data/ent/groupmembership_create.go @@ -27,127 +27,127 @@ type GroupMembershipCreate struct { } // SetGroupID sets the "group_id" field. -func (gmc *GroupMembershipCreate) SetGroupID(u uuid.UUID) *GroupMembershipCreate { - gmc.mutation.SetGroupID(u) - return gmc +func (_c *GroupMembershipCreate) SetGroupID(v uuid.UUID) *GroupMembershipCreate { + _c.mutation.SetGroupID(v) + return _c } // SetNillableGroupID sets the "group_id" field if the given value is not nil. -func (gmc *GroupMembershipCreate) SetNillableGroupID(u *uuid.UUID) *GroupMembershipCreate { - if u != nil { - gmc.SetGroupID(*u) +func (_c *GroupMembershipCreate) SetNillableGroupID(v *uuid.UUID) *GroupMembershipCreate { + if v != nil { + _c.SetGroupID(*v) } - return gmc + return _c } // SetUserID sets the "user_id" field. -func (gmc *GroupMembershipCreate) SetUserID(u uuid.UUID) *GroupMembershipCreate { - gmc.mutation.SetUserID(u) - return gmc +func (_c *GroupMembershipCreate) SetUserID(v uuid.UUID) *GroupMembershipCreate { + _c.mutation.SetUserID(v) + return _c } // SetNillableUserID sets the "user_id" field if the given value is not nil. -func (gmc *GroupMembershipCreate) SetNillableUserID(u *uuid.UUID) *GroupMembershipCreate { - if u != nil { - gmc.SetUserID(*u) +func (_c *GroupMembershipCreate) SetNillableUserID(v *uuid.UUID) *GroupMembershipCreate { + if v != nil { + _c.SetUserID(*v) } - return gmc + return _c } // SetMaintainer sets the "maintainer" field. -func (gmc *GroupMembershipCreate) SetMaintainer(b bool) *GroupMembershipCreate { - gmc.mutation.SetMaintainer(b) - return gmc +func (_c *GroupMembershipCreate) SetMaintainer(v bool) *GroupMembershipCreate { + _c.mutation.SetMaintainer(v) + return _c } // SetNillableMaintainer sets the "maintainer" field if the given value is not nil. 
-func (gmc *GroupMembershipCreate) SetNillableMaintainer(b *bool) *GroupMembershipCreate { - if b != nil { - gmc.SetMaintainer(*b) +func (_c *GroupMembershipCreate) SetNillableMaintainer(v *bool) *GroupMembershipCreate { + if v != nil { + _c.SetMaintainer(*v) } - return gmc + return _c } // SetCreatedAt sets the "created_at" field. -func (gmc *GroupMembershipCreate) SetCreatedAt(t time.Time) *GroupMembershipCreate { - gmc.mutation.SetCreatedAt(t) - return gmc +func (_c *GroupMembershipCreate) SetCreatedAt(v time.Time) *GroupMembershipCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (gmc *GroupMembershipCreate) SetNillableCreatedAt(t *time.Time) *GroupMembershipCreate { - if t != nil { - gmc.SetCreatedAt(*t) +func (_c *GroupMembershipCreate) SetNillableCreatedAt(v *time.Time) *GroupMembershipCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return gmc + return _c } // SetUpdatedAt sets the "updated_at" field. -func (gmc *GroupMembershipCreate) SetUpdatedAt(t time.Time) *GroupMembershipCreate { - gmc.mutation.SetUpdatedAt(t) - return gmc +func (_c *GroupMembershipCreate) SetUpdatedAt(v time.Time) *GroupMembershipCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (gmc *GroupMembershipCreate) SetNillableUpdatedAt(t *time.Time) *GroupMembershipCreate { - if t != nil { - gmc.SetUpdatedAt(*t) +func (_c *GroupMembershipCreate) SetNillableUpdatedAt(v *time.Time) *GroupMembershipCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return gmc + return _c } // SetDeletedAt sets the "deleted_at" field. -func (gmc *GroupMembershipCreate) SetDeletedAt(t time.Time) *GroupMembershipCreate { - gmc.mutation.SetDeletedAt(t) - return gmc +func (_c *GroupMembershipCreate) SetDeletedAt(v time.Time) *GroupMembershipCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (gmc *GroupMembershipCreate) SetNillableDeletedAt(t *time.Time) *GroupMembershipCreate { - if t != nil { - gmc.SetDeletedAt(*t) +func (_c *GroupMembershipCreate) SetNillableDeletedAt(v *time.Time) *GroupMembershipCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return gmc + return _c } // SetID sets the "id" field. -func (gmc *GroupMembershipCreate) SetID(u uuid.UUID) *GroupMembershipCreate { - gmc.mutation.SetID(u) - return gmc +func (_c *GroupMembershipCreate) SetID(v uuid.UUID) *GroupMembershipCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (gmc *GroupMembershipCreate) SetNillableID(u *uuid.UUID) *GroupMembershipCreate { - if u != nil { - gmc.SetID(*u) +func (_c *GroupMembershipCreate) SetNillableID(v *uuid.UUID) *GroupMembershipCreate { + if v != nil { + _c.SetID(*v) } - return gmc + return _c } // SetGroup sets the "group" edge to the Group entity. -func (gmc *GroupMembershipCreate) SetGroup(g *Group) *GroupMembershipCreate { - return gmc.SetGroupID(g.ID) +func (_c *GroupMembershipCreate) SetGroup(v *Group) *GroupMembershipCreate { + return _c.SetGroupID(v.ID) } // SetUser sets the "user" edge to the User entity. -func (gmc *GroupMembershipCreate) SetUser(u *User) *GroupMembershipCreate { - return gmc.SetUserID(u.ID) +func (_c *GroupMembershipCreate) SetUser(v *User) *GroupMembershipCreate { + return _c.SetUserID(v.ID) } // Mutation returns the GroupMembershipMutation object of the builder. 
-func (gmc *GroupMembershipCreate) Mutation() *GroupMembershipMutation { - return gmc.mutation +func (_c *GroupMembershipCreate) Mutation() *GroupMembershipMutation { + return _c.mutation } // Save creates the GroupMembership in the database. -func (gmc *GroupMembershipCreate) Save(ctx context.Context) (*GroupMembership, error) { - gmc.defaults() - return withHooks(ctx, gmc.sqlSave, gmc.mutation, gmc.hooks) +func (_c *GroupMembershipCreate) Save(ctx context.Context) (*GroupMembership, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (gmc *GroupMembershipCreate) SaveX(ctx context.Context) *GroupMembership { - v, err := gmc.Save(ctx) +func (_c *GroupMembershipCreate) SaveX(ctx context.Context) *GroupMembership { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -155,78 +155,78 @@ func (gmc *GroupMembershipCreate) SaveX(ctx context.Context) *GroupMembership { } // Exec executes the query. -func (gmc *GroupMembershipCreate) Exec(ctx context.Context) error { - _, err := gmc.Save(ctx) +func (_c *GroupMembershipCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (gmc *GroupMembershipCreate) ExecX(ctx context.Context) { - if err := gmc.Exec(ctx); err != nil { +func (_c *GroupMembershipCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (gmc *GroupMembershipCreate) defaults() { - if _, ok := gmc.mutation.GroupID(); !ok { +func (_c *GroupMembershipCreate) defaults() { + if _, ok := _c.mutation.GroupID(); !ok { v := groupmembership.DefaultGroupID() - gmc.mutation.SetGroupID(v) + _c.mutation.SetGroupID(v) } - if _, ok := gmc.mutation.UserID(); !ok { + if _, ok := _c.mutation.UserID(); !ok { v := groupmembership.DefaultUserID() - gmc.mutation.SetUserID(v) + _c.mutation.SetUserID(v) } - if _, ok := gmc.mutation.Maintainer(); !ok { + if _, ok := _c.mutation.Maintainer(); !ok { v := groupmembership.DefaultMaintainer - gmc.mutation.SetMaintainer(v) + _c.mutation.SetMaintainer(v) } - if _, ok := gmc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { v := groupmembership.DefaultCreatedAt() - gmc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := gmc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := groupmembership.DefaultUpdatedAt() - gmc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := gmc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := groupmembership.DefaultID() - gmc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (gmc *GroupMembershipCreate) check() error { - if _, ok := gmc.mutation.GroupID(); !ok { +func (_c *GroupMembershipCreate) check() error { + if _, ok := _c.mutation.GroupID(); !ok { return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "GroupMembership.group_id"`)} } - if _, ok := gmc.mutation.UserID(); !ok { + if _, ok := _c.mutation.UserID(); !ok { return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "GroupMembership.user_id"`)} } - if _, ok := gmc.mutation.Maintainer(); !ok { + if _, ok := _c.mutation.Maintainer(); !ok { return &ValidationError{Name: "maintainer", err: errors.New(`ent: missing required field "GroupMembership.maintainer"`)} } - if _, ok := gmc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "GroupMembership.created_at"`)} } - if _, ok := gmc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "GroupMembership.updated_at"`)} } - if len(gmc.mutation.GroupIDs()) == 0 { + if len(_c.mutation.GroupIDs()) == 0 { return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "GroupMembership.group"`)} } - if len(gmc.mutation.UserIDs()) == 0 { + if len(_c.mutation.UserIDs()) == 0 { return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "GroupMembership.user"`)} } return nil } -func (gmc *GroupMembershipCreate) sqlSave(ctx context.Context) (*GroupMembership, error) { - if err := gmc.check(); err != nil { +func (_c *GroupMembershipCreate) sqlSave(ctx context.Context) (*GroupMembership, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := gmc.createSpec() - if err := sqlgraph.CreateNode(ctx, gmc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -239,38 +239,38 @@ func (gmc *GroupMembershipCreate) sqlSave(ctx context.Context) (*GroupMembership return nil, err } } - gmc.mutation.id = &_node.ID - gmc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (gmc *GroupMembershipCreate) createSpec() (*GroupMembership, *sqlgraph.CreateSpec) { +func (_c *GroupMembershipCreate) createSpec() (*GroupMembership, *sqlgraph.CreateSpec) { var ( - _node = &GroupMembership{config: gmc.config} + _node = &GroupMembership{config: _c.config} _spec = sqlgraph.NewCreateSpec(groupmembership.Table, sqlgraph.NewFieldSpec(groupmembership.FieldID, field.TypeUUID)) ) - _spec.OnConflict = gmc.conflict - if id, ok := gmc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := gmc.mutation.Maintainer(); ok { + if value, ok := _c.mutation.Maintainer(); ok { _spec.SetField(groupmembership.FieldMaintainer, field.TypeBool, value) _node.Maintainer = value } - if value, ok := gmc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(groupmembership.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := gmc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(groupmembership.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, 
ok := gmc.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(groupmembership.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if nodes := gmc.mutation.GroupIDs(); len(nodes) > 0 { + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -287,7 +287,7 @@ func (gmc *GroupMembershipCreate) createSpec() (*GroupMembership, *sqlgraph.Crea _node.GroupID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := gmc.mutation.UserIDs(); len(nodes) > 0 { + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -323,10 +323,10 @@ func (gmc *GroupMembershipCreate) createSpec() (*GroupMembership, *sqlgraph.Crea // SetGroupID(v+v). // }). // Exec(ctx) -func (gmc *GroupMembershipCreate) OnConflict(opts ...sql.ConflictOption) *GroupMembershipUpsertOne { - gmc.conflict = opts +func (_c *GroupMembershipCreate) OnConflict(opts ...sql.ConflictOption) *GroupMembershipUpsertOne { + _c.conflict = opts return &GroupMembershipUpsertOne{ - create: gmc, + create: _c, } } @@ -336,10 +336,10 @@ func (gmc *GroupMembershipCreate) OnConflict(opts ...sql.ConflictOption) *GroupM // client.GroupMembership.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (gmc *GroupMembershipCreate) OnConflictColumns(columns ...string) *GroupMembershipUpsertOne { - gmc.conflict = append(gmc.conflict, sql.ConflictColumns(columns...)) +func (_c *GroupMembershipCreate) OnConflictColumns(columns ...string) *GroupMembershipUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &GroupMembershipUpsertOne{ - create: gmc, + create: _c, } } @@ -597,16 +597,16 @@ type GroupMembershipCreateBulk struct { } // Save creates the GroupMembership entities in the database. -func (gmcb *GroupMembershipCreateBulk) Save(ctx context.Context) ([]*GroupMembership, error) { - if gmcb.err != nil { - return nil, gmcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(gmcb.builders)) - nodes := make([]*GroupMembership, len(gmcb.builders)) - mutators := make([]Mutator, len(gmcb.builders)) - for i := range gmcb.builders { +func (_c *GroupMembershipCreateBulk) Save(ctx context.Context) ([]*GroupMembership, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*GroupMembership, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := gmcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*GroupMembershipMutation) @@ -620,12 +620,12 @@ func (gmcb *GroupMembershipCreateBulk) Save(ctx context.Context) ([]*GroupMember var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, gmcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = gmcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, gmcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -645,7 +645,7 @@ func (gmcb *GroupMembershipCreateBulk) Save(ctx context.Context) ([]*GroupMember }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, gmcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -653,8 +653,8 @@ func (gmcb *GroupMembershipCreateBulk) Save(ctx context.Context) ([]*GroupMember } // SaveX is like Save, but panics if an error occurs. -func (gmcb *GroupMembershipCreateBulk) SaveX(ctx context.Context) []*GroupMembership { - v, err := gmcb.Save(ctx) +func (_c *GroupMembershipCreateBulk) SaveX(ctx context.Context) []*GroupMembership { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -662,14 +662,14 @@ func (gmcb *GroupMembershipCreateBulk) SaveX(ctx context.Context) []*GroupMember } // Exec executes the query. -func (gmcb *GroupMembershipCreateBulk) Exec(ctx context.Context) error { - _, err := gmcb.Save(ctx) +func (_c *GroupMembershipCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (gmcb *GroupMembershipCreateBulk) ExecX(ctx context.Context) { - if err := gmcb.Exec(ctx); err != nil { +func (_c *GroupMembershipCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -689,10 +689,10 @@ func (gmcb *GroupMembershipCreateBulk) ExecX(ctx context.Context) { // SetGroupID(v+v). // }). // Exec(ctx) -func (gmcb *GroupMembershipCreateBulk) OnConflict(opts ...sql.ConflictOption) *GroupMembershipUpsertBulk { - gmcb.conflict = opts +func (_c *GroupMembershipCreateBulk) OnConflict(opts ...sql.ConflictOption) *GroupMembershipUpsertBulk { + _c.conflict = opts return &GroupMembershipUpsertBulk{ - create: gmcb, + create: _c, } } @@ -702,10 +702,10 @@ func (gmcb *GroupMembershipCreateBulk) OnConflict(opts ...sql.ConflictOption) *G // client.GroupMembership.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (gmcb *GroupMembershipCreateBulk) OnConflictColumns(columns ...string) *GroupMembershipUpsertBulk { - gmcb.conflict = append(gmcb.conflict, sql.ConflictColumns(columns...)) +func (_c *GroupMembershipCreateBulk) OnConflictColumns(columns ...string) *GroupMembershipUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &GroupMembershipUpsertBulk{ - create: gmcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/groupmembership_delete.go b/app/controlplane/pkg/data/ent/groupmembership_delete.go index 1207481a9..f1040b6a0 100644 --- a/app/controlplane/pkg/data/ent/groupmembership_delete.go +++ b/app/controlplane/pkg/data/ent/groupmembership_delete.go @@ -20,56 +20,56 @@ type GroupMembershipDelete struct { } // Where appends a list predicates to the GroupMembershipDelete builder. -func (gmd *GroupMembershipDelete) Where(ps ...predicate.GroupMembership) *GroupMembershipDelete { - gmd.mutation.Where(ps...) - return gmd +func (_d *GroupMembershipDelete) Where(ps ...predicate.GroupMembership) *GroupMembershipDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. 
-func (gmd *GroupMembershipDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, gmd.sqlExec, gmd.mutation, gmd.hooks) +func (_d *GroupMembershipDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (gmd *GroupMembershipDelete) ExecX(ctx context.Context) int { - n, err := gmd.Exec(ctx) +func (_d *GroupMembershipDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (gmd *GroupMembershipDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *GroupMembershipDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(groupmembership.Table, sqlgraph.NewFieldSpec(groupmembership.FieldID, field.TypeUUID)) - if ps := gmd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, gmd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - gmd.mutation.done = true + _d.mutation.done = true return affected, err } // GroupMembershipDeleteOne is the builder for deleting a single GroupMembership entity. type GroupMembershipDeleteOne struct { - gmd *GroupMembershipDelete + _d *GroupMembershipDelete } // Where appends a list predicates to the GroupMembershipDelete builder. -func (gmdo *GroupMembershipDeleteOne) Where(ps ...predicate.GroupMembership) *GroupMembershipDeleteOne { - gmdo.gmd.mutation.Where(ps...) - return gmdo +func (_d *GroupMembershipDeleteOne) Where(ps ...predicate.GroupMembership) *GroupMembershipDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (gmdo *GroupMembershipDeleteOne) Exec(ctx context.Context) error { - n, err := gmdo.gmd.Exec(ctx) +func (_d *GroupMembershipDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (gmdo *GroupMembershipDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (gmdo *GroupMembershipDeleteOne) ExecX(ctx context.Context) { - if err := gmdo.Exec(ctx); err != nil { +func (_d *GroupMembershipDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/groupmembership_query.go b/app/controlplane/pkg/data/ent/groupmembership_query.go index 64839af3b..28bfd57d8 100644 --- a/app/controlplane/pkg/data/ent/groupmembership_query.go +++ b/app/controlplane/pkg/data/ent/groupmembership_query.go @@ -35,44 +35,44 @@ type GroupMembershipQuery struct { } // Where adds a new predicate for the GroupMembershipQuery builder. -func (gmq *GroupMembershipQuery) Where(ps ...predicate.GroupMembership) *GroupMembershipQuery { - gmq.predicates = append(gmq.predicates, ps...) - return gmq +func (_q *GroupMembershipQuery) Where(ps ...predicate.GroupMembership) *GroupMembershipQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. 
-func (gmq *GroupMembershipQuery) Limit(limit int) *GroupMembershipQuery { - gmq.ctx.Limit = &limit - return gmq +func (_q *GroupMembershipQuery) Limit(limit int) *GroupMembershipQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (gmq *GroupMembershipQuery) Offset(offset int) *GroupMembershipQuery { - gmq.ctx.Offset = &offset - return gmq +func (_q *GroupMembershipQuery) Offset(offset int) *GroupMembershipQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (gmq *GroupMembershipQuery) Unique(unique bool) *GroupMembershipQuery { - gmq.ctx.Unique = &unique - return gmq +func (_q *GroupMembershipQuery) Unique(unique bool) *GroupMembershipQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (gmq *GroupMembershipQuery) Order(o ...groupmembership.OrderOption) *GroupMembershipQuery { - gmq.order = append(gmq.order, o...) - return gmq +func (_q *GroupMembershipQuery) Order(o ...groupmembership.OrderOption) *GroupMembershipQuery { + _q.order = append(_q.order, o...) + return _q } // QueryGroup chains the current query on the "group" edge. -func (gmq *GroupMembershipQuery) QueryGroup() *GroupQuery { - query := (&GroupClient{config: gmq.config}).Query() +func (_q *GroupMembershipQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := gmq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := gmq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -81,20 +81,20 @@ func (gmq *GroupMembershipQuery) QueryGroup() *GroupQuery { sqlgraph.To(group.Table, group.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, groupmembership.GroupTable, groupmembership.GroupColumn), ) - fromU = sqlgraph.SetNeighbors(gmq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryUser chains the current query on the "user" edge. -func (gmq *GroupMembershipQuery) QueryUser() *UserQuery { - query := (&UserClient{config: gmq.config}).Query() +func (_q *GroupMembershipQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := gmq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := gmq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -103,7 +103,7 @@ func (gmq *GroupMembershipQuery) QueryUser() *UserQuery { sqlgraph.To(user.Table, user.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, groupmembership.UserTable, groupmembership.UserColumn), ) - fromU = sqlgraph.SetNeighbors(gmq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -111,8 +111,8 @@ func (gmq *GroupMembershipQuery) QueryUser() *UserQuery { // First returns the first GroupMembership entity from the query. // Returns a *NotFoundError when no GroupMembership was found. 
-func (gmq *GroupMembershipQuery) First(ctx context.Context) (*GroupMembership, error) { - nodes, err := gmq.Limit(1).All(setContextOp(ctx, gmq.ctx, ent.OpQueryFirst)) +func (_q *GroupMembershipQuery) First(ctx context.Context) (*GroupMembership, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -123,8 +123,8 @@ func (gmq *GroupMembershipQuery) First(ctx context.Context) (*GroupMembership, e } // FirstX is like First, but panics if an error occurs. -func (gmq *GroupMembershipQuery) FirstX(ctx context.Context) *GroupMembership { - node, err := gmq.First(ctx) +func (_q *GroupMembershipQuery) FirstX(ctx context.Context) *GroupMembership { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -133,9 +133,9 @@ func (gmq *GroupMembershipQuery) FirstX(ctx context.Context) *GroupMembership { // FirstID returns the first GroupMembership ID from the query. // Returns a *NotFoundError when no GroupMembership ID was found. -func (gmq *GroupMembershipQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *GroupMembershipQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = gmq.Limit(1).IDs(setContextOp(ctx, gmq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -146,8 +146,8 @@ func (gmq *GroupMembershipQuery) FirstID(ctx context.Context) (id uuid.UUID, err } // FirstIDX is like FirstID, but panics if an error occurs. -func (gmq *GroupMembershipQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := gmq.FirstID(ctx) +func (_q *GroupMembershipQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -157,8 +157,8 @@ func (gmq *GroupMembershipQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single GroupMembership entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one GroupMembership entity is found. // Returns a *NotFoundError when no GroupMembership entities are found. -func (gmq *GroupMembershipQuery) Only(ctx context.Context) (*GroupMembership, error) { - nodes, err := gmq.Limit(2).All(setContextOp(ctx, gmq.ctx, ent.OpQueryOnly)) +func (_q *GroupMembershipQuery) Only(ctx context.Context) (*GroupMembership, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -173,8 +173,8 @@ func (gmq *GroupMembershipQuery) Only(ctx context.Context) (*GroupMembership, er } // OnlyX is like Only, but panics if an error occurs. -func (gmq *GroupMembershipQuery) OnlyX(ctx context.Context) *GroupMembership { - node, err := gmq.Only(ctx) +func (_q *GroupMembershipQuery) OnlyX(ctx context.Context) *GroupMembership { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -184,9 +184,9 @@ func (gmq *GroupMembershipQuery) OnlyX(ctx context.Context) *GroupMembership { // OnlyID is like Only, but returns the only GroupMembership ID in the query. // Returns a *NotSingularError when more than one GroupMembership ID is found. // Returns a *NotFoundError when no entities are found. 
-func (gmq *GroupMembershipQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *GroupMembershipQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = gmq.Limit(2).IDs(setContextOp(ctx, gmq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -201,8 +201,8 @@ func (gmq *GroupMembershipQuery) OnlyID(ctx context.Context) (id uuid.UUID, err } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (gmq *GroupMembershipQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := gmq.OnlyID(ctx) +func (_q *GroupMembershipQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -210,18 +210,18 @@ func (gmq *GroupMembershipQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of GroupMemberships. -func (gmq *GroupMembershipQuery) All(ctx context.Context) ([]*GroupMembership, error) { - ctx = setContextOp(ctx, gmq.ctx, ent.OpQueryAll) - if err := gmq.prepareQuery(ctx); err != nil { +func (_q *GroupMembershipQuery) All(ctx context.Context) ([]*GroupMembership, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*GroupMembership, *GroupMembershipQuery]() - return withInterceptors[[]*GroupMembership](ctx, gmq, qr, gmq.inters) + return withInterceptors[[]*GroupMembership](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (gmq *GroupMembershipQuery) AllX(ctx context.Context) []*GroupMembership { - nodes, err := gmq.All(ctx) +func (_q *GroupMembershipQuery) AllX(ctx context.Context) []*GroupMembership { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -229,20 +229,20 @@ func (gmq *GroupMembershipQuery) AllX(ctx context.Context) []*GroupMembership { } // IDs executes the query and returns a list of GroupMembership IDs. -func (gmq *GroupMembershipQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if gmq.ctx.Unique == nil && gmq.path != nil { - gmq.Unique(true) +func (_q *GroupMembershipQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, gmq.ctx, ent.OpQueryIDs) - if err = gmq.Select(groupmembership.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(groupmembership.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (gmq *GroupMembershipQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := gmq.IDs(ctx) +func (_q *GroupMembershipQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -250,17 +250,17 @@ func (gmq *GroupMembershipQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. 
-func (gmq *GroupMembershipQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, gmq.ctx, ent.OpQueryCount) - if err := gmq.prepareQuery(ctx); err != nil { +func (_q *GroupMembershipQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, gmq, querierCount[*GroupMembershipQuery](), gmq.inters) + return withInterceptors[int](ctx, _q, querierCount[*GroupMembershipQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (gmq *GroupMembershipQuery) CountX(ctx context.Context) int { - count, err := gmq.Count(ctx) +func (_q *GroupMembershipQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -268,9 +268,9 @@ func (gmq *GroupMembershipQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (gmq *GroupMembershipQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, gmq.ctx, ent.OpQueryExist) - switch _, err := gmq.FirstID(ctx); { +func (_q *GroupMembershipQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -281,8 +281,8 @@ func (gmq *GroupMembershipQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (gmq *GroupMembershipQuery) ExistX(ctx context.Context) bool { - exist, err := gmq.Exist(ctx) +func (_q *GroupMembershipQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -291,45 +291,45 @@ func (gmq *GroupMembershipQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the GroupMembershipQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (gmq *GroupMembershipQuery) Clone() *GroupMembershipQuery { - if gmq == nil { +func (_q *GroupMembershipQuery) Clone() *GroupMembershipQuery { + if _q == nil { return nil } return &GroupMembershipQuery{ - config: gmq.config, - ctx: gmq.ctx.Clone(), - order: append([]groupmembership.OrderOption{}, gmq.order...), - inters: append([]Interceptor{}, gmq.inters...), - predicates: append([]predicate.GroupMembership{}, gmq.predicates...), - withGroup: gmq.withGroup.Clone(), - withUser: gmq.withUser.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]groupmembership.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.GroupMembership{}, _q.predicates...), + withGroup: _q.withGroup.Clone(), + withUser: _q.withUser.Clone(), // clone intermediate query. - sql: gmq.sql.Clone(), - path: gmq.path, - modifiers: append([]func(*sql.Selector){}, gmq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithGroup tells the query-builder to eager-load the nodes that are connected to // the "group" edge. The optional arguments are used to configure the query builder of the edge. 
-func (gmq *GroupMembershipQuery) WithGroup(opts ...func(*GroupQuery)) *GroupMembershipQuery { - query := (&GroupClient{config: gmq.config}).Query() +func (_q *GroupMembershipQuery) WithGroup(opts ...func(*GroupQuery)) *GroupMembershipQuery { + query := (&GroupClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - gmq.withGroup = query - return gmq + _q.withGroup = query + return _q } // WithUser tells the query-builder to eager-load the nodes that are connected to // the "user" edge. The optional arguments are used to configure the query builder of the edge. -func (gmq *GroupMembershipQuery) WithUser(opts ...func(*UserQuery)) *GroupMembershipQuery { - query := (&UserClient{config: gmq.config}).Query() +func (_q *GroupMembershipQuery) WithUser(opts ...func(*UserQuery)) *GroupMembershipQuery { + query := (&UserClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - gmq.withUser = query - return gmq + _q.withUser = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -346,10 +346,10 @@ func (gmq *GroupMembershipQuery) WithUser(opts ...func(*UserQuery)) *GroupMember // GroupBy(groupmembership.FieldGroupID). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (gmq *GroupMembershipQuery) GroupBy(field string, fields ...string) *GroupMembershipGroupBy { - gmq.ctx.Fields = append([]string{field}, fields...) - grbuild := &GroupMembershipGroupBy{build: gmq} - grbuild.flds = &gmq.ctx.Fields +func (_q *GroupMembershipQuery) GroupBy(field string, fields ...string) *GroupMembershipGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &GroupMembershipGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = groupmembership.Label grbuild.scan = grbuild.Scan return grbuild @@ -367,83 +367,83 @@ func (gmq *GroupMembershipQuery) GroupBy(field string, fields ...string) *GroupM // client.GroupMembership.Query(). // Select(groupmembership.FieldGroupID). // Scan(ctx, &v) -func (gmq *GroupMembershipQuery) Select(fields ...string) *GroupMembershipSelect { - gmq.ctx.Fields = append(gmq.ctx.Fields, fields...) - sbuild := &GroupMembershipSelect{GroupMembershipQuery: gmq} +func (_q *GroupMembershipQuery) Select(fields ...string) *GroupMembershipSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &GroupMembershipSelect{GroupMembershipQuery: _q} sbuild.label = groupmembership.Label - sbuild.flds, sbuild.scan = &gmq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a GroupMembershipSelect configured with the given aggregations. -func (gmq *GroupMembershipQuery) Aggregate(fns ...AggregateFunc) *GroupMembershipSelect { - return gmq.Select().Aggregate(fns...) +func (_q *GroupMembershipQuery) Aggregate(fns ...AggregateFunc) *GroupMembershipSelect { + return _q.Select().Aggregate(fns...) 
} -func (gmq *GroupMembershipQuery) prepareQuery(ctx context.Context) error { - for _, inter := range gmq.inters { +func (_q *GroupMembershipQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, gmq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range gmq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !groupmembership.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if gmq.path != nil { - prev, err := gmq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - gmq.sql = prev + _q.sql = prev } return nil } -func (gmq *GroupMembershipQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*GroupMembership, error) { +func (_q *GroupMembershipQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*GroupMembership, error) { var ( nodes = []*GroupMembership{} - _spec = gmq.querySpec() + _spec = _q.querySpec() loadedTypes = [2]bool{ - gmq.withGroup != nil, - gmq.withUser != nil, + _q.withGroup != nil, + _q.withUser != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*GroupMembership).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &GroupMembership{config: gmq.config} + node := &GroupMembership{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(gmq.modifiers) > 0 { - _spec.Modifiers = gmq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, gmq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := gmq.withGroup; query != nil { - if err := gmq.loadGroup(ctx, query, nodes, nil, + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, func(n *GroupMembership, e *Group) { n.Edges.Group = e }); err != nil { return nil, err } } - if query := gmq.withUser; query != nil { - if err := gmq.loadUser(ctx, query, nodes, nil, + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, func(n *GroupMembership, e *User) { n.Edges.User = e }); err != nil { return nil, err } @@ -451,7 +451,7 @@ func (gmq *GroupMembershipQuery) sqlAll(ctx context.Context, hooks ...queryHook) return nodes, nil } -func (gmq *GroupMembershipQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*GroupMembership, init func(*GroupMembership), assign func(*GroupMembership, *Group)) error { +func (_q *GroupMembershipQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*GroupMembership, init func(*GroupMembership), assign func(*GroupMembership, *Group)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*GroupMembership) for i := range nodes { @@ -480,7 +480,7 @@ func (gmq *GroupMembershipQuery) loadGroup(ctx context.Context, query *GroupQuer } return nil } -func (gmq *GroupMembershipQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*GroupMembership, init func(*GroupMembership), assign func(*GroupMembership, *User)) error { +func (_q *GroupMembershipQuery) loadUser(ctx context.Context, 
query *UserQuery, nodes []*GroupMembership, init func(*GroupMembership), assign func(*GroupMembership, *User)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*GroupMembership) for i := range nodes { @@ -510,27 +510,27 @@ func (gmq *GroupMembershipQuery) loadUser(ctx context.Context, query *UserQuery, return nil } -func (gmq *GroupMembershipQuery) sqlCount(ctx context.Context) (int, error) { - _spec := gmq.querySpec() - if len(gmq.modifiers) > 0 { - _spec.Modifiers = gmq.modifiers +func (_q *GroupMembershipQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = gmq.ctx.Fields - if len(gmq.ctx.Fields) > 0 { - _spec.Unique = gmq.ctx.Unique != nil && *gmq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, gmq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (gmq *GroupMembershipQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *GroupMembershipQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(groupmembership.Table, groupmembership.Columns, sqlgraph.NewFieldSpec(groupmembership.FieldID, field.TypeUUID)) - _spec.From = gmq.sql - if unique := gmq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if gmq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := gmq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, groupmembership.FieldID) for i := range fields { @@ -538,27 +538,27 @@ func (gmq *GroupMembershipQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if gmq.withGroup != nil { + if _q.withGroup != nil { _spec.Node.AddColumnOnce(groupmembership.FieldGroupID) } - if gmq.withUser != nil { + if _q.withUser != nil { _spec.Node.AddColumnOnce(groupmembership.FieldUserID) } } - if ps := gmq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := gmq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := gmq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := gmq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -568,36 +568,36 @@ func (gmq *GroupMembershipQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (gmq *GroupMembershipQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(gmq.driver.Dialect()) +func (_q *GroupMembershipQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(groupmembership.Table) - columns := gmq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = groupmembership.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if gmq.sql != nil { - selector = gmq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) 
} - if gmq.ctx.Unique != nil && *gmq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range gmq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range gmq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range gmq.order { + for _, p := range _q.order { p(selector) } - if offset := gmq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := gmq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -606,33 +606,33 @@ func (gmq *GroupMembershipQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (gmq *GroupMembershipQuery) ForUpdate(opts ...sql.LockOption) *GroupMembershipQuery { - if gmq.driver.Dialect() == dialect.Postgres { - gmq.Unique(false) +func (_q *GroupMembershipQuery) ForUpdate(opts ...sql.LockOption) *GroupMembershipQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - gmq.modifiers = append(gmq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return gmq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (gmq *GroupMembershipQuery) ForShare(opts ...sql.LockOption) *GroupMembershipQuery { - if gmq.driver.Dialect() == dialect.Postgres { - gmq.Unique(false) +func (_q *GroupMembershipQuery) ForShare(opts ...sql.LockOption) *GroupMembershipQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - gmq.modifiers = append(gmq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return gmq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (gmq *GroupMembershipQuery) Modify(modifiers ...func(s *sql.Selector)) *GroupMembershipSelect { - gmq.modifiers = append(gmq.modifiers, modifiers...) - return gmq.Select() +func (_q *GroupMembershipQuery) Modify(modifiers ...func(s *sql.Selector)) *GroupMembershipSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // GroupMembershipGroupBy is the group-by builder for GroupMembership entities. @@ -642,41 +642,41 @@ type GroupMembershipGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (gmgb *GroupMembershipGroupBy) Aggregate(fns ...AggregateFunc) *GroupMembershipGroupBy { - gmgb.fns = append(gmgb.fns, fns...) - return gmgb +func (_g *GroupMembershipGroupBy) Aggregate(fns ...AggregateFunc) *GroupMembershipGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (gmgb *GroupMembershipGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, gmgb.build.ctx, ent.OpQueryGroupBy) - if err := gmgb.build.prepareQuery(ctx); err != nil { +func (_g *GroupMembershipGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*GroupMembershipQuery, *GroupMembershipGroupBy](ctx, gmgb.build, gmgb, gmgb.build.inters, v) + return scanWithInterceptors[*GroupMembershipQuery, *GroupMembershipGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (gmgb *GroupMembershipGroupBy) sqlScan(ctx context.Context, root *GroupMembershipQuery, v any) error { +func (_g *GroupMembershipGroupBy) sqlScan(ctx context.Context, root *GroupMembershipQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(gmgb.fns)) - for _, fn := range gmgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*gmgb.flds)+len(gmgb.fns)) - for _, f := range *gmgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*gmgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := gmgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -690,27 +690,27 @@ type GroupMembershipSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (gms *GroupMembershipSelect) Aggregate(fns ...AggregateFunc) *GroupMembershipSelect { - gms.fns = append(gms.fns, fns...) - return gms +func (_s *GroupMembershipSelect) Aggregate(fns ...AggregateFunc) *GroupMembershipSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (gms *GroupMembershipSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, gms.ctx, ent.OpQuerySelect) - if err := gms.prepareQuery(ctx); err != nil { +func (_s *GroupMembershipSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*GroupMembershipQuery, *GroupMembershipSelect](ctx, gms.GroupMembershipQuery, gms, gms.inters, v) + return scanWithInterceptors[*GroupMembershipQuery, *GroupMembershipSelect](ctx, _s.GroupMembershipQuery, _s, _s.inters, v) } -func (gms *GroupMembershipSelect) sqlScan(ctx context.Context, root *GroupMembershipQuery, v any) error { +func (_s *GroupMembershipSelect) sqlScan(ctx context.Context, root *GroupMembershipQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(gms.fns)) - for _, fn := range gms.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*gms.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -718,7 +718,7 @@ func (gms *GroupMembershipSelect) sqlScan(ctx context.Context, root *GroupMember } rows := &sql.Rows{} query, args := selector.Query() - if err := gms.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -726,7 +726,7 @@ func (gms *GroupMembershipSelect) sqlScan(ctx context.Context, root *GroupMember } // Modify adds a query modifier for attaching custom logic to queries. -func (gms *GroupMembershipSelect) Modify(modifiers ...func(s *sql.Selector)) *GroupMembershipSelect { - gms.modifiers = append(gms.modifiers, modifiers...) - return gms +func (_s *GroupMembershipSelect) Modify(modifiers ...func(s *sql.Selector)) *GroupMembershipSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/groupmembership_update.go b/app/controlplane/pkg/data/ent/groupmembership_update.go index 368939ddc..7fa09b7b8 100644 --- a/app/controlplane/pkg/data/ent/groupmembership_update.go +++ b/app/controlplane/pkg/data/ent/groupmembership_update.go @@ -27,122 +27,122 @@ type GroupMembershipUpdate struct { } // Where appends a list predicates to the GroupMembershipUpdate builder. -func (gmu *GroupMembershipUpdate) Where(ps ...predicate.GroupMembership) *GroupMembershipUpdate { - gmu.mutation.Where(ps...) - return gmu +func (_u *GroupMembershipUpdate) Where(ps ...predicate.GroupMembership) *GroupMembershipUpdate { + _u.mutation.Where(ps...) + return _u } // SetGroupID sets the "group_id" field. -func (gmu *GroupMembershipUpdate) SetGroupID(u uuid.UUID) *GroupMembershipUpdate { - gmu.mutation.SetGroupID(u) - return gmu +func (_u *GroupMembershipUpdate) SetGroupID(v uuid.UUID) *GroupMembershipUpdate { + _u.mutation.SetGroupID(v) + return _u } // SetNillableGroupID sets the "group_id" field if the given value is not nil. -func (gmu *GroupMembershipUpdate) SetNillableGroupID(u *uuid.UUID) *GroupMembershipUpdate { - if u != nil { - gmu.SetGroupID(*u) +func (_u *GroupMembershipUpdate) SetNillableGroupID(v *uuid.UUID) *GroupMembershipUpdate { + if v != nil { + _u.SetGroupID(*v) } - return gmu + return _u } // SetUserID sets the "user_id" field. -func (gmu *GroupMembershipUpdate) SetUserID(u uuid.UUID) *GroupMembershipUpdate { - gmu.mutation.SetUserID(u) - return gmu +func (_u *GroupMembershipUpdate) SetUserID(v uuid.UUID) *GroupMembershipUpdate { + _u.mutation.SetUserID(v) + return _u } // SetNillableUserID sets the "user_id" field if the given value is not nil. -func (gmu *GroupMembershipUpdate) SetNillableUserID(u *uuid.UUID) *GroupMembershipUpdate { - if u != nil { - gmu.SetUserID(*u) +func (_u *GroupMembershipUpdate) SetNillableUserID(v *uuid.UUID) *GroupMembershipUpdate { + if v != nil { + _u.SetUserID(*v) } - return gmu + return _u } // SetMaintainer sets the "maintainer" field. -func (gmu *GroupMembershipUpdate) SetMaintainer(b bool) *GroupMembershipUpdate { - gmu.mutation.SetMaintainer(b) - return gmu +func (_u *GroupMembershipUpdate) SetMaintainer(v bool) *GroupMembershipUpdate { + _u.mutation.SetMaintainer(v) + return _u } // SetNillableMaintainer sets the "maintainer" field if the given value is not nil. 
-func (gmu *GroupMembershipUpdate) SetNillableMaintainer(b *bool) *GroupMembershipUpdate { - if b != nil { - gmu.SetMaintainer(*b) +func (_u *GroupMembershipUpdate) SetNillableMaintainer(v *bool) *GroupMembershipUpdate { + if v != nil { + _u.SetMaintainer(*v) } - return gmu + return _u } // SetUpdatedAt sets the "updated_at" field. -func (gmu *GroupMembershipUpdate) SetUpdatedAt(t time.Time) *GroupMembershipUpdate { - gmu.mutation.SetUpdatedAt(t) - return gmu +func (_u *GroupMembershipUpdate) SetUpdatedAt(v time.Time) *GroupMembershipUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (gmu *GroupMembershipUpdate) SetNillableUpdatedAt(t *time.Time) *GroupMembershipUpdate { - if t != nil { - gmu.SetUpdatedAt(*t) +func (_u *GroupMembershipUpdate) SetNillableUpdatedAt(v *time.Time) *GroupMembershipUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return gmu + return _u } // SetDeletedAt sets the "deleted_at" field. -func (gmu *GroupMembershipUpdate) SetDeletedAt(t time.Time) *GroupMembershipUpdate { - gmu.mutation.SetDeletedAt(t) - return gmu +func (_u *GroupMembershipUpdate) SetDeletedAt(v time.Time) *GroupMembershipUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (gmu *GroupMembershipUpdate) SetNillableDeletedAt(t *time.Time) *GroupMembershipUpdate { - if t != nil { - gmu.SetDeletedAt(*t) +func (_u *GroupMembershipUpdate) SetNillableDeletedAt(v *time.Time) *GroupMembershipUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return gmu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (gmu *GroupMembershipUpdate) ClearDeletedAt() *GroupMembershipUpdate { - gmu.mutation.ClearDeletedAt() - return gmu +func (_u *GroupMembershipUpdate) ClearDeletedAt() *GroupMembershipUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetGroup sets the "group" edge to the Group entity. -func (gmu *GroupMembershipUpdate) SetGroup(g *Group) *GroupMembershipUpdate { - return gmu.SetGroupID(g.ID) +func (_u *GroupMembershipUpdate) SetGroup(v *Group) *GroupMembershipUpdate { + return _u.SetGroupID(v.ID) } // SetUser sets the "user" edge to the User entity. -func (gmu *GroupMembershipUpdate) SetUser(u *User) *GroupMembershipUpdate { - return gmu.SetUserID(u.ID) +func (_u *GroupMembershipUpdate) SetUser(v *User) *GroupMembershipUpdate { + return _u.SetUserID(v.ID) } // Mutation returns the GroupMembershipMutation object of the builder. -func (gmu *GroupMembershipUpdate) Mutation() *GroupMembershipMutation { - return gmu.mutation +func (_u *GroupMembershipUpdate) Mutation() *GroupMembershipMutation { + return _u.mutation } // ClearGroup clears the "group" edge to the Group entity. -func (gmu *GroupMembershipUpdate) ClearGroup() *GroupMembershipUpdate { - gmu.mutation.ClearGroup() - return gmu +func (_u *GroupMembershipUpdate) ClearGroup() *GroupMembershipUpdate { + _u.mutation.ClearGroup() + return _u } // ClearUser clears the "user" edge to the User entity. -func (gmu *GroupMembershipUpdate) ClearUser() *GroupMembershipUpdate { - gmu.mutation.ClearUser() - return gmu +func (_u *GroupMembershipUpdate) ClearUser() *GroupMembershipUpdate { + _u.mutation.ClearUser() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. 
-func (gmu *GroupMembershipUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, gmu.sqlSave, gmu.mutation, gmu.hooks) +func (_u *GroupMembershipUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (gmu *GroupMembershipUpdate) SaveX(ctx context.Context) int { - affected, err := gmu.Save(ctx) +func (_u *GroupMembershipUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -150,60 +150,60 @@ func (gmu *GroupMembershipUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (gmu *GroupMembershipUpdate) Exec(ctx context.Context) error { - _, err := gmu.Save(ctx) +func (_u *GroupMembershipUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (gmu *GroupMembershipUpdate) ExecX(ctx context.Context) { - if err := gmu.Exec(ctx); err != nil { +func (_u *GroupMembershipUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (gmu *GroupMembershipUpdate) check() error { - if gmu.mutation.GroupCleared() && len(gmu.mutation.GroupIDs()) > 0 { +func (_u *GroupMembershipUpdate) check() error { + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "GroupMembership.group"`) } - if gmu.mutation.UserCleared() && len(gmu.mutation.UserIDs()) > 0 { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "GroupMembership.user"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (gmu *GroupMembershipUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *GroupMembershipUpdate { - gmu.modifiers = append(gmu.modifiers, modifiers...) - return gmu +func (_u *GroupMembershipUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *GroupMembershipUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (gmu *GroupMembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := gmu.check(); err != nil { - return n, err +func (_u *GroupMembershipUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(groupmembership.Table, groupmembership.Columns, sqlgraph.NewFieldSpec(groupmembership.FieldID, field.TypeUUID)) - if ps := gmu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := gmu.mutation.Maintainer(); ok { + if value, ok := _u.mutation.Maintainer(); ok { _spec.SetField(groupmembership.FieldMaintainer, field.TypeBool, value) } - if value, ok := gmu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(groupmembership.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := gmu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(groupmembership.FieldDeletedAt, field.TypeTime, value) } - if gmu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(groupmembership.FieldDeletedAt, field.TypeTime) } - if gmu.mutation.GroupCleared() { + if _u.mutation.GroupCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -216,7 +216,7 @@ func (gmu *GroupMembershipUpdate) sqlSave(ctx context.Context) (n int, err error } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := gmu.mutation.GroupIDs(); len(nodes) > 0 { + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -232,7 +232,7 @@ func (gmu *GroupMembershipUpdate) sqlSave(ctx context.Context) (n int, err error } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if gmu.mutation.UserCleared() { + if _u.mutation.UserCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -245,7 +245,7 @@ func (gmu *GroupMembershipUpdate) sqlSave(ctx context.Context) (n int, err error } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := gmu.mutation.UserIDs(); len(nodes) > 0 { + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -261,8 +261,8 @@ func (gmu *GroupMembershipUpdate) sqlSave(ctx context.Context) (n int, err error } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(gmu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, gmu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{groupmembership.Label} } else if sqlgraph.IsConstraintError(err) { @@ -270,8 +270,8 @@ func (gmu *GroupMembershipUpdate) sqlSave(ctx context.Context) (n int, err error } return 0, err } - gmu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // GroupMembershipUpdateOne is the builder for updating a single GroupMembership entity. @@ -284,129 +284,129 @@ type GroupMembershipUpdateOne struct { } // SetGroupID sets the "group_id" field. 
-func (gmuo *GroupMembershipUpdateOne) SetGroupID(u uuid.UUID) *GroupMembershipUpdateOne { - gmuo.mutation.SetGroupID(u) - return gmuo +func (_u *GroupMembershipUpdateOne) SetGroupID(v uuid.UUID) *GroupMembershipUpdateOne { + _u.mutation.SetGroupID(v) + return _u } // SetNillableGroupID sets the "group_id" field if the given value is not nil. -func (gmuo *GroupMembershipUpdateOne) SetNillableGroupID(u *uuid.UUID) *GroupMembershipUpdateOne { - if u != nil { - gmuo.SetGroupID(*u) +func (_u *GroupMembershipUpdateOne) SetNillableGroupID(v *uuid.UUID) *GroupMembershipUpdateOne { + if v != nil { + _u.SetGroupID(*v) } - return gmuo + return _u } // SetUserID sets the "user_id" field. -func (gmuo *GroupMembershipUpdateOne) SetUserID(u uuid.UUID) *GroupMembershipUpdateOne { - gmuo.mutation.SetUserID(u) - return gmuo +func (_u *GroupMembershipUpdateOne) SetUserID(v uuid.UUID) *GroupMembershipUpdateOne { + _u.mutation.SetUserID(v) + return _u } // SetNillableUserID sets the "user_id" field if the given value is not nil. -func (gmuo *GroupMembershipUpdateOne) SetNillableUserID(u *uuid.UUID) *GroupMembershipUpdateOne { - if u != nil { - gmuo.SetUserID(*u) +func (_u *GroupMembershipUpdateOne) SetNillableUserID(v *uuid.UUID) *GroupMembershipUpdateOne { + if v != nil { + _u.SetUserID(*v) } - return gmuo + return _u } // SetMaintainer sets the "maintainer" field. -func (gmuo *GroupMembershipUpdateOne) SetMaintainer(b bool) *GroupMembershipUpdateOne { - gmuo.mutation.SetMaintainer(b) - return gmuo +func (_u *GroupMembershipUpdateOne) SetMaintainer(v bool) *GroupMembershipUpdateOne { + _u.mutation.SetMaintainer(v) + return _u } // SetNillableMaintainer sets the "maintainer" field if the given value is not nil. -func (gmuo *GroupMembershipUpdateOne) SetNillableMaintainer(b *bool) *GroupMembershipUpdateOne { - if b != nil { - gmuo.SetMaintainer(*b) +func (_u *GroupMembershipUpdateOne) SetNillableMaintainer(v *bool) *GroupMembershipUpdateOne { + if v != nil { + _u.SetMaintainer(*v) } - return gmuo + return _u } // SetUpdatedAt sets the "updated_at" field. -func (gmuo *GroupMembershipUpdateOne) SetUpdatedAt(t time.Time) *GroupMembershipUpdateOne { - gmuo.mutation.SetUpdatedAt(t) - return gmuo +func (_u *GroupMembershipUpdateOne) SetUpdatedAt(v time.Time) *GroupMembershipUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (gmuo *GroupMembershipUpdateOne) SetNillableUpdatedAt(t *time.Time) *GroupMembershipUpdateOne { - if t != nil { - gmuo.SetUpdatedAt(*t) +func (_u *GroupMembershipUpdateOne) SetNillableUpdatedAt(v *time.Time) *GroupMembershipUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return gmuo + return _u } // SetDeletedAt sets the "deleted_at" field. -func (gmuo *GroupMembershipUpdateOne) SetDeletedAt(t time.Time) *GroupMembershipUpdateOne { - gmuo.mutation.SetDeletedAt(t) - return gmuo +func (_u *GroupMembershipUpdateOne) SetDeletedAt(v time.Time) *GroupMembershipUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (gmuo *GroupMembershipUpdateOne) SetNillableDeletedAt(t *time.Time) *GroupMembershipUpdateOne { - if t != nil { - gmuo.SetDeletedAt(*t) +func (_u *GroupMembershipUpdateOne) SetNillableDeletedAt(v *time.Time) *GroupMembershipUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return gmuo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. 
-func (gmuo *GroupMembershipUpdateOne) ClearDeletedAt() *GroupMembershipUpdateOne { - gmuo.mutation.ClearDeletedAt() - return gmuo +func (_u *GroupMembershipUpdateOne) ClearDeletedAt() *GroupMembershipUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetGroup sets the "group" edge to the Group entity. -func (gmuo *GroupMembershipUpdateOne) SetGroup(g *Group) *GroupMembershipUpdateOne { - return gmuo.SetGroupID(g.ID) +func (_u *GroupMembershipUpdateOne) SetGroup(v *Group) *GroupMembershipUpdateOne { + return _u.SetGroupID(v.ID) } // SetUser sets the "user" edge to the User entity. -func (gmuo *GroupMembershipUpdateOne) SetUser(u *User) *GroupMembershipUpdateOne { - return gmuo.SetUserID(u.ID) +func (_u *GroupMembershipUpdateOne) SetUser(v *User) *GroupMembershipUpdateOne { + return _u.SetUserID(v.ID) } // Mutation returns the GroupMembershipMutation object of the builder. -func (gmuo *GroupMembershipUpdateOne) Mutation() *GroupMembershipMutation { - return gmuo.mutation +func (_u *GroupMembershipUpdateOne) Mutation() *GroupMembershipMutation { + return _u.mutation } // ClearGroup clears the "group" edge to the Group entity. -func (gmuo *GroupMembershipUpdateOne) ClearGroup() *GroupMembershipUpdateOne { - gmuo.mutation.ClearGroup() - return gmuo +func (_u *GroupMembershipUpdateOne) ClearGroup() *GroupMembershipUpdateOne { + _u.mutation.ClearGroup() + return _u } // ClearUser clears the "user" edge to the User entity. -func (gmuo *GroupMembershipUpdateOne) ClearUser() *GroupMembershipUpdateOne { - gmuo.mutation.ClearUser() - return gmuo +func (_u *GroupMembershipUpdateOne) ClearUser() *GroupMembershipUpdateOne { + _u.mutation.ClearUser() + return _u } // Where appends a list predicates to the GroupMembershipUpdate builder. -func (gmuo *GroupMembershipUpdateOne) Where(ps ...predicate.GroupMembership) *GroupMembershipUpdateOne { - gmuo.mutation.Where(ps...) - return gmuo +func (_u *GroupMembershipUpdateOne) Where(ps ...predicate.GroupMembership) *GroupMembershipUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (gmuo *GroupMembershipUpdateOne) Select(field string, fields ...string) *GroupMembershipUpdateOne { - gmuo.fields = append([]string{field}, fields...) - return gmuo +func (_u *GroupMembershipUpdateOne) Select(field string, fields ...string) *GroupMembershipUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated GroupMembership entity. -func (gmuo *GroupMembershipUpdateOne) Save(ctx context.Context) (*GroupMembership, error) { - return withHooks(ctx, gmuo.sqlSave, gmuo.mutation, gmuo.hooks) +func (_u *GroupMembershipUpdateOne) Save(ctx context.Context) (*GroupMembership, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (gmuo *GroupMembershipUpdateOne) SaveX(ctx context.Context) *GroupMembership { - node, err := gmuo.Save(ctx) +func (_u *GroupMembershipUpdateOne) SaveX(ctx context.Context) *GroupMembership { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -414,46 +414,46 @@ func (gmuo *GroupMembershipUpdateOne) SaveX(ctx context.Context) *GroupMembershi } // Exec executes the query on the entity. 
-func (gmuo *GroupMembershipUpdateOne) Exec(ctx context.Context) error { - _, err := gmuo.Save(ctx) +func (_u *GroupMembershipUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (gmuo *GroupMembershipUpdateOne) ExecX(ctx context.Context) { - if err := gmuo.Exec(ctx); err != nil { +func (_u *GroupMembershipUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (gmuo *GroupMembershipUpdateOne) check() error { - if gmuo.mutation.GroupCleared() && len(gmuo.mutation.GroupIDs()) > 0 { +func (_u *GroupMembershipUpdateOne) check() error { + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "GroupMembership.group"`) } - if gmuo.mutation.UserCleared() && len(gmuo.mutation.UserIDs()) > 0 { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "GroupMembership.user"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (gmuo *GroupMembershipUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *GroupMembershipUpdateOne { - gmuo.modifiers = append(gmuo.modifiers, modifiers...) - return gmuo +func (_u *GroupMembershipUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *GroupMembershipUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (gmuo *GroupMembershipUpdateOne) sqlSave(ctx context.Context) (_node *GroupMembership, err error) { - if err := gmuo.check(); err != nil { +func (_u *GroupMembershipUpdateOne) sqlSave(ctx context.Context) (_node *GroupMembership, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(groupmembership.Table, groupmembership.Columns, sqlgraph.NewFieldSpec(groupmembership.FieldID, field.TypeUUID)) - id, ok := gmuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "GroupMembership.id" for update`)} } _spec.Node.ID.Value = id - if fields := gmuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, groupmembership.FieldID) for _, f := range fields { @@ -465,26 +465,26 @@ func (gmuo *GroupMembershipUpdateOne) sqlSave(ctx context.Context) (_node *Group } } } - if ps := gmuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := gmuo.mutation.Maintainer(); ok { + if value, ok := _u.mutation.Maintainer(); ok { _spec.SetField(groupmembership.FieldMaintainer, field.TypeBool, value) } - if value, ok := gmuo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(groupmembership.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := gmuo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(groupmembership.FieldDeletedAt, field.TypeTime, value) } - if gmuo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(groupmembership.FieldDeletedAt, field.TypeTime) } - if gmuo.mutation.GroupCleared() { + if _u.mutation.GroupCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, 
Inverse: false, @@ -497,7 +497,7 @@ func (gmuo *GroupMembershipUpdateOne) sqlSave(ctx context.Context) (_node *Group } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := gmuo.mutation.GroupIDs(); len(nodes) > 0 { + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -513,7 +513,7 @@ func (gmuo *GroupMembershipUpdateOne) sqlSave(ctx context.Context) (_node *Group } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if gmuo.mutation.UserCleared() { + if _u.mutation.UserCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -526,7 +526,7 @@ func (gmuo *GroupMembershipUpdateOne) sqlSave(ctx context.Context) (_node *Group } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := gmuo.mutation.UserIDs(); len(nodes) > 0 { + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -542,11 +542,11 @@ func (gmuo *GroupMembershipUpdateOne) sqlSave(ctx context.Context) (_node *Group } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(gmuo.modifiers...) - _node = &GroupMembership{config: gmuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &GroupMembership{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, gmuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{groupmembership.Label} } else if sqlgraph.IsConstraintError(err) { @@ -554,6 +554,6 @@ func (gmuo *GroupMembershipUpdateOne) sqlSave(ctx context.Context) (_node *Group } return nil, err } - gmuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/integration.go b/app/controlplane/pkg/data/ent/integration.go index 3cda52b58..ff08ed287 100644 --- a/app/controlplane/pkg/data/ent/integration.go +++ b/app/controlplane/pkg/data/ent/integration.go @@ -95,69 +95,69 @@ func (*Integration) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Integration fields. 
-func (i *Integration) assignValues(columns []string, values []any) error { +func (_m *Integration) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } - for j := range columns { - switch columns[j] { + for i := range columns { + switch columns[i] { case integration.FieldID: - if value, ok := values[j].(*uuid.UUID); !ok { - return fmt.Errorf("unexpected type %T for field id", values[j]) + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - i.ID = *value + _m.ID = *value } case integration.FieldName: - if value, ok := values[j].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field name", values[j]) + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - i.Name = value.String + _m.Name = value.String } case integration.FieldKind: - if value, ok := values[j].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field kind", values[j]) + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field kind", values[i]) } else if value.Valid { - i.Kind = value.String + _m.Kind = value.String } case integration.FieldDescription: - if value, ok := values[j].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field description", values[j]) + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) } else if value.Valid { - i.Description = value.String + _m.Description = value.String } case integration.FieldSecretName: - if value, ok := values[j].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field secret_name", values[j]) + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field secret_name", values[i]) } else if value.Valid { - i.SecretName = value.String + _m.SecretName = value.String } case integration.FieldCreatedAt: - if value, ok := values[j].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field created_at", values[j]) + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - i.CreatedAt = value.Time + _m.CreatedAt = value.Time } case integration.FieldConfiguration: - if value, ok := values[j].(*[]byte); !ok { - return fmt.Errorf("unexpected type %T for field configuration", values[j]) + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field configuration", values[i]) } else if value != nil { - i.Configuration = *value + _m.Configuration = *value } case integration.FieldDeletedAt: - if value, ok := values[j].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field deleted_at", values[j]) + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - i.DeletedAt = value.Time + _m.DeletedAt = value.Time } case integration.ForeignKeys[0]: - if value, ok := values[j].(*sql.NullScanner); !ok { - return fmt.Errorf("unexpected type %T for field organization_integrations", values[j]) + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field organization_integrations", values[i]) } else if 
value.Valid { - i.organization_integrations = new(uuid.UUID) - *i.organization_integrations = *value.S.(*uuid.UUID) + _m.organization_integrations = new(uuid.UUID) + *_m.organization_integrations = *value.S.(*uuid.UUID) } default: - i.selectValues.Set(columns[j], values[j]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -165,63 +165,63 @@ func (i *Integration) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the Integration. // This includes values selected through modifiers, order, etc. -func (i *Integration) Value(name string) (ent.Value, error) { - return i.selectValues.Get(name) +func (_m *Integration) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryAttachments queries the "attachments" edge of the Integration entity. -func (i *Integration) QueryAttachments() *IntegrationAttachmentQuery { - return NewIntegrationClient(i.config).QueryAttachments(i) +func (_m *Integration) QueryAttachments() *IntegrationAttachmentQuery { + return NewIntegrationClient(_m.config).QueryAttachments(_m) } // QueryOrganization queries the "organization" edge of the Integration entity. -func (i *Integration) QueryOrganization() *OrganizationQuery { - return NewIntegrationClient(i.config).QueryOrganization(i) +func (_m *Integration) QueryOrganization() *OrganizationQuery { + return NewIntegrationClient(_m.config).QueryOrganization(_m) } // Update returns a builder for updating this Integration. // Note that you need to call Integration.Unwrap() before calling this method if this Integration // was returned from a transaction, and the transaction was committed or rolled back. -func (i *Integration) Update() *IntegrationUpdateOne { - return NewIntegrationClient(i.config).UpdateOne(i) +func (_m *Integration) Update() *IntegrationUpdateOne { + return NewIntegrationClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the Integration entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (i *Integration) Unwrap() *Integration { - _tx, ok := i.config.driver.(*txDriver) +func (_m *Integration) Unwrap() *Integration { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: Integration is not a transactional entity") } - i.config.driver = _tx.drv - return i + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. 
-func (i *Integration) String() string { +func (_m *Integration) String() string { var builder strings.Builder builder.WriteString("Integration(") - builder.WriteString(fmt.Sprintf("id=%v, ", i.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("name=") - builder.WriteString(i.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("kind=") - builder.WriteString(i.Kind) + builder.WriteString(_m.Kind) builder.WriteString(", ") builder.WriteString("description=") - builder.WriteString(i.Description) + builder.WriteString(_m.Description) builder.WriteString(", ") builder.WriteString("secret_name=") - builder.WriteString(i.SecretName) + builder.WriteString(_m.SecretName) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(i.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("configuration=") - builder.WriteString(fmt.Sprintf("%v", i.Configuration)) + builder.WriteString(fmt.Sprintf("%v", _m.Configuration)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(i.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/integration_create.go b/app/controlplane/pkg/data/ent/integration_create.go index 8e421627b..cab832e07 100644 --- a/app/controlplane/pkg/data/ent/integration_create.go +++ b/app/controlplane/pkg/data/ent/integration_create.go @@ -27,125 +27,125 @@ type IntegrationCreate struct { } // SetName sets the "name" field. -func (ic *IntegrationCreate) SetName(s string) *IntegrationCreate { - ic.mutation.SetName(s) - return ic +func (_c *IntegrationCreate) SetName(v string) *IntegrationCreate { + _c.mutation.SetName(v) + return _c } // SetKind sets the "kind" field. -func (ic *IntegrationCreate) SetKind(s string) *IntegrationCreate { - ic.mutation.SetKind(s) - return ic +func (_c *IntegrationCreate) SetKind(v string) *IntegrationCreate { + _c.mutation.SetKind(v) + return _c } // SetDescription sets the "description" field. -func (ic *IntegrationCreate) SetDescription(s string) *IntegrationCreate { - ic.mutation.SetDescription(s) - return ic +func (_c *IntegrationCreate) SetDescription(v string) *IntegrationCreate { + _c.mutation.SetDescription(v) + return _c } // SetNillableDescription sets the "description" field if the given value is not nil. -func (ic *IntegrationCreate) SetNillableDescription(s *string) *IntegrationCreate { - if s != nil { - ic.SetDescription(*s) +func (_c *IntegrationCreate) SetNillableDescription(v *string) *IntegrationCreate { + if v != nil { + _c.SetDescription(*v) } - return ic + return _c } // SetSecretName sets the "secret_name" field. -func (ic *IntegrationCreate) SetSecretName(s string) *IntegrationCreate { - ic.mutation.SetSecretName(s) - return ic +func (_c *IntegrationCreate) SetSecretName(v string) *IntegrationCreate { + _c.mutation.SetSecretName(v) + return _c } // SetCreatedAt sets the "created_at" field. -func (ic *IntegrationCreate) SetCreatedAt(t time.Time) *IntegrationCreate { - ic.mutation.SetCreatedAt(t) - return ic +func (_c *IntegrationCreate) SetCreatedAt(v time.Time) *IntegrationCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
-func (ic *IntegrationCreate) SetNillableCreatedAt(t *time.Time) *IntegrationCreate { - if t != nil { - ic.SetCreatedAt(*t) +func (_c *IntegrationCreate) SetNillableCreatedAt(v *time.Time) *IntegrationCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return ic + return _c } // SetConfiguration sets the "configuration" field. -func (ic *IntegrationCreate) SetConfiguration(b []byte) *IntegrationCreate { - ic.mutation.SetConfiguration(b) - return ic +func (_c *IntegrationCreate) SetConfiguration(v []byte) *IntegrationCreate { + _c.mutation.SetConfiguration(v) + return _c } // SetDeletedAt sets the "deleted_at" field. -func (ic *IntegrationCreate) SetDeletedAt(t time.Time) *IntegrationCreate { - ic.mutation.SetDeletedAt(t) - return ic +func (_c *IntegrationCreate) SetDeletedAt(v time.Time) *IntegrationCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (ic *IntegrationCreate) SetNillableDeletedAt(t *time.Time) *IntegrationCreate { - if t != nil { - ic.SetDeletedAt(*t) +func (_c *IntegrationCreate) SetNillableDeletedAt(v *time.Time) *IntegrationCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return ic + return _c } // SetID sets the "id" field. -func (ic *IntegrationCreate) SetID(u uuid.UUID) *IntegrationCreate { - ic.mutation.SetID(u) - return ic +func (_c *IntegrationCreate) SetID(v uuid.UUID) *IntegrationCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (ic *IntegrationCreate) SetNillableID(u *uuid.UUID) *IntegrationCreate { - if u != nil { - ic.SetID(*u) +func (_c *IntegrationCreate) SetNillableID(v *uuid.UUID) *IntegrationCreate { + if v != nil { + _c.SetID(*v) } - return ic + return _c } // AddAttachmentIDs adds the "attachments" edge to the IntegrationAttachment entity by IDs. -func (ic *IntegrationCreate) AddAttachmentIDs(ids ...uuid.UUID) *IntegrationCreate { - ic.mutation.AddAttachmentIDs(ids...) - return ic +func (_c *IntegrationCreate) AddAttachmentIDs(ids ...uuid.UUID) *IntegrationCreate { + _c.mutation.AddAttachmentIDs(ids...) + return _c } // AddAttachments adds the "attachments" edges to the IntegrationAttachment entity. -func (ic *IntegrationCreate) AddAttachments(i ...*IntegrationAttachment) *IntegrationCreate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_c *IntegrationCreate) AddAttachments(v ...*IntegrationAttachment) *IntegrationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ic.AddAttachmentIDs(ids...) + return _c.AddAttachmentIDs(ids...) } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (ic *IntegrationCreate) SetOrganizationID(id uuid.UUID) *IntegrationCreate { - ic.mutation.SetOrganizationID(id) - return ic +func (_c *IntegrationCreate) SetOrganizationID(id uuid.UUID) *IntegrationCreate { + _c.mutation.SetOrganizationID(id) + return _c } // SetOrganization sets the "organization" edge to the Organization entity. -func (ic *IntegrationCreate) SetOrganization(o *Organization) *IntegrationCreate { - return ic.SetOrganizationID(o.ID) +func (_c *IntegrationCreate) SetOrganization(v *Organization) *IntegrationCreate { + return _c.SetOrganizationID(v.ID) } // Mutation returns the IntegrationMutation object of the builder. 
-func (ic *IntegrationCreate) Mutation() *IntegrationMutation { - return ic.mutation +func (_c *IntegrationCreate) Mutation() *IntegrationMutation { + return _c.mutation } // Save creates the Integration in the database. -func (ic *IntegrationCreate) Save(ctx context.Context) (*Integration, error) { - ic.defaults() - return withHooks(ctx, ic.sqlSave, ic.mutation, ic.hooks) +func (_c *IntegrationCreate) Save(ctx context.Context) (*Integration, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (ic *IntegrationCreate) SaveX(ctx context.Context) *Integration { - v, err := ic.Save(ctx) +func (_c *IntegrationCreate) SaveX(ctx context.Context) *Integration { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -153,56 +153,56 @@ func (ic *IntegrationCreate) SaveX(ctx context.Context) *Integration { } // Exec executes the query. -func (ic *IntegrationCreate) Exec(ctx context.Context) error { - _, err := ic.Save(ctx) +func (_c *IntegrationCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (ic *IntegrationCreate) ExecX(ctx context.Context) { - if err := ic.Exec(ctx); err != nil { +func (_c *IntegrationCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (ic *IntegrationCreate) defaults() { - if _, ok := ic.mutation.CreatedAt(); !ok { +func (_c *IntegrationCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := integration.DefaultCreatedAt() - ic.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := ic.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := integration.DefaultID() - ic.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (ic *IntegrationCreate) check() error { - if _, ok := ic.mutation.Name(); !ok { +func (_c *IntegrationCreate) check() error { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Integration.name"`)} } - if _, ok := ic.mutation.Kind(); !ok { + if _, ok := _c.mutation.Kind(); !ok { return &ValidationError{Name: "kind", err: errors.New(`ent: missing required field "Integration.kind"`)} } - if _, ok := ic.mutation.SecretName(); !ok { + if _, ok := _c.mutation.SecretName(); !ok { return &ValidationError{Name: "secret_name", err: errors.New(`ent: missing required field "Integration.secret_name"`)} } - if _, ok := ic.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Integration.created_at"`)} } - if len(ic.mutation.OrganizationIDs()) == 0 { + if len(_c.mutation.OrganizationIDs()) == 0 { return &ValidationError{Name: "organization", err: errors.New(`ent: missing required edge "Integration.organization"`)} } return nil } -func (ic *IntegrationCreate) sqlSave(ctx context.Context) (*Integration, error) { - if err := ic.check(); err != nil { +func (_c *IntegrationCreate) sqlSave(ctx context.Context) (*Integration, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := ic.createSpec() - if err := sqlgraph.CreateNode(ctx, ic.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -215,50 +215,50 @@ func (ic *IntegrationCreate) sqlSave(ctx context.Context) (*Integration, error) return nil, err } } - ic.mutation.id = &_node.ID - ic.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (ic *IntegrationCreate) createSpec() (*Integration, *sqlgraph.CreateSpec) { +func (_c *IntegrationCreate) createSpec() (*Integration, *sqlgraph.CreateSpec) { var ( - _node = &Integration{config: ic.config} + _node = &Integration{config: _c.config} _spec = sqlgraph.NewCreateSpec(integration.Table, sqlgraph.NewFieldSpec(integration.FieldID, field.TypeUUID)) ) - _spec.OnConflict = ic.conflict - if id, ok := ic.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := ic.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { _spec.SetField(integration.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := ic.mutation.Kind(); ok { + if value, ok := _c.mutation.Kind(); ok { _spec.SetField(integration.FieldKind, field.TypeString, value) _node.Kind = value } - if value, ok := ic.mutation.Description(); ok { + if value, ok := _c.mutation.Description(); ok { _spec.SetField(integration.FieldDescription, field.TypeString, value) _node.Description = value } - if value, ok := ic.mutation.SecretName(); ok { + if value, ok := _c.mutation.SecretName(); ok { _spec.SetField(integration.FieldSecretName, field.TypeString, value) _node.SecretName = value } - if value, ok := ic.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(integration.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := ic.mutation.Configuration(); ok { + if value, ok := _c.mutation.Configuration(); ok { _spec.SetField(integration.FieldConfiguration, 
field.TypeBytes, value) _node.Configuration = value } - if value, ok := ic.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(integration.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if nodes := ic.mutation.AttachmentsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.AttachmentsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -274,7 +274,7 @@ func (ic *IntegrationCreate) createSpec() (*Integration, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := ic.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -310,10 +310,10 @@ func (ic *IntegrationCreate) createSpec() (*Integration, *sqlgraph.CreateSpec) { // SetName(v+v). // }). // Exec(ctx) -func (ic *IntegrationCreate) OnConflict(opts ...sql.ConflictOption) *IntegrationUpsertOne { - ic.conflict = opts +func (_c *IntegrationCreate) OnConflict(opts ...sql.ConflictOption) *IntegrationUpsertOne { + _c.conflict = opts return &IntegrationUpsertOne{ - create: ic, + create: _c, } } @@ -323,10 +323,10 @@ func (ic *IntegrationCreate) OnConflict(opts ...sql.ConflictOption) *Integration // client.Integration.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (ic *IntegrationCreate) OnConflictColumns(columns ...string) *IntegrationUpsertOne { - ic.conflict = append(ic.conflict, sql.ConflictColumns(columns...)) +func (_c *IntegrationCreate) OnConflictColumns(columns ...string) *IntegrationUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &IntegrationUpsertOne{ - create: ic, + create: _c, } } @@ -567,16 +567,16 @@ type IntegrationCreateBulk struct { } // Save creates the Integration entities in the database. -func (icb *IntegrationCreateBulk) Save(ctx context.Context) ([]*Integration, error) { - if icb.err != nil { - return nil, icb.err - } - specs := make([]*sqlgraph.CreateSpec, len(icb.builders)) - nodes := make([]*Integration, len(icb.builders)) - mutators := make([]Mutator, len(icb.builders)) - for i := range icb.builders { +func (_c *IntegrationCreateBulk) Save(ctx context.Context) ([]*Integration, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Integration, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := icb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*IntegrationMutation) @@ -590,12 +590,12 @@ func (icb *IntegrationCreateBulk) Save(ctx context.Context) ([]*Integration, err var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, icb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = icb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, icb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -615,7 +615,7 @@ func (icb *IntegrationCreateBulk) Save(ctx context.Context) ([]*Integration, err }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, icb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -623,8 +623,8 @@ func (icb *IntegrationCreateBulk) Save(ctx context.Context) ([]*Integration, err } // SaveX is like Save, but panics if an error occurs. -func (icb *IntegrationCreateBulk) SaveX(ctx context.Context) []*Integration { - v, err := icb.Save(ctx) +func (_c *IntegrationCreateBulk) SaveX(ctx context.Context) []*Integration { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -632,14 +632,14 @@ func (icb *IntegrationCreateBulk) SaveX(ctx context.Context) []*Integration { } // Exec executes the query. -func (icb *IntegrationCreateBulk) Exec(ctx context.Context) error { - _, err := icb.Save(ctx) +func (_c *IntegrationCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (icb *IntegrationCreateBulk) ExecX(ctx context.Context) { - if err := icb.Exec(ctx); err != nil { +func (_c *IntegrationCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -659,10 +659,10 @@ func (icb *IntegrationCreateBulk) ExecX(ctx context.Context) { // SetName(v+v). // }). // Exec(ctx) -func (icb *IntegrationCreateBulk) OnConflict(opts ...sql.ConflictOption) *IntegrationUpsertBulk { - icb.conflict = opts +func (_c *IntegrationCreateBulk) OnConflict(opts ...sql.ConflictOption) *IntegrationUpsertBulk { + _c.conflict = opts return &IntegrationUpsertBulk{ - create: icb, + create: _c, } } @@ -672,10 +672,10 @@ func (icb *IntegrationCreateBulk) OnConflict(opts ...sql.ConflictOption) *Integr // client.Integration.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (icb *IntegrationCreateBulk) OnConflictColumns(columns ...string) *IntegrationUpsertBulk { - icb.conflict = append(icb.conflict, sql.ConflictColumns(columns...)) +func (_c *IntegrationCreateBulk) OnConflictColumns(columns ...string) *IntegrationUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &IntegrationUpsertBulk{ - create: icb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/integration_delete.go b/app/controlplane/pkg/data/ent/integration_delete.go index baf79e542..7eb5628ef 100644 --- a/app/controlplane/pkg/data/ent/integration_delete.go +++ b/app/controlplane/pkg/data/ent/integration_delete.go @@ -20,56 +20,56 @@ type IntegrationDelete struct { } // Where appends a list predicates to the IntegrationDelete builder. -func (id *IntegrationDelete) Where(ps ...predicate.Integration) *IntegrationDelete { - id.mutation.Where(ps...) - return id +func (_d *IntegrationDelete) Where(ps ...predicate.Integration) *IntegrationDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. 
-func (id *IntegrationDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, id.sqlExec, id.mutation, id.hooks) +func (_d *IntegrationDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (id *IntegrationDelete) ExecX(ctx context.Context) int { - n, err := id.Exec(ctx) +func (_d *IntegrationDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (id *IntegrationDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *IntegrationDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(integration.Table, sqlgraph.NewFieldSpec(integration.FieldID, field.TypeUUID)) - if ps := id.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, id.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - id.mutation.done = true + _d.mutation.done = true return affected, err } // IntegrationDeleteOne is the builder for deleting a single Integration entity. type IntegrationDeleteOne struct { - id *IntegrationDelete + _d *IntegrationDelete } // Where appends a list predicates to the IntegrationDelete builder. -func (ido *IntegrationDeleteOne) Where(ps ...predicate.Integration) *IntegrationDeleteOne { - ido.id.mutation.Where(ps...) - return ido +func (_d *IntegrationDeleteOne) Where(ps ...predicate.Integration) *IntegrationDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (ido *IntegrationDeleteOne) Exec(ctx context.Context) error { - n, err := ido.id.Exec(ctx) +func (_d *IntegrationDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (ido *IntegrationDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (ido *IntegrationDeleteOne) ExecX(ctx context.Context) { - if err := ido.Exec(ctx); err != nil { +func (_d *IntegrationDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/integration_query.go b/app/controlplane/pkg/data/ent/integration_query.go index 7f448ecc0..769ac9e7e 100644 --- a/app/controlplane/pkg/data/ent/integration_query.go +++ b/app/controlplane/pkg/data/ent/integration_query.go @@ -37,44 +37,44 @@ type IntegrationQuery struct { } // Where adds a new predicate for the IntegrationQuery builder. -func (iq *IntegrationQuery) Where(ps ...predicate.Integration) *IntegrationQuery { - iq.predicates = append(iq.predicates, ps...) - return iq +func (_q *IntegrationQuery) Where(ps ...predicate.Integration) *IntegrationQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (iq *IntegrationQuery) Limit(limit int) *IntegrationQuery { - iq.ctx.Limit = &limit - return iq +func (_q *IntegrationQuery) Limit(limit int) *IntegrationQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. 
-func (iq *IntegrationQuery) Offset(offset int) *IntegrationQuery { - iq.ctx.Offset = &offset - return iq +func (_q *IntegrationQuery) Offset(offset int) *IntegrationQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (iq *IntegrationQuery) Unique(unique bool) *IntegrationQuery { - iq.ctx.Unique = &unique - return iq +func (_q *IntegrationQuery) Unique(unique bool) *IntegrationQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (iq *IntegrationQuery) Order(o ...integration.OrderOption) *IntegrationQuery { - iq.order = append(iq.order, o...) - return iq +func (_q *IntegrationQuery) Order(o ...integration.OrderOption) *IntegrationQuery { + _q.order = append(_q.order, o...) + return _q } // QueryAttachments chains the current query on the "attachments" edge. -func (iq *IntegrationQuery) QueryAttachments() *IntegrationAttachmentQuery { - query := (&IntegrationAttachmentClient{config: iq.config}).Query() +func (_q *IntegrationQuery) QueryAttachments() *IntegrationAttachmentQuery { + query := (&IntegrationAttachmentClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := iq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := iq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -83,20 +83,20 @@ func (iq *IntegrationQuery) QueryAttachments() *IntegrationAttachmentQuery { sqlgraph.To(integrationattachment.Table, integrationattachment.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, integration.AttachmentsTable, integration.AttachmentsColumn), ) - fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryOrganization chains the current query on the "organization" edge. -func (iq *IntegrationQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: iq.config}).Query() +func (_q *IntegrationQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := iq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := iq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -105,7 +105,7 @@ func (iq *IntegrationQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, integration.OrganizationTable, integration.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -113,8 +113,8 @@ func (iq *IntegrationQuery) QueryOrganization() *OrganizationQuery { // First returns the first Integration entity from the query. // Returns a *NotFoundError when no Integration was found. 
-func (iq *IntegrationQuery) First(ctx context.Context) (*Integration, error) { - nodes, err := iq.Limit(1).All(setContextOp(ctx, iq.ctx, ent.OpQueryFirst)) +func (_q *IntegrationQuery) First(ctx context.Context) (*Integration, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -125,8 +125,8 @@ func (iq *IntegrationQuery) First(ctx context.Context) (*Integration, error) { } // FirstX is like First, but panics if an error occurs. -func (iq *IntegrationQuery) FirstX(ctx context.Context) *Integration { - node, err := iq.First(ctx) +func (_q *IntegrationQuery) FirstX(ctx context.Context) *Integration { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -135,9 +135,9 @@ func (iq *IntegrationQuery) FirstX(ctx context.Context) *Integration { // FirstID returns the first Integration ID from the query. // Returns a *NotFoundError when no Integration ID was found. -func (iq *IntegrationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *IntegrationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = iq.Limit(1).IDs(setContextOp(ctx, iq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -148,8 +148,8 @@ func (iq *IntegrationQuery) FirstID(ctx context.Context) (id uuid.UUID, err erro } // FirstIDX is like FirstID, but panics if an error occurs. -func (iq *IntegrationQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := iq.FirstID(ctx) +func (_q *IntegrationQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -159,8 +159,8 @@ func (iq *IntegrationQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single Integration entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one Integration entity is found. // Returns a *NotFoundError when no Integration entities are found. -func (iq *IntegrationQuery) Only(ctx context.Context) (*Integration, error) { - nodes, err := iq.Limit(2).All(setContextOp(ctx, iq.ctx, ent.OpQueryOnly)) +func (_q *IntegrationQuery) Only(ctx context.Context) (*Integration, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -175,8 +175,8 @@ func (iq *IntegrationQuery) Only(ctx context.Context) (*Integration, error) { } // OnlyX is like Only, but panics if an error occurs. -func (iq *IntegrationQuery) OnlyX(ctx context.Context) *Integration { - node, err := iq.Only(ctx) +func (_q *IntegrationQuery) OnlyX(ctx context.Context) *Integration { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -186,9 +186,9 @@ func (iq *IntegrationQuery) OnlyX(ctx context.Context) *Integration { // OnlyID is like Only, but returns the only Integration ID in the query. // Returns a *NotSingularError when more than one Integration ID is found. // Returns a *NotFoundError when no entities are found. 
-func (iq *IntegrationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *IntegrationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = iq.Limit(2).IDs(setContextOp(ctx, iq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -203,8 +203,8 @@ func (iq *IntegrationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (iq *IntegrationQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := iq.OnlyID(ctx) +func (_q *IntegrationQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -212,18 +212,18 @@ func (iq *IntegrationQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Integrations. -func (iq *IntegrationQuery) All(ctx context.Context) ([]*Integration, error) { - ctx = setContextOp(ctx, iq.ctx, ent.OpQueryAll) - if err := iq.prepareQuery(ctx); err != nil { +func (_q *IntegrationQuery) All(ctx context.Context) ([]*Integration, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*Integration, *IntegrationQuery]() - return withInterceptors[[]*Integration](ctx, iq, qr, iq.inters) + return withInterceptors[[]*Integration](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (iq *IntegrationQuery) AllX(ctx context.Context) []*Integration { - nodes, err := iq.All(ctx) +func (_q *IntegrationQuery) AllX(ctx context.Context) []*Integration { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -231,20 +231,20 @@ func (iq *IntegrationQuery) AllX(ctx context.Context) []*Integration { } // IDs executes the query and returns a list of Integration IDs. -func (iq *IntegrationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if iq.ctx.Unique == nil && iq.path != nil { - iq.Unique(true) +func (_q *IntegrationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, iq.ctx, ent.OpQueryIDs) - if err = iq.Select(integration.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(integration.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (iq *IntegrationQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := iq.IDs(ctx) +func (_q *IntegrationQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -252,17 +252,17 @@ func (iq *IntegrationQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (iq *IntegrationQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, iq.ctx, ent.OpQueryCount) - if err := iq.prepareQuery(ctx); err != nil { +func (_q *IntegrationQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, iq, querierCount[*IntegrationQuery](), iq.inters) + return withInterceptors[int](ctx, _q, querierCount[*IntegrationQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. 
-func (iq *IntegrationQuery) CountX(ctx context.Context) int { - count, err := iq.Count(ctx) +func (_q *IntegrationQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -270,9 +270,9 @@ func (iq *IntegrationQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (iq *IntegrationQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, iq.ctx, ent.OpQueryExist) - switch _, err := iq.FirstID(ctx); { +func (_q *IntegrationQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -283,8 +283,8 @@ func (iq *IntegrationQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (iq *IntegrationQuery) ExistX(ctx context.Context) bool { - exist, err := iq.Exist(ctx) +func (_q *IntegrationQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -293,45 +293,45 @@ func (iq *IntegrationQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the IntegrationQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (iq *IntegrationQuery) Clone() *IntegrationQuery { - if iq == nil { +func (_q *IntegrationQuery) Clone() *IntegrationQuery { + if _q == nil { return nil } return &IntegrationQuery{ - config: iq.config, - ctx: iq.ctx.Clone(), - order: append([]integration.OrderOption{}, iq.order...), - inters: append([]Interceptor{}, iq.inters...), - predicates: append([]predicate.Integration{}, iq.predicates...), - withAttachments: iq.withAttachments.Clone(), - withOrganization: iq.withOrganization.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]integration.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Integration{}, _q.predicates...), + withAttachments: _q.withAttachments.Clone(), + withOrganization: _q.withOrganization.Clone(), // clone intermediate query. - sql: iq.sql.Clone(), - path: iq.path, - modifiers: append([]func(*sql.Selector){}, iq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithAttachments tells the query-builder to eager-load the nodes that are connected to // the "attachments" edge. The optional arguments are used to configure the query builder of the edge. -func (iq *IntegrationQuery) WithAttachments(opts ...func(*IntegrationAttachmentQuery)) *IntegrationQuery { - query := (&IntegrationAttachmentClient{config: iq.config}).Query() +func (_q *IntegrationQuery) WithAttachments(opts ...func(*IntegrationAttachmentQuery)) *IntegrationQuery { + query := (&IntegrationAttachmentClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - iq.withAttachments = query - return iq + _q.withAttachments = query + return _q } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. 
-func (iq *IntegrationQuery) WithOrganization(opts ...func(*OrganizationQuery)) *IntegrationQuery { - query := (&OrganizationClient{config: iq.config}).Query() +func (_q *IntegrationQuery) WithOrganization(opts ...func(*OrganizationQuery)) *IntegrationQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - iq.withOrganization = query - return iq + _q.withOrganization = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -348,10 +348,10 @@ func (iq *IntegrationQuery) WithOrganization(opts ...func(*OrganizationQuery)) * // GroupBy(integration.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (iq *IntegrationQuery) GroupBy(field string, fields ...string) *IntegrationGroupBy { - iq.ctx.Fields = append([]string{field}, fields...) - grbuild := &IntegrationGroupBy{build: iq} - grbuild.flds = &iq.ctx.Fields +func (_q *IntegrationQuery) GroupBy(field string, fields ...string) *IntegrationGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &IntegrationGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = integration.Label grbuild.scan = grbuild.Scan return grbuild @@ -369,56 +369,56 @@ func (iq *IntegrationQuery) GroupBy(field string, fields ...string) *Integration // client.Integration.Query(). // Select(integration.FieldName). // Scan(ctx, &v) -func (iq *IntegrationQuery) Select(fields ...string) *IntegrationSelect { - iq.ctx.Fields = append(iq.ctx.Fields, fields...) - sbuild := &IntegrationSelect{IntegrationQuery: iq} +func (_q *IntegrationQuery) Select(fields ...string) *IntegrationSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &IntegrationSelect{IntegrationQuery: _q} sbuild.label = integration.Label - sbuild.flds, sbuild.scan = &iq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a IntegrationSelect configured with the given aggregations. -func (iq *IntegrationQuery) Aggregate(fns ...AggregateFunc) *IntegrationSelect { - return iq.Select().Aggregate(fns...) +func (_q *IntegrationQuery) Aggregate(fns ...AggregateFunc) *IntegrationSelect { + return _q.Select().Aggregate(fns...) 
} -func (iq *IntegrationQuery) prepareQuery(ctx context.Context) error { - for _, inter := range iq.inters { +func (_q *IntegrationQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, iq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range iq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !integration.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if iq.path != nil { - prev, err := iq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - iq.sql = prev + _q.sql = prev } return nil } -func (iq *IntegrationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Integration, error) { +func (_q *IntegrationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Integration, error) { var ( nodes = []*Integration{} - withFKs = iq.withFKs - _spec = iq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [2]bool{ - iq.withAttachments != nil, - iq.withOrganization != nil, + _q.withAttachments != nil, + _q.withOrganization != nil, } ) - if iq.withOrganization != nil { + if _q.withOrganization != nil { withFKs = true } if withFKs { @@ -428,32 +428,32 @@ func (iq *IntegrationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* return (*Integration).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &Integration{config: iq.config} + node := &Integration{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(iq.modifiers) > 0 { - _spec.Modifiers = iq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, iq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := iq.withAttachments; query != nil { - if err := iq.loadAttachments(ctx, query, nodes, + if query := _q.withAttachments; query != nil { + if err := _q.loadAttachments(ctx, query, nodes, func(n *Integration) { n.Edges.Attachments = []*IntegrationAttachment{} }, func(n *Integration, e *IntegrationAttachment) { n.Edges.Attachments = append(n.Edges.Attachments, e) }); err != nil { return nil, err } } - if query := iq.withOrganization; query != nil { - if err := iq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *Integration, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } @@ -461,7 +461,7 @@ func (iq *IntegrationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* return nodes, nil } -func (iq *IntegrationQuery) loadAttachments(ctx context.Context, query *IntegrationAttachmentQuery, nodes []*Integration, init func(*Integration), assign func(*Integration, *IntegrationAttachment)) error { +func (_q *IntegrationQuery) loadAttachments(ctx context.Context, query *IntegrationAttachmentQuery, nodes []*Integration, init func(*Integration), assign func(*Integration, *IntegrationAttachment)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Integration) for i := range 
nodes { @@ -492,7 +492,7 @@ func (iq *IntegrationQuery) loadAttachments(ctx context.Context, query *Integrat } return nil } -func (iq *IntegrationQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Integration, init func(*Integration), assign func(*Integration, *Organization)) error { +func (_q *IntegrationQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Integration, init func(*Integration), assign func(*Integration, *Organization)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Integration) for i := range nodes { @@ -525,27 +525,27 @@ func (iq *IntegrationQuery) loadOrganization(ctx context.Context, query *Organiz return nil } -func (iq *IntegrationQuery) sqlCount(ctx context.Context) (int, error) { - _spec := iq.querySpec() - if len(iq.modifiers) > 0 { - _spec.Modifiers = iq.modifiers +func (_q *IntegrationQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = iq.ctx.Fields - if len(iq.ctx.Fields) > 0 { - _spec.Unique = iq.ctx.Unique != nil && *iq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, iq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (iq *IntegrationQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *IntegrationQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(integration.Table, integration.Columns, sqlgraph.NewFieldSpec(integration.FieldID, field.TypeUUID)) - _spec.From = iq.sql - if unique := iq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if iq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := iq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, integration.FieldID) for i := range fields { @@ -554,20 +554,20 @@ func (iq *IntegrationQuery) querySpec() *sqlgraph.QuerySpec { } } } - if ps := iq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := iq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := iq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := iq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -577,36 +577,36 @@ func (iq *IntegrationQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (iq *IntegrationQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(iq.driver.Dialect()) +func (_q *IntegrationQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(integration.Table) - columns := iq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = integration.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if iq.sql != nil { - selector = iq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) 
} - if iq.ctx.Unique != nil && *iq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range iq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range iq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range iq.order { + for _, p := range _q.order { p(selector) } - if offset := iq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := iq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -615,33 +615,33 @@ func (iq *IntegrationQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (iq *IntegrationQuery) ForUpdate(opts ...sql.LockOption) *IntegrationQuery { - if iq.driver.Dialect() == dialect.Postgres { - iq.Unique(false) +func (_q *IntegrationQuery) ForUpdate(opts ...sql.LockOption) *IntegrationQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - iq.modifiers = append(iq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return iq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (iq *IntegrationQuery) ForShare(opts ...sql.LockOption) *IntegrationQuery { - if iq.driver.Dialect() == dialect.Postgres { - iq.Unique(false) +func (_q *IntegrationQuery) ForShare(opts ...sql.LockOption) *IntegrationQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - iq.modifiers = append(iq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return iq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (iq *IntegrationQuery) Modify(modifiers ...func(s *sql.Selector)) *IntegrationSelect { - iq.modifiers = append(iq.modifiers, modifiers...) - return iq.Select() +func (_q *IntegrationQuery) Modify(modifiers ...func(s *sql.Selector)) *IntegrationSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // IntegrationGroupBy is the group-by builder for Integration entities. @@ -651,41 +651,41 @@ type IntegrationGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (igb *IntegrationGroupBy) Aggregate(fns ...AggregateFunc) *IntegrationGroupBy { - igb.fns = append(igb.fns, fns...) - return igb +func (_g *IntegrationGroupBy) Aggregate(fns ...AggregateFunc) *IntegrationGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (igb *IntegrationGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, igb.build.ctx, ent.OpQueryGroupBy) - if err := igb.build.prepareQuery(ctx); err != nil { +func (_g *IntegrationGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*IntegrationQuery, *IntegrationGroupBy](ctx, igb.build, igb, igb.build.inters, v) + return scanWithInterceptors[*IntegrationQuery, *IntegrationGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (igb *IntegrationGroupBy) sqlScan(ctx context.Context, root *IntegrationQuery, v any) error { +func (_g *IntegrationGroupBy) sqlScan(ctx context.Context, root *IntegrationQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(igb.fns)) - for _, fn := range igb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*igb.flds)+len(igb.fns)) - for _, f := range *igb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*igb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := igb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -699,27 +699,27 @@ type IntegrationSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (is *IntegrationSelect) Aggregate(fns ...AggregateFunc) *IntegrationSelect { - is.fns = append(is.fns, fns...) - return is +func (_s *IntegrationSelect) Aggregate(fns ...AggregateFunc) *IntegrationSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (is *IntegrationSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, is.ctx, ent.OpQuerySelect) - if err := is.prepareQuery(ctx); err != nil { +func (_s *IntegrationSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*IntegrationQuery, *IntegrationSelect](ctx, is.IntegrationQuery, is, is.inters, v) + return scanWithInterceptors[*IntegrationQuery, *IntegrationSelect](ctx, _s.IntegrationQuery, _s, _s.inters, v) } -func (is *IntegrationSelect) sqlScan(ctx context.Context, root *IntegrationQuery, v any) error { +func (_s *IntegrationSelect) sqlScan(ctx context.Context, root *IntegrationQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(is.fns)) - for _, fn := range is.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*is.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -727,7 +727,7 @@ func (is *IntegrationSelect) sqlScan(ctx context.Context, root *IntegrationQuery } rows := &sql.Rows{} query, args := selector.Query() - if err := is.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -735,7 +735,7 @@ func (is *IntegrationSelect) sqlScan(ctx context.Context, root *IntegrationQuery } // Modify adds a query modifier for attaching custom logic to queries. -func (is *IntegrationSelect) Modify(modifiers ...func(s *sql.Selector)) *IntegrationSelect { - is.modifiers = append(is.modifiers, modifiers...) - return is +func (_s *IntegrationSelect) Modify(modifiers ...func(s *sql.Selector)) *IntegrationSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/integration_update.go b/app/controlplane/pkg/data/ent/integration_update.go index fa6091562..5d70a1325 100644 --- a/app/controlplane/pkg/data/ent/integration_update.go +++ b/app/controlplane/pkg/data/ent/integration_update.go @@ -27,129 +27,129 @@ type IntegrationUpdate struct { } // Where appends a list predicates to the IntegrationUpdate builder. -func (iu *IntegrationUpdate) Where(ps ...predicate.Integration) *IntegrationUpdate { - iu.mutation.Where(ps...) - return iu +func (_u *IntegrationUpdate) Where(ps ...predicate.Integration) *IntegrationUpdate { + _u.mutation.Where(ps...) + return _u } // SetDescription sets the "description" field. -func (iu *IntegrationUpdate) SetDescription(s string) *IntegrationUpdate { - iu.mutation.SetDescription(s) - return iu +func (_u *IntegrationUpdate) SetDescription(v string) *IntegrationUpdate { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (iu *IntegrationUpdate) SetNillableDescription(s *string) *IntegrationUpdate { - if s != nil { - iu.SetDescription(*s) +func (_u *IntegrationUpdate) SetNillableDescription(v *string) *IntegrationUpdate { + if v != nil { + _u.SetDescription(*v) } - return iu + return _u } // ClearDescription clears the value of the "description" field. -func (iu *IntegrationUpdate) ClearDescription() *IntegrationUpdate { - iu.mutation.ClearDescription() - return iu +func (_u *IntegrationUpdate) ClearDescription() *IntegrationUpdate { + _u.mutation.ClearDescription() + return _u } // SetConfiguration sets the "configuration" field. -func (iu *IntegrationUpdate) SetConfiguration(b []byte) *IntegrationUpdate { - iu.mutation.SetConfiguration(b) - return iu +func (_u *IntegrationUpdate) SetConfiguration(v []byte) *IntegrationUpdate { + _u.mutation.SetConfiguration(v) + return _u } // ClearConfiguration clears the value of the "configuration" field. -func (iu *IntegrationUpdate) ClearConfiguration() *IntegrationUpdate { - iu.mutation.ClearConfiguration() - return iu +func (_u *IntegrationUpdate) ClearConfiguration() *IntegrationUpdate { + _u.mutation.ClearConfiguration() + return _u } // SetDeletedAt sets the "deleted_at" field. -func (iu *IntegrationUpdate) SetDeletedAt(t time.Time) *IntegrationUpdate { - iu.mutation.SetDeletedAt(t) - return iu +func (_u *IntegrationUpdate) SetDeletedAt(v time.Time) *IntegrationUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. 
-func (iu *IntegrationUpdate) SetNillableDeletedAt(t *time.Time) *IntegrationUpdate { - if t != nil { - iu.SetDeletedAt(*t) +func (_u *IntegrationUpdate) SetNillableDeletedAt(v *time.Time) *IntegrationUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return iu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (iu *IntegrationUpdate) ClearDeletedAt() *IntegrationUpdate { - iu.mutation.ClearDeletedAt() - return iu +func (_u *IntegrationUpdate) ClearDeletedAt() *IntegrationUpdate { + _u.mutation.ClearDeletedAt() + return _u } // AddAttachmentIDs adds the "attachments" edge to the IntegrationAttachment entity by IDs. -func (iu *IntegrationUpdate) AddAttachmentIDs(ids ...uuid.UUID) *IntegrationUpdate { - iu.mutation.AddAttachmentIDs(ids...) - return iu +func (_u *IntegrationUpdate) AddAttachmentIDs(ids ...uuid.UUID) *IntegrationUpdate { + _u.mutation.AddAttachmentIDs(ids...) + return _u } // AddAttachments adds the "attachments" edges to the IntegrationAttachment entity. -func (iu *IntegrationUpdate) AddAttachments(i ...*IntegrationAttachment) *IntegrationUpdate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *IntegrationUpdate) AddAttachments(v ...*IntegrationAttachment) *IntegrationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return iu.AddAttachmentIDs(ids...) + return _u.AddAttachmentIDs(ids...) } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (iu *IntegrationUpdate) SetOrganizationID(id uuid.UUID) *IntegrationUpdate { - iu.mutation.SetOrganizationID(id) - return iu +func (_u *IntegrationUpdate) SetOrganizationID(id uuid.UUID) *IntegrationUpdate { + _u.mutation.SetOrganizationID(id) + return _u } // SetOrganization sets the "organization" edge to the Organization entity. -func (iu *IntegrationUpdate) SetOrganization(o *Organization) *IntegrationUpdate { - return iu.SetOrganizationID(o.ID) +func (_u *IntegrationUpdate) SetOrganization(v *Organization) *IntegrationUpdate { + return _u.SetOrganizationID(v.ID) } // Mutation returns the IntegrationMutation object of the builder. -func (iu *IntegrationUpdate) Mutation() *IntegrationMutation { - return iu.mutation +func (_u *IntegrationUpdate) Mutation() *IntegrationMutation { + return _u.mutation } // ClearAttachments clears all "attachments" edges to the IntegrationAttachment entity. -func (iu *IntegrationUpdate) ClearAttachments() *IntegrationUpdate { - iu.mutation.ClearAttachments() - return iu +func (_u *IntegrationUpdate) ClearAttachments() *IntegrationUpdate { + _u.mutation.ClearAttachments() + return _u } // RemoveAttachmentIDs removes the "attachments" edge to IntegrationAttachment entities by IDs. -func (iu *IntegrationUpdate) RemoveAttachmentIDs(ids ...uuid.UUID) *IntegrationUpdate { - iu.mutation.RemoveAttachmentIDs(ids...) - return iu +func (_u *IntegrationUpdate) RemoveAttachmentIDs(ids ...uuid.UUID) *IntegrationUpdate { + _u.mutation.RemoveAttachmentIDs(ids...) + return _u } // RemoveAttachments removes "attachments" edges to IntegrationAttachment entities. -func (iu *IntegrationUpdate) RemoveAttachments(i ...*IntegrationAttachment) *IntegrationUpdate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *IntegrationUpdate) RemoveAttachments(v ...*IntegrationAttachment) *IntegrationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return iu.RemoveAttachmentIDs(ids...) + return _u.RemoveAttachmentIDs(ids...) 
} // ClearOrganization clears the "organization" edge to the Organization entity. -func (iu *IntegrationUpdate) ClearOrganization() *IntegrationUpdate { - iu.mutation.ClearOrganization() - return iu +func (_u *IntegrationUpdate) ClearOrganization() *IntegrationUpdate { + _u.mutation.ClearOrganization() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. -func (iu *IntegrationUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, iu.sqlSave, iu.mutation, iu.hooks) +func (_u *IntegrationUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (iu *IntegrationUpdate) SaveX(ctx context.Context) int { - affected, err := iu.Save(ctx) +func (_u *IntegrationUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -157,63 +157,63 @@ func (iu *IntegrationUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (iu *IntegrationUpdate) Exec(ctx context.Context) error { - _, err := iu.Save(ctx) +func (_u *IntegrationUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (iu *IntegrationUpdate) ExecX(ctx context.Context) { - if err := iu.Exec(ctx); err != nil { +func (_u *IntegrationUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (iu *IntegrationUpdate) check() error { - if iu.mutation.OrganizationCleared() && len(iu.mutation.OrganizationIDs()) > 0 { +func (_u *IntegrationUpdate) check() error { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Integration.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (iu *IntegrationUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *IntegrationUpdate { - iu.modifiers = append(iu.modifiers, modifiers...) - return iu +func (_u *IntegrationUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *IntegrationUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (iu *IntegrationUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := iu.check(); err != nil { - return n, err +func (_u *IntegrationUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(integration.Table, integration.Columns, sqlgraph.NewFieldSpec(integration.FieldID, field.TypeUUID)) - if ps := iu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := iu.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(integration.FieldDescription, field.TypeString, value) } - if iu.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(integration.FieldDescription, field.TypeString) } - if value, ok := iu.mutation.Configuration(); ok { + if value, ok := _u.mutation.Configuration(); ok { _spec.SetField(integration.FieldConfiguration, field.TypeBytes, value) } - if iu.mutation.ConfigurationCleared() { + if _u.mutation.ConfigurationCleared() { _spec.ClearField(integration.FieldConfiguration, field.TypeBytes) } - if value, ok := iu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(integration.FieldDeletedAt, field.TypeTime, value) } - if iu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(integration.FieldDeletedAt, field.TypeTime) } - if iu.mutation.AttachmentsCleared() { + if _u.mutation.AttachmentsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -226,7 +226,7 @@ func (iu *IntegrationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iu.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !iu.mutation.AttachmentsCleared() { + if nodes := _u.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !_u.mutation.AttachmentsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -242,7 +242,7 @@ func (iu *IntegrationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iu.mutation.AttachmentsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.AttachmentsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -258,7 +258,7 @@ func (iu *IntegrationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if iu.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -271,7 +271,7 @@ func (iu *IntegrationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iu.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -287,8 +287,8 @@ func (iu *IntegrationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(iu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, iu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) 
+ if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{integration.Label} } else if sqlgraph.IsConstraintError(err) { @@ -296,8 +296,8 @@ func (iu *IntegrationUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - iu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // IntegrationUpdateOne is the builder for updating a single Integration entity. @@ -310,136 +310,136 @@ type IntegrationUpdateOne struct { } // SetDescription sets the "description" field. -func (iuo *IntegrationUpdateOne) SetDescription(s string) *IntegrationUpdateOne { - iuo.mutation.SetDescription(s) - return iuo +func (_u *IntegrationUpdateOne) SetDescription(v string) *IntegrationUpdateOne { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (iuo *IntegrationUpdateOne) SetNillableDescription(s *string) *IntegrationUpdateOne { - if s != nil { - iuo.SetDescription(*s) +func (_u *IntegrationUpdateOne) SetNillableDescription(v *string) *IntegrationUpdateOne { + if v != nil { + _u.SetDescription(*v) } - return iuo + return _u } // ClearDescription clears the value of the "description" field. -func (iuo *IntegrationUpdateOne) ClearDescription() *IntegrationUpdateOne { - iuo.mutation.ClearDescription() - return iuo +func (_u *IntegrationUpdateOne) ClearDescription() *IntegrationUpdateOne { + _u.mutation.ClearDescription() + return _u } // SetConfiguration sets the "configuration" field. -func (iuo *IntegrationUpdateOne) SetConfiguration(b []byte) *IntegrationUpdateOne { - iuo.mutation.SetConfiguration(b) - return iuo +func (_u *IntegrationUpdateOne) SetConfiguration(v []byte) *IntegrationUpdateOne { + _u.mutation.SetConfiguration(v) + return _u } // ClearConfiguration clears the value of the "configuration" field. -func (iuo *IntegrationUpdateOne) ClearConfiguration() *IntegrationUpdateOne { - iuo.mutation.ClearConfiguration() - return iuo +func (_u *IntegrationUpdateOne) ClearConfiguration() *IntegrationUpdateOne { + _u.mutation.ClearConfiguration() + return _u } // SetDeletedAt sets the "deleted_at" field. -func (iuo *IntegrationUpdateOne) SetDeletedAt(t time.Time) *IntegrationUpdateOne { - iuo.mutation.SetDeletedAt(t) - return iuo +func (_u *IntegrationUpdateOne) SetDeletedAt(v time.Time) *IntegrationUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (iuo *IntegrationUpdateOne) SetNillableDeletedAt(t *time.Time) *IntegrationUpdateOne { - if t != nil { - iuo.SetDeletedAt(*t) +func (_u *IntegrationUpdateOne) SetNillableDeletedAt(v *time.Time) *IntegrationUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return iuo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (iuo *IntegrationUpdateOne) ClearDeletedAt() *IntegrationUpdateOne { - iuo.mutation.ClearDeletedAt() - return iuo +func (_u *IntegrationUpdateOne) ClearDeletedAt() *IntegrationUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // AddAttachmentIDs adds the "attachments" edge to the IntegrationAttachment entity by IDs. -func (iuo *IntegrationUpdateOne) AddAttachmentIDs(ids ...uuid.UUID) *IntegrationUpdateOne { - iuo.mutation.AddAttachmentIDs(ids...) - return iuo +func (_u *IntegrationUpdateOne) AddAttachmentIDs(ids ...uuid.UUID) *IntegrationUpdateOne { + _u.mutation.AddAttachmentIDs(ids...) 
+ return _u } // AddAttachments adds the "attachments" edges to the IntegrationAttachment entity. -func (iuo *IntegrationUpdateOne) AddAttachments(i ...*IntegrationAttachment) *IntegrationUpdateOne { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *IntegrationUpdateOne) AddAttachments(v ...*IntegrationAttachment) *IntegrationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return iuo.AddAttachmentIDs(ids...) + return _u.AddAttachmentIDs(ids...) } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (iuo *IntegrationUpdateOne) SetOrganizationID(id uuid.UUID) *IntegrationUpdateOne { - iuo.mutation.SetOrganizationID(id) - return iuo +func (_u *IntegrationUpdateOne) SetOrganizationID(id uuid.UUID) *IntegrationUpdateOne { + _u.mutation.SetOrganizationID(id) + return _u } // SetOrganization sets the "organization" edge to the Organization entity. -func (iuo *IntegrationUpdateOne) SetOrganization(o *Organization) *IntegrationUpdateOne { - return iuo.SetOrganizationID(o.ID) +func (_u *IntegrationUpdateOne) SetOrganization(v *Organization) *IntegrationUpdateOne { + return _u.SetOrganizationID(v.ID) } // Mutation returns the IntegrationMutation object of the builder. -func (iuo *IntegrationUpdateOne) Mutation() *IntegrationMutation { - return iuo.mutation +func (_u *IntegrationUpdateOne) Mutation() *IntegrationMutation { + return _u.mutation } // ClearAttachments clears all "attachments" edges to the IntegrationAttachment entity. -func (iuo *IntegrationUpdateOne) ClearAttachments() *IntegrationUpdateOne { - iuo.mutation.ClearAttachments() - return iuo +func (_u *IntegrationUpdateOne) ClearAttachments() *IntegrationUpdateOne { + _u.mutation.ClearAttachments() + return _u } // RemoveAttachmentIDs removes the "attachments" edge to IntegrationAttachment entities by IDs. -func (iuo *IntegrationUpdateOne) RemoveAttachmentIDs(ids ...uuid.UUID) *IntegrationUpdateOne { - iuo.mutation.RemoveAttachmentIDs(ids...) - return iuo +func (_u *IntegrationUpdateOne) RemoveAttachmentIDs(ids ...uuid.UUID) *IntegrationUpdateOne { + _u.mutation.RemoveAttachmentIDs(ids...) + return _u } // RemoveAttachments removes "attachments" edges to IntegrationAttachment entities. -func (iuo *IntegrationUpdateOne) RemoveAttachments(i ...*IntegrationAttachment) *IntegrationUpdateOne { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *IntegrationUpdateOne) RemoveAttachments(v ...*IntegrationAttachment) *IntegrationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return iuo.RemoveAttachmentIDs(ids...) + return _u.RemoveAttachmentIDs(ids...) } // ClearOrganization clears the "organization" edge to the Organization entity. -func (iuo *IntegrationUpdateOne) ClearOrganization() *IntegrationUpdateOne { - iuo.mutation.ClearOrganization() - return iuo +func (_u *IntegrationUpdateOne) ClearOrganization() *IntegrationUpdateOne { + _u.mutation.ClearOrganization() + return _u } // Where appends a list predicates to the IntegrationUpdate builder. -func (iuo *IntegrationUpdateOne) Where(ps ...predicate.Integration) *IntegrationUpdateOne { - iuo.mutation.Where(ps...) - return iuo +func (_u *IntegrationUpdateOne) Where(ps ...predicate.Integration) *IntegrationUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
-func (iuo *IntegrationUpdateOne) Select(field string, fields ...string) *IntegrationUpdateOne { - iuo.fields = append([]string{field}, fields...) - return iuo +func (_u *IntegrationUpdateOne) Select(field string, fields ...string) *IntegrationUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated Integration entity. -func (iuo *IntegrationUpdateOne) Save(ctx context.Context) (*Integration, error) { - return withHooks(ctx, iuo.sqlSave, iuo.mutation, iuo.hooks) +func (_u *IntegrationUpdateOne) Save(ctx context.Context) (*Integration, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (iuo *IntegrationUpdateOne) SaveX(ctx context.Context) *Integration { - node, err := iuo.Save(ctx) +func (_u *IntegrationUpdateOne) SaveX(ctx context.Context) *Integration { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -447,43 +447,43 @@ func (iuo *IntegrationUpdateOne) SaveX(ctx context.Context) *Integration { } // Exec executes the query on the entity. -func (iuo *IntegrationUpdateOne) Exec(ctx context.Context) error { - _, err := iuo.Save(ctx) +func (_u *IntegrationUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (iuo *IntegrationUpdateOne) ExecX(ctx context.Context) { - if err := iuo.Exec(ctx); err != nil { +func (_u *IntegrationUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (iuo *IntegrationUpdateOne) check() error { - if iuo.mutation.OrganizationCleared() && len(iuo.mutation.OrganizationIDs()) > 0 { +func (_u *IntegrationUpdateOne) check() error { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Integration.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (iuo *IntegrationUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *IntegrationUpdateOne { - iuo.modifiers = append(iuo.modifiers, modifiers...) - return iuo +func (_u *IntegrationUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *IntegrationUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (iuo *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integration, err error) { - if err := iuo.check(); err != nil { +func (_u *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integration, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(integration.Table, integration.Columns, sqlgraph.NewFieldSpec(integration.FieldID, field.TypeUUID)) - id, ok := iuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Integration.id" for update`)} } _spec.Node.ID.Value = id - if fields := iuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, integration.FieldID) for _, f := range fields { @@ -495,32 +495,32 @@ func (iuo *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integratio } } } - if ps := iuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := iuo.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(integration.FieldDescription, field.TypeString, value) } - if iuo.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(integration.FieldDescription, field.TypeString) } - if value, ok := iuo.mutation.Configuration(); ok { + if value, ok := _u.mutation.Configuration(); ok { _spec.SetField(integration.FieldConfiguration, field.TypeBytes, value) } - if iuo.mutation.ConfigurationCleared() { + if _u.mutation.ConfigurationCleared() { _spec.ClearField(integration.FieldConfiguration, field.TypeBytes) } - if value, ok := iuo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(integration.FieldDeletedAt, field.TypeTime, value) } - if iuo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(integration.FieldDeletedAt, field.TypeTime) } - if iuo.mutation.AttachmentsCleared() { + if _u.mutation.AttachmentsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -533,7 +533,7 @@ func (iuo *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integratio } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iuo.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !iuo.mutation.AttachmentsCleared() { + if nodes := _u.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !_u.mutation.AttachmentsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -549,7 +549,7 @@ func (iuo *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integratio } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iuo.mutation.AttachmentsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.AttachmentsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -565,7 +565,7 @@ func (iuo *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integratio } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if iuo.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -578,7 +578,7 @@ func (iuo *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integratio } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iuo.mutation.OrganizationIDs(); len(nodes) > 
0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -594,11 +594,11 @@ func (iuo *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integratio } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(iuo.modifiers...) - _node = &Integration{config: iuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &Integration{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, iuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{integration.Label} } else if sqlgraph.IsConstraintError(err) { @@ -606,6 +606,6 @@ func (iuo *IntegrationUpdateOne) sqlSave(ctx context.Context) (_node *Integratio } return nil, err } - iuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/integrationattachment.go b/app/controlplane/pkg/data/ent/integrationattachment.go index 7063f419f..194a6a83b 100644 --- a/app/controlplane/pkg/data/ent/integrationattachment.go +++ b/app/controlplane/pkg/data/ent/integrationattachment.go @@ -90,7 +90,7 @@ func (*IntegrationAttachment) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the IntegrationAttachment fields. -func (ia *IntegrationAttachment) assignValues(columns []string, values []any) error { +func (_m *IntegrationAttachment) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -100,41 +100,41 @@ func (ia *IntegrationAttachment) assignValues(columns []string, values []any) er if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - ia.ID = *value + _m.ID = *value } case integrationattachment.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - ia.CreatedAt = value.Time + _m.CreatedAt = value.Time } case integrationattachment.FieldConfiguration: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field configuration", values[i]) } else if value != nil { - ia.Configuration = *value + _m.Configuration = *value } case integrationattachment.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - ia.DeletedAt = value.Time + _m.DeletedAt = value.Time } case integrationattachment.FieldWorkflowID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field workflow_id", values[i]) } else if value != nil { - ia.WorkflowID = *value + _m.WorkflowID = *value } case integrationattachment.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field integration_attachment_integration", values[i]) } else if value.Valid { - ia.integration_attachment_integration = new(uuid.UUID) - *ia.integration_attachment_integration = *value.S.(*uuid.UUID) + _m.integration_attachment_integration = new(uuid.UUID) + *_m.integration_attachment_integration = *value.S.(*uuid.UUID) } default: - ia.selectValues.Set(columns[i], values[i]) + 
_m.selectValues.Set(columns[i], values[i]) } } return nil @@ -142,54 +142,54 @@ func (ia *IntegrationAttachment) assignValues(columns []string, values []any) er // Value returns the ent.Value that was dynamically selected and assigned to the IntegrationAttachment. // This includes values selected through modifiers, order, etc. -func (ia *IntegrationAttachment) Value(name string) (ent.Value, error) { - return ia.selectValues.Get(name) +func (_m *IntegrationAttachment) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryIntegration queries the "integration" edge of the IntegrationAttachment entity. -func (ia *IntegrationAttachment) QueryIntegration() *IntegrationQuery { - return NewIntegrationAttachmentClient(ia.config).QueryIntegration(ia) +func (_m *IntegrationAttachment) QueryIntegration() *IntegrationQuery { + return NewIntegrationAttachmentClient(_m.config).QueryIntegration(_m) } // QueryWorkflow queries the "workflow" edge of the IntegrationAttachment entity. -func (ia *IntegrationAttachment) QueryWorkflow() *WorkflowQuery { - return NewIntegrationAttachmentClient(ia.config).QueryWorkflow(ia) +func (_m *IntegrationAttachment) QueryWorkflow() *WorkflowQuery { + return NewIntegrationAttachmentClient(_m.config).QueryWorkflow(_m) } // Update returns a builder for updating this IntegrationAttachment. // Note that you need to call IntegrationAttachment.Unwrap() before calling this method if this IntegrationAttachment // was returned from a transaction, and the transaction was committed or rolled back. -func (ia *IntegrationAttachment) Update() *IntegrationAttachmentUpdateOne { - return NewIntegrationAttachmentClient(ia.config).UpdateOne(ia) +func (_m *IntegrationAttachment) Update() *IntegrationAttachmentUpdateOne { + return NewIntegrationAttachmentClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the IntegrationAttachment entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (ia *IntegrationAttachment) Unwrap() *IntegrationAttachment { - _tx, ok := ia.config.driver.(*txDriver) +func (_m *IntegrationAttachment) Unwrap() *IntegrationAttachment { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: IntegrationAttachment is not a transactional entity") } - ia.config.driver = _tx.drv - return ia + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. 
-func (ia *IntegrationAttachment) String() string { +func (_m *IntegrationAttachment) String() string { var builder strings.Builder builder.WriteString("IntegrationAttachment(") - builder.WriteString(fmt.Sprintf("id=%v, ", ia.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("created_at=") - builder.WriteString(ia.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("configuration=") - builder.WriteString(fmt.Sprintf("%v", ia.Configuration)) + builder.WriteString(fmt.Sprintf("%v", _m.Configuration)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(ia.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("workflow_id=") - builder.WriteString(fmt.Sprintf("%v", ia.WorkflowID)) + builder.WriteString(fmt.Sprintf("%v", _m.WorkflowID)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/integrationattachment_create.go b/app/controlplane/pkg/data/ent/integrationattachment_create.go index 1a9f283a2..539db76cf 100644 --- a/app/controlplane/pkg/data/ent/integrationattachment_create.go +++ b/app/controlplane/pkg/data/ent/integrationattachment_create.go @@ -27,89 +27,89 @@ type IntegrationAttachmentCreate struct { } // SetCreatedAt sets the "created_at" field. -func (iac *IntegrationAttachmentCreate) SetCreatedAt(t time.Time) *IntegrationAttachmentCreate { - iac.mutation.SetCreatedAt(t) - return iac +func (_c *IntegrationAttachmentCreate) SetCreatedAt(v time.Time) *IntegrationAttachmentCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (iac *IntegrationAttachmentCreate) SetNillableCreatedAt(t *time.Time) *IntegrationAttachmentCreate { - if t != nil { - iac.SetCreatedAt(*t) +func (_c *IntegrationAttachmentCreate) SetNillableCreatedAt(v *time.Time) *IntegrationAttachmentCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return iac + return _c } // SetConfiguration sets the "configuration" field. -func (iac *IntegrationAttachmentCreate) SetConfiguration(b []byte) *IntegrationAttachmentCreate { - iac.mutation.SetConfiguration(b) - return iac +func (_c *IntegrationAttachmentCreate) SetConfiguration(v []byte) *IntegrationAttachmentCreate { + _c.mutation.SetConfiguration(v) + return _c } // SetDeletedAt sets the "deleted_at" field. -func (iac *IntegrationAttachmentCreate) SetDeletedAt(t time.Time) *IntegrationAttachmentCreate { - iac.mutation.SetDeletedAt(t) - return iac +func (_c *IntegrationAttachmentCreate) SetDeletedAt(v time.Time) *IntegrationAttachmentCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (iac *IntegrationAttachmentCreate) SetNillableDeletedAt(t *time.Time) *IntegrationAttachmentCreate { - if t != nil { - iac.SetDeletedAt(*t) +func (_c *IntegrationAttachmentCreate) SetNillableDeletedAt(v *time.Time) *IntegrationAttachmentCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return iac + return _c } // SetWorkflowID sets the "workflow_id" field. -func (iac *IntegrationAttachmentCreate) SetWorkflowID(u uuid.UUID) *IntegrationAttachmentCreate { - iac.mutation.SetWorkflowID(u) - return iac +func (_c *IntegrationAttachmentCreate) SetWorkflowID(v uuid.UUID) *IntegrationAttachmentCreate { + _c.mutation.SetWorkflowID(v) + return _c } // SetID sets the "id" field. 
-func (iac *IntegrationAttachmentCreate) SetID(u uuid.UUID) *IntegrationAttachmentCreate { - iac.mutation.SetID(u) - return iac +func (_c *IntegrationAttachmentCreate) SetID(v uuid.UUID) *IntegrationAttachmentCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (iac *IntegrationAttachmentCreate) SetNillableID(u *uuid.UUID) *IntegrationAttachmentCreate { - if u != nil { - iac.SetID(*u) +func (_c *IntegrationAttachmentCreate) SetNillableID(v *uuid.UUID) *IntegrationAttachmentCreate { + if v != nil { + _c.SetID(*v) } - return iac + return _c } // SetIntegrationID sets the "integration" edge to the Integration entity by ID. -func (iac *IntegrationAttachmentCreate) SetIntegrationID(id uuid.UUID) *IntegrationAttachmentCreate { - iac.mutation.SetIntegrationID(id) - return iac +func (_c *IntegrationAttachmentCreate) SetIntegrationID(id uuid.UUID) *IntegrationAttachmentCreate { + _c.mutation.SetIntegrationID(id) + return _c } // SetIntegration sets the "integration" edge to the Integration entity. -func (iac *IntegrationAttachmentCreate) SetIntegration(i *Integration) *IntegrationAttachmentCreate { - return iac.SetIntegrationID(i.ID) +func (_c *IntegrationAttachmentCreate) SetIntegration(v *Integration) *IntegrationAttachmentCreate { + return _c.SetIntegrationID(v.ID) } // SetWorkflow sets the "workflow" edge to the Workflow entity. -func (iac *IntegrationAttachmentCreate) SetWorkflow(w *Workflow) *IntegrationAttachmentCreate { - return iac.SetWorkflowID(w.ID) +func (_c *IntegrationAttachmentCreate) SetWorkflow(v *Workflow) *IntegrationAttachmentCreate { + return _c.SetWorkflowID(v.ID) } // Mutation returns the IntegrationAttachmentMutation object of the builder. -func (iac *IntegrationAttachmentCreate) Mutation() *IntegrationAttachmentMutation { - return iac.mutation +func (_c *IntegrationAttachmentCreate) Mutation() *IntegrationAttachmentMutation { + return _c.mutation } // Save creates the IntegrationAttachment in the database. -func (iac *IntegrationAttachmentCreate) Save(ctx context.Context) (*IntegrationAttachment, error) { - iac.defaults() - return withHooks(ctx, iac.sqlSave, iac.mutation, iac.hooks) +func (_c *IntegrationAttachmentCreate) Save(ctx context.Context) (*IntegrationAttachment, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (iac *IntegrationAttachmentCreate) SaveX(ctx context.Context) *IntegrationAttachment { - v, err := iac.Save(ctx) +func (_c *IntegrationAttachmentCreate) SaveX(ctx context.Context) *IntegrationAttachment { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -117,53 +117,53 @@ func (iac *IntegrationAttachmentCreate) SaveX(ctx context.Context) *IntegrationA } // Exec executes the query. -func (iac *IntegrationAttachmentCreate) Exec(ctx context.Context) error { - _, err := iac.Save(ctx) +func (_c *IntegrationAttachmentCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (iac *IntegrationAttachmentCreate) ExecX(ctx context.Context) { - if err := iac.Exec(ctx); err != nil { +func (_c *IntegrationAttachmentCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. 
-func (iac *IntegrationAttachmentCreate) defaults() { - if _, ok := iac.mutation.CreatedAt(); !ok { +func (_c *IntegrationAttachmentCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := integrationattachment.DefaultCreatedAt() - iac.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := iac.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := integrationattachment.DefaultID() - iac.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. -func (iac *IntegrationAttachmentCreate) check() error { - if _, ok := iac.mutation.CreatedAt(); !ok { +func (_c *IntegrationAttachmentCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "IntegrationAttachment.created_at"`)} } - if _, ok := iac.mutation.WorkflowID(); !ok { + if _, ok := _c.mutation.WorkflowID(); !ok { return &ValidationError{Name: "workflow_id", err: errors.New(`ent: missing required field "IntegrationAttachment.workflow_id"`)} } - if len(iac.mutation.IntegrationIDs()) == 0 { + if len(_c.mutation.IntegrationIDs()) == 0 { return &ValidationError{Name: "integration", err: errors.New(`ent: missing required edge "IntegrationAttachment.integration"`)} } - if len(iac.mutation.WorkflowIDs()) == 0 { + if len(_c.mutation.WorkflowIDs()) == 0 { return &ValidationError{Name: "workflow", err: errors.New(`ent: missing required edge "IntegrationAttachment.workflow"`)} } return nil } -func (iac *IntegrationAttachmentCreate) sqlSave(ctx context.Context) (*IntegrationAttachment, error) { - if err := iac.check(); err != nil { +func (_c *IntegrationAttachmentCreate) sqlSave(ctx context.Context) (*IntegrationAttachment, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := iac.createSpec() - if err := sqlgraph.CreateNode(ctx, iac.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -176,34 +176,34 @@ func (iac *IntegrationAttachmentCreate) sqlSave(ctx context.Context) (*Integrati return nil, err } } - iac.mutation.id = &_node.ID - iac.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (iac *IntegrationAttachmentCreate) createSpec() (*IntegrationAttachment, *sqlgraph.CreateSpec) { +func (_c *IntegrationAttachmentCreate) createSpec() (*IntegrationAttachment, *sqlgraph.CreateSpec) { var ( - _node = &IntegrationAttachment{config: iac.config} + _node = &IntegrationAttachment{config: _c.config} _spec = sqlgraph.NewCreateSpec(integrationattachment.Table, sqlgraph.NewFieldSpec(integrationattachment.FieldID, field.TypeUUID)) ) - _spec.OnConflict = iac.conflict - if id, ok := iac.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := iac.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(integrationattachment.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := iac.mutation.Configuration(); ok { + if value, ok := _c.mutation.Configuration(); ok { _spec.SetField(integrationattachment.FieldConfiguration, field.TypeBytes, value) _node.Configuration = value } - if value, ok := iac.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { 
_spec.SetField(integrationattachment.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if nodes := iac.mutation.IntegrationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.IntegrationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -220,7 +220,7 @@ func (iac *IntegrationAttachmentCreate) createSpec() (*IntegrationAttachment, *s _node.integration_attachment_integration = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := iac.mutation.WorkflowIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -256,10 +256,10 @@ func (iac *IntegrationAttachmentCreate) createSpec() (*IntegrationAttachment, *s // SetCreatedAt(v+v). // }). // Exec(ctx) -func (iac *IntegrationAttachmentCreate) OnConflict(opts ...sql.ConflictOption) *IntegrationAttachmentUpsertOne { - iac.conflict = opts +func (_c *IntegrationAttachmentCreate) OnConflict(opts ...sql.ConflictOption) *IntegrationAttachmentUpsertOne { + _c.conflict = opts return &IntegrationAttachmentUpsertOne{ - create: iac, + create: _c, } } @@ -269,10 +269,10 @@ func (iac *IntegrationAttachmentCreate) OnConflict(opts ...sql.ConflictOption) * // client.IntegrationAttachment.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (iac *IntegrationAttachmentCreate) OnConflictColumns(columns ...string) *IntegrationAttachmentUpsertOne { - iac.conflict = append(iac.conflict, sql.ConflictColumns(columns...)) +func (_c *IntegrationAttachmentCreate) OnConflictColumns(columns ...string) *IntegrationAttachmentUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &IntegrationAttachmentUpsertOne{ - create: iac, + create: _c, } } @@ -491,16 +491,16 @@ type IntegrationAttachmentCreateBulk struct { } // Save creates the IntegrationAttachment entities in the database. -func (iacb *IntegrationAttachmentCreateBulk) Save(ctx context.Context) ([]*IntegrationAttachment, error) { - if iacb.err != nil { - return nil, iacb.err - } - specs := make([]*sqlgraph.CreateSpec, len(iacb.builders)) - nodes := make([]*IntegrationAttachment, len(iacb.builders)) - mutators := make([]Mutator, len(iacb.builders)) - for i := range iacb.builders { +func (_c *IntegrationAttachmentCreateBulk) Save(ctx context.Context) ([]*IntegrationAttachment, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*IntegrationAttachment, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := iacb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*IntegrationAttachmentMutation) @@ -514,12 +514,12 @@ func (iacb *IntegrationAttachmentCreateBulk) Save(ctx context.Context) ([]*Integ var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, iacb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = iacb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, iacb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -539,7 +539,7 @@ func (iacb *IntegrationAttachmentCreateBulk) Save(ctx context.Context) ([]*Integ }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, iacb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -547,8 +547,8 @@ func (iacb *IntegrationAttachmentCreateBulk) Save(ctx context.Context) ([]*Integ } // SaveX is like Save, but panics if an error occurs. -func (iacb *IntegrationAttachmentCreateBulk) SaveX(ctx context.Context) []*IntegrationAttachment { - v, err := iacb.Save(ctx) +func (_c *IntegrationAttachmentCreateBulk) SaveX(ctx context.Context) []*IntegrationAttachment { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -556,14 +556,14 @@ func (iacb *IntegrationAttachmentCreateBulk) SaveX(ctx context.Context) []*Integ } // Exec executes the query. -func (iacb *IntegrationAttachmentCreateBulk) Exec(ctx context.Context) error { - _, err := iacb.Save(ctx) +func (_c *IntegrationAttachmentCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (iacb *IntegrationAttachmentCreateBulk) ExecX(ctx context.Context) { - if err := iacb.Exec(ctx); err != nil { +func (_c *IntegrationAttachmentCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -583,10 +583,10 @@ func (iacb *IntegrationAttachmentCreateBulk) ExecX(ctx context.Context) { // SetCreatedAt(v+v). // }). // Exec(ctx) -func (iacb *IntegrationAttachmentCreateBulk) OnConflict(opts ...sql.ConflictOption) *IntegrationAttachmentUpsertBulk { - iacb.conflict = opts +func (_c *IntegrationAttachmentCreateBulk) OnConflict(opts ...sql.ConflictOption) *IntegrationAttachmentUpsertBulk { + _c.conflict = opts return &IntegrationAttachmentUpsertBulk{ - create: iacb, + create: _c, } } @@ -596,10 +596,10 @@ func (iacb *IntegrationAttachmentCreateBulk) OnConflict(opts ...sql.ConflictOpti // client.IntegrationAttachment.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (iacb *IntegrationAttachmentCreateBulk) OnConflictColumns(columns ...string) *IntegrationAttachmentUpsertBulk { - iacb.conflict = append(iacb.conflict, sql.ConflictColumns(columns...)) +func (_c *IntegrationAttachmentCreateBulk) OnConflictColumns(columns ...string) *IntegrationAttachmentUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &IntegrationAttachmentUpsertBulk{ - create: iacb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/integrationattachment_delete.go b/app/controlplane/pkg/data/ent/integrationattachment_delete.go index 9462ef1de..614a87539 100644 --- a/app/controlplane/pkg/data/ent/integrationattachment_delete.go +++ b/app/controlplane/pkg/data/ent/integrationattachment_delete.go @@ -20,56 +20,56 @@ type IntegrationAttachmentDelete struct { } // Where appends a list predicates to the IntegrationAttachmentDelete builder. -func (iad *IntegrationAttachmentDelete) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentDelete { - iad.mutation.Where(ps...) - return iad +func (_d *IntegrationAttachmentDelete) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentDelete { + _d.mutation.Where(ps...) 
+ return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (iad *IntegrationAttachmentDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, iad.sqlExec, iad.mutation, iad.hooks) +func (_d *IntegrationAttachmentDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (iad *IntegrationAttachmentDelete) ExecX(ctx context.Context) int { - n, err := iad.Exec(ctx) +func (_d *IntegrationAttachmentDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (iad *IntegrationAttachmentDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *IntegrationAttachmentDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(integrationattachment.Table, sqlgraph.NewFieldSpec(integrationattachment.FieldID, field.TypeUUID)) - if ps := iad.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, iad.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - iad.mutation.done = true + _d.mutation.done = true return affected, err } // IntegrationAttachmentDeleteOne is the builder for deleting a single IntegrationAttachment entity. type IntegrationAttachmentDeleteOne struct { - iad *IntegrationAttachmentDelete + _d *IntegrationAttachmentDelete } // Where appends a list predicates to the IntegrationAttachmentDelete builder. -func (iado *IntegrationAttachmentDeleteOne) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentDeleteOne { - iado.iad.mutation.Where(ps...) - return iado +func (_d *IntegrationAttachmentDeleteOne) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (iado *IntegrationAttachmentDeleteOne) Exec(ctx context.Context) error { - n, err := iado.iad.Exec(ctx) +func (_d *IntegrationAttachmentDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (iado *IntegrationAttachmentDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (iado *IntegrationAttachmentDeleteOne) ExecX(ctx context.Context) { - if err := iado.Exec(ctx); err != nil { +func (_d *IntegrationAttachmentDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/integrationattachment_query.go b/app/controlplane/pkg/data/ent/integrationattachment_query.go index e24c11300..1215a2f11 100644 --- a/app/controlplane/pkg/data/ent/integrationattachment_query.go +++ b/app/controlplane/pkg/data/ent/integrationattachment_query.go @@ -36,44 +36,44 @@ type IntegrationAttachmentQuery struct { } // Where adds a new predicate for the IntegrationAttachmentQuery builder. -func (iaq *IntegrationAttachmentQuery) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentQuery { - iaq.predicates = append(iaq.predicates, ps...) 
- return iaq +func (_q *IntegrationAttachmentQuery) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (iaq *IntegrationAttachmentQuery) Limit(limit int) *IntegrationAttachmentQuery { - iaq.ctx.Limit = &limit - return iaq +func (_q *IntegrationAttachmentQuery) Limit(limit int) *IntegrationAttachmentQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (iaq *IntegrationAttachmentQuery) Offset(offset int) *IntegrationAttachmentQuery { - iaq.ctx.Offset = &offset - return iaq +func (_q *IntegrationAttachmentQuery) Offset(offset int) *IntegrationAttachmentQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (iaq *IntegrationAttachmentQuery) Unique(unique bool) *IntegrationAttachmentQuery { - iaq.ctx.Unique = &unique - return iaq +func (_q *IntegrationAttachmentQuery) Unique(unique bool) *IntegrationAttachmentQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (iaq *IntegrationAttachmentQuery) Order(o ...integrationattachment.OrderOption) *IntegrationAttachmentQuery { - iaq.order = append(iaq.order, o...) - return iaq +func (_q *IntegrationAttachmentQuery) Order(o ...integrationattachment.OrderOption) *IntegrationAttachmentQuery { + _q.order = append(_q.order, o...) + return _q } // QueryIntegration chains the current query on the "integration" edge. -func (iaq *IntegrationAttachmentQuery) QueryIntegration() *IntegrationQuery { - query := (&IntegrationClient{config: iaq.config}).Query() +func (_q *IntegrationAttachmentQuery) QueryIntegration() *IntegrationQuery { + query := (&IntegrationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := iaq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := iaq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -82,20 +82,20 @@ func (iaq *IntegrationAttachmentQuery) QueryIntegration() *IntegrationQuery { sqlgraph.To(integration.Table, integration.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, integrationattachment.IntegrationTable, integrationattachment.IntegrationColumn), ) - fromU = sqlgraph.SetNeighbors(iaq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryWorkflow chains the current query on the "workflow" edge. 
-func (iaq *IntegrationAttachmentQuery) QueryWorkflow() *WorkflowQuery { - query := (&WorkflowClient{config: iaq.config}).Query() +func (_q *IntegrationAttachmentQuery) QueryWorkflow() *WorkflowQuery { + query := (&WorkflowClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := iaq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := iaq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -104,7 +104,7 @@ func (iaq *IntegrationAttachmentQuery) QueryWorkflow() *WorkflowQuery { sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, integrationattachment.WorkflowTable, integrationattachment.WorkflowColumn), ) - fromU = sqlgraph.SetNeighbors(iaq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -112,8 +112,8 @@ func (iaq *IntegrationAttachmentQuery) QueryWorkflow() *WorkflowQuery { // First returns the first IntegrationAttachment entity from the query. // Returns a *NotFoundError when no IntegrationAttachment was found. -func (iaq *IntegrationAttachmentQuery) First(ctx context.Context) (*IntegrationAttachment, error) { - nodes, err := iaq.Limit(1).All(setContextOp(ctx, iaq.ctx, ent.OpQueryFirst)) +func (_q *IntegrationAttachmentQuery) First(ctx context.Context) (*IntegrationAttachment, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -124,8 +124,8 @@ func (iaq *IntegrationAttachmentQuery) First(ctx context.Context) (*IntegrationA } // FirstX is like First, but panics if an error occurs. -func (iaq *IntegrationAttachmentQuery) FirstX(ctx context.Context) *IntegrationAttachment { - node, err := iaq.First(ctx) +func (_q *IntegrationAttachmentQuery) FirstX(ctx context.Context) *IntegrationAttachment { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -134,9 +134,9 @@ func (iaq *IntegrationAttachmentQuery) FirstX(ctx context.Context) *IntegrationA // FirstID returns the first IntegrationAttachment ID from the query. // Returns a *NotFoundError when no IntegrationAttachment ID was found. -func (iaq *IntegrationAttachmentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *IntegrationAttachmentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = iaq.Limit(1).IDs(setContextOp(ctx, iaq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -147,8 +147,8 @@ func (iaq *IntegrationAttachmentQuery) FirstID(ctx context.Context) (id uuid.UUI } // FirstIDX is like FirstID, but panics if an error occurs. -func (iaq *IntegrationAttachmentQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := iaq.FirstID(ctx) +func (_q *IntegrationAttachmentQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -158,8 +158,8 @@ func (iaq *IntegrationAttachmentQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single IntegrationAttachment entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one IntegrationAttachment entity is found. // Returns a *NotFoundError when no IntegrationAttachment entities are found. 
-func (iaq *IntegrationAttachmentQuery) Only(ctx context.Context) (*IntegrationAttachment, error) { - nodes, err := iaq.Limit(2).All(setContextOp(ctx, iaq.ctx, ent.OpQueryOnly)) +func (_q *IntegrationAttachmentQuery) Only(ctx context.Context) (*IntegrationAttachment, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -174,8 +174,8 @@ func (iaq *IntegrationAttachmentQuery) Only(ctx context.Context) (*IntegrationAt } // OnlyX is like Only, but panics if an error occurs. -func (iaq *IntegrationAttachmentQuery) OnlyX(ctx context.Context) *IntegrationAttachment { - node, err := iaq.Only(ctx) +func (_q *IntegrationAttachmentQuery) OnlyX(ctx context.Context) *IntegrationAttachment { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -185,9 +185,9 @@ func (iaq *IntegrationAttachmentQuery) OnlyX(ctx context.Context) *IntegrationAt // OnlyID is like Only, but returns the only IntegrationAttachment ID in the query. // Returns a *NotSingularError when more than one IntegrationAttachment ID is found. // Returns a *NotFoundError when no entities are found. -func (iaq *IntegrationAttachmentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *IntegrationAttachmentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = iaq.Limit(2).IDs(setContextOp(ctx, iaq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -202,8 +202,8 @@ func (iaq *IntegrationAttachmentQuery) OnlyID(ctx context.Context) (id uuid.UUID } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (iaq *IntegrationAttachmentQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := iaq.OnlyID(ctx) +func (_q *IntegrationAttachmentQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -211,18 +211,18 @@ func (iaq *IntegrationAttachmentQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of IntegrationAttachments. -func (iaq *IntegrationAttachmentQuery) All(ctx context.Context) ([]*IntegrationAttachment, error) { - ctx = setContextOp(ctx, iaq.ctx, ent.OpQueryAll) - if err := iaq.prepareQuery(ctx); err != nil { +func (_q *IntegrationAttachmentQuery) All(ctx context.Context) ([]*IntegrationAttachment, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*IntegrationAttachment, *IntegrationAttachmentQuery]() - return withInterceptors[[]*IntegrationAttachment](ctx, iaq, qr, iaq.inters) + return withInterceptors[[]*IntegrationAttachment](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (iaq *IntegrationAttachmentQuery) AllX(ctx context.Context) []*IntegrationAttachment { - nodes, err := iaq.All(ctx) +func (_q *IntegrationAttachmentQuery) AllX(ctx context.Context) []*IntegrationAttachment { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -230,20 +230,20 @@ func (iaq *IntegrationAttachmentQuery) AllX(ctx context.Context) []*IntegrationA } // IDs executes the query and returns a list of IntegrationAttachment IDs. 
-func (iaq *IntegrationAttachmentQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if iaq.ctx.Unique == nil && iaq.path != nil { - iaq.Unique(true) +func (_q *IntegrationAttachmentQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, iaq.ctx, ent.OpQueryIDs) - if err = iaq.Select(integrationattachment.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(integrationattachment.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (iaq *IntegrationAttachmentQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := iaq.IDs(ctx) +func (_q *IntegrationAttachmentQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -251,17 +251,17 @@ func (iaq *IntegrationAttachmentQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (iaq *IntegrationAttachmentQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, iaq.ctx, ent.OpQueryCount) - if err := iaq.prepareQuery(ctx); err != nil { +func (_q *IntegrationAttachmentQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, iaq, querierCount[*IntegrationAttachmentQuery](), iaq.inters) + return withInterceptors[int](ctx, _q, querierCount[*IntegrationAttachmentQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (iaq *IntegrationAttachmentQuery) CountX(ctx context.Context) int { - count, err := iaq.Count(ctx) +func (_q *IntegrationAttachmentQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -269,9 +269,9 @@ func (iaq *IntegrationAttachmentQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (iaq *IntegrationAttachmentQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, iaq.ctx, ent.OpQueryExist) - switch _, err := iaq.FirstID(ctx); { +func (_q *IntegrationAttachmentQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -282,8 +282,8 @@ func (iaq *IntegrationAttachmentQuery) Exist(ctx context.Context) (bool, error) } // ExistX is like Exist, but panics if an error occurs. -func (iaq *IntegrationAttachmentQuery) ExistX(ctx context.Context) bool { - exist, err := iaq.Exist(ctx) +func (_q *IntegrationAttachmentQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -292,45 +292,45 @@ func (iaq *IntegrationAttachmentQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the IntegrationAttachmentQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
-func (iaq *IntegrationAttachmentQuery) Clone() *IntegrationAttachmentQuery { - if iaq == nil { +func (_q *IntegrationAttachmentQuery) Clone() *IntegrationAttachmentQuery { + if _q == nil { return nil } return &IntegrationAttachmentQuery{ - config: iaq.config, - ctx: iaq.ctx.Clone(), - order: append([]integrationattachment.OrderOption{}, iaq.order...), - inters: append([]Interceptor{}, iaq.inters...), - predicates: append([]predicate.IntegrationAttachment{}, iaq.predicates...), - withIntegration: iaq.withIntegration.Clone(), - withWorkflow: iaq.withWorkflow.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]integrationattachment.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.IntegrationAttachment{}, _q.predicates...), + withIntegration: _q.withIntegration.Clone(), + withWorkflow: _q.withWorkflow.Clone(), // clone intermediate query. - sql: iaq.sql.Clone(), - path: iaq.path, - modifiers: append([]func(*sql.Selector){}, iaq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithIntegration tells the query-builder to eager-load the nodes that are connected to // the "integration" edge. The optional arguments are used to configure the query builder of the edge. -func (iaq *IntegrationAttachmentQuery) WithIntegration(opts ...func(*IntegrationQuery)) *IntegrationAttachmentQuery { - query := (&IntegrationClient{config: iaq.config}).Query() +func (_q *IntegrationAttachmentQuery) WithIntegration(opts ...func(*IntegrationQuery)) *IntegrationAttachmentQuery { + query := (&IntegrationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - iaq.withIntegration = query - return iaq + _q.withIntegration = query + return _q } // WithWorkflow tells the query-builder to eager-load the nodes that are connected to // the "workflow" edge. The optional arguments are used to configure the query builder of the edge. -func (iaq *IntegrationAttachmentQuery) WithWorkflow(opts ...func(*WorkflowQuery)) *IntegrationAttachmentQuery { - query := (&WorkflowClient{config: iaq.config}).Query() +func (_q *IntegrationAttachmentQuery) WithWorkflow(opts ...func(*WorkflowQuery)) *IntegrationAttachmentQuery { + query := (&WorkflowClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - iaq.withWorkflow = query - return iaq + _q.withWorkflow = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -347,10 +347,10 @@ func (iaq *IntegrationAttachmentQuery) WithWorkflow(opts ...func(*WorkflowQuery) // GroupBy(integrationattachment.FieldCreatedAt). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (iaq *IntegrationAttachmentQuery) GroupBy(field string, fields ...string) *IntegrationAttachmentGroupBy { - iaq.ctx.Fields = append([]string{field}, fields...) - grbuild := &IntegrationAttachmentGroupBy{build: iaq} - grbuild.flds = &iaq.ctx.Fields +func (_q *IntegrationAttachmentQuery) GroupBy(field string, fields ...string) *IntegrationAttachmentGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &IntegrationAttachmentGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = integrationattachment.Label grbuild.scan = grbuild.Scan return grbuild @@ -368,56 +368,56 @@ func (iaq *IntegrationAttachmentQuery) GroupBy(field string, fields ...string) * // client.IntegrationAttachment.Query(). // Select(integrationattachment.FieldCreatedAt). 
// Scan(ctx, &v) -func (iaq *IntegrationAttachmentQuery) Select(fields ...string) *IntegrationAttachmentSelect { - iaq.ctx.Fields = append(iaq.ctx.Fields, fields...) - sbuild := &IntegrationAttachmentSelect{IntegrationAttachmentQuery: iaq} +func (_q *IntegrationAttachmentQuery) Select(fields ...string) *IntegrationAttachmentSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &IntegrationAttachmentSelect{IntegrationAttachmentQuery: _q} sbuild.label = integrationattachment.Label - sbuild.flds, sbuild.scan = &iaq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a IntegrationAttachmentSelect configured with the given aggregations. -func (iaq *IntegrationAttachmentQuery) Aggregate(fns ...AggregateFunc) *IntegrationAttachmentSelect { - return iaq.Select().Aggregate(fns...) +func (_q *IntegrationAttachmentQuery) Aggregate(fns ...AggregateFunc) *IntegrationAttachmentSelect { + return _q.Select().Aggregate(fns...) } -func (iaq *IntegrationAttachmentQuery) prepareQuery(ctx context.Context) error { - for _, inter := range iaq.inters { +func (_q *IntegrationAttachmentQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, iaq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range iaq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !integrationattachment.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if iaq.path != nil { - prev, err := iaq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - iaq.sql = prev + _q.sql = prev } return nil } -func (iaq *IntegrationAttachmentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*IntegrationAttachment, error) { +func (_q *IntegrationAttachmentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*IntegrationAttachment, error) { var ( nodes = []*IntegrationAttachment{} - withFKs = iaq.withFKs - _spec = iaq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [2]bool{ - iaq.withIntegration != nil, - iaq.withWorkflow != nil, + _q.withIntegration != nil, + _q.withWorkflow != nil, } ) - if iaq.withIntegration != nil { + if _q.withIntegration != nil { withFKs = true } if withFKs { @@ -427,31 +427,31 @@ func (iaq *IntegrationAttachmentQuery) sqlAll(ctx context.Context, hooks ...quer return (*IntegrationAttachment).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &IntegrationAttachment{config: iaq.config} + node := &IntegrationAttachment{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(iaq.modifiers) > 0 { - _spec.Modifiers = iaq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, iaq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := iaq.withIntegration; query != nil { - if err := iaq.loadIntegration(ctx, query, nodes, nil, + if query := _q.withIntegration; query != nil { + if err := _q.loadIntegration(ctx, query, nodes, nil, func(n 
*IntegrationAttachment, e *Integration) { n.Edges.Integration = e }); err != nil { return nil, err } } - if query := iaq.withWorkflow; query != nil { - if err := iaq.loadWorkflow(ctx, query, nodes, nil, + if query := _q.withWorkflow; query != nil { + if err := _q.loadWorkflow(ctx, query, nodes, nil, func(n *IntegrationAttachment, e *Workflow) { n.Edges.Workflow = e }); err != nil { return nil, err } @@ -459,7 +459,7 @@ func (iaq *IntegrationAttachmentQuery) sqlAll(ctx context.Context, hooks ...quer return nodes, nil } -func (iaq *IntegrationAttachmentQuery) loadIntegration(ctx context.Context, query *IntegrationQuery, nodes []*IntegrationAttachment, init func(*IntegrationAttachment), assign func(*IntegrationAttachment, *Integration)) error { +func (_q *IntegrationAttachmentQuery) loadIntegration(ctx context.Context, query *IntegrationQuery, nodes []*IntegrationAttachment, init func(*IntegrationAttachment), assign func(*IntegrationAttachment, *Integration)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*IntegrationAttachment) for i := range nodes { @@ -491,7 +491,7 @@ func (iaq *IntegrationAttachmentQuery) loadIntegration(ctx context.Context, quer } return nil } -func (iaq *IntegrationAttachmentQuery) loadWorkflow(ctx context.Context, query *WorkflowQuery, nodes []*IntegrationAttachment, init func(*IntegrationAttachment), assign func(*IntegrationAttachment, *Workflow)) error { +func (_q *IntegrationAttachmentQuery) loadWorkflow(ctx context.Context, query *WorkflowQuery, nodes []*IntegrationAttachment, init func(*IntegrationAttachment), assign func(*IntegrationAttachment, *Workflow)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*IntegrationAttachment) for i := range nodes { @@ -521,27 +521,27 @@ func (iaq *IntegrationAttachmentQuery) loadWorkflow(ctx context.Context, query * return nil } -func (iaq *IntegrationAttachmentQuery) sqlCount(ctx context.Context) (int, error) { - _spec := iaq.querySpec() - if len(iaq.modifiers) > 0 { - _spec.Modifiers = iaq.modifiers +func (_q *IntegrationAttachmentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = iaq.ctx.Fields - if len(iaq.ctx.Fields) > 0 { - _spec.Unique = iaq.ctx.Unique != nil && *iaq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, iaq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (iaq *IntegrationAttachmentQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *IntegrationAttachmentQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(integrationattachment.Table, integrationattachment.Columns, sqlgraph.NewFieldSpec(integrationattachment.FieldID, field.TypeUUID)) - _spec.From = iaq.sql - if unique := iaq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if iaq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := iaq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, integrationattachment.FieldID) for i := range fields { @@ -549,24 +549,24 @@ func (iaq *IntegrationAttachmentQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } 
- if iaq.withWorkflow != nil { + if _q.withWorkflow != nil { _spec.Node.AddColumnOnce(integrationattachment.FieldWorkflowID) } } - if ps := iaq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := iaq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := iaq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := iaq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -576,36 +576,36 @@ func (iaq *IntegrationAttachmentQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (iaq *IntegrationAttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(iaq.driver.Dialect()) +func (_q *IntegrationAttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(integrationattachment.Table) - columns := iaq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = integrationattachment.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if iaq.sql != nil { - selector = iaq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if iaq.ctx.Unique != nil && *iaq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range iaq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range iaq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range iaq.order { + for _, p := range _q.order { p(selector) } - if offset := iaq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := iaq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -614,33 +614,33 @@ func (iaq *IntegrationAttachmentQuery) sqlQuery(ctx context.Context) *sql.Select // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (iaq *IntegrationAttachmentQuery) ForUpdate(opts ...sql.LockOption) *IntegrationAttachmentQuery { - if iaq.driver.Dialect() == dialect.Postgres { - iaq.Unique(false) +func (_q *IntegrationAttachmentQuery) ForUpdate(opts ...sql.LockOption) *IntegrationAttachmentQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - iaq.modifiers = append(iaq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return iaq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. 
-func (iaq *IntegrationAttachmentQuery) ForShare(opts ...sql.LockOption) *IntegrationAttachmentQuery { - if iaq.driver.Dialect() == dialect.Postgres { - iaq.Unique(false) +func (_q *IntegrationAttachmentQuery) ForShare(opts ...sql.LockOption) *IntegrationAttachmentQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - iaq.modifiers = append(iaq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return iaq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (iaq *IntegrationAttachmentQuery) Modify(modifiers ...func(s *sql.Selector)) *IntegrationAttachmentSelect { - iaq.modifiers = append(iaq.modifiers, modifiers...) - return iaq.Select() +func (_q *IntegrationAttachmentQuery) Modify(modifiers ...func(s *sql.Selector)) *IntegrationAttachmentSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // IntegrationAttachmentGroupBy is the group-by builder for IntegrationAttachment entities. @@ -650,41 +650,41 @@ type IntegrationAttachmentGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (iagb *IntegrationAttachmentGroupBy) Aggregate(fns ...AggregateFunc) *IntegrationAttachmentGroupBy { - iagb.fns = append(iagb.fns, fns...) - return iagb +func (_g *IntegrationAttachmentGroupBy) Aggregate(fns ...AggregateFunc) *IntegrationAttachmentGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (iagb *IntegrationAttachmentGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, iagb.build.ctx, ent.OpQueryGroupBy) - if err := iagb.build.prepareQuery(ctx); err != nil { +func (_g *IntegrationAttachmentGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*IntegrationAttachmentQuery, *IntegrationAttachmentGroupBy](ctx, iagb.build, iagb, iagb.build.inters, v) + return scanWithInterceptors[*IntegrationAttachmentQuery, *IntegrationAttachmentGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (iagb *IntegrationAttachmentGroupBy) sqlScan(ctx context.Context, root *IntegrationAttachmentQuery, v any) error { +func (_g *IntegrationAttachmentGroupBy) sqlScan(ctx context.Context, root *IntegrationAttachmentQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(iagb.fns)) - for _, fn := range iagb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*iagb.flds)+len(iagb.fns)) - for _, f := range *iagb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*iagb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := iagb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -698,27 +698,27 @@ type IntegrationAttachmentSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (ias *IntegrationAttachmentSelect) Aggregate(fns ...AggregateFunc) *IntegrationAttachmentSelect { - ias.fns = append(ias.fns, fns...) - return ias +func (_s *IntegrationAttachmentSelect) Aggregate(fns ...AggregateFunc) *IntegrationAttachmentSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (ias *IntegrationAttachmentSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ias.ctx, ent.OpQuerySelect) - if err := ias.prepareQuery(ctx); err != nil { +func (_s *IntegrationAttachmentSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*IntegrationAttachmentQuery, *IntegrationAttachmentSelect](ctx, ias.IntegrationAttachmentQuery, ias, ias.inters, v) + return scanWithInterceptors[*IntegrationAttachmentQuery, *IntegrationAttachmentSelect](ctx, _s.IntegrationAttachmentQuery, _s, _s.inters, v) } -func (ias *IntegrationAttachmentSelect) sqlScan(ctx context.Context, root *IntegrationAttachmentQuery, v any) error { +func (_s *IntegrationAttachmentSelect) sqlScan(ctx context.Context, root *IntegrationAttachmentQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ias.fns)) - for _, fn := range ias.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*ias.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -726,7 +726,7 @@ func (ias *IntegrationAttachmentSelect) sqlScan(ctx context.Context, root *Integ } rows := &sql.Rows{} query, args := selector.Query() - if err := ias.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -734,7 +734,7 @@ func (ias *IntegrationAttachmentSelect) sqlScan(ctx context.Context, root *Integ } // Modify adds a query modifier for attaching custom logic to queries. -func (ias *IntegrationAttachmentSelect) Modify(modifiers ...func(s *sql.Selector)) *IntegrationAttachmentSelect { - ias.modifiers = append(ias.modifiers, modifiers...) - return ias +func (_s *IntegrationAttachmentSelect) Modify(modifiers ...func(s *sql.Selector)) *IntegrationAttachmentSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/integrationattachment_update.go b/app/controlplane/pkg/data/ent/integrationattachment_update.go index e83ac0a75..dd9db699b 100644 --- a/app/controlplane/pkg/data/ent/integrationattachment_update.go +++ b/app/controlplane/pkg/data/ent/integrationattachment_update.go @@ -27,98 +27,98 @@ type IntegrationAttachmentUpdate struct { } // Where appends a list predicates to the IntegrationAttachmentUpdate builder. 
-func (iau *IntegrationAttachmentUpdate) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentUpdate { - iau.mutation.Where(ps...) - return iau +func (_u *IntegrationAttachmentUpdate) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentUpdate { + _u.mutation.Where(ps...) + return _u } // SetConfiguration sets the "configuration" field. -func (iau *IntegrationAttachmentUpdate) SetConfiguration(b []byte) *IntegrationAttachmentUpdate { - iau.mutation.SetConfiguration(b) - return iau +func (_u *IntegrationAttachmentUpdate) SetConfiguration(v []byte) *IntegrationAttachmentUpdate { + _u.mutation.SetConfiguration(v) + return _u } // ClearConfiguration clears the value of the "configuration" field. -func (iau *IntegrationAttachmentUpdate) ClearConfiguration() *IntegrationAttachmentUpdate { - iau.mutation.ClearConfiguration() - return iau +func (_u *IntegrationAttachmentUpdate) ClearConfiguration() *IntegrationAttachmentUpdate { + _u.mutation.ClearConfiguration() + return _u } // SetDeletedAt sets the "deleted_at" field. -func (iau *IntegrationAttachmentUpdate) SetDeletedAt(t time.Time) *IntegrationAttachmentUpdate { - iau.mutation.SetDeletedAt(t) - return iau +func (_u *IntegrationAttachmentUpdate) SetDeletedAt(v time.Time) *IntegrationAttachmentUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (iau *IntegrationAttachmentUpdate) SetNillableDeletedAt(t *time.Time) *IntegrationAttachmentUpdate { - if t != nil { - iau.SetDeletedAt(*t) +func (_u *IntegrationAttachmentUpdate) SetNillableDeletedAt(v *time.Time) *IntegrationAttachmentUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return iau + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (iau *IntegrationAttachmentUpdate) ClearDeletedAt() *IntegrationAttachmentUpdate { - iau.mutation.ClearDeletedAt() - return iau +func (_u *IntegrationAttachmentUpdate) ClearDeletedAt() *IntegrationAttachmentUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetWorkflowID sets the "workflow_id" field. -func (iau *IntegrationAttachmentUpdate) SetWorkflowID(u uuid.UUID) *IntegrationAttachmentUpdate { - iau.mutation.SetWorkflowID(u) - return iau +func (_u *IntegrationAttachmentUpdate) SetWorkflowID(v uuid.UUID) *IntegrationAttachmentUpdate { + _u.mutation.SetWorkflowID(v) + return _u } // SetNillableWorkflowID sets the "workflow_id" field if the given value is not nil. -func (iau *IntegrationAttachmentUpdate) SetNillableWorkflowID(u *uuid.UUID) *IntegrationAttachmentUpdate { - if u != nil { - iau.SetWorkflowID(*u) +func (_u *IntegrationAttachmentUpdate) SetNillableWorkflowID(v *uuid.UUID) *IntegrationAttachmentUpdate { + if v != nil { + _u.SetWorkflowID(*v) } - return iau + return _u } // SetIntegrationID sets the "integration" edge to the Integration entity by ID. -func (iau *IntegrationAttachmentUpdate) SetIntegrationID(id uuid.UUID) *IntegrationAttachmentUpdate { - iau.mutation.SetIntegrationID(id) - return iau +func (_u *IntegrationAttachmentUpdate) SetIntegrationID(id uuid.UUID) *IntegrationAttachmentUpdate { + _u.mutation.SetIntegrationID(id) + return _u } // SetIntegration sets the "integration" edge to the Integration entity. 
-func (iau *IntegrationAttachmentUpdate) SetIntegration(i *Integration) *IntegrationAttachmentUpdate { - return iau.SetIntegrationID(i.ID) +func (_u *IntegrationAttachmentUpdate) SetIntegration(v *Integration) *IntegrationAttachmentUpdate { + return _u.SetIntegrationID(v.ID) } // SetWorkflow sets the "workflow" edge to the Workflow entity. -func (iau *IntegrationAttachmentUpdate) SetWorkflow(w *Workflow) *IntegrationAttachmentUpdate { - return iau.SetWorkflowID(w.ID) +func (_u *IntegrationAttachmentUpdate) SetWorkflow(v *Workflow) *IntegrationAttachmentUpdate { + return _u.SetWorkflowID(v.ID) } // Mutation returns the IntegrationAttachmentMutation object of the builder. -func (iau *IntegrationAttachmentUpdate) Mutation() *IntegrationAttachmentMutation { - return iau.mutation +func (_u *IntegrationAttachmentUpdate) Mutation() *IntegrationAttachmentMutation { + return _u.mutation } // ClearIntegration clears the "integration" edge to the Integration entity. -func (iau *IntegrationAttachmentUpdate) ClearIntegration() *IntegrationAttachmentUpdate { - iau.mutation.ClearIntegration() - return iau +func (_u *IntegrationAttachmentUpdate) ClearIntegration() *IntegrationAttachmentUpdate { + _u.mutation.ClearIntegration() + return _u } // ClearWorkflow clears the "workflow" edge to the Workflow entity. -func (iau *IntegrationAttachmentUpdate) ClearWorkflow() *IntegrationAttachmentUpdate { - iau.mutation.ClearWorkflow() - return iau +func (_u *IntegrationAttachmentUpdate) ClearWorkflow() *IntegrationAttachmentUpdate { + _u.mutation.ClearWorkflow() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. -func (iau *IntegrationAttachmentUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, iau.sqlSave, iau.mutation, iau.hooks) +func (_u *IntegrationAttachmentUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (iau *IntegrationAttachmentUpdate) SaveX(ctx context.Context) int { - affected, err := iau.Save(ctx) +func (_u *IntegrationAttachmentUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -126,60 +126,60 @@ func (iau *IntegrationAttachmentUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (iau *IntegrationAttachmentUpdate) Exec(ctx context.Context) error { - _, err := iau.Save(ctx) +func (_u *IntegrationAttachmentUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (iau *IntegrationAttachmentUpdate) ExecX(ctx context.Context) { - if err := iau.Exec(ctx); err != nil { +func (_u *IntegrationAttachmentUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. 
-func (iau *IntegrationAttachmentUpdate) check() error { - if iau.mutation.IntegrationCleared() && len(iau.mutation.IntegrationIDs()) > 0 { +func (_u *IntegrationAttachmentUpdate) check() error { + if _u.mutation.IntegrationCleared() && len(_u.mutation.IntegrationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "IntegrationAttachment.integration"`) } - if iau.mutation.WorkflowCleared() && len(iau.mutation.WorkflowIDs()) > 0 { + if _u.mutation.WorkflowCleared() && len(_u.mutation.WorkflowIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "IntegrationAttachment.workflow"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (iau *IntegrationAttachmentUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *IntegrationAttachmentUpdate { - iau.modifiers = append(iau.modifiers, modifiers...) - return iau +func (_u *IntegrationAttachmentUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *IntegrationAttachmentUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (iau *IntegrationAttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := iau.check(); err != nil { - return n, err +func (_u *IntegrationAttachmentUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(integrationattachment.Table, integrationattachment.Columns, sqlgraph.NewFieldSpec(integrationattachment.FieldID, field.TypeUUID)) - if ps := iau.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := iau.mutation.Configuration(); ok { + if value, ok := _u.mutation.Configuration(); ok { _spec.SetField(integrationattachment.FieldConfiguration, field.TypeBytes, value) } - if iau.mutation.ConfigurationCleared() { + if _u.mutation.ConfigurationCleared() { _spec.ClearField(integrationattachment.FieldConfiguration, field.TypeBytes) } - if value, ok := iau.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(integrationattachment.FieldDeletedAt, field.TypeTime, value) } - if iau.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(integrationattachment.FieldDeletedAt, field.TypeTime) } - if iau.mutation.IntegrationCleared() { + if _u.mutation.IntegrationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -192,7 +192,7 @@ func (iau *IntegrationAttachmentUpdate) sqlSave(ctx context.Context) (n int, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iau.mutation.IntegrationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.IntegrationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -208,7 +208,7 @@ func (iau *IntegrationAttachmentUpdate) sqlSave(ctx context.Context) (n int, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if iau.mutation.WorkflowCleared() { + if _u.mutation.WorkflowCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -221,7 +221,7 @@ func (iau *IntegrationAttachmentUpdate) sqlSave(ctx context.Context) (n int, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iau.mutation.WorkflowIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -237,8 +237,8 @@ 
func (iau *IntegrationAttachmentUpdate) sqlSave(ctx context.Context) (n int, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(iau.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, iau.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{integrationattachment.Label} } else if sqlgraph.IsConstraintError(err) { @@ -246,8 +246,8 @@ func (iau *IntegrationAttachmentUpdate) sqlSave(ctx context.Context) (n int, err } return 0, err } - iau.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // IntegrationAttachmentUpdateOne is the builder for updating a single IntegrationAttachment entity. @@ -260,105 +260,105 @@ type IntegrationAttachmentUpdateOne struct { } // SetConfiguration sets the "configuration" field. -func (iauo *IntegrationAttachmentUpdateOne) SetConfiguration(b []byte) *IntegrationAttachmentUpdateOne { - iauo.mutation.SetConfiguration(b) - return iauo +func (_u *IntegrationAttachmentUpdateOne) SetConfiguration(v []byte) *IntegrationAttachmentUpdateOne { + _u.mutation.SetConfiguration(v) + return _u } // ClearConfiguration clears the value of the "configuration" field. -func (iauo *IntegrationAttachmentUpdateOne) ClearConfiguration() *IntegrationAttachmentUpdateOne { - iauo.mutation.ClearConfiguration() - return iauo +func (_u *IntegrationAttachmentUpdateOne) ClearConfiguration() *IntegrationAttachmentUpdateOne { + _u.mutation.ClearConfiguration() + return _u } // SetDeletedAt sets the "deleted_at" field. -func (iauo *IntegrationAttachmentUpdateOne) SetDeletedAt(t time.Time) *IntegrationAttachmentUpdateOne { - iauo.mutation.SetDeletedAt(t) - return iauo +func (_u *IntegrationAttachmentUpdateOne) SetDeletedAt(v time.Time) *IntegrationAttachmentUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (iauo *IntegrationAttachmentUpdateOne) SetNillableDeletedAt(t *time.Time) *IntegrationAttachmentUpdateOne { - if t != nil { - iauo.SetDeletedAt(*t) +func (_u *IntegrationAttachmentUpdateOne) SetNillableDeletedAt(v *time.Time) *IntegrationAttachmentUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return iauo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (iauo *IntegrationAttachmentUpdateOne) ClearDeletedAt() *IntegrationAttachmentUpdateOne { - iauo.mutation.ClearDeletedAt() - return iauo +func (_u *IntegrationAttachmentUpdateOne) ClearDeletedAt() *IntegrationAttachmentUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetWorkflowID sets the "workflow_id" field. -func (iauo *IntegrationAttachmentUpdateOne) SetWorkflowID(u uuid.UUID) *IntegrationAttachmentUpdateOne { - iauo.mutation.SetWorkflowID(u) - return iauo +func (_u *IntegrationAttachmentUpdateOne) SetWorkflowID(v uuid.UUID) *IntegrationAttachmentUpdateOne { + _u.mutation.SetWorkflowID(v) + return _u } // SetNillableWorkflowID sets the "workflow_id" field if the given value is not nil. 
-func (iauo *IntegrationAttachmentUpdateOne) SetNillableWorkflowID(u *uuid.UUID) *IntegrationAttachmentUpdateOne { - if u != nil { - iauo.SetWorkflowID(*u) +func (_u *IntegrationAttachmentUpdateOne) SetNillableWorkflowID(v *uuid.UUID) *IntegrationAttachmentUpdateOne { + if v != nil { + _u.SetWorkflowID(*v) } - return iauo + return _u } // SetIntegrationID sets the "integration" edge to the Integration entity by ID. -func (iauo *IntegrationAttachmentUpdateOne) SetIntegrationID(id uuid.UUID) *IntegrationAttachmentUpdateOne { - iauo.mutation.SetIntegrationID(id) - return iauo +func (_u *IntegrationAttachmentUpdateOne) SetIntegrationID(id uuid.UUID) *IntegrationAttachmentUpdateOne { + _u.mutation.SetIntegrationID(id) + return _u } // SetIntegration sets the "integration" edge to the Integration entity. -func (iauo *IntegrationAttachmentUpdateOne) SetIntegration(i *Integration) *IntegrationAttachmentUpdateOne { - return iauo.SetIntegrationID(i.ID) +func (_u *IntegrationAttachmentUpdateOne) SetIntegration(v *Integration) *IntegrationAttachmentUpdateOne { + return _u.SetIntegrationID(v.ID) } // SetWorkflow sets the "workflow" edge to the Workflow entity. -func (iauo *IntegrationAttachmentUpdateOne) SetWorkflow(w *Workflow) *IntegrationAttachmentUpdateOne { - return iauo.SetWorkflowID(w.ID) +func (_u *IntegrationAttachmentUpdateOne) SetWorkflow(v *Workflow) *IntegrationAttachmentUpdateOne { + return _u.SetWorkflowID(v.ID) } // Mutation returns the IntegrationAttachmentMutation object of the builder. -func (iauo *IntegrationAttachmentUpdateOne) Mutation() *IntegrationAttachmentMutation { - return iauo.mutation +func (_u *IntegrationAttachmentUpdateOne) Mutation() *IntegrationAttachmentMutation { + return _u.mutation } // ClearIntegration clears the "integration" edge to the Integration entity. -func (iauo *IntegrationAttachmentUpdateOne) ClearIntegration() *IntegrationAttachmentUpdateOne { - iauo.mutation.ClearIntegration() - return iauo +func (_u *IntegrationAttachmentUpdateOne) ClearIntegration() *IntegrationAttachmentUpdateOne { + _u.mutation.ClearIntegration() + return _u } // ClearWorkflow clears the "workflow" edge to the Workflow entity. -func (iauo *IntegrationAttachmentUpdateOne) ClearWorkflow() *IntegrationAttachmentUpdateOne { - iauo.mutation.ClearWorkflow() - return iauo +func (_u *IntegrationAttachmentUpdateOne) ClearWorkflow() *IntegrationAttachmentUpdateOne { + _u.mutation.ClearWorkflow() + return _u } // Where appends a list predicates to the IntegrationAttachmentUpdate builder. -func (iauo *IntegrationAttachmentUpdateOne) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentUpdateOne { - iauo.mutation.Where(ps...) - return iauo +func (_u *IntegrationAttachmentUpdateOne) Where(ps ...predicate.IntegrationAttachment) *IntegrationAttachmentUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (iauo *IntegrationAttachmentUpdateOne) Select(field string, fields ...string) *IntegrationAttachmentUpdateOne { - iauo.fields = append([]string{field}, fields...) - return iauo +func (_u *IntegrationAttachmentUpdateOne) Select(field string, fields ...string) *IntegrationAttachmentUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated IntegrationAttachment entity. 
-func (iauo *IntegrationAttachmentUpdateOne) Save(ctx context.Context) (*IntegrationAttachment, error) { - return withHooks(ctx, iauo.sqlSave, iauo.mutation, iauo.hooks) +func (_u *IntegrationAttachmentUpdateOne) Save(ctx context.Context) (*IntegrationAttachment, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (iauo *IntegrationAttachmentUpdateOne) SaveX(ctx context.Context) *IntegrationAttachment { - node, err := iauo.Save(ctx) +func (_u *IntegrationAttachmentUpdateOne) SaveX(ctx context.Context) *IntegrationAttachment { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -366,46 +366,46 @@ func (iauo *IntegrationAttachmentUpdateOne) SaveX(ctx context.Context) *Integrat } // Exec executes the query on the entity. -func (iauo *IntegrationAttachmentUpdateOne) Exec(ctx context.Context) error { - _, err := iauo.Save(ctx) +func (_u *IntegrationAttachmentUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (iauo *IntegrationAttachmentUpdateOne) ExecX(ctx context.Context) { - if err := iauo.Exec(ctx); err != nil { +func (_u *IntegrationAttachmentUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (iauo *IntegrationAttachmentUpdateOne) check() error { - if iauo.mutation.IntegrationCleared() && len(iauo.mutation.IntegrationIDs()) > 0 { +func (_u *IntegrationAttachmentUpdateOne) check() error { + if _u.mutation.IntegrationCleared() && len(_u.mutation.IntegrationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "IntegrationAttachment.integration"`) } - if iauo.mutation.WorkflowCleared() && len(iauo.mutation.WorkflowIDs()) > 0 { + if _u.mutation.WorkflowCleared() && len(_u.mutation.WorkflowIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "IntegrationAttachment.workflow"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (iauo *IntegrationAttachmentUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *IntegrationAttachmentUpdateOne { - iauo.modifiers = append(iauo.modifiers, modifiers...) - return iauo +func (_u *IntegrationAttachmentUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *IntegrationAttachmentUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (iauo *IntegrationAttachmentUpdateOne) sqlSave(ctx context.Context) (_node *IntegrationAttachment, err error) { - if err := iauo.check(); err != nil { +func (_u *IntegrationAttachmentUpdateOne) sqlSave(ctx context.Context) (_node *IntegrationAttachment, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(integrationattachment.Table, integrationattachment.Columns, sqlgraph.NewFieldSpec(integrationattachment.FieldID, field.TypeUUID)) - id, ok := iauo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "IntegrationAttachment.id" for update`)} } _spec.Node.ID.Value = id - if fields := iauo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, integrationattachment.FieldID) for _, f := range fields { @@ -417,26 +417,26 @@ func (iauo *IntegrationAttachmentUpdateOne) sqlSave(ctx context.Context) (_node } } } - if ps := iauo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := iauo.mutation.Configuration(); ok { + if value, ok := _u.mutation.Configuration(); ok { _spec.SetField(integrationattachment.FieldConfiguration, field.TypeBytes, value) } - if iauo.mutation.ConfigurationCleared() { + if _u.mutation.ConfigurationCleared() { _spec.ClearField(integrationattachment.FieldConfiguration, field.TypeBytes) } - if value, ok := iauo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(integrationattachment.FieldDeletedAt, field.TypeTime, value) } - if iauo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(integrationattachment.FieldDeletedAt, field.TypeTime) } - if iauo.mutation.IntegrationCleared() { + if _u.mutation.IntegrationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -449,7 +449,7 @@ func (iauo *IntegrationAttachmentUpdateOne) sqlSave(ctx context.Context) (_node } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iauo.mutation.IntegrationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.IntegrationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -465,7 +465,7 @@ func (iauo *IntegrationAttachmentUpdateOne) sqlSave(ctx context.Context) (_node } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if iauo.mutation.WorkflowCleared() { + if _u.mutation.WorkflowCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -478,7 +478,7 @@ func (iauo *IntegrationAttachmentUpdateOne) sqlSave(ctx context.Context) (_node } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := iauo.mutation.WorkflowIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -494,11 +494,11 @@ func (iauo *IntegrationAttachmentUpdateOne) sqlSave(ctx context.Context) (_node } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(iauo.modifiers...) - _node = &IntegrationAttachment{config: iauo.config} + _spec.AddModifiers(_u.modifiers...) 
+ _node = &IntegrationAttachment{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, iauo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{integrationattachment.Label} } else if sqlgraph.IsConstraintError(err) { @@ -506,6 +506,6 @@ func (iauo *IntegrationAttachmentUpdateOne) sqlSave(ctx context.Context) (_node } return nil, err } - iauo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/membership.go b/app/controlplane/pkg/data/ent/membership.go index 4527b29c8..9ab121217 100644 --- a/app/controlplane/pkg/data/ent/membership.go +++ b/app/controlplane/pkg/data/ent/membership.go @@ -132,7 +132,7 @@ func (*Membership) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Membership fields. -func (m *Membership) assignValues(columns []string, values []any) error { +func (_m *Membership) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -142,79 +142,79 @@ func (m *Membership) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - m.ID = *value + _m.ID = *value } case membership.FieldCurrent: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field current", values[i]) } else if value.Valid { - m.Current = value.Bool + _m.Current = value.Bool } case membership.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - m.CreatedAt = value.Time + _m.CreatedAt = value.Time } case membership.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - m.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case membership.FieldRole: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field role", values[i]) } else if value.Valid { - m.Role = authz.Role(value.String) + _m.Role = authz.Role(value.String) } case membership.FieldMembershipType: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field membership_type", values[i]) } else if value.Valid { - m.MembershipType = authz.MembershipType(value.String) + _m.MembershipType = authz.MembershipType(value.String) } case membership.FieldMemberID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field member_id", values[i]) } else if value != nil { - m.MemberID = *value + _m.MemberID = *value } case membership.FieldResourceType: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field resource_type", values[i]) } else if value.Valid { - m.ResourceType = authz.ResourceType(value.String) + _m.ResourceType = authz.ResourceType(value.String) } case membership.FieldResourceID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field resource_id", values[i]) } else if value != nil { - m.ResourceID = *value + _m.ResourceID = *value } case 
membership.FieldParentID: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field parent_id", values[i]) } else if value.Valid { - m.ParentID = new(uuid.UUID) - *m.ParentID = *value.S.(*uuid.UUID) + _m.ParentID = new(uuid.UUID) + *_m.ParentID = *value.S.(*uuid.UUID) } case membership.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field organization_memberships", values[i]) } else if value.Valid { - m.organization_memberships = new(uuid.UUID) - *m.organization_memberships = *value.S.(*uuid.UUID) + _m.organization_memberships = new(uuid.UUID) + *_m.organization_memberships = *value.S.(*uuid.UUID) } case membership.ForeignKeys[1]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field user_memberships", values[i]) } else if value.Valid { - m.user_memberships = new(uuid.UUID) - *m.user_memberships = *value.S.(*uuid.UUID) + _m.user_memberships = new(uuid.UUID) + *_m.user_memberships = *value.S.(*uuid.UUID) } default: - m.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -222,78 +222,78 @@ func (m *Membership) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the Membership. // This includes values selected through modifiers, order, etc. -func (m *Membership) Value(name string) (ent.Value, error) { - return m.selectValues.Get(name) +func (_m *Membership) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryOrganization queries the "organization" edge of the Membership entity. -func (m *Membership) QueryOrganization() *OrganizationQuery { - return NewMembershipClient(m.config).QueryOrganization(m) +func (_m *Membership) QueryOrganization() *OrganizationQuery { + return NewMembershipClient(_m.config).QueryOrganization(_m) } // QueryUser queries the "user" edge of the Membership entity. -func (m *Membership) QueryUser() *UserQuery { - return NewMembershipClient(m.config).QueryUser(m) +func (_m *Membership) QueryUser() *UserQuery { + return NewMembershipClient(_m.config).QueryUser(_m) } // QueryParent queries the "parent" edge of the Membership entity. -func (m *Membership) QueryParent() *MembershipQuery { - return NewMembershipClient(m.config).QueryParent(m) +func (_m *Membership) QueryParent() *MembershipQuery { + return NewMembershipClient(_m.config).QueryParent(_m) } // QueryChildren queries the "children" edge of the Membership entity. -func (m *Membership) QueryChildren() *MembershipQuery { - return NewMembershipClient(m.config).QueryChildren(m) +func (_m *Membership) QueryChildren() *MembershipQuery { + return NewMembershipClient(_m.config).QueryChildren(_m) } // Update returns a builder for updating this Membership. // Note that you need to call Membership.Unwrap() before calling this method if this Membership // was returned from a transaction, and the transaction was committed or rolled back. -func (m *Membership) Update() *MembershipUpdateOne { - return NewMembershipClient(m.config).UpdateOne(m) +func (_m *Membership) Update() *MembershipUpdateOne { + return NewMembershipClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the Membership entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
-func (m *Membership) Unwrap() *Membership { - _tx, ok := m.config.driver.(*txDriver) +func (_m *Membership) Unwrap() *Membership { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: Membership is not a transactional entity") } - m.config.driver = _tx.drv - return m + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (m *Membership) String() string { +func (_m *Membership) String() string { var builder strings.Builder builder.WriteString("Membership(") - builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("current=") - builder.WriteString(fmt.Sprintf("%v", m.Current)) + builder.WriteString(fmt.Sprintf("%v", _m.Current)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("role=") - builder.WriteString(fmt.Sprintf("%v", m.Role)) + builder.WriteString(fmt.Sprintf("%v", _m.Role)) builder.WriteString(", ") builder.WriteString("membership_type=") - builder.WriteString(fmt.Sprintf("%v", m.MembershipType)) + builder.WriteString(fmt.Sprintf("%v", _m.MembershipType)) builder.WriteString(", ") builder.WriteString("member_id=") - builder.WriteString(fmt.Sprintf("%v", m.MemberID)) + builder.WriteString(fmt.Sprintf("%v", _m.MemberID)) builder.WriteString(", ") builder.WriteString("resource_type=") - builder.WriteString(fmt.Sprintf("%v", m.ResourceType)) + builder.WriteString(fmt.Sprintf("%v", _m.ResourceType)) builder.WriteString(", ") builder.WriteString("resource_id=") - builder.WriteString(fmt.Sprintf("%v", m.ResourceID)) + builder.WriteString(fmt.Sprintf("%v", _m.ResourceID)) builder.WriteString(", ") - if v := m.ParentID; v != nil { + if v := _m.ParentID; v != nil { builder.WriteString("parent_id=") builder.WriteString(fmt.Sprintf("%v", *v)) } diff --git a/app/controlplane/pkg/data/ent/membership_create.go b/app/controlplane/pkg/data/ent/membership_create.go index 20d3d9c0b..a2a90baf8 100644 --- a/app/controlplane/pkg/data/ent/membership_create.go +++ b/app/controlplane/pkg/data/ent/membership_create.go @@ -28,209 +28,209 @@ type MembershipCreate struct { } // SetCurrent sets the "current" field. -func (mc *MembershipCreate) SetCurrent(b bool) *MembershipCreate { - mc.mutation.SetCurrent(b) - return mc +func (_c *MembershipCreate) SetCurrent(v bool) *MembershipCreate { + _c.mutation.SetCurrent(v) + return _c } // SetNillableCurrent sets the "current" field if the given value is not nil. -func (mc *MembershipCreate) SetNillableCurrent(b *bool) *MembershipCreate { - if b != nil { - mc.SetCurrent(*b) +func (_c *MembershipCreate) SetNillableCurrent(v *bool) *MembershipCreate { + if v != nil { + _c.SetCurrent(*v) } - return mc + return _c } // SetCreatedAt sets the "created_at" field. -func (mc *MembershipCreate) SetCreatedAt(t time.Time) *MembershipCreate { - mc.mutation.SetCreatedAt(t) - return mc +func (_c *MembershipCreate) SetCreatedAt(v time.Time) *MembershipCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
-func (mc *MembershipCreate) SetNillableCreatedAt(t *time.Time) *MembershipCreate { - if t != nil { - mc.SetCreatedAt(*t) +func (_c *MembershipCreate) SetNillableCreatedAt(v *time.Time) *MembershipCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return mc + return _c } // SetUpdatedAt sets the "updated_at" field. -func (mc *MembershipCreate) SetUpdatedAt(t time.Time) *MembershipCreate { - mc.mutation.SetUpdatedAt(t) - return mc +func (_c *MembershipCreate) SetUpdatedAt(v time.Time) *MembershipCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (mc *MembershipCreate) SetNillableUpdatedAt(t *time.Time) *MembershipCreate { - if t != nil { - mc.SetUpdatedAt(*t) +func (_c *MembershipCreate) SetNillableUpdatedAt(v *time.Time) *MembershipCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return mc + return _c } // SetRole sets the "role" field. -func (mc *MembershipCreate) SetRole(a authz.Role) *MembershipCreate { - mc.mutation.SetRole(a) - return mc +func (_c *MembershipCreate) SetRole(v authz.Role) *MembershipCreate { + _c.mutation.SetRole(v) + return _c } // SetMembershipType sets the "membership_type" field. -func (mc *MembershipCreate) SetMembershipType(at authz.MembershipType) *MembershipCreate { - mc.mutation.SetMembershipType(at) - return mc +func (_c *MembershipCreate) SetMembershipType(v authz.MembershipType) *MembershipCreate { + _c.mutation.SetMembershipType(v) + return _c } // SetNillableMembershipType sets the "membership_type" field if the given value is not nil. -func (mc *MembershipCreate) SetNillableMembershipType(at *authz.MembershipType) *MembershipCreate { - if at != nil { - mc.SetMembershipType(*at) +func (_c *MembershipCreate) SetNillableMembershipType(v *authz.MembershipType) *MembershipCreate { + if v != nil { + _c.SetMembershipType(*v) } - return mc + return _c } // SetMemberID sets the "member_id" field. -func (mc *MembershipCreate) SetMemberID(u uuid.UUID) *MembershipCreate { - mc.mutation.SetMemberID(u) - return mc +func (_c *MembershipCreate) SetMemberID(v uuid.UUID) *MembershipCreate { + _c.mutation.SetMemberID(v) + return _c } // SetNillableMemberID sets the "member_id" field if the given value is not nil. -func (mc *MembershipCreate) SetNillableMemberID(u *uuid.UUID) *MembershipCreate { - if u != nil { - mc.SetMemberID(*u) +func (_c *MembershipCreate) SetNillableMemberID(v *uuid.UUID) *MembershipCreate { + if v != nil { + _c.SetMemberID(*v) } - return mc + return _c } // SetResourceType sets the "resource_type" field. -func (mc *MembershipCreate) SetResourceType(at authz.ResourceType) *MembershipCreate { - mc.mutation.SetResourceType(at) - return mc +func (_c *MembershipCreate) SetResourceType(v authz.ResourceType) *MembershipCreate { + _c.mutation.SetResourceType(v) + return _c } // SetNillableResourceType sets the "resource_type" field if the given value is not nil. -func (mc *MembershipCreate) SetNillableResourceType(at *authz.ResourceType) *MembershipCreate { - if at != nil { - mc.SetResourceType(*at) +func (_c *MembershipCreate) SetNillableResourceType(v *authz.ResourceType) *MembershipCreate { + if v != nil { + _c.SetResourceType(*v) } - return mc + return _c } // SetResourceID sets the "resource_id" field. 
-func (mc *MembershipCreate) SetResourceID(u uuid.UUID) *MembershipCreate { - mc.mutation.SetResourceID(u) - return mc +func (_c *MembershipCreate) SetResourceID(v uuid.UUID) *MembershipCreate { + _c.mutation.SetResourceID(v) + return _c } // SetNillableResourceID sets the "resource_id" field if the given value is not nil. -func (mc *MembershipCreate) SetNillableResourceID(u *uuid.UUID) *MembershipCreate { - if u != nil { - mc.SetResourceID(*u) +func (_c *MembershipCreate) SetNillableResourceID(v *uuid.UUID) *MembershipCreate { + if v != nil { + _c.SetResourceID(*v) } - return mc + return _c } // SetParentID sets the "parent_id" field. -func (mc *MembershipCreate) SetParentID(u uuid.UUID) *MembershipCreate { - mc.mutation.SetParentID(u) - return mc +func (_c *MembershipCreate) SetParentID(v uuid.UUID) *MembershipCreate { + _c.mutation.SetParentID(v) + return _c } // SetNillableParentID sets the "parent_id" field if the given value is not nil. -func (mc *MembershipCreate) SetNillableParentID(u *uuid.UUID) *MembershipCreate { - if u != nil { - mc.SetParentID(*u) +func (_c *MembershipCreate) SetNillableParentID(v *uuid.UUID) *MembershipCreate { + if v != nil { + _c.SetParentID(*v) } - return mc + return _c } // SetID sets the "id" field. -func (mc *MembershipCreate) SetID(u uuid.UUID) *MembershipCreate { - mc.mutation.SetID(u) - return mc +func (_c *MembershipCreate) SetID(v uuid.UUID) *MembershipCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (mc *MembershipCreate) SetNillableID(u *uuid.UUID) *MembershipCreate { - if u != nil { - mc.SetID(*u) +func (_c *MembershipCreate) SetNillableID(v *uuid.UUID) *MembershipCreate { + if v != nil { + _c.SetID(*v) } - return mc + return _c } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (mc *MembershipCreate) SetOrganizationID(id uuid.UUID) *MembershipCreate { - mc.mutation.SetOrganizationID(id) - return mc +func (_c *MembershipCreate) SetOrganizationID(id uuid.UUID) *MembershipCreate { + _c.mutation.SetOrganizationID(id) + return _c } // SetNillableOrganizationID sets the "organization" edge to the Organization entity by ID if the given value is not nil. -func (mc *MembershipCreate) SetNillableOrganizationID(id *uuid.UUID) *MembershipCreate { +func (_c *MembershipCreate) SetNillableOrganizationID(id *uuid.UUID) *MembershipCreate { if id != nil { - mc = mc.SetOrganizationID(*id) + _c = _c.SetOrganizationID(*id) } - return mc + return _c } // SetOrganization sets the "organization" edge to the Organization entity. -func (mc *MembershipCreate) SetOrganization(o *Organization) *MembershipCreate { - return mc.SetOrganizationID(o.ID) +func (_c *MembershipCreate) SetOrganization(v *Organization) *MembershipCreate { + return _c.SetOrganizationID(v.ID) } // SetUserID sets the "user" edge to the User entity by ID. -func (mc *MembershipCreate) SetUserID(id uuid.UUID) *MembershipCreate { - mc.mutation.SetUserID(id) - return mc +func (_c *MembershipCreate) SetUserID(id uuid.UUID) *MembershipCreate { + _c.mutation.SetUserID(id) + return _c } // SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. -func (mc *MembershipCreate) SetNillableUserID(id *uuid.UUID) *MembershipCreate { +func (_c *MembershipCreate) SetNillableUserID(id *uuid.UUID) *MembershipCreate { if id != nil { - mc = mc.SetUserID(*id) + _c = _c.SetUserID(*id) } - return mc + return _c } // SetUser sets the "user" edge to the User entity. 
-func (mc *MembershipCreate) SetUser(u *User) *MembershipCreate { - return mc.SetUserID(u.ID) +func (_c *MembershipCreate) SetUser(v *User) *MembershipCreate { + return _c.SetUserID(v.ID) } // SetParent sets the "parent" edge to the Membership entity. -func (mc *MembershipCreate) SetParent(m *Membership) *MembershipCreate { - return mc.SetParentID(m.ID) +func (_c *MembershipCreate) SetParent(v *Membership) *MembershipCreate { + return _c.SetParentID(v.ID) } // AddChildIDs adds the "children" edge to the Membership entity by IDs. -func (mc *MembershipCreate) AddChildIDs(ids ...uuid.UUID) *MembershipCreate { - mc.mutation.AddChildIDs(ids...) - return mc +func (_c *MembershipCreate) AddChildIDs(ids ...uuid.UUID) *MembershipCreate { + _c.mutation.AddChildIDs(ids...) + return _c } // AddChildren adds the "children" edges to the Membership entity. -func (mc *MembershipCreate) AddChildren(m ...*Membership) *MembershipCreate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_c *MembershipCreate) AddChildren(v ...*Membership) *MembershipCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return mc.AddChildIDs(ids...) + return _c.AddChildIDs(ids...) } // Mutation returns the MembershipMutation object of the builder. -func (mc *MembershipCreate) Mutation() *MembershipMutation { - return mc.mutation +func (_c *MembershipCreate) Mutation() *MembershipMutation { + return _c.mutation } // Save creates the Membership in the database. -func (mc *MembershipCreate) Save(ctx context.Context) (*Membership, error) { - mc.defaults() - return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) +func (_c *MembershipCreate) Save(ctx context.Context) (*Membership, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (mc *MembershipCreate) SaveX(ctx context.Context) *Membership { - v, err := mc.Save(ctx) +func (_c *MembershipCreate) SaveX(ctx context.Context) *Membership { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -238,63 +238,63 @@ func (mc *MembershipCreate) SaveX(ctx context.Context) *Membership { } // Exec executes the query. -func (mc *MembershipCreate) Exec(ctx context.Context) error { - _, err := mc.Save(ctx) +func (_c *MembershipCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (mc *MembershipCreate) ExecX(ctx context.Context) { - if err := mc.Exec(ctx); err != nil { +func (_c *MembershipCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. 
-func (mc *MembershipCreate) defaults() { - if _, ok := mc.mutation.Current(); !ok { +func (_c *MembershipCreate) defaults() { + if _, ok := _c.mutation.Current(); !ok { v := membership.DefaultCurrent - mc.mutation.SetCurrent(v) + _c.mutation.SetCurrent(v) } - if _, ok := mc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { v := membership.DefaultCreatedAt() - mc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := mc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := membership.DefaultUpdatedAt() - mc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := mc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := membership.DefaultID() - mc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. -func (mc *MembershipCreate) check() error { - if _, ok := mc.mutation.Current(); !ok { +func (_c *MembershipCreate) check() error { + if _, ok := _c.mutation.Current(); !ok { return &ValidationError{Name: "current", err: errors.New(`ent: missing required field "Membership.current"`)} } - if _, ok := mc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Membership.created_at"`)} } - if _, ok := mc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Membership.updated_at"`)} } - if _, ok := mc.mutation.Role(); !ok { + if _, ok := _c.mutation.Role(); !ok { return &ValidationError{Name: "role", err: errors.New(`ent: missing required field "Membership.role"`)} } - if v, ok := mc.mutation.Role(); ok { + if v, ok := _c.mutation.Role(); ok { if err := membership.RoleValidator(v); err != nil { return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "Membership.role": %w`, err)} } } - if v, ok := mc.mutation.MembershipType(); ok { + if v, ok := _c.mutation.MembershipType(); ok { if err := membership.MembershipTypeValidator(v); err != nil { return &ValidationError{Name: "membership_type", err: fmt.Errorf(`ent: validator failed for field "Membership.membership_type": %w`, err)} } } - if v, ok := mc.mutation.ResourceType(); ok { + if v, ok := _c.mutation.ResourceType(); ok { if err := membership.ResourceTypeValidator(v); err != nil { return &ValidationError{Name: "resource_type", err: fmt.Errorf(`ent: validator failed for field "Membership.resource_type": %w`, err)} } @@ -302,12 +302,12 @@ func (mc *MembershipCreate) check() error { return nil } -func (mc *MembershipCreate) sqlSave(ctx context.Context) (*Membership, error) { - if err := mc.check(); err != nil { +func (_c *MembershipCreate) sqlSave(ctx context.Context) (*Membership, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := mc.createSpec() - if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -320,54 +320,54 @@ func (mc *MembershipCreate) sqlSave(ctx context.Context) (*Membership, error) { return nil, err } } - mc.mutation.id = &_node.ID - mc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (mc *MembershipCreate) createSpec() (*Membership, *sqlgraph.CreateSpec) { 
+func (_c *MembershipCreate) createSpec() (*Membership, *sqlgraph.CreateSpec) { var ( - _node = &Membership{config: mc.config} + _node = &Membership{config: _c.config} _spec = sqlgraph.NewCreateSpec(membership.Table, sqlgraph.NewFieldSpec(membership.FieldID, field.TypeUUID)) ) - _spec.OnConflict = mc.conflict - if id, ok := mc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := mc.mutation.Current(); ok { + if value, ok := _c.mutation.Current(); ok { _spec.SetField(membership.FieldCurrent, field.TypeBool, value) _node.Current = value } - if value, ok := mc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(membership.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := mc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(membership.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := mc.mutation.Role(); ok { + if value, ok := _c.mutation.Role(); ok { _spec.SetField(membership.FieldRole, field.TypeEnum, value) _node.Role = value } - if value, ok := mc.mutation.MembershipType(); ok { + if value, ok := _c.mutation.MembershipType(); ok { _spec.SetField(membership.FieldMembershipType, field.TypeEnum, value) _node.MembershipType = value } - if value, ok := mc.mutation.MemberID(); ok { + if value, ok := _c.mutation.MemberID(); ok { _spec.SetField(membership.FieldMemberID, field.TypeUUID, value) _node.MemberID = value } - if value, ok := mc.mutation.ResourceType(); ok { + if value, ok := _c.mutation.ResourceType(); ok { _spec.SetField(membership.FieldResourceType, field.TypeEnum, value) _node.ResourceType = value } - if value, ok := mc.mutation.ResourceID(); ok { + if value, ok := _c.mutation.ResourceID(); ok { _spec.SetField(membership.FieldResourceID, field.TypeUUID, value) _node.ResourceID = value } - if nodes := mc.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -384,7 +384,7 @@ func (mc *MembershipCreate) createSpec() (*Membership, *sqlgraph.CreateSpec) { _node.organization_memberships = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := mc.mutation.UserIDs(); len(nodes) > 0 { + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -401,7 +401,7 @@ func (mc *MembershipCreate) createSpec() (*Membership, *sqlgraph.CreateSpec) { _node.user_memberships = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := mc.mutation.ParentIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ParentIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -418,7 +418,7 @@ func (mc *MembershipCreate) createSpec() (*Membership, *sqlgraph.CreateSpec) { _node.ParentID = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := mc.mutation.ChildrenIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ChildrenIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -453,10 +453,10 @@ func (mc *MembershipCreate) createSpec() (*Membership, *sqlgraph.CreateSpec) { // SetCurrent(v+v). // }). 
// Exec(ctx) -func (mc *MembershipCreate) OnConflict(opts ...sql.ConflictOption) *MembershipUpsertOne { - mc.conflict = opts +func (_c *MembershipCreate) OnConflict(opts ...sql.ConflictOption) *MembershipUpsertOne { + _c.conflict = opts return &MembershipUpsertOne{ - create: mc, + create: _c, } } @@ -466,10 +466,10 @@ func (mc *MembershipCreate) OnConflict(opts ...sql.ConflictOption) *MembershipUp // client.Membership.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (mc *MembershipCreate) OnConflictColumns(columns ...string) *MembershipUpsertOne { - mc.conflict = append(mc.conflict, sql.ConflictColumns(columns...)) +func (_c *MembershipCreate) OnConflictColumns(columns ...string) *MembershipUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &MembershipUpsertOne{ - create: mc, + create: _c, } } @@ -857,16 +857,16 @@ type MembershipCreateBulk struct { } // Save creates the Membership entities in the database. -func (mcb *MembershipCreateBulk) Save(ctx context.Context) ([]*Membership, error) { - if mcb.err != nil { - return nil, mcb.err +func (_c *MembershipCreateBulk) Save(ctx context.Context) ([]*Membership, error) { + if _c.err != nil { + return nil, _c.err } - specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) - nodes := make([]*Membership, len(mcb.builders)) - mutators := make([]Mutator, len(mcb.builders)) - for i := range mcb.builders { + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Membership, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := mcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*MembershipMutation) @@ -880,12 +880,12 @@ func (mcb *MembershipCreateBulk) Save(ctx context.Context) ([]*Membership, error var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = mcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -905,7 +905,7 @@ func (mcb *MembershipCreateBulk) Save(ctx context.Context) ([]*Membership, error }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -913,8 +913,8 @@ func (mcb *MembershipCreateBulk) Save(ctx context.Context) ([]*Membership, error } // SaveX is like Save, but panics if an error occurs. -func (mcb *MembershipCreateBulk) SaveX(ctx context.Context) []*Membership { - v, err := mcb.Save(ctx) +func (_c *MembershipCreateBulk) SaveX(ctx context.Context) []*Membership { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -922,14 +922,14 @@ func (mcb *MembershipCreateBulk) SaveX(ctx context.Context) []*Membership { } // Exec executes the query. 
-func (mcb *MembershipCreateBulk) Exec(ctx context.Context) error { - _, err := mcb.Save(ctx) +func (_c *MembershipCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (mcb *MembershipCreateBulk) ExecX(ctx context.Context) { - if err := mcb.Exec(ctx); err != nil { +func (_c *MembershipCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -949,10 +949,10 @@ func (mcb *MembershipCreateBulk) ExecX(ctx context.Context) { // SetCurrent(v+v). // }). // Exec(ctx) -func (mcb *MembershipCreateBulk) OnConflict(opts ...sql.ConflictOption) *MembershipUpsertBulk { - mcb.conflict = opts +func (_c *MembershipCreateBulk) OnConflict(opts ...sql.ConflictOption) *MembershipUpsertBulk { + _c.conflict = opts return &MembershipUpsertBulk{ - create: mcb, + create: _c, } } @@ -962,10 +962,10 @@ func (mcb *MembershipCreateBulk) OnConflict(opts ...sql.ConflictOption) *Members // client.Membership.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (mcb *MembershipCreateBulk) OnConflictColumns(columns ...string) *MembershipUpsertBulk { - mcb.conflict = append(mcb.conflict, sql.ConflictColumns(columns...)) +func (_c *MembershipCreateBulk) OnConflictColumns(columns ...string) *MembershipUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &MembershipUpsertBulk{ - create: mcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/membership_delete.go b/app/controlplane/pkg/data/ent/membership_delete.go index 1a8b2bc0c..936371cdc 100644 --- a/app/controlplane/pkg/data/ent/membership_delete.go +++ b/app/controlplane/pkg/data/ent/membership_delete.go @@ -20,56 +20,56 @@ type MembershipDelete struct { } // Where appends a list predicates to the MembershipDelete builder. -func (md *MembershipDelete) Where(ps ...predicate.Membership) *MembershipDelete { - md.mutation.Where(ps...) - return md +func (_d *MembershipDelete) Where(ps ...predicate.Membership) *MembershipDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (md *MembershipDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) +func (_d *MembershipDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (md *MembershipDelete) ExecX(ctx context.Context) int { - n, err := md.Exec(ctx) +func (_d *MembershipDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (md *MembershipDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *MembershipDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(membership.Table, sqlgraph.NewFieldSpec(membership.FieldID, field.TypeUUID)) - if ps := md.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, md.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - md.mutation.done = true + _d.mutation.done = true return affected, err } // MembershipDeleteOne is the builder for deleting a single Membership entity. 
type MembershipDeleteOne struct { - md *MembershipDelete + _d *MembershipDelete } // Where appends a list predicates to the MembershipDelete builder. -func (mdo *MembershipDeleteOne) Where(ps ...predicate.Membership) *MembershipDeleteOne { - mdo.md.mutation.Where(ps...) - return mdo +func (_d *MembershipDeleteOne) Where(ps ...predicate.Membership) *MembershipDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (mdo *MembershipDeleteOne) Exec(ctx context.Context) error { - n, err := mdo.md.Exec(ctx) +func (_d *MembershipDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (mdo *MembershipDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (mdo *MembershipDeleteOne) ExecX(ctx context.Context) { - if err := mdo.Exec(ctx); err != nil { +func (_d *MembershipDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/membership_query.go b/app/controlplane/pkg/data/ent/membership_query.go index 15ac2fd3c..71e1f8add 100644 --- a/app/controlplane/pkg/data/ent/membership_query.go +++ b/app/controlplane/pkg/data/ent/membership_query.go @@ -39,44 +39,44 @@ type MembershipQuery struct { } // Where adds a new predicate for the MembershipQuery builder. -func (mq *MembershipQuery) Where(ps ...predicate.Membership) *MembershipQuery { - mq.predicates = append(mq.predicates, ps...) - return mq +func (_q *MembershipQuery) Where(ps ...predicate.Membership) *MembershipQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (mq *MembershipQuery) Limit(limit int) *MembershipQuery { - mq.ctx.Limit = &limit - return mq +func (_q *MembershipQuery) Limit(limit int) *MembershipQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (mq *MembershipQuery) Offset(offset int) *MembershipQuery { - mq.ctx.Offset = &offset - return mq +func (_q *MembershipQuery) Offset(offset int) *MembershipQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (mq *MembershipQuery) Unique(unique bool) *MembershipQuery { - mq.ctx.Unique = &unique - return mq +func (_q *MembershipQuery) Unique(unique bool) *MembershipQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (mq *MembershipQuery) Order(o ...membership.OrderOption) *MembershipQuery { - mq.order = append(mq.order, o...) - return mq +func (_q *MembershipQuery) Order(o ...membership.OrderOption) *MembershipQuery { + _q.order = append(_q.order, o...) + return _q } // QueryOrganization chains the current query on the "organization" edge. 
-func (mq *MembershipQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: mq.config}).Query() +func (_q *MembershipQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := mq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := mq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -85,20 +85,20 @@ func (mq *MembershipQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, membership.OrganizationTable, membership.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryUser chains the current query on the "user" edge. -func (mq *MembershipQuery) QueryUser() *UserQuery { - query := (&UserClient{config: mq.config}).Query() +func (_q *MembershipQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := mq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := mq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -107,20 +107,20 @@ func (mq *MembershipQuery) QueryUser() *UserQuery { sqlgraph.To(user.Table, user.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, membership.UserTable, membership.UserColumn), ) - fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryParent chains the current query on the "parent" edge. -func (mq *MembershipQuery) QueryParent() *MembershipQuery { - query := (&MembershipClient{config: mq.config}).Query() +func (_q *MembershipQuery) QueryParent() *MembershipQuery { + query := (&MembershipClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := mq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := mq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -129,20 +129,20 @@ func (mq *MembershipQuery) QueryParent() *MembershipQuery { sqlgraph.To(membership.Table, membership.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, membership.ParentTable, membership.ParentColumn), ) - fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryChildren chains the current query on the "children" edge. 
-func (mq *MembershipQuery) QueryChildren() *MembershipQuery { - query := (&MembershipClient{config: mq.config}).Query() +func (_q *MembershipQuery) QueryChildren() *MembershipQuery { + query := (&MembershipClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := mq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := mq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -151,7 +151,7 @@ func (mq *MembershipQuery) QueryChildren() *MembershipQuery { sqlgraph.To(membership.Table, membership.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, membership.ChildrenTable, membership.ChildrenColumn), ) - fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -159,8 +159,8 @@ func (mq *MembershipQuery) QueryChildren() *MembershipQuery { // First returns the first Membership entity from the query. // Returns a *NotFoundError when no Membership was found. -func (mq *MembershipQuery) First(ctx context.Context) (*Membership, error) { - nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, ent.OpQueryFirst)) +func (_q *MembershipQuery) First(ctx context.Context) (*Membership, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -171,8 +171,8 @@ func (mq *MembershipQuery) First(ctx context.Context) (*Membership, error) { } // FirstX is like First, but panics if an error occurs. -func (mq *MembershipQuery) FirstX(ctx context.Context) *Membership { - node, err := mq.First(ctx) +func (_q *MembershipQuery) FirstX(ctx context.Context) *Membership { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -181,9 +181,9 @@ func (mq *MembershipQuery) FirstX(ctx context.Context) *Membership { // FirstID returns the first Membership ID from the query. // Returns a *NotFoundError when no Membership ID was found. -func (mq *MembershipQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *MembershipQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -194,8 +194,8 @@ func (mq *MembershipQuery) FirstID(ctx context.Context) (id uuid.UUID, err error } // FirstIDX is like FirstID, but panics if an error occurs. -func (mq *MembershipQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := mq.FirstID(ctx) +func (_q *MembershipQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -205,8 +205,8 @@ func (mq *MembershipQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single Membership entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one Membership entity is found. // Returns a *NotFoundError when no Membership entities are found. 
-func (mq *MembershipQuery) Only(ctx context.Context) (*Membership, error) { - nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, ent.OpQueryOnly)) +func (_q *MembershipQuery) Only(ctx context.Context) (*Membership, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -221,8 +221,8 @@ func (mq *MembershipQuery) Only(ctx context.Context) (*Membership, error) { } // OnlyX is like Only, but panics if an error occurs. -func (mq *MembershipQuery) OnlyX(ctx context.Context) *Membership { - node, err := mq.Only(ctx) +func (_q *MembershipQuery) OnlyX(ctx context.Context) *Membership { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -232,9 +232,9 @@ func (mq *MembershipQuery) OnlyX(ctx context.Context) *Membership { // OnlyID is like Only, but returns the only Membership ID in the query. // Returns a *NotSingularError when more than one Membership ID is found. // Returns a *NotFoundError when no entities are found. -func (mq *MembershipQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *MembershipQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -249,8 +249,8 @@ func (mq *MembershipQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (mq *MembershipQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := mq.OnlyID(ctx) +func (_q *MembershipQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -258,18 +258,18 @@ func (mq *MembershipQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Memberships. -func (mq *MembershipQuery) All(ctx context.Context) ([]*Membership, error) { - ctx = setContextOp(ctx, mq.ctx, ent.OpQueryAll) - if err := mq.prepareQuery(ctx); err != nil { +func (_q *MembershipQuery) All(ctx context.Context) ([]*Membership, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*Membership, *MembershipQuery]() - return withInterceptors[[]*Membership](ctx, mq, qr, mq.inters) + return withInterceptors[[]*Membership](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (mq *MembershipQuery) AllX(ctx context.Context) []*Membership { - nodes, err := mq.All(ctx) +func (_q *MembershipQuery) AllX(ctx context.Context) []*Membership { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -277,20 +277,20 @@ func (mq *MembershipQuery) AllX(ctx context.Context) []*Membership { } // IDs executes the query and returns a list of Membership IDs. 
-func (mq *MembershipQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if mq.ctx.Unique == nil && mq.path != nil { - mq.Unique(true) +func (_q *MembershipQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, mq.ctx, ent.OpQueryIDs) - if err = mq.Select(membership.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(membership.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (mq *MembershipQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := mq.IDs(ctx) +func (_q *MembershipQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -298,17 +298,17 @@ func (mq *MembershipQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (mq *MembershipQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, mq.ctx, ent.OpQueryCount) - if err := mq.prepareQuery(ctx); err != nil { +func (_q *MembershipQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, mq, querierCount[*MembershipQuery](), mq.inters) + return withInterceptors[int](ctx, _q, querierCount[*MembershipQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (mq *MembershipQuery) CountX(ctx context.Context) int { - count, err := mq.Count(ctx) +func (_q *MembershipQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -316,9 +316,9 @@ func (mq *MembershipQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (mq *MembershipQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, mq.ctx, ent.OpQueryExist) - switch _, err := mq.FirstID(ctx); { +func (_q *MembershipQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -329,8 +329,8 @@ func (mq *MembershipQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (mq *MembershipQuery) ExistX(ctx context.Context) bool { - exist, err := mq.Exist(ctx) +func (_q *MembershipQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -339,69 +339,69 @@ func (mq *MembershipQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the MembershipQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
-func (mq *MembershipQuery) Clone() *MembershipQuery { - if mq == nil { +func (_q *MembershipQuery) Clone() *MembershipQuery { + if _q == nil { return nil } return &MembershipQuery{ - config: mq.config, - ctx: mq.ctx.Clone(), - order: append([]membership.OrderOption{}, mq.order...), - inters: append([]Interceptor{}, mq.inters...), - predicates: append([]predicate.Membership{}, mq.predicates...), - withOrganization: mq.withOrganization.Clone(), - withUser: mq.withUser.Clone(), - withParent: mq.withParent.Clone(), - withChildren: mq.withChildren.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]membership.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Membership{}, _q.predicates...), + withOrganization: _q.withOrganization.Clone(), + withUser: _q.withUser.Clone(), + withParent: _q.withParent.Clone(), + withChildren: _q.withChildren.Clone(), // clone intermediate query. - sql: mq.sql.Clone(), - path: mq.path, - modifiers: append([]func(*sql.Selector){}, mq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (mq *MembershipQuery) WithOrganization(opts ...func(*OrganizationQuery)) *MembershipQuery { - query := (&OrganizationClient{config: mq.config}).Query() +func (_q *MembershipQuery) WithOrganization(opts ...func(*OrganizationQuery)) *MembershipQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - mq.withOrganization = query - return mq + _q.withOrganization = query + return _q } // WithUser tells the query-builder to eager-load the nodes that are connected to // the "user" edge. The optional arguments are used to configure the query builder of the edge. -func (mq *MembershipQuery) WithUser(opts ...func(*UserQuery)) *MembershipQuery { - query := (&UserClient{config: mq.config}).Query() +func (_q *MembershipQuery) WithUser(opts ...func(*UserQuery)) *MembershipQuery { + query := (&UserClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - mq.withUser = query - return mq + _q.withUser = query + return _q } // WithParent tells the query-builder to eager-load the nodes that are connected to // the "parent" edge. The optional arguments are used to configure the query builder of the edge. -func (mq *MembershipQuery) WithParent(opts ...func(*MembershipQuery)) *MembershipQuery { - query := (&MembershipClient{config: mq.config}).Query() +func (_q *MembershipQuery) WithParent(opts ...func(*MembershipQuery)) *MembershipQuery { + query := (&MembershipClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - mq.withParent = query - return mq + _q.withParent = query + return _q } // WithChildren tells the query-builder to eager-load the nodes that are connected to // the "children" edge. The optional arguments are used to configure the query builder of the edge. 
-func (mq *MembershipQuery) WithChildren(opts ...func(*MembershipQuery)) *MembershipQuery { - query := (&MembershipClient{config: mq.config}).Query() +func (_q *MembershipQuery) WithChildren(opts ...func(*MembershipQuery)) *MembershipQuery { + query := (&MembershipClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - mq.withChildren = query - return mq + _q.withChildren = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -418,10 +418,10 @@ func (mq *MembershipQuery) WithChildren(opts ...func(*MembershipQuery)) *Members // GroupBy(membership.FieldCurrent). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (mq *MembershipQuery) GroupBy(field string, fields ...string) *MembershipGroupBy { - mq.ctx.Fields = append([]string{field}, fields...) - grbuild := &MembershipGroupBy{build: mq} - grbuild.flds = &mq.ctx.Fields +func (_q *MembershipQuery) GroupBy(field string, fields ...string) *MembershipGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &MembershipGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = membership.Label grbuild.scan = grbuild.Scan return grbuild @@ -439,58 +439,58 @@ func (mq *MembershipQuery) GroupBy(field string, fields ...string) *MembershipGr // client.Membership.Query(). // Select(membership.FieldCurrent). // Scan(ctx, &v) -func (mq *MembershipQuery) Select(fields ...string) *MembershipSelect { - mq.ctx.Fields = append(mq.ctx.Fields, fields...) - sbuild := &MembershipSelect{MembershipQuery: mq} +func (_q *MembershipQuery) Select(fields ...string) *MembershipSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &MembershipSelect{MembershipQuery: _q} sbuild.label = membership.Label - sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a MembershipSelect configured with the given aggregations. -func (mq *MembershipQuery) Aggregate(fns ...AggregateFunc) *MembershipSelect { - return mq.Select().Aggregate(fns...) +func (_q *MembershipQuery) Aggregate(fns ...AggregateFunc) *MembershipSelect { + return _q.Select().Aggregate(fns...) 
} -func (mq *MembershipQuery) prepareQuery(ctx context.Context) error { - for _, inter := range mq.inters { +func (_q *MembershipQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, mq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range mq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !membership.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if mq.path != nil { - prev, err := mq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - mq.sql = prev + _q.sql = prev } return nil } -func (mq *MembershipQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Membership, error) { +func (_q *MembershipQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Membership, error) { var ( nodes = []*Membership{} - withFKs = mq.withFKs - _spec = mq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [4]bool{ - mq.withOrganization != nil, - mq.withUser != nil, - mq.withParent != nil, - mq.withChildren != nil, + _q.withOrganization != nil, + _q.withUser != nil, + _q.withParent != nil, + _q.withChildren != nil, } ) - if mq.withOrganization != nil || mq.withUser != nil { + if _q.withOrganization != nil || _q.withUser != nil { withFKs = true } if withFKs { @@ -500,43 +500,43 @@ func (mq *MembershipQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*M return (*Membership).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &Membership{config: mq.config} + node := &Membership{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(mq.modifiers) > 0 { - _spec.Modifiers = mq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := mq.withOrganization; query != nil { - if err := mq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *Membership, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } } - if query := mq.withUser; query != nil { - if err := mq.loadUser(ctx, query, nodes, nil, + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, func(n *Membership, e *User) { n.Edges.User = e }); err != nil { return nil, err } } - if query := mq.withParent; query != nil { - if err := mq.loadParent(ctx, query, nodes, nil, + if query := _q.withParent; query != nil { + if err := _q.loadParent(ctx, query, nodes, nil, func(n *Membership, e *Membership) { n.Edges.Parent = e }); err != nil { return nil, err } } - if query := mq.withChildren; query != nil { - if err := mq.loadChildren(ctx, query, nodes, + if query := _q.withChildren; query != nil { + if err := _q.loadChildren(ctx, query, nodes, func(n *Membership) { n.Edges.Children = []*Membership{} }, func(n *Membership, e *Membership) { n.Edges.Children = append(n.Edges.Children, e) }); err != nil { return nil, err @@ -545,7 +545,7 @@ 
func (mq *MembershipQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*M return nodes, nil } -func (mq *MembershipQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Membership, init func(*Membership), assign func(*Membership, *Organization)) error { +func (_q *MembershipQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Membership, init func(*Membership), assign func(*Membership, *Organization)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Membership) for i := range nodes { @@ -577,7 +577,7 @@ func (mq *MembershipQuery) loadOrganization(ctx context.Context, query *Organiza } return nil } -func (mq *MembershipQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Membership, init func(*Membership), assign func(*Membership, *User)) error { +func (_q *MembershipQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Membership, init func(*Membership), assign func(*Membership, *User)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Membership) for i := range nodes { @@ -609,7 +609,7 @@ func (mq *MembershipQuery) loadUser(ctx context.Context, query *UserQuery, nodes } return nil } -func (mq *MembershipQuery) loadParent(ctx context.Context, query *MembershipQuery, nodes []*Membership, init func(*Membership), assign func(*Membership, *Membership)) error { +func (_q *MembershipQuery) loadParent(ctx context.Context, query *MembershipQuery, nodes []*Membership, init func(*Membership), assign func(*Membership, *Membership)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Membership) for i := range nodes { @@ -641,7 +641,7 @@ func (mq *MembershipQuery) loadParent(ctx context.Context, query *MembershipQuer } return nil } -func (mq *MembershipQuery) loadChildren(ctx context.Context, query *MembershipQuery, nodes []*Membership, init func(*Membership), assign func(*Membership, *Membership)) error { +func (_q *MembershipQuery) loadChildren(ctx context.Context, query *MembershipQuery, nodes []*Membership, init func(*Membership), assign func(*Membership, *Membership)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Membership) for i := range nodes { @@ -676,27 +676,27 @@ func (mq *MembershipQuery) loadChildren(ctx context.Context, query *MembershipQu return nil } -func (mq *MembershipQuery) sqlCount(ctx context.Context) (int, error) { - _spec := mq.querySpec() - if len(mq.modifiers) > 0 { - _spec.Modifiers = mq.modifiers +func (_q *MembershipQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = mq.ctx.Fields - if len(mq.ctx.Fields) > 0 { - _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, mq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (mq *MembershipQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *MembershipQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(membership.Table, membership.Columns, sqlgraph.NewFieldSpec(membership.FieldID, field.TypeUUID)) - _spec.From = mq.sql - if unique := mq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if mq.path != nil { + } else if _q.path != nil { 
_spec.Unique = true } - if fields := mq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, membership.FieldID) for i := range fields { @@ -704,24 +704,24 @@ func (mq *MembershipQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if mq.withParent != nil { + if _q.withParent != nil { _spec.Node.AddColumnOnce(membership.FieldParentID) } } - if ps := mq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := mq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := mq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := mq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -731,36 +731,36 @@ func (mq *MembershipQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (mq *MembershipQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(mq.driver.Dialect()) +func (_q *MembershipQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(membership.Table) - columns := mq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = membership.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if mq.sql != nil { - selector = mq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if mq.ctx.Unique != nil && *mq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range mq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range mq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range mq.order { + for _, p := range _q.order { p(selector) } - if offset := mq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := mq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -769,33 +769,33 @@ func (mq *MembershipQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (mq *MembershipQuery) ForUpdate(opts ...sql.LockOption) *MembershipQuery { - if mq.driver.Dialect() == dialect.Postgres { - mq.Unique(false) +func (_q *MembershipQuery) ForUpdate(opts ...sql.LockOption) *MembershipQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - mq.modifiers = append(mq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return mq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. 
-func (mq *MembershipQuery) ForShare(opts ...sql.LockOption) *MembershipQuery { - if mq.driver.Dialect() == dialect.Postgres { - mq.Unique(false) +func (_q *MembershipQuery) ForShare(opts ...sql.LockOption) *MembershipQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - mq.modifiers = append(mq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return mq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (mq *MembershipQuery) Modify(modifiers ...func(s *sql.Selector)) *MembershipSelect { - mq.modifiers = append(mq.modifiers, modifiers...) - return mq.Select() +func (_q *MembershipQuery) Modify(modifiers ...func(s *sql.Selector)) *MembershipSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // MembershipGroupBy is the group-by builder for Membership entities. @@ -805,41 +805,41 @@ type MembershipGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (mgb *MembershipGroupBy) Aggregate(fns ...AggregateFunc) *MembershipGroupBy { - mgb.fns = append(mgb.fns, fns...) - return mgb +func (_g *MembershipGroupBy) Aggregate(fns ...AggregateFunc) *MembershipGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (mgb *MembershipGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, mgb.build.ctx, ent.OpQueryGroupBy) - if err := mgb.build.prepareQuery(ctx); err != nil { +func (_g *MembershipGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*MembershipQuery, *MembershipGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) + return scanWithInterceptors[*MembershipQuery, *MembershipGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (mgb *MembershipGroupBy) sqlScan(ctx context.Context, root *MembershipQuery, v any) error { +func (_g *MembershipGroupBy) sqlScan(ctx context.Context, root *MembershipQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(mgb.fns)) - for _, fn := range mgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) - for _, f := range *mgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*mgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -853,27 +853,27 @@ type MembershipSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (ms *MembershipSelect) Aggregate(fns ...AggregateFunc) *MembershipSelect { - ms.fns = append(ms.fns, fns...) - return ms +func (_s *MembershipSelect) Aggregate(fns ...AggregateFunc) *MembershipSelect { + _s.fns = append(_s.fns, fns...) 
+ return _s } // Scan applies the selector query and scans the result into the given value. -func (ms *MembershipSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ms.ctx, ent.OpQuerySelect) - if err := ms.prepareQuery(ctx); err != nil { +func (_s *MembershipSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*MembershipQuery, *MembershipSelect](ctx, ms.MembershipQuery, ms, ms.inters, v) + return scanWithInterceptors[*MembershipQuery, *MembershipSelect](ctx, _s.MembershipQuery, _s, _s.inters, v) } -func (ms *MembershipSelect) sqlScan(ctx context.Context, root *MembershipQuery, v any) error { +func (_s *MembershipSelect) sqlScan(ctx context.Context, root *MembershipQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ms.fns)) - for _, fn := range ms.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*ms.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -881,7 +881,7 @@ func (ms *MembershipSelect) sqlScan(ctx context.Context, root *MembershipQuery, } rows := &sql.Rows{} query, args := selector.Query() - if err := ms.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -889,7 +889,7 @@ func (ms *MembershipSelect) sqlScan(ctx context.Context, root *MembershipQuery, } // Modify adds a query modifier for attaching custom logic to queries. -func (ms *MembershipSelect) Modify(modifiers ...func(s *sql.Selector)) *MembershipSelect { - ms.modifiers = append(ms.modifiers, modifiers...) - return ms +func (_s *MembershipSelect) Modify(modifiers ...func(s *sql.Selector)) *MembershipSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/membership_update.go b/app/controlplane/pkg/data/ent/membership_update.go index 81a15ef58..102d7f382 100644 --- a/app/controlplane/pkg/data/ent/membership_update.go +++ b/app/controlplane/pkg/data/ent/membership_update.go @@ -28,263 +28,263 @@ type MembershipUpdate struct { } // Where appends a list predicates to the MembershipUpdate builder. -func (mu *MembershipUpdate) Where(ps ...predicate.Membership) *MembershipUpdate { - mu.mutation.Where(ps...) - return mu +func (_u *MembershipUpdate) Where(ps ...predicate.Membership) *MembershipUpdate { + _u.mutation.Where(ps...) + return _u } // SetCurrent sets the "current" field. -func (mu *MembershipUpdate) SetCurrent(b bool) *MembershipUpdate { - mu.mutation.SetCurrent(b) - return mu +func (_u *MembershipUpdate) SetCurrent(v bool) *MembershipUpdate { + _u.mutation.SetCurrent(v) + return _u } // SetNillableCurrent sets the "current" field if the given value is not nil. -func (mu *MembershipUpdate) SetNillableCurrent(b *bool) *MembershipUpdate { - if b != nil { - mu.SetCurrent(*b) +func (_u *MembershipUpdate) SetNillableCurrent(v *bool) *MembershipUpdate { + if v != nil { + _u.SetCurrent(*v) } - return mu + return _u } // SetUpdatedAt sets the "updated_at" field. 
-func (mu *MembershipUpdate) SetUpdatedAt(t time.Time) *MembershipUpdate { - mu.mutation.SetUpdatedAt(t) - return mu +func (_u *MembershipUpdate) SetUpdatedAt(v time.Time) *MembershipUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (mu *MembershipUpdate) SetNillableUpdatedAt(t *time.Time) *MembershipUpdate { - if t != nil { - mu.SetUpdatedAt(*t) +func (_u *MembershipUpdate) SetNillableUpdatedAt(v *time.Time) *MembershipUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return mu + return _u } // SetRole sets the "role" field. -func (mu *MembershipUpdate) SetRole(a authz.Role) *MembershipUpdate { - mu.mutation.SetRole(a) - return mu +func (_u *MembershipUpdate) SetRole(v authz.Role) *MembershipUpdate { + _u.mutation.SetRole(v) + return _u } // SetNillableRole sets the "role" field if the given value is not nil. -func (mu *MembershipUpdate) SetNillableRole(a *authz.Role) *MembershipUpdate { - if a != nil { - mu.SetRole(*a) +func (_u *MembershipUpdate) SetNillableRole(v *authz.Role) *MembershipUpdate { + if v != nil { + _u.SetRole(*v) } - return mu + return _u } // SetMembershipType sets the "membership_type" field. -func (mu *MembershipUpdate) SetMembershipType(at authz.MembershipType) *MembershipUpdate { - mu.mutation.SetMembershipType(at) - return mu +func (_u *MembershipUpdate) SetMembershipType(v authz.MembershipType) *MembershipUpdate { + _u.mutation.SetMembershipType(v) + return _u } // SetNillableMembershipType sets the "membership_type" field if the given value is not nil. -func (mu *MembershipUpdate) SetNillableMembershipType(at *authz.MembershipType) *MembershipUpdate { - if at != nil { - mu.SetMembershipType(*at) +func (_u *MembershipUpdate) SetNillableMembershipType(v *authz.MembershipType) *MembershipUpdate { + if v != nil { + _u.SetMembershipType(*v) } - return mu + return _u } // ClearMembershipType clears the value of the "membership_type" field. -func (mu *MembershipUpdate) ClearMembershipType() *MembershipUpdate { - mu.mutation.ClearMembershipType() - return mu +func (_u *MembershipUpdate) ClearMembershipType() *MembershipUpdate { + _u.mutation.ClearMembershipType() + return _u } // SetMemberID sets the "member_id" field. -func (mu *MembershipUpdate) SetMemberID(u uuid.UUID) *MembershipUpdate { - mu.mutation.SetMemberID(u) - return mu +func (_u *MembershipUpdate) SetMemberID(v uuid.UUID) *MembershipUpdate { + _u.mutation.SetMemberID(v) + return _u } // SetNillableMemberID sets the "member_id" field if the given value is not nil. -func (mu *MembershipUpdate) SetNillableMemberID(u *uuid.UUID) *MembershipUpdate { - if u != nil { - mu.SetMemberID(*u) +func (_u *MembershipUpdate) SetNillableMemberID(v *uuid.UUID) *MembershipUpdate { + if v != nil { + _u.SetMemberID(*v) } - return mu + return _u } // ClearMemberID clears the value of the "member_id" field. -func (mu *MembershipUpdate) ClearMemberID() *MembershipUpdate { - mu.mutation.ClearMemberID() - return mu +func (_u *MembershipUpdate) ClearMemberID() *MembershipUpdate { + _u.mutation.ClearMemberID() + return _u } // SetResourceType sets the "resource_type" field. -func (mu *MembershipUpdate) SetResourceType(at authz.ResourceType) *MembershipUpdate { - mu.mutation.SetResourceType(at) - return mu +func (_u *MembershipUpdate) SetResourceType(v authz.ResourceType) *MembershipUpdate { + _u.mutation.SetResourceType(v) + return _u } // SetNillableResourceType sets the "resource_type" field if the given value is not nil. 
-func (mu *MembershipUpdate) SetNillableResourceType(at *authz.ResourceType) *MembershipUpdate { - if at != nil { - mu.SetResourceType(*at) +func (_u *MembershipUpdate) SetNillableResourceType(v *authz.ResourceType) *MembershipUpdate { + if v != nil { + _u.SetResourceType(*v) } - return mu + return _u } // ClearResourceType clears the value of the "resource_type" field. -func (mu *MembershipUpdate) ClearResourceType() *MembershipUpdate { - mu.mutation.ClearResourceType() - return mu +func (_u *MembershipUpdate) ClearResourceType() *MembershipUpdate { + _u.mutation.ClearResourceType() + return _u } // SetResourceID sets the "resource_id" field. -func (mu *MembershipUpdate) SetResourceID(u uuid.UUID) *MembershipUpdate { - mu.mutation.SetResourceID(u) - return mu +func (_u *MembershipUpdate) SetResourceID(v uuid.UUID) *MembershipUpdate { + _u.mutation.SetResourceID(v) + return _u } // SetNillableResourceID sets the "resource_id" field if the given value is not nil. -func (mu *MembershipUpdate) SetNillableResourceID(u *uuid.UUID) *MembershipUpdate { - if u != nil { - mu.SetResourceID(*u) +func (_u *MembershipUpdate) SetNillableResourceID(v *uuid.UUID) *MembershipUpdate { + if v != nil { + _u.SetResourceID(*v) } - return mu + return _u } // ClearResourceID clears the value of the "resource_id" field. -func (mu *MembershipUpdate) ClearResourceID() *MembershipUpdate { - mu.mutation.ClearResourceID() - return mu +func (_u *MembershipUpdate) ClearResourceID() *MembershipUpdate { + _u.mutation.ClearResourceID() + return _u } // SetParentID sets the "parent_id" field. -func (mu *MembershipUpdate) SetParentID(u uuid.UUID) *MembershipUpdate { - mu.mutation.SetParentID(u) - return mu +func (_u *MembershipUpdate) SetParentID(v uuid.UUID) *MembershipUpdate { + _u.mutation.SetParentID(v) + return _u } // SetNillableParentID sets the "parent_id" field if the given value is not nil. -func (mu *MembershipUpdate) SetNillableParentID(u *uuid.UUID) *MembershipUpdate { - if u != nil { - mu.SetParentID(*u) +func (_u *MembershipUpdate) SetNillableParentID(v *uuid.UUID) *MembershipUpdate { + if v != nil { + _u.SetParentID(*v) } - return mu + return _u } // ClearParentID clears the value of the "parent_id" field. -func (mu *MembershipUpdate) ClearParentID() *MembershipUpdate { - mu.mutation.ClearParentID() - return mu +func (_u *MembershipUpdate) ClearParentID() *MembershipUpdate { + _u.mutation.ClearParentID() + return _u } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (mu *MembershipUpdate) SetOrganizationID(id uuid.UUID) *MembershipUpdate { - mu.mutation.SetOrganizationID(id) - return mu +func (_u *MembershipUpdate) SetOrganizationID(id uuid.UUID) *MembershipUpdate { + _u.mutation.SetOrganizationID(id) + return _u } // SetNillableOrganizationID sets the "organization" edge to the Organization entity by ID if the given value is not nil. -func (mu *MembershipUpdate) SetNillableOrganizationID(id *uuid.UUID) *MembershipUpdate { +func (_u *MembershipUpdate) SetNillableOrganizationID(id *uuid.UUID) *MembershipUpdate { if id != nil { - mu = mu.SetOrganizationID(*id) + _u = _u.SetOrganizationID(*id) } - return mu + return _u } // SetOrganization sets the "organization" edge to the Organization entity. 
-func (mu *MembershipUpdate) SetOrganization(o *Organization) *MembershipUpdate { - return mu.SetOrganizationID(o.ID) +func (_u *MembershipUpdate) SetOrganization(v *Organization) *MembershipUpdate { + return _u.SetOrganizationID(v.ID) } // SetUserID sets the "user" edge to the User entity by ID. -func (mu *MembershipUpdate) SetUserID(id uuid.UUID) *MembershipUpdate { - mu.mutation.SetUserID(id) - return mu +func (_u *MembershipUpdate) SetUserID(id uuid.UUID) *MembershipUpdate { + _u.mutation.SetUserID(id) + return _u } // SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. -func (mu *MembershipUpdate) SetNillableUserID(id *uuid.UUID) *MembershipUpdate { +func (_u *MembershipUpdate) SetNillableUserID(id *uuid.UUID) *MembershipUpdate { if id != nil { - mu = mu.SetUserID(*id) + _u = _u.SetUserID(*id) } - return mu + return _u } // SetUser sets the "user" edge to the User entity. -func (mu *MembershipUpdate) SetUser(u *User) *MembershipUpdate { - return mu.SetUserID(u.ID) +func (_u *MembershipUpdate) SetUser(v *User) *MembershipUpdate { + return _u.SetUserID(v.ID) } // SetParent sets the "parent" edge to the Membership entity. -func (mu *MembershipUpdate) SetParent(m *Membership) *MembershipUpdate { - return mu.SetParentID(m.ID) +func (_u *MembershipUpdate) SetParent(v *Membership) *MembershipUpdate { + return _u.SetParentID(v.ID) } // AddChildIDs adds the "children" edge to the Membership entity by IDs. -func (mu *MembershipUpdate) AddChildIDs(ids ...uuid.UUID) *MembershipUpdate { - mu.mutation.AddChildIDs(ids...) - return mu +func (_u *MembershipUpdate) AddChildIDs(ids ...uuid.UUID) *MembershipUpdate { + _u.mutation.AddChildIDs(ids...) + return _u } // AddChildren adds the "children" edges to the Membership entity. -func (mu *MembershipUpdate) AddChildren(m ...*Membership) *MembershipUpdate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *MembershipUpdate) AddChildren(v ...*Membership) *MembershipUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return mu.AddChildIDs(ids...) + return _u.AddChildIDs(ids...) } // Mutation returns the MembershipMutation object of the builder. -func (mu *MembershipUpdate) Mutation() *MembershipMutation { - return mu.mutation +func (_u *MembershipUpdate) Mutation() *MembershipMutation { + return _u.mutation } // ClearOrganization clears the "organization" edge to the Organization entity. -func (mu *MembershipUpdate) ClearOrganization() *MembershipUpdate { - mu.mutation.ClearOrganization() - return mu +func (_u *MembershipUpdate) ClearOrganization() *MembershipUpdate { + _u.mutation.ClearOrganization() + return _u } // ClearUser clears the "user" edge to the User entity. -func (mu *MembershipUpdate) ClearUser() *MembershipUpdate { - mu.mutation.ClearUser() - return mu +func (_u *MembershipUpdate) ClearUser() *MembershipUpdate { + _u.mutation.ClearUser() + return _u } // ClearParent clears the "parent" edge to the Membership entity. -func (mu *MembershipUpdate) ClearParent() *MembershipUpdate { - mu.mutation.ClearParent() - return mu +func (_u *MembershipUpdate) ClearParent() *MembershipUpdate { + _u.mutation.ClearParent() + return _u } // ClearChildren clears all "children" edges to the Membership entity. 
-func (mu *MembershipUpdate) ClearChildren() *MembershipUpdate { - mu.mutation.ClearChildren() - return mu +func (_u *MembershipUpdate) ClearChildren() *MembershipUpdate { + _u.mutation.ClearChildren() + return _u } // RemoveChildIDs removes the "children" edge to Membership entities by IDs. -func (mu *MembershipUpdate) RemoveChildIDs(ids ...uuid.UUID) *MembershipUpdate { - mu.mutation.RemoveChildIDs(ids...) - return mu +func (_u *MembershipUpdate) RemoveChildIDs(ids ...uuid.UUID) *MembershipUpdate { + _u.mutation.RemoveChildIDs(ids...) + return _u } // RemoveChildren removes "children" edges to Membership entities. -func (mu *MembershipUpdate) RemoveChildren(m ...*Membership) *MembershipUpdate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *MembershipUpdate) RemoveChildren(v ...*Membership) *MembershipUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return mu.RemoveChildIDs(ids...) + return _u.RemoveChildIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (mu *MembershipUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) +func (_u *MembershipUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (mu *MembershipUpdate) SaveX(ctx context.Context) int { - affected, err := mu.Save(ctx) +func (_u *MembershipUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -292,31 +292,31 @@ func (mu *MembershipUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (mu *MembershipUpdate) Exec(ctx context.Context) error { - _, err := mu.Save(ctx) +func (_u *MembershipUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (mu *MembershipUpdate) ExecX(ctx context.Context) { - if err := mu.Exec(ctx); err != nil { +func (_u *MembershipUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (mu *MembershipUpdate) check() error { - if v, ok := mu.mutation.Role(); ok { +func (_u *MembershipUpdate) check() error { + if v, ok := _u.mutation.Role(); ok { if err := membership.RoleValidator(v); err != nil { return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "Membership.role": %w`, err)} } } - if v, ok := mu.mutation.MembershipType(); ok { + if v, ok := _u.mutation.MembershipType(); ok { if err := membership.MembershipTypeValidator(v); err != nil { return &ValidationError{Name: "membership_type", err: fmt.Errorf(`ent: validator failed for field "Membership.membership_type": %w`, err)} } } - if v, ok := mu.mutation.ResourceType(); ok { + if v, ok := _u.mutation.ResourceType(); ok { if err := membership.ResourceTypeValidator(v); err != nil { return &ValidationError{Name: "resource_type", err: fmt.Errorf(`ent: validator failed for field "Membership.resource_type": %w`, err)} } @@ -325,57 +325,57 @@ func (mu *MembershipUpdate) check() error { } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (mu *MembershipUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *MembershipUpdate { - mu.modifiers = append(mu.modifiers, modifiers...) 
- return mu +func (_u *MembershipUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *MembershipUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := mu.check(); err != nil { - return n, err +func (_u *MembershipUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(membership.Table, membership.Columns, sqlgraph.NewFieldSpec(membership.FieldID, field.TypeUUID)) - if ps := mu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := mu.mutation.Current(); ok { + if value, ok := _u.mutation.Current(); ok { _spec.SetField(membership.FieldCurrent, field.TypeBool, value) } - if value, ok := mu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(membership.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := mu.mutation.Role(); ok { + if value, ok := _u.mutation.Role(); ok { _spec.SetField(membership.FieldRole, field.TypeEnum, value) } - if value, ok := mu.mutation.MembershipType(); ok { + if value, ok := _u.mutation.MembershipType(); ok { _spec.SetField(membership.FieldMembershipType, field.TypeEnum, value) } - if mu.mutation.MembershipTypeCleared() { + if _u.mutation.MembershipTypeCleared() { _spec.ClearField(membership.FieldMembershipType, field.TypeEnum) } - if value, ok := mu.mutation.MemberID(); ok { + if value, ok := _u.mutation.MemberID(); ok { _spec.SetField(membership.FieldMemberID, field.TypeUUID, value) } - if mu.mutation.MemberIDCleared() { + if _u.mutation.MemberIDCleared() { _spec.ClearField(membership.FieldMemberID, field.TypeUUID) } - if value, ok := mu.mutation.ResourceType(); ok { + if value, ok := _u.mutation.ResourceType(); ok { _spec.SetField(membership.FieldResourceType, field.TypeEnum, value) } - if mu.mutation.ResourceTypeCleared() { + if _u.mutation.ResourceTypeCleared() { _spec.ClearField(membership.FieldResourceType, field.TypeEnum) } - if value, ok := mu.mutation.ResourceID(); ok { + if value, ok := _u.mutation.ResourceID(); ok { _spec.SetField(membership.FieldResourceID, field.TypeUUID, value) } - if mu.mutation.ResourceIDCleared() { + if _u.mutation.ResourceIDCleared() { _spec.ClearField(membership.FieldResourceID, field.TypeUUID) } - if mu.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -388,7 +388,7 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := mu.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -404,7 +404,7 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if mu.mutation.UserCleared() { + if _u.mutation.UserCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -417,7 +417,7 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := mu.mutation.UserIDs(); len(nodes) > 0 { + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: 
sqlgraph.M2O, Inverse: true, @@ -433,7 +433,7 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if mu.mutation.ParentCleared() { + if _u.mutation.ParentCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -446,7 +446,7 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := mu.mutation.ParentIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ParentIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -462,7 +462,7 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if mu.mutation.ChildrenCleared() { + if _u.mutation.ChildrenCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -475,7 +475,7 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := mu.mutation.RemovedChildrenIDs(); len(nodes) > 0 && !mu.mutation.ChildrenCleared() { + if nodes := _u.mutation.RemovedChildrenIDs(); len(nodes) > 0 && !_u.mutation.ChildrenCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -491,7 +491,7 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := mu.mutation.ChildrenIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ChildrenIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -507,8 +507,8 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(mu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{membership.Label} } else if sqlgraph.IsConstraintError(err) { @@ -516,8 +516,8 @@ func (mu *MembershipUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - mu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // MembershipUpdateOne is the builder for updating a single Membership entity. @@ -530,270 +530,270 @@ type MembershipUpdateOne struct { } // SetCurrent sets the "current" field. -func (muo *MembershipUpdateOne) SetCurrent(b bool) *MembershipUpdateOne { - muo.mutation.SetCurrent(b) - return muo +func (_u *MembershipUpdateOne) SetCurrent(v bool) *MembershipUpdateOne { + _u.mutation.SetCurrent(v) + return _u } // SetNillableCurrent sets the "current" field if the given value is not nil. -func (muo *MembershipUpdateOne) SetNillableCurrent(b *bool) *MembershipUpdateOne { - if b != nil { - muo.SetCurrent(*b) +func (_u *MembershipUpdateOne) SetNillableCurrent(v *bool) *MembershipUpdateOne { + if v != nil { + _u.SetCurrent(*v) } - return muo + return _u } // SetUpdatedAt sets the "updated_at" field. -func (muo *MembershipUpdateOne) SetUpdatedAt(t time.Time) *MembershipUpdateOne { - muo.mutation.SetUpdatedAt(t) - return muo +func (_u *MembershipUpdateOne) SetUpdatedAt(v time.Time) *MembershipUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. 
-func (muo *MembershipUpdateOne) SetNillableUpdatedAt(t *time.Time) *MembershipUpdateOne { - if t != nil { - muo.SetUpdatedAt(*t) +func (_u *MembershipUpdateOne) SetNillableUpdatedAt(v *time.Time) *MembershipUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return muo + return _u } // SetRole sets the "role" field. -func (muo *MembershipUpdateOne) SetRole(a authz.Role) *MembershipUpdateOne { - muo.mutation.SetRole(a) - return muo +func (_u *MembershipUpdateOne) SetRole(v authz.Role) *MembershipUpdateOne { + _u.mutation.SetRole(v) + return _u } // SetNillableRole sets the "role" field if the given value is not nil. -func (muo *MembershipUpdateOne) SetNillableRole(a *authz.Role) *MembershipUpdateOne { - if a != nil { - muo.SetRole(*a) +func (_u *MembershipUpdateOne) SetNillableRole(v *authz.Role) *MembershipUpdateOne { + if v != nil { + _u.SetRole(*v) } - return muo + return _u } // SetMembershipType sets the "membership_type" field. -func (muo *MembershipUpdateOne) SetMembershipType(at authz.MembershipType) *MembershipUpdateOne { - muo.mutation.SetMembershipType(at) - return muo +func (_u *MembershipUpdateOne) SetMembershipType(v authz.MembershipType) *MembershipUpdateOne { + _u.mutation.SetMembershipType(v) + return _u } // SetNillableMembershipType sets the "membership_type" field if the given value is not nil. -func (muo *MembershipUpdateOne) SetNillableMembershipType(at *authz.MembershipType) *MembershipUpdateOne { - if at != nil { - muo.SetMembershipType(*at) +func (_u *MembershipUpdateOne) SetNillableMembershipType(v *authz.MembershipType) *MembershipUpdateOne { + if v != nil { + _u.SetMembershipType(*v) } - return muo + return _u } // ClearMembershipType clears the value of the "membership_type" field. -func (muo *MembershipUpdateOne) ClearMembershipType() *MembershipUpdateOne { - muo.mutation.ClearMembershipType() - return muo +func (_u *MembershipUpdateOne) ClearMembershipType() *MembershipUpdateOne { + _u.mutation.ClearMembershipType() + return _u } // SetMemberID sets the "member_id" field. -func (muo *MembershipUpdateOne) SetMemberID(u uuid.UUID) *MembershipUpdateOne { - muo.mutation.SetMemberID(u) - return muo +func (_u *MembershipUpdateOne) SetMemberID(v uuid.UUID) *MembershipUpdateOne { + _u.mutation.SetMemberID(v) + return _u } // SetNillableMemberID sets the "member_id" field if the given value is not nil. -func (muo *MembershipUpdateOne) SetNillableMemberID(u *uuid.UUID) *MembershipUpdateOne { - if u != nil { - muo.SetMemberID(*u) +func (_u *MembershipUpdateOne) SetNillableMemberID(v *uuid.UUID) *MembershipUpdateOne { + if v != nil { + _u.SetMemberID(*v) } - return muo + return _u } // ClearMemberID clears the value of the "member_id" field. -func (muo *MembershipUpdateOne) ClearMemberID() *MembershipUpdateOne { - muo.mutation.ClearMemberID() - return muo +func (_u *MembershipUpdateOne) ClearMemberID() *MembershipUpdateOne { + _u.mutation.ClearMemberID() + return _u } // SetResourceType sets the "resource_type" field. -func (muo *MembershipUpdateOne) SetResourceType(at authz.ResourceType) *MembershipUpdateOne { - muo.mutation.SetResourceType(at) - return muo +func (_u *MembershipUpdateOne) SetResourceType(v authz.ResourceType) *MembershipUpdateOne { + _u.mutation.SetResourceType(v) + return _u } // SetNillableResourceType sets the "resource_type" field if the given value is not nil. 
-func (muo *MembershipUpdateOne) SetNillableResourceType(at *authz.ResourceType) *MembershipUpdateOne { - if at != nil { - muo.SetResourceType(*at) +func (_u *MembershipUpdateOne) SetNillableResourceType(v *authz.ResourceType) *MembershipUpdateOne { + if v != nil { + _u.SetResourceType(*v) } - return muo + return _u } // ClearResourceType clears the value of the "resource_type" field. -func (muo *MembershipUpdateOne) ClearResourceType() *MembershipUpdateOne { - muo.mutation.ClearResourceType() - return muo +func (_u *MembershipUpdateOne) ClearResourceType() *MembershipUpdateOne { + _u.mutation.ClearResourceType() + return _u } // SetResourceID sets the "resource_id" field. -func (muo *MembershipUpdateOne) SetResourceID(u uuid.UUID) *MembershipUpdateOne { - muo.mutation.SetResourceID(u) - return muo +func (_u *MembershipUpdateOne) SetResourceID(v uuid.UUID) *MembershipUpdateOne { + _u.mutation.SetResourceID(v) + return _u } // SetNillableResourceID sets the "resource_id" field if the given value is not nil. -func (muo *MembershipUpdateOne) SetNillableResourceID(u *uuid.UUID) *MembershipUpdateOne { - if u != nil { - muo.SetResourceID(*u) +func (_u *MembershipUpdateOne) SetNillableResourceID(v *uuid.UUID) *MembershipUpdateOne { + if v != nil { + _u.SetResourceID(*v) } - return muo + return _u } // ClearResourceID clears the value of the "resource_id" field. -func (muo *MembershipUpdateOne) ClearResourceID() *MembershipUpdateOne { - muo.mutation.ClearResourceID() - return muo +func (_u *MembershipUpdateOne) ClearResourceID() *MembershipUpdateOne { + _u.mutation.ClearResourceID() + return _u } // SetParentID sets the "parent_id" field. -func (muo *MembershipUpdateOne) SetParentID(u uuid.UUID) *MembershipUpdateOne { - muo.mutation.SetParentID(u) - return muo +func (_u *MembershipUpdateOne) SetParentID(v uuid.UUID) *MembershipUpdateOne { + _u.mutation.SetParentID(v) + return _u } // SetNillableParentID sets the "parent_id" field if the given value is not nil. -func (muo *MembershipUpdateOne) SetNillableParentID(u *uuid.UUID) *MembershipUpdateOne { - if u != nil { - muo.SetParentID(*u) +func (_u *MembershipUpdateOne) SetNillableParentID(v *uuid.UUID) *MembershipUpdateOne { + if v != nil { + _u.SetParentID(*v) } - return muo + return _u } // ClearParentID clears the value of the "parent_id" field. -func (muo *MembershipUpdateOne) ClearParentID() *MembershipUpdateOne { - muo.mutation.ClearParentID() - return muo +func (_u *MembershipUpdateOne) ClearParentID() *MembershipUpdateOne { + _u.mutation.ClearParentID() + return _u } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (muo *MembershipUpdateOne) SetOrganizationID(id uuid.UUID) *MembershipUpdateOne { - muo.mutation.SetOrganizationID(id) - return muo +func (_u *MembershipUpdateOne) SetOrganizationID(id uuid.UUID) *MembershipUpdateOne { + _u.mutation.SetOrganizationID(id) + return _u } // SetNillableOrganizationID sets the "organization" edge to the Organization entity by ID if the given value is not nil. -func (muo *MembershipUpdateOne) SetNillableOrganizationID(id *uuid.UUID) *MembershipUpdateOne { +func (_u *MembershipUpdateOne) SetNillableOrganizationID(id *uuid.UUID) *MembershipUpdateOne { if id != nil { - muo = muo.SetOrganizationID(*id) + _u = _u.SetOrganizationID(*id) } - return muo + return _u } // SetOrganization sets the "organization" edge to the Organization entity. 
-func (muo *MembershipUpdateOne) SetOrganization(o *Organization) *MembershipUpdateOne { - return muo.SetOrganizationID(o.ID) +func (_u *MembershipUpdateOne) SetOrganization(v *Organization) *MembershipUpdateOne { + return _u.SetOrganizationID(v.ID) } // SetUserID sets the "user" edge to the User entity by ID. -func (muo *MembershipUpdateOne) SetUserID(id uuid.UUID) *MembershipUpdateOne { - muo.mutation.SetUserID(id) - return muo +func (_u *MembershipUpdateOne) SetUserID(id uuid.UUID) *MembershipUpdateOne { + _u.mutation.SetUserID(id) + return _u } // SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. -func (muo *MembershipUpdateOne) SetNillableUserID(id *uuid.UUID) *MembershipUpdateOne { +func (_u *MembershipUpdateOne) SetNillableUserID(id *uuid.UUID) *MembershipUpdateOne { if id != nil { - muo = muo.SetUserID(*id) + _u = _u.SetUserID(*id) } - return muo + return _u } // SetUser sets the "user" edge to the User entity. -func (muo *MembershipUpdateOne) SetUser(u *User) *MembershipUpdateOne { - return muo.SetUserID(u.ID) +func (_u *MembershipUpdateOne) SetUser(v *User) *MembershipUpdateOne { + return _u.SetUserID(v.ID) } // SetParent sets the "parent" edge to the Membership entity. -func (muo *MembershipUpdateOne) SetParent(m *Membership) *MembershipUpdateOne { - return muo.SetParentID(m.ID) +func (_u *MembershipUpdateOne) SetParent(v *Membership) *MembershipUpdateOne { + return _u.SetParentID(v.ID) } // AddChildIDs adds the "children" edge to the Membership entity by IDs. -func (muo *MembershipUpdateOne) AddChildIDs(ids ...uuid.UUID) *MembershipUpdateOne { - muo.mutation.AddChildIDs(ids...) - return muo +func (_u *MembershipUpdateOne) AddChildIDs(ids ...uuid.UUID) *MembershipUpdateOne { + _u.mutation.AddChildIDs(ids...) + return _u } // AddChildren adds the "children" edges to the Membership entity. -func (muo *MembershipUpdateOne) AddChildren(m ...*Membership) *MembershipUpdateOne { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *MembershipUpdateOne) AddChildren(v ...*Membership) *MembershipUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return muo.AddChildIDs(ids...) + return _u.AddChildIDs(ids...) } // Mutation returns the MembershipMutation object of the builder. -func (muo *MembershipUpdateOne) Mutation() *MembershipMutation { - return muo.mutation +func (_u *MembershipUpdateOne) Mutation() *MembershipMutation { + return _u.mutation } // ClearOrganization clears the "organization" edge to the Organization entity. -func (muo *MembershipUpdateOne) ClearOrganization() *MembershipUpdateOne { - muo.mutation.ClearOrganization() - return muo +func (_u *MembershipUpdateOne) ClearOrganization() *MembershipUpdateOne { + _u.mutation.ClearOrganization() + return _u } // ClearUser clears the "user" edge to the User entity. -func (muo *MembershipUpdateOne) ClearUser() *MembershipUpdateOne { - muo.mutation.ClearUser() - return muo +func (_u *MembershipUpdateOne) ClearUser() *MembershipUpdateOne { + _u.mutation.ClearUser() + return _u } // ClearParent clears the "parent" edge to the Membership entity. -func (muo *MembershipUpdateOne) ClearParent() *MembershipUpdateOne { - muo.mutation.ClearParent() - return muo +func (_u *MembershipUpdateOne) ClearParent() *MembershipUpdateOne { + _u.mutation.ClearParent() + return _u } // ClearChildren clears all "children" edges to the Membership entity. 
-func (muo *MembershipUpdateOne) ClearChildren() *MembershipUpdateOne { - muo.mutation.ClearChildren() - return muo +func (_u *MembershipUpdateOne) ClearChildren() *MembershipUpdateOne { + _u.mutation.ClearChildren() + return _u } // RemoveChildIDs removes the "children" edge to Membership entities by IDs. -func (muo *MembershipUpdateOne) RemoveChildIDs(ids ...uuid.UUID) *MembershipUpdateOne { - muo.mutation.RemoveChildIDs(ids...) - return muo +func (_u *MembershipUpdateOne) RemoveChildIDs(ids ...uuid.UUID) *MembershipUpdateOne { + _u.mutation.RemoveChildIDs(ids...) + return _u } // RemoveChildren removes "children" edges to Membership entities. -func (muo *MembershipUpdateOne) RemoveChildren(m ...*Membership) *MembershipUpdateOne { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *MembershipUpdateOne) RemoveChildren(v ...*Membership) *MembershipUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return muo.RemoveChildIDs(ids...) + return _u.RemoveChildIDs(ids...) } // Where appends a list predicates to the MembershipUpdate builder. -func (muo *MembershipUpdateOne) Where(ps ...predicate.Membership) *MembershipUpdateOne { - muo.mutation.Where(ps...) - return muo +func (_u *MembershipUpdateOne) Where(ps ...predicate.Membership) *MembershipUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (muo *MembershipUpdateOne) Select(field string, fields ...string) *MembershipUpdateOne { - muo.fields = append([]string{field}, fields...) - return muo +func (_u *MembershipUpdateOne) Select(field string, fields ...string) *MembershipUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated Membership entity. -func (muo *MembershipUpdateOne) Save(ctx context.Context) (*Membership, error) { - return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) +func (_u *MembershipUpdateOne) Save(ctx context.Context) (*Membership, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (muo *MembershipUpdateOne) SaveX(ctx context.Context) *Membership { - node, err := muo.Save(ctx) +func (_u *MembershipUpdateOne) SaveX(ctx context.Context) *Membership { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -801,31 +801,31 @@ func (muo *MembershipUpdateOne) SaveX(ctx context.Context) *Membership { } // Exec executes the query on the entity. -func (muo *MembershipUpdateOne) Exec(ctx context.Context) error { - _, err := muo.Save(ctx) +func (_u *MembershipUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (muo *MembershipUpdateOne) ExecX(ctx context.Context) { - if err := muo.Exec(ctx); err != nil { +func (_u *MembershipUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. 
-func (muo *MembershipUpdateOne) check() error { - if v, ok := muo.mutation.Role(); ok { +func (_u *MembershipUpdateOne) check() error { + if v, ok := _u.mutation.Role(); ok { if err := membership.RoleValidator(v); err != nil { return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "Membership.role": %w`, err)} } } - if v, ok := muo.mutation.MembershipType(); ok { + if v, ok := _u.mutation.MembershipType(); ok { if err := membership.MembershipTypeValidator(v); err != nil { return &ValidationError{Name: "membership_type", err: fmt.Errorf(`ent: validator failed for field "Membership.membership_type": %w`, err)} } } - if v, ok := muo.mutation.ResourceType(); ok { + if v, ok := _u.mutation.ResourceType(); ok { if err := membership.ResourceTypeValidator(v); err != nil { return &ValidationError{Name: "resource_type", err: fmt.Errorf(`ent: validator failed for field "Membership.resource_type": %w`, err)} } @@ -834,22 +834,22 @@ func (muo *MembershipUpdateOne) check() error { } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (muo *MembershipUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *MembershipUpdateOne { - muo.modifiers = append(muo.modifiers, modifiers...) - return muo +func (_u *MembershipUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *MembershipUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, err error) { - if err := muo.check(); err != nil { +func (_u *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(membership.Table, membership.Columns, sqlgraph.NewFieldSpec(membership.FieldID, field.TypeUUID)) - id, ok := muo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Membership.id" for update`)} } _spec.Node.ID.Value = id - if fields := muo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, membership.FieldID) for _, f := range fields { @@ -861,47 +861,47 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } } } - if ps := muo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := muo.mutation.Current(); ok { + if value, ok := _u.mutation.Current(); ok { _spec.SetField(membership.FieldCurrent, field.TypeBool, value) } - if value, ok := muo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(membership.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := muo.mutation.Role(); ok { + if value, ok := _u.mutation.Role(); ok { _spec.SetField(membership.FieldRole, field.TypeEnum, value) } - if value, ok := muo.mutation.MembershipType(); ok { + if value, ok := _u.mutation.MembershipType(); ok { _spec.SetField(membership.FieldMembershipType, field.TypeEnum, value) } - if muo.mutation.MembershipTypeCleared() { + if _u.mutation.MembershipTypeCleared() { _spec.ClearField(membership.FieldMembershipType, field.TypeEnum) } - if value, ok := muo.mutation.MemberID(); ok { + if value, ok := _u.mutation.MemberID(); ok { _spec.SetField(membership.FieldMemberID, field.TypeUUID, value) } 
- if muo.mutation.MemberIDCleared() { + if _u.mutation.MemberIDCleared() { _spec.ClearField(membership.FieldMemberID, field.TypeUUID) } - if value, ok := muo.mutation.ResourceType(); ok { + if value, ok := _u.mutation.ResourceType(); ok { _spec.SetField(membership.FieldResourceType, field.TypeEnum, value) } - if muo.mutation.ResourceTypeCleared() { + if _u.mutation.ResourceTypeCleared() { _spec.ClearField(membership.FieldResourceType, field.TypeEnum) } - if value, ok := muo.mutation.ResourceID(); ok { + if value, ok := _u.mutation.ResourceID(); ok { _spec.SetField(membership.FieldResourceID, field.TypeUUID, value) } - if muo.mutation.ResourceIDCleared() { + if _u.mutation.ResourceIDCleared() { _spec.ClearField(membership.FieldResourceID, field.TypeUUID) } - if muo.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -914,7 +914,7 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := muo.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -930,7 +930,7 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if muo.mutation.UserCleared() { + if _u.mutation.UserCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -943,7 +943,7 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := muo.mutation.UserIDs(); len(nodes) > 0 { + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -959,7 +959,7 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if muo.mutation.ParentCleared() { + if _u.mutation.ParentCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -972,7 +972,7 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := muo.mutation.ParentIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ParentIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -988,7 +988,7 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if muo.mutation.ChildrenCleared() { + if _u.mutation.ChildrenCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1001,7 +1001,7 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := muo.mutation.RemovedChildrenIDs(); len(nodes) > 0 && !muo.mutation.ChildrenCleared() { + if nodes := _u.mutation.RemovedChildrenIDs(); len(nodes) > 0 && !_u.mutation.ChildrenCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1017,7 +1017,7 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := muo.mutation.ChildrenIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ChildrenIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1033,11 
+1033,11 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(muo.modifiers...) - _node = &Membership{config: muo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &Membership{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{membership.Label} } else if sqlgraph.IsConstraintError(err) { @@ -1045,6 +1045,6 @@ func (muo *MembershipUpdateOne) sqlSave(ctx context.Context) (_node *Membership, } return nil, err } - muo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/migrate/migrations/20251217164302.sql b/app/controlplane/pkg/data/ent/migrate/migrations/20251217164302.sql new file mode 100644 index 000000000..ce4063467 --- /dev/null +++ b/app/controlplane/pkg/data/ent/migrate/migrations/20251217164302.sql @@ -0,0 +1,2 @@ +-- Rename an index from "membership_membership_type_mem_69a8fe555e26fd9532f5e3fe38ba2651" to "membership_membership_type_member_id_resource_type_resource_id_" +ALTER INDEX "membership_membership_type_mem_69a8fe555e26fd9532f5e3fe38ba2651" RENAME TO "membership_membership_type_member_id_resource_type_resource_id_"; diff --git a/app/controlplane/pkg/data/ent/migrate/migrations/atlas.sum b/app/controlplane/pkg/data/ent/migrate/migrations/atlas.sum index 77f8d42e6..aad719a8f 100644 --- a/app/controlplane/pkg/data/ent/migrate/migrations/atlas.sum +++ b/app/controlplane/pkg/data/ent/migrate/migrations/atlas.sum @@ -1,4 +1,4 @@ -h1:QYIHH5zsR/0t1wr7+F9jSRgXWFVAaYgSGomoG82EARI= +h1:F5OlLQoOXh5aKu7gY5y8xBRBqjAxsW729schjakKjJk= 20230706165452_init-schema.sql h1:VvqbNFEQnCvUVyj2iDYVQQxDM0+sSXqocpt/5H64k8M= 20230710111950-cas-backend.sql h1:A8iBuSzZIEbdsv9ipBtscZQuaBp3V5/VMw7eZH6GX+g= 20230712094107-cas-backends-workflow-runs.sql h1:a5rzxpVGyd56nLRSsKrmCFc9sebg65RWzLghKHh5xvI= @@ -121,3 +121,4 @@ h1:QYIHH5zsR/0t1wr7+F9jSRgXWFVAaYgSGomoG82EARI= 20251111162946.sql h1:oNke93PdAreUH6F5AD5FE6uI6riJpE4KPb77LXTeuRU= 20251114174059.sql h1:f/wB/OlhZxIc9AVCxTNu4dFmPd1T3sCY0nS8Zb9ZS9Q= 20251212115308.sql h1:CmwHDA9X91++2dnThzk57++5sBDAGw2IQnHzO3/bRlk= +20251217164302.sql h1:OL3OCqWsMtv06RfIlQNcdLMbt4Tz91Lijpbkxqwt7zM= diff --git a/app/controlplane/pkg/data/ent/organization.go b/app/controlplane/pkg/data/ent/organization.go index 5a2b9fb55..c9d7d2715 100644 --- a/app/controlplane/pkg/data/ent/organization.go +++ b/app/controlplane/pkg/data/ent/organization.go @@ -160,7 +160,7 @@ func (*Organization) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Organization fields. 
-func (o *Organization) assignValues(columns []string, values []any) error { +func (_m *Organization) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -170,43 +170,43 @@ func (o *Organization) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - o.ID = *value + _m.ID = *value } case organization.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - o.Name = value.String + _m.Name = value.String } case organization.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - o.CreatedAt = value.Time + _m.CreatedAt = value.Time } case organization.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - o.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case organization.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - o.DeletedAt = value.Time + _m.DeletedAt = value.Time } case organization.FieldBlockOnPolicyViolation: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field block_on_policy_violation", values[i]) } else if value.Valid { - o.BlockOnPolicyViolation = value.Bool + _m.BlockOnPolicyViolation = value.Bool } case organization.FieldPoliciesAllowedHostnames: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field policies_allowed_hostnames", values[i]) } else if value != nil && len(*value) > 0 { - if err := json.Unmarshal(*value, &o.PoliciesAllowedHostnames); err != nil { + if err := json.Unmarshal(*value, &_m.PoliciesAllowedHostnames); err != nil { return fmt.Errorf("unmarshal field policies_allowed_hostnames: %w", err) } } @@ -214,16 +214,16 @@ func (o *Organization) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field prevent_implicit_workflow_creation", values[i]) } else if value.Valid { - o.PreventImplicitWorkflowCreation = value.Bool + _m.PreventImplicitWorkflowCreation = value.Bool } case organization.FieldRestrictContractCreationToOrgAdmins: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field restrict_contract_creation_to_org_admins", values[i]) } else if value.Valid { - o.RestrictContractCreationToOrgAdmins = value.Bool + _m.RestrictContractCreationToOrgAdmins = value.Bool } default: - o.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -231,96 +231,96 @@ func (o *Organization) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the Organization. // This includes values selected through modifiers, order, etc. 
-func (o *Organization) Value(name string) (ent.Value, error) { - return o.selectValues.Get(name) +func (_m *Organization) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryMemberships queries the "memberships" edge of the Organization entity. -func (o *Organization) QueryMemberships() *MembershipQuery { - return NewOrganizationClient(o.config).QueryMemberships(o) +func (_m *Organization) QueryMemberships() *MembershipQuery { + return NewOrganizationClient(_m.config).QueryMemberships(_m) } // QueryWorkflowContracts queries the "workflow_contracts" edge of the Organization entity. -func (o *Organization) QueryWorkflowContracts() *WorkflowContractQuery { - return NewOrganizationClient(o.config).QueryWorkflowContracts(o) +func (_m *Organization) QueryWorkflowContracts() *WorkflowContractQuery { + return NewOrganizationClient(_m.config).QueryWorkflowContracts(_m) } // QueryWorkflows queries the "workflows" edge of the Organization entity. -func (o *Organization) QueryWorkflows() *WorkflowQuery { - return NewOrganizationClient(o.config).QueryWorkflows(o) +func (_m *Organization) QueryWorkflows() *WorkflowQuery { + return NewOrganizationClient(_m.config).QueryWorkflows(_m) } // QueryCasBackends queries the "cas_backends" edge of the Organization entity. -func (o *Organization) QueryCasBackends() *CASBackendQuery { - return NewOrganizationClient(o.config).QueryCasBackends(o) +func (_m *Organization) QueryCasBackends() *CASBackendQuery { + return NewOrganizationClient(_m.config).QueryCasBackends(_m) } // QueryIntegrations queries the "integrations" edge of the Organization entity. -func (o *Organization) QueryIntegrations() *IntegrationQuery { - return NewOrganizationClient(o.config).QueryIntegrations(o) +func (_m *Organization) QueryIntegrations() *IntegrationQuery { + return NewOrganizationClient(_m.config).QueryIntegrations(_m) } // QueryAPITokens queries the "api_tokens" edge of the Organization entity. -func (o *Organization) QueryAPITokens() *APITokenQuery { - return NewOrganizationClient(o.config).QueryAPITokens(o) +func (_m *Organization) QueryAPITokens() *APITokenQuery { + return NewOrganizationClient(_m.config).QueryAPITokens(_m) } // QueryProjects queries the "projects" edge of the Organization entity. -func (o *Organization) QueryProjects() *ProjectQuery { - return NewOrganizationClient(o.config).QueryProjects(o) +func (_m *Organization) QueryProjects() *ProjectQuery { + return NewOrganizationClient(_m.config).QueryProjects(_m) } // QueryGroups queries the "groups" edge of the Organization entity. -func (o *Organization) QueryGroups() *GroupQuery { - return NewOrganizationClient(o.config).QueryGroups(o) +func (_m *Organization) QueryGroups() *GroupQuery { + return NewOrganizationClient(_m.config).QueryGroups(_m) } // Update returns a builder for updating this Organization. // Note that you need to call Organization.Unwrap() before calling this method if this Organization // was returned from a transaction, and the transaction was committed or rolled back. -func (o *Organization) Update() *OrganizationUpdateOne { - return NewOrganizationClient(o.config).UpdateOne(o) +func (_m *Organization) Update() *OrganizationUpdateOne { + return NewOrganizationClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the Organization entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
-func (o *Organization) Unwrap() *Organization { - _tx, ok := o.config.driver.(*txDriver) +func (_m *Organization) Unwrap() *Organization { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: Organization is not a transactional entity") } - o.config.driver = _tx.drv - return o + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (o *Organization) String() string { +func (_m *Organization) String() string { var builder strings.Builder builder.WriteString("Organization(") - builder.WriteString(fmt.Sprintf("id=%v, ", o.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("name=") - builder.WriteString(o.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(o.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(o.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(o.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("block_on_policy_violation=") - builder.WriteString(fmt.Sprintf("%v", o.BlockOnPolicyViolation)) + builder.WriteString(fmt.Sprintf("%v", _m.BlockOnPolicyViolation)) builder.WriteString(", ") builder.WriteString("policies_allowed_hostnames=") - builder.WriteString(fmt.Sprintf("%v", o.PoliciesAllowedHostnames)) + builder.WriteString(fmt.Sprintf("%v", _m.PoliciesAllowedHostnames)) builder.WriteString(", ") builder.WriteString("prevent_implicit_workflow_creation=") - builder.WriteString(fmt.Sprintf("%v", o.PreventImplicitWorkflowCreation)) + builder.WriteString(fmt.Sprintf("%v", _m.PreventImplicitWorkflowCreation)) builder.WriteString(", ") builder.WriteString("restrict_contract_creation_to_org_admins=") - builder.WriteString(fmt.Sprintf("%v", o.RestrictContractCreationToOrgAdmins)) + builder.WriteString(fmt.Sprintf("%v", _m.RestrictContractCreationToOrgAdmins)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/organization_create.go b/app/controlplane/pkg/data/ent/organization_create.go index a12b0133d..166afdcf4 100644 --- a/app/controlplane/pkg/data/ent/organization_create.go +++ b/app/controlplane/pkg/data/ent/organization_create.go @@ -33,249 +33,249 @@ type OrganizationCreate struct { } // SetName sets the "name" field. -func (oc *OrganizationCreate) SetName(s string) *OrganizationCreate { - oc.mutation.SetName(s) - return oc +func (_c *OrganizationCreate) SetName(v string) *OrganizationCreate { + _c.mutation.SetName(v) + return _c } // SetCreatedAt sets the "created_at" field. -func (oc *OrganizationCreate) SetCreatedAt(t time.Time) *OrganizationCreate { - oc.mutation.SetCreatedAt(t) - return oc +func (_c *OrganizationCreate) SetCreatedAt(v time.Time) *OrganizationCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (oc *OrganizationCreate) SetNillableCreatedAt(t *time.Time) *OrganizationCreate { - if t != nil { - oc.SetCreatedAt(*t) +func (_c *OrganizationCreate) SetNillableCreatedAt(v *time.Time) *OrganizationCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return oc + return _c } // SetUpdatedAt sets the "updated_at" field. 
-func (oc *OrganizationCreate) SetUpdatedAt(t time.Time) *OrganizationCreate { - oc.mutation.SetUpdatedAt(t) - return oc +func (_c *OrganizationCreate) SetUpdatedAt(v time.Time) *OrganizationCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (oc *OrganizationCreate) SetNillableUpdatedAt(t *time.Time) *OrganizationCreate { - if t != nil { - oc.SetUpdatedAt(*t) +func (_c *OrganizationCreate) SetNillableUpdatedAt(v *time.Time) *OrganizationCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return oc + return _c } // SetDeletedAt sets the "deleted_at" field. -func (oc *OrganizationCreate) SetDeletedAt(t time.Time) *OrganizationCreate { - oc.mutation.SetDeletedAt(t) - return oc +func (_c *OrganizationCreate) SetDeletedAt(v time.Time) *OrganizationCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (oc *OrganizationCreate) SetNillableDeletedAt(t *time.Time) *OrganizationCreate { - if t != nil { - oc.SetDeletedAt(*t) +func (_c *OrganizationCreate) SetNillableDeletedAt(v *time.Time) *OrganizationCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return oc + return _c } // SetBlockOnPolicyViolation sets the "block_on_policy_violation" field. -func (oc *OrganizationCreate) SetBlockOnPolicyViolation(b bool) *OrganizationCreate { - oc.mutation.SetBlockOnPolicyViolation(b) - return oc +func (_c *OrganizationCreate) SetBlockOnPolicyViolation(v bool) *OrganizationCreate { + _c.mutation.SetBlockOnPolicyViolation(v) + return _c } // SetNillableBlockOnPolicyViolation sets the "block_on_policy_violation" field if the given value is not nil. -func (oc *OrganizationCreate) SetNillableBlockOnPolicyViolation(b *bool) *OrganizationCreate { - if b != nil { - oc.SetBlockOnPolicyViolation(*b) +func (_c *OrganizationCreate) SetNillableBlockOnPolicyViolation(v *bool) *OrganizationCreate { + if v != nil { + _c.SetBlockOnPolicyViolation(*v) } - return oc + return _c } // SetPoliciesAllowedHostnames sets the "policies_allowed_hostnames" field. -func (oc *OrganizationCreate) SetPoliciesAllowedHostnames(s []string) *OrganizationCreate { - oc.mutation.SetPoliciesAllowedHostnames(s) - return oc +func (_c *OrganizationCreate) SetPoliciesAllowedHostnames(v []string) *OrganizationCreate { + _c.mutation.SetPoliciesAllowedHostnames(v) + return _c } // SetPreventImplicitWorkflowCreation sets the "prevent_implicit_workflow_creation" field. -func (oc *OrganizationCreate) SetPreventImplicitWorkflowCreation(b bool) *OrganizationCreate { - oc.mutation.SetPreventImplicitWorkflowCreation(b) - return oc +func (_c *OrganizationCreate) SetPreventImplicitWorkflowCreation(v bool) *OrganizationCreate { + _c.mutation.SetPreventImplicitWorkflowCreation(v) + return _c } // SetNillablePreventImplicitWorkflowCreation sets the "prevent_implicit_workflow_creation" field if the given value is not nil. -func (oc *OrganizationCreate) SetNillablePreventImplicitWorkflowCreation(b *bool) *OrganizationCreate { - if b != nil { - oc.SetPreventImplicitWorkflowCreation(*b) +func (_c *OrganizationCreate) SetNillablePreventImplicitWorkflowCreation(v *bool) *OrganizationCreate { + if v != nil { + _c.SetPreventImplicitWorkflowCreation(*v) } - return oc + return _c } // SetRestrictContractCreationToOrgAdmins sets the "restrict_contract_creation_to_org_admins" field. 
-func (oc *OrganizationCreate) SetRestrictContractCreationToOrgAdmins(b bool) *OrganizationCreate { - oc.mutation.SetRestrictContractCreationToOrgAdmins(b) - return oc +func (_c *OrganizationCreate) SetRestrictContractCreationToOrgAdmins(v bool) *OrganizationCreate { + _c.mutation.SetRestrictContractCreationToOrgAdmins(v) + return _c } // SetNillableRestrictContractCreationToOrgAdmins sets the "restrict_contract_creation_to_org_admins" field if the given value is not nil. -func (oc *OrganizationCreate) SetNillableRestrictContractCreationToOrgAdmins(b *bool) *OrganizationCreate { - if b != nil { - oc.SetRestrictContractCreationToOrgAdmins(*b) +func (_c *OrganizationCreate) SetNillableRestrictContractCreationToOrgAdmins(v *bool) *OrganizationCreate { + if v != nil { + _c.SetRestrictContractCreationToOrgAdmins(*v) } - return oc + return _c } // SetID sets the "id" field. -func (oc *OrganizationCreate) SetID(u uuid.UUID) *OrganizationCreate { - oc.mutation.SetID(u) - return oc +func (_c *OrganizationCreate) SetID(v uuid.UUID) *OrganizationCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (oc *OrganizationCreate) SetNillableID(u *uuid.UUID) *OrganizationCreate { - if u != nil { - oc.SetID(*u) +func (_c *OrganizationCreate) SetNillableID(v *uuid.UUID) *OrganizationCreate { + if v != nil { + _c.SetID(*v) } - return oc + return _c } // AddMembershipIDs adds the "memberships" edge to the Membership entity by IDs. -func (oc *OrganizationCreate) AddMembershipIDs(ids ...uuid.UUID) *OrganizationCreate { - oc.mutation.AddMembershipIDs(ids...) - return oc +func (_c *OrganizationCreate) AddMembershipIDs(ids ...uuid.UUID) *OrganizationCreate { + _c.mutation.AddMembershipIDs(ids...) + return _c } // AddMemberships adds the "memberships" edges to the Membership entity. -func (oc *OrganizationCreate) AddMemberships(m ...*Membership) *OrganizationCreate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_c *OrganizationCreate) AddMemberships(v ...*Membership) *OrganizationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return oc.AddMembershipIDs(ids...) + return _c.AddMembershipIDs(ids...) } // AddWorkflowContractIDs adds the "workflow_contracts" edge to the WorkflowContract entity by IDs. -func (oc *OrganizationCreate) AddWorkflowContractIDs(ids ...uuid.UUID) *OrganizationCreate { - oc.mutation.AddWorkflowContractIDs(ids...) - return oc +func (_c *OrganizationCreate) AddWorkflowContractIDs(ids ...uuid.UUID) *OrganizationCreate { + _c.mutation.AddWorkflowContractIDs(ids...) + return _c } // AddWorkflowContracts adds the "workflow_contracts" edges to the WorkflowContract entity. -func (oc *OrganizationCreate) AddWorkflowContracts(w ...*WorkflowContract) *OrganizationCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *OrganizationCreate) AddWorkflowContracts(v ...*WorkflowContract) *OrganizationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return oc.AddWorkflowContractIDs(ids...) + return _c.AddWorkflowContractIDs(ids...) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (oc *OrganizationCreate) AddWorkflowIDs(ids ...uuid.UUID) *OrganizationCreate { - oc.mutation.AddWorkflowIDs(ids...) - return oc +func (_c *OrganizationCreate) AddWorkflowIDs(ids ...uuid.UUID) *OrganizationCreate { + _c.mutation.AddWorkflowIDs(ids...) 
+ return _c } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (oc *OrganizationCreate) AddWorkflows(w ...*Workflow) *OrganizationCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *OrganizationCreate) AddWorkflows(v ...*Workflow) *OrganizationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return oc.AddWorkflowIDs(ids...) + return _c.AddWorkflowIDs(ids...) } // AddCasBackendIDs adds the "cas_backends" edge to the CASBackend entity by IDs. -func (oc *OrganizationCreate) AddCasBackendIDs(ids ...uuid.UUID) *OrganizationCreate { - oc.mutation.AddCasBackendIDs(ids...) - return oc +func (_c *OrganizationCreate) AddCasBackendIDs(ids ...uuid.UUID) *OrganizationCreate { + _c.mutation.AddCasBackendIDs(ids...) + return _c } // AddCasBackends adds the "cas_backends" edges to the CASBackend entity. -func (oc *OrganizationCreate) AddCasBackends(c ...*CASBackend) *OrganizationCreate { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_c *OrganizationCreate) AddCasBackends(v ...*CASBackend) *OrganizationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return oc.AddCasBackendIDs(ids...) + return _c.AddCasBackendIDs(ids...) } // AddIntegrationIDs adds the "integrations" edge to the Integration entity by IDs. -func (oc *OrganizationCreate) AddIntegrationIDs(ids ...uuid.UUID) *OrganizationCreate { - oc.mutation.AddIntegrationIDs(ids...) - return oc +func (_c *OrganizationCreate) AddIntegrationIDs(ids ...uuid.UUID) *OrganizationCreate { + _c.mutation.AddIntegrationIDs(ids...) + return _c } // AddIntegrations adds the "integrations" edges to the Integration entity. -func (oc *OrganizationCreate) AddIntegrations(i ...*Integration) *OrganizationCreate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_c *OrganizationCreate) AddIntegrations(v ...*Integration) *OrganizationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return oc.AddIntegrationIDs(ids...) + return _c.AddIntegrationIDs(ids...) } // AddAPITokenIDs adds the "api_tokens" edge to the APIToken entity by IDs. -func (oc *OrganizationCreate) AddAPITokenIDs(ids ...uuid.UUID) *OrganizationCreate { - oc.mutation.AddAPITokenIDs(ids...) - return oc +func (_c *OrganizationCreate) AddAPITokenIDs(ids ...uuid.UUID) *OrganizationCreate { + _c.mutation.AddAPITokenIDs(ids...) + return _c } // AddAPITokens adds the "api_tokens" edges to the APIToken entity. -func (oc *OrganizationCreate) AddAPITokens(a ...*APIToken) *OrganizationCreate { - ids := make([]uuid.UUID, len(a)) - for i := range a { - ids[i] = a[i].ID +func (_c *OrganizationCreate) AddAPITokens(v ...*APIToken) *OrganizationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return oc.AddAPITokenIDs(ids...) + return _c.AddAPITokenIDs(ids...) } // AddProjectIDs adds the "projects" edge to the Project entity by IDs. -func (oc *OrganizationCreate) AddProjectIDs(ids ...uuid.UUID) *OrganizationCreate { - oc.mutation.AddProjectIDs(ids...) - return oc +func (_c *OrganizationCreate) AddProjectIDs(ids ...uuid.UUID) *OrganizationCreate { + _c.mutation.AddProjectIDs(ids...) + return _c } // AddProjects adds the "projects" edges to the Project entity. 
-func (oc *OrganizationCreate) AddProjects(p ...*Project) *OrganizationCreate { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_c *OrganizationCreate) AddProjects(v ...*Project) *OrganizationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return oc.AddProjectIDs(ids...) + return _c.AddProjectIDs(ids...) } // AddGroupIDs adds the "groups" edge to the Group entity by IDs. -func (oc *OrganizationCreate) AddGroupIDs(ids ...uuid.UUID) *OrganizationCreate { - oc.mutation.AddGroupIDs(ids...) - return oc +func (_c *OrganizationCreate) AddGroupIDs(ids ...uuid.UUID) *OrganizationCreate { + _c.mutation.AddGroupIDs(ids...) + return _c } // AddGroups adds the "groups" edges to the Group entity. -func (oc *OrganizationCreate) AddGroups(g ...*Group) *OrganizationCreate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_c *OrganizationCreate) AddGroups(v ...*Group) *OrganizationCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return oc.AddGroupIDs(ids...) + return _c.AddGroupIDs(ids...) } // Mutation returns the OrganizationMutation object of the builder. -func (oc *OrganizationCreate) Mutation() *OrganizationMutation { - return oc.mutation +func (_c *OrganizationCreate) Mutation() *OrganizationMutation { + return _c.mutation } // Save creates the Organization in the database. -func (oc *OrganizationCreate) Save(ctx context.Context) (*Organization, error) { - oc.defaults() - return withHooks(ctx, oc.sqlSave, oc.mutation, oc.hooks) +func (_c *OrganizationCreate) Save(ctx context.Context) (*Organization, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (oc *OrganizationCreate) SaveX(ctx context.Context) *Organization { - v, err := oc.Save(ctx) +func (_c *OrganizationCreate) SaveX(ctx context.Context) *Organization { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -283,75 +283,75 @@ func (oc *OrganizationCreate) SaveX(ctx context.Context) *Organization { } // Exec executes the query. -func (oc *OrganizationCreate) Exec(ctx context.Context) error { - _, err := oc.Save(ctx) +func (_c *OrganizationCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (oc *OrganizationCreate) ExecX(ctx context.Context) { - if err := oc.Exec(ctx); err != nil { +func (_c *OrganizationCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. 
-func (oc *OrganizationCreate) defaults() { - if _, ok := oc.mutation.CreatedAt(); !ok { +func (_c *OrganizationCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := organization.DefaultCreatedAt() - oc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := oc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := organization.DefaultUpdatedAt() - oc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := oc.mutation.BlockOnPolicyViolation(); !ok { + if _, ok := _c.mutation.BlockOnPolicyViolation(); !ok { v := organization.DefaultBlockOnPolicyViolation - oc.mutation.SetBlockOnPolicyViolation(v) + _c.mutation.SetBlockOnPolicyViolation(v) } - if _, ok := oc.mutation.PreventImplicitWorkflowCreation(); !ok { + if _, ok := _c.mutation.PreventImplicitWorkflowCreation(); !ok { v := organization.DefaultPreventImplicitWorkflowCreation - oc.mutation.SetPreventImplicitWorkflowCreation(v) + _c.mutation.SetPreventImplicitWorkflowCreation(v) } - if _, ok := oc.mutation.RestrictContractCreationToOrgAdmins(); !ok { + if _, ok := _c.mutation.RestrictContractCreationToOrgAdmins(); !ok { v := organization.DefaultRestrictContractCreationToOrgAdmins - oc.mutation.SetRestrictContractCreationToOrgAdmins(v) + _c.mutation.SetRestrictContractCreationToOrgAdmins(v) } - if _, ok := oc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := organization.DefaultID() - oc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. -func (oc *OrganizationCreate) check() error { - if _, ok := oc.mutation.Name(); !ok { +func (_c *OrganizationCreate) check() error { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Organization.name"`)} } - if _, ok := oc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Organization.created_at"`)} } - if _, ok := oc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Organization.updated_at"`)} } - if _, ok := oc.mutation.BlockOnPolicyViolation(); !ok { + if _, ok := _c.mutation.BlockOnPolicyViolation(); !ok { return &ValidationError{Name: "block_on_policy_violation", err: errors.New(`ent: missing required field "Organization.block_on_policy_violation"`)} } - if _, ok := oc.mutation.PreventImplicitWorkflowCreation(); !ok { + if _, ok := _c.mutation.PreventImplicitWorkflowCreation(); !ok { return &ValidationError{Name: "prevent_implicit_workflow_creation", err: errors.New(`ent: missing required field "Organization.prevent_implicit_workflow_creation"`)} } - if _, ok := oc.mutation.RestrictContractCreationToOrgAdmins(); !ok { + if _, ok := _c.mutation.RestrictContractCreationToOrgAdmins(); !ok { return &ValidationError{Name: "restrict_contract_creation_to_org_admins", err: errors.New(`ent: missing required field "Organization.restrict_contract_creation_to_org_admins"`)} } return nil } -func (oc *OrganizationCreate) sqlSave(ctx context.Context) (*Organization, error) { - if err := oc.check(); err != nil { +func (_c *OrganizationCreate) sqlSave(ctx context.Context) (*Organization, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := oc.createSpec() - if err := sqlgraph.CreateNode(ctx, oc.driver, _spec); err != nil { + _node, _spec := 
_c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -364,54 +364,54 @@ func (oc *OrganizationCreate) sqlSave(ctx context.Context) (*Organization, error return nil, err } } - oc.mutation.id = &_node.ID - oc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) { +func (_c *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) { var ( - _node = &Organization{config: oc.config} + _node = &Organization{config: _c.config} _spec = sqlgraph.NewCreateSpec(organization.Table, sqlgraph.NewFieldSpec(organization.FieldID, field.TypeUUID)) ) - _spec.OnConflict = oc.conflict - if id, ok := oc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := oc.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { _spec.SetField(organization.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := oc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(organization.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := oc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(organization.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := oc.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(organization.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if value, ok := oc.mutation.BlockOnPolicyViolation(); ok { + if value, ok := _c.mutation.BlockOnPolicyViolation(); ok { _spec.SetField(organization.FieldBlockOnPolicyViolation, field.TypeBool, value) _node.BlockOnPolicyViolation = value } - if value, ok := oc.mutation.PoliciesAllowedHostnames(); ok { + if value, ok := _c.mutation.PoliciesAllowedHostnames(); ok { _spec.SetField(organization.FieldPoliciesAllowedHostnames, field.TypeJSON, value) _node.PoliciesAllowedHostnames = value } - if value, ok := oc.mutation.PreventImplicitWorkflowCreation(); ok { + if value, ok := _c.mutation.PreventImplicitWorkflowCreation(); ok { _spec.SetField(organization.FieldPreventImplicitWorkflowCreation, field.TypeBool, value) _node.PreventImplicitWorkflowCreation = value } - if value, ok := oc.mutation.RestrictContractCreationToOrgAdmins(); ok { + if value, ok := _c.mutation.RestrictContractCreationToOrgAdmins(); ok { _spec.SetField(organization.FieldRestrictContractCreationToOrgAdmins, field.TypeBool, value) _node.RestrictContractCreationToOrgAdmins = value } - if nodes := oc.mutation.MembershipsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.MembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -427,7 +427,7 @@ func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) } _spec.Edges = append(_spec.Edges, edge) } - if nodes := oc.mutation.WorkflowContractsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowContractsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -443,7 +443,7 @@ func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) } _spec.Edges = append(_spec.Edges, edge) } - if nodes := oc.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowsIDs(); 
len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -459,7 +459,7 @@ func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) } _spec.Edges = append(_spec.Edges, edge) } - if nodes := oc.mutation.CasBackendsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.CasBackendsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -475,7 +475,7 @@ func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) } _spec.Edges = append(_spec.Edges, edge) } - if nodes := oc.mutation.IntegrationsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.IntegrationsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -491,7 +491,7 @@ func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) } _spec.Edges = append(_spec.Edges, edge) } - if nodes := oc.mutation.APITokensIDs(); len(nodes) > 0 { + if nodes := _c.mutation.APITokensIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -507,7 +507,7 @@ func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) } _spec.Edges = append(_spec.Edges, edge) } - if nodes := oc.mutation.ProjectsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ProjectsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -523,7 +523,7 @@ func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) } _spec.Edges = append(_spec.Edges, edge) } - if nodes := oc.mutation.GroupsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.GroupsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -558,10 +558,10 @@ func (oc *OrganizationCreate) createSpec() (*Organization, *sqlgraph.CreateSpec) // SetName(v+v). // }). // Exec(ctx) -func (oc *OrganizationCreate) OnConflict(opts ...sql.ConflictOption) *OrganizationUpsertOne { - oc.conflict = opts +func (_c *OrganizationCreate) OnConflict(opts ...sql.ConflictOption) *OrganizationUpsertOne { + _c.conflict = opts return &OrganizationUpsertOne{ - create: oc, + create: _c, } } @@ -571,10 +571,10 @@ func (oc *OrganizationCreate) OnConflict(opts ...sql.ConflictOption) *Organizati // client.Organization.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (oc *OrganizationCreate) OnConflictColumns(columns ...string) *OrganizationUpsertOne { - oc.conflict = append(oc.conflict, sql.ConflictColumns(columns...)) +func (_c *OrganizationCreate) OnConflictColumns(columns ...string) *OrganizationUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &OrganizationUpsertOne{ - create: oc, + create: _c, } } @@ -897,16 +897,16 @@ type OrganizationCreateBulk struct { } // Save creates the Organization entities in the database. 
-func (ocb *OrganizationCreateBulk) Save(ctx context.Context) ([]*Organization, error) { - if ocb.err != nil { - return nil, ocb.err - } - specs := make([]*sqlgraph.CreateSpec, len(ocb.builders)) - nodes := make([]*Organization, len(ocb.builders)) - mutators := make([]Mutator, len(ocb.builders)) - for i := range ocb.builders { +func (_c *OrganizationCreateBulk) Save(ctx context.Context) ([]*Organization, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Organization, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := ocb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*OrganizationMutation) @@ -920,12 +920,12 @@ func (ocb *OrganizationCreateBulk) Save(ctx context.Context) ([]*Organization, e var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, ocb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = ocb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, ocb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -945,7 +945,7 @@ func (ocb *OrganizationCreateBulk) Save(ctx context.Context) ([]*Organization, e }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, ocb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -953,8 +953,8 @@ func (ocb *OrganizationCreateBulk) Save(ctx context.Context) ([]*Organization, e } // SaveX is like Save, but panics if an error occurs. -func (ocb *OrganizationCreateBulk) SaveX(ctx context.Context) []*Organization { - v, err := ocb.Save(ctx) +func (_c *OrganizationCreateBulk) SaveX(ctx context.Context) []*Organization { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -962,14 +962,14 @@ func (ocb *OrganizationCreateBulk) SaveX(ctx context.Context) []*Organization { } // Exec executes the query. -func (ocb *OrganizationCreateBulk) Exec(ctx context.Context) error { - _, err := ocb.Save(ctx) +func (_c *OrganizationCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (ocb *OrganizationCreateBulk) ExecX(ctx context.Context) { - if err := ocb.Exec(ctx); err != nil { +func (_c *OrganizationCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -989,10 +989,10 @@ func (ocb *OrganizationCreateBulk) ExecX(ctx context.Context) { // SetName(v+v). // }). // Exec(ctx) -func (ocb *OrganizationCreateBulk) OnConflict(opts ...sql.ConflictOption) *OrganizationUpsertBulk { - ocb.conflict = opts +func (_c *OrganizationCreateBulk) OnConflict(opts ...sql.ConflictOption) *OrganizationUpsertBulk { + _c.conflict = opts return &OrganizationUpsertBulk{ - create: ocb, + create: _c, } } @@ -1002,10 +1002,10 @@ func (ocb *OrganizationCreateBulk) OnConflict(opts ...sql.ConflictOption) *Organ // client.Organization.Create(). 
// OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (ocb *OrganizationCreateBulk) OnConflictColumns(columns ...string) *OrganizationUpsertBulk { - ocb.conflict = append(ocb.conflict, sql.ConflictColumns(columns...)) +func (_c *OrganizationCreateBulk) OnConflictColumns(columns ...string) *OrganizationUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &OrganizationUpsertBulk{ - create: ocb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/organization_delete.go b/app/controlplane/pkg/data/ent/organization_delete.go index 96e019a0c..f4a48392e 100644 --- a/app/controlplane/pkg/data/ent/organization_delete.go +++ b/app/controlplane/pkg/data/ent/organization_delete.go @@ -20,56 +20,56 @@ type OrganizationDelete struct { } // Where appends a list predicates to the OrganizationDelete builder. -func (od *OrganizationDelete) Where(ps ...predicate.Organization) *OrganizationDelete { - od.mutation.Where(ps...) - return od +func (_d *OrganizationDelete) Where(ps ...predicate.Organization) *OrganizationDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (od *OrganizationDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, od.sqlExec, od.mutation, od.hooks) +func (_d *OrganizationDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (od *OrganizationDelete) ExecX(ctx context.Context) int { - n, err := od.Exec(ctx) +func (_d *OrganizationDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (od *OrganizationDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *OrganizationDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(organization.Table, sqlgraph.NewFieldSpec(organization.FieldID, field.TypeUUID)) - if ps := od.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, od.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - od.mutation.done = true + _d.mutation.done = true return affected, err } // OrganizationDeleteOne is the builder for deleting a single Organization entity. type OrganizationDeleteOne struct { - od *OrganizationDelete + _d *OrganizationDelete } // Where appends a list predicates to the OrganizationDelete builder. -func (odo *OrganizationDeleteOne) Where(ps ...predicate.Organization) *OrganizationDeleteOne { - odo.od.mutation.Where(ps...) - return odo +func (_d *OrganizationDeleteOne) Where(ps ...predicate.Organization) *OrganizationDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (odo *OrganizationDeleteOne) Exec(ctx context.Context) error { - n, err := odo.od.Exec(ctx) +func (_d *OrganizationDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (odo *OrganizationDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. 
-func (odo *OrganizationDeleteOne) ExecX(ctx context.Context) { - if err := odo.Exec(ctx); err != nil { +func (_d *OrganizationDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/organization_query.go b/app/controlplane/pkg/data/ent/organization_query.go index 75aeb023e..b879ca750 100644 --- a/app/controlplane/pkg/data/ent/organization_query.go +++ b/app/controlplane/pkg/data/ent/organization_query.go @@ -48,44 +48,44 @@ type OrganizationQuery struct { } // Where adds a new predicate for the OrganizationQuery builder. -func (oq *OrganizationQuery) Where(ps ...predicate.Organization) *OrganizationQuery { - oq.predicates = append(oq.predicates, ps...) - return oq +func (_q *OrganizationQuery) Where(ps ...predicate.Organization) *OrganizationQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (oq *OrganizationQuery) Limit(limit int) *OrganizationQuery { - oq.ctx.Limit = &limit - return oq +func (_q *OrganizationQuery) Limit(limit int) *OrganizationQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (oq *OrganizationQuery) Offset(offset int) *OrganizationQuery { - oq.ctx.Offset = &offset - return oq +func (_q *OrganizationQuery) Offset(offset int) *OrganizationQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (oq *OrganizationQuery) Unique(unique bool) *OrganizationQuery { - oq.ctx.Unique = &unique - return oq +func (_q *OrganizationQuery) Unique(unique bool) *OrganizationQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (oq *OrganizationQuery) Order(o ...organization.OrderOption) *OrganizationQuery { - oq.order = append(oq.order, o...) - return oq +func (_q *OrganizationQuery) Order(o ...organization.OrderOption) *OrganizationQuery { + _q.order = append(_q.order, o...) + return _q } // QueryMemberships chains the current query on the "memberships" edge. -func (oq *OrganizationQuery) QueryMemberships() *MembershipQuery { - query := (&MembershipClient{config: oq.config}).Query() +func (_q *OrganizationQuery) QueryMemberships() *MembershipQuery { + query := (&MembershipClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -94,20 +94,20 @@ func (oq *OrganizationQuery) QueryMemberships() *MembershipQuery { sqlgraph.To(membership.Table, membership.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.MembershipsTable, organization.MembershipsColumn), ) - fromU = sqlgraph.SetNeighbors(oq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryWorkflowContracts chains the current query on the "workflow_contracts" edge. 
-func (oq *OrganizationQuery) QueryWorkflowContracts() *WorkflowContractQuery { - query := (&WorkflowContractClient{config: oq.config}).Query() +func (_q *OrganizationQuery) QueryWorkflowContracts() *WorkflowContractQuery { + query := (&WorkflowContractClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -116,20 +116,20 @@ func (oq *OrganizationQuery) QueryWorkflowContracts() *WorkflowContractQuery { sqlgraph.To(workflowcontract.Table, workflowcontract.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.WorkflowContractsTable, organization.WorkflowContractsColumn), ) - fromU = sqlgraph.SetNeighbors(oq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryWorkflows chains the current query on the "workflows" edge. -func (oq *OrganizationQuery) QueryWorkflows() *WorkflowQuery { - query := (&WorkflowClient{config: oq.config}).Query() +func (_q *OrganizationQuery) QueryWorkflows() *WorkflowQuery { + query := (&WorkflowClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -138,20 +138,20 @@ func (oq *OrganizationQuery) QueryWorkflows() *WorkflowQuery { sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.WorkflowsTable, organization.WorkflowsColumn), ) - fromU = sqlgraph.SetNeighbors(oq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryCasBackends chains the current query on the "cas_backends" edge. -func (oq *OrganizationQuery) QueryCasBackends() *CASBackendQuery { - query := (&CASBackendClient{config: oq.config}).Query() +func (_q *OrganizationQuery) QueryCasBackends() *CASBackendQuery { + query := (&CASBackendClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -160,20 +160,20 @@ func (oq *OrganizationQuery) QueryCasBackends() *CASBackendQuery { sqlgraph.To(casbackend.Table, casbackend.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.CasBackendsTable, organization.CasBackendsColumn), ) - fromU = sqlgraph.SetNeighbors(oq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryIntegrations chains the current query on the "integrations" edge. 
-func (oq *OrganizationQuery) QueryIntegrations() *IntegrationQuery { - query := (&IntegrationClient{config: oq.config}).Query() +func (_q *OrganizationQuery) QueryIntegrations() *IntegrationQuery { + query := (&IntegrationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -182,20 +182,20 @@ func (oq *OrganizationQuery) QueryIntegrations() *IntegrationQuery { sqlgraph.To(integration.Table, integration.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.IntegrationsTable, organization.IntegrationsColumn), ) - fromU = sqlgraph.SetNeighbors(oq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryAPITokens chains the current query on the "api_tokens" edge. -func (oq *OrganizationQuery) QueryAPITokens() *APITokenQuery { - query := (&APITokenClient{config: oq.config}).Query() +func (_q *OrganizationQuery) QueryAPITokens() *APITokenQuery { + query := (&APITokenClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -204,20 +204,20 @@ func (oq *OrganizationQuery) QueryAPITokens() *APITokenQuery { sqlgraph.To(apitoken.Table, apitoken.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.APITokensTable, organization.APITokensColumn), ) - fromU = sqlgraph.SetNeighbors(oq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryProjects chains the current query on the "projects" edge. -func (oq *OrganizationQuery) QueryProjects() *ProjectQuery { - query := (&ProjectClient{config: oq.config}).Query() +func (_q *OrganizationQuery) QueryProjects() *ProjectQuery { + query := (&ProjectClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -226,20 +226,20 @@ func (oq *OrganizationQuery) QueryProjects() *ProjectQuery { sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.ProjectsTable, organization.ProjectsColumn), ) - fromU = sqlgraph.SetNeighbors(oq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryGroups chains the current query on the "groups" edge. 
-func (oq *OrganizationQuery) QueryGroups() *GroupQuery { - query := (&GroupClient{config: oq.config}).Query() +func (_q *OrganizationQuery) QueryGroups() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -248,7 +248,7 @@ func (oq *OrganizationQuery) QueryGroups() *GroupQuery { sqlgraph.To(group.Table, group.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, organization.GroupsTable, organization.GroupsColumn), ) - fromU = sqlgraph.SetNeighbors(oq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -256,8 +256,8 @@ func (oq *OrganizationQuery) QueryGroups() *GroupQuery { // First returns the first Organization entity from the query. // Returns a *NotFoundError when no Organization was found. -func (oq *OrganizationQuery) First(ctx context.Context) (*Organization, error) { - nodes, err := oq.Limit(1).All(setContextOp(ctx, oq.ctx, ent.OpQueryFirst)) +func (_q *OrganizationQuery) First(ctx context.Context) (*Organization, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -268,8 +268,8 @@ func (oq *OrganizationQuery) First(ctx context.Context) (*Organization, error) { } // FirstX is like First, but panics if an error occurs. -func (oq *OrganizationQuery) FirstX(ctx context.Context) *Organization { - node, err := oq.First(ctx) +func (_q *OrganizationQuery) FirstX(ctx context.Context) *Organization { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -278,9 +278,9 @@ func (oq *OrganizationQuery) FirstX(ctx context.Context) *Organization { // FirstID returns the first Organization ID from the query. // Returns a *NotFoundError when no Organization ID was found. -func (oq *OrganizationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *OrganizationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = oq.Limit(1).IDs(setContextOp(ctx, oq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -291,8 +291,8 @@ func (oq *OrganizationQuery) FirstID(ctx context.Context) (id uuid.UUID, err err } // FirstIDX is like FirstID, but panics if an error occurs. -func (oq *OrganizationQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := oq.FirstID(ctx) +func (_q *OrganizationQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -302,8 +302,8 @@ func (oq *OrganizationQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single Organization entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one Organization entity is found. // Returns a *NotFoundError when no Organization entities are found. 
-func (oq *OrganizationQuery) Only(ctx context.Context) (*Organization, error) { - nodes, err := oq.Limit(2).All(setContextOp(ctx, oq.ctx, ent.OpQueryOnly)) +func (_q *OrganizationQuery) Only(ctx context.Context) (*Organization, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -318,8 +318,8 @@ func (oq *OrganizationQuery) Only(ctx context.Context) (*Organization, error) { } // OnlyX is like Only, but panics if an error occurs. -func (oq *OrganizationQuery) OnlyX(ctx context.Context) *Organization { - node, err := oq.Only(ctx) +func (_q *OrganizationQuery) OnlyX(ctx context.Context) *Organization { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -329,9 +329,9 @@ func (oq *OrganizationQuery) OnlyX(ctx context.Context) *Organization { // OnlyID is like Only, but returns the only Organization ID in the query. // Returns a *NotSingularError when more than one Organization ID is found. // Returns a *NotFoundError when no entities are found. -func (oq *OrganizationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *OrganizationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = oq.Limit(2).IDs(setContextOp(ctx, oq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -346,8 +346,8 @@ func (oq *OrganizationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err erro } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (oq *OrganizationQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := oq.OnlyID(ctx) +func (_q *OrganizationQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -355,18 +355,18 @@ func (oq *OrganizationQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Organizations. -func (oq *OrganizationQuery) All(ctx context.Context) ([]*Organization, error) { - ctx = setContextOp(ctx, oq.ctx, ent.OpQueryAll) - if err := oq.prepareQuery(ctx); err != nil { +func (_q *OrganizationQuery) All(ctx context.Context) ([]*Organization, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*Organization, *OrganizationQuery]() - return withInterceptors[[]*Organization](ctx, oq, qr, oq.inters) + return withInterceptors[[]*Organization](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (oq *OrganizationQuery) AllX(ctx context.Context) []*Organization { - nodes, err := oq.All(ctx) +func (_q *OrganizationQuery) AllX(ctx context.Context) []*Organization { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -374,20 +374,20 @@ func (oq *OrganizationQuery) AllX(ctx context.Context) []*Organization { } // IDs executes the query and returns a list of Organization IDs. 
-func (oq *OrganizationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if oq.ctx.Unique == nil && oq.path != nil { - oq.Unique(true) +func (_q *OrganizationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, oq.ctx, ent.OpQueryIDs) - if err = oq.Select(organization.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(organization.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (oq *OrganizationQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := oq.IDs(ctx) +func (_q *OrganizationQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -395,17 +395,17 @@ func (oq *OrganizationQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (oq *OrganizationQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, oq.ctx, ent.OpQueryCount) - if err := oq.prepareQuery(ctx); err != nil { +func (_q *OrganizationQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, oq, querierCount[*OrganizationQuery](), oq.inters) + return withInterceptors[int](ctx, _q, querierCount[*OrganizationQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (oq *OrganizationQuery) CountX(ctx context.Context) int { - count, err := oq.Count(ctx) +func (_q *OrganizationQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -413,9 +413,9 @@ func (oq *OrganizationQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (oq *OrganizationQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, oq.ctx, ent.OpQueryExist) - switch _, err := oq.FirstID(ctx); { +func (_q *OrganizationQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -426,8 +426,8 @@ func (oq *OrganizationQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (oq *OrganizationQuery) ExistX(ctx context.Context) bool { - exist, err := oq.Exist(ctx) +func (_q *OrganizationQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -436,117 +436,117 @@ func (oq *OrganizationQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the OrganizationQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
-func (oq *OrganizationQuery) Clone() *OrganizationQuery { - if oq == nil { +func (_q *OrganizationQuery) Clone() *OrganizationQuery { + if _q == nil { return nil } return &OrganizationQuery{ - config: oq.config, - ctx: oq.ctx.Clone(), - order: append([]organization.OrderOption{}, oq.order...), - inters: append([]Interceptor{}, oq.inters...), - predicates: append([]predicate.Organization{}, oq.predicates...), - withMemberships: oq.withMemberships.Clone(), - withWorkflowContracts: oq.withWorkflowContracts.Clone(), - withWorkflows: oq.withWorkflows.Clone(), - withCasBackends: oq.withCasBackends.Clone(), - withIntegrations: oq.withIntegrations.Clone(), - withAPITokens: oq.withAPITokens.Clone(), - withProjects: oq.withProjects.Clone(), - withGroups: oq.withGroups.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]organization.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Organization{}, _q.predicates...), + withMemberships: _q.withMemberships.Clone(), + withWorkflowContracts: _q.withWorkflowContracts.Clone(), + withWorkflows: _q.withWorkflows.Clone(), + withCasBackends: _q.withCasBackends.Clone(), + withIntegrations: _q.withIntegrations.Clone(), + withAPITokens: _q.withAPITokens.Clone(), + withProjects: _q.withProjects.Clone(), + withGroups: _q.withGroups.Clone(), // clone intermediate query. - sql: oq.sql.Clone(), - path: oq.path, - modifiers: append([]func(*sql.Selector){}, oq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithMemberships tells the query-builder to eager-load the nodes that are connected to // the "memberships" edge. The optional arguments are used to configure the query builder of the edge. -func (oq *OrganizationQuery) WithMemberships(opts ...func(*MembershipQuery)) *OrganizationQuery { - query := (&MembershipClient{config: oq.config}).Query() +func (_q *OrganizationQuery) WithMemberships(opts ...func(*MembershipQuery)) *OrganizationQuery { + query := (&MembershipClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oq.withMemberships = query - return oq + _q.withMemberships = query + return _q } // WithWorkflowContracts tells the query-builder to eager-load the nodes that are connected to // the "workflow_contracts" edge. The optional arguments are used to configure the query builder of the edge. -func (oq *OrganizationQuery) WithWorkflowContracts(opts ...func(*WorkflowContractQuery)) *OrganizationQuery { - query := (&WorkflowContractClient{config: oq.config}).Query() +func (_q *OrganizationQuery) WithWorkflowContracts(opts ...func(*WorkflowContractQuery)) *OrganizationQuery { + query := (&WorkflowContractClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oq.withWorkflowContracts = query - return oq + _q.withWorkflowContracts = query + return _q } // WithWorkflows tells the query-builder to eager-load the nodes that are connected to // the "workflows" edge. The optional arguments are used to configure the query builder of the edge. 
-func (oq *OrganizationQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *OrganizationQuery { - query := (&WorkflowClient{config: oq.config}).Query() +func (_q *OrganizationQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *OrganizationQuery { + query := (&WorkflowClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oq.withWorkflows = query - return oq + _q.withWorkflows = query + return _q } // WithCasBackends tells the query-builder to eager-load the nodes that are connected to // the "cas_backends" edge. The optional arguments are used to configure the query builder of the edge. -func (oq *OrganizationQuery) WithCasBackends(opts ...func(*CASBackendQuery)) *OrganizationQuery { - query := (&CASBackendClient{config: oq.config}).Query() +func (_q *OrganizationQuery) WithCasBackends(opts ...func(*CASBackendQuery)) *OrganizationQuery { + query := (&CASBackendClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oq.withCasBackends = query - return oq + _q.withCasBackends = query + return _q } // WithIntegrations tells the query-builder to eager-load the nodes that are connected to // the "integrations" edge. The optional arguments are used to configure the query builder of the edge. -func (oq *OrganizationQuery) WithIntegrations(opts ...func(*IntegrationQuery)) *OrganizationQuery { - query := (&IntegrationClient{config: oq.config}).Query() +func (_q *OrganizationQuery) WithIntegrations(opts ...func(*IntegrationQuery)) *OrganizationQuery { + query := (&IntegrationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oq.withIntegrations = query - return oq + _q.withIntegrations = query + return _q } // WithAPITokens tells the query-builder to eager-load the nodes that are connected to // the "api_tokens" edge. The optional arguments are used to configure the query builder of the edge. -func (oq *OrganizationQuery) WithAPITokens(opts ...func(*APITokenQuery)) *OrganizationQuery { - query := (&APITokenClient{config: oq.config}).Query() +func (_q *OrganizationQuery) WithAPITokens(opts ...func(*APITokenQuery)) *OrganizationQuery { + query := (&APITokenClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oq.withAPITokens = query - return oq + _q.withAPITokens = query + return _q } // WithProjects tells the query-builder to eager-load the nodes that are connected to // the "projects" edge. The optional arguments are used to configure the query builder of the edge. -func (oq *OrganizationQuery) WithProjects(opts ...func(*ProjectQuery)) *OrganizationQuery { - query := (&ProjectClient{config: oq.config}).Query() +func (_q *OrganizationQuery) WithProjects(opts ...func(*ProjectQuery)) *OrganizationQuery { + query := (&ProjectClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oq.withProjects = query - return oq + _q.withProjects = query + return _q } // WithGroups tells the query-builder to eager-load the nodes that are connected to // the "groups" edge. The optional arguments are used to configure the query builder of the edge. -func (oq *OrganizationQuery) WithGroups(opts ...func(*GroupQuery)) *OrganizationQuery { - query := (&GroupClient{config: oq.config}).Query() +func (_q *OrganizationQuery) WithGroups(opts ...func(*GroupQuery)) *OrganizationQuery { + query := (&GroupClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oq.withGroups = query - return oq + _q.withGroups = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. 
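The hunks above only rename the generated receiver (oq -> _q); the chainable OrganizationQuery API itself is unchanged. As a hedged reference while reviewing, here is a minimal usage sketch of the eager-loading methods touched in this file — the client wiring, context and logging are illustrative assumptions, not code from this change:

package example

import (
	"context"
	"log"

	"github.com/chainloop-dev/chainloop/app/controlplane/pkg/data/ent"
)

// listOrganizations eager-loads two of the edges whose With* builders appear above.
// Sketch only: obtaining the *ent.Client is out of scope for this diff.
func listOrganizations(ctx context.Context, client *ent.Client) error {
	orgs, err := client.Organization.Query().
		WithWorkflows(). // populates org.Edges.Workflows
		WithMemberships(func(q *ent.MembershipQuery) {
			q.Limit(10) // the optional funcs configure the edge query
		}).
		All(ctx)
	if err != nil {
		return err
	}
	for _, org := range orgs {
		log.Printf("org %s: %d workflows, %d memberships",
			org.Name, len(org.Edges.Workflows), len(org.Edges.Memberships))
	}
	return nil
}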
@@ -563,10 +563,10 @@ func (oq *OrganizationQuery) WithGroups(opts ...func(*GroupQuery)) *Organization // GroupBy(organization.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (oq *OrganizationQuery) GroupBy(field string, fields ...string) *OrganizationGroupBy { - oq.ctx.Fields = append([]string{field}, fields...) - grbuild := &OrganizationGroupBy{build: oq} - grbuild.flds = &oq.ctx.Fields +func (_q *OrganizationQuery) GroupBy(field string, fields ...string) *OrganizationGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &OrganizationGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = organization.Label grbuild.scan = grbuild.Scan return grbuild @@ -584,90 +584,90 @@ func (oq *OrganizationQuery) GroupBy(field string, fields ...string) *Organizati // client.Organization.Query(). // Select(organization.FieldName). // Scan(ctx, &v) -func (oq *OrganizationQuery) Select(fields ...string) *OrganizationSelect { - oq.ctx.Fields = append(oq.ctx.Fields, fields...) - sbuild := &OrganizationSelect{OrganizationQuery: oq} +func (_q *OrganizationQuery) Select(fields ...string) *OrganizationSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &OrganizationSelect{OrganizationQuery: _q} sbuild.label = organization.Label - sbuild.flds, sbuild.scan = &oq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a OrganizationSelect configured with the given aggregations. -func (oq *OrganizationQuery) Aggregate(fns ...AggregateFunc) *OrganizationSelect { - return oq.Select().Aggregate(fns...) +func (_q *OrganizationQuery) Aggregate(fns ...AggregateFunc) *OrganizationSelect { + return _q.Select().Aggregate(fns...) } -func (oq *OrganizationQuery) prepareQuery(ctx context.Context) error { - for _, inter := range oq.inters { +func (_q *OrganizationQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, oq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range oq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !organization.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if oq.path != nil { - prev, err := oq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - oq.sql = prev + _q.sql = prev } return nil } -func (oq *OrganizationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Organization, error) { +func (_q *OrganizationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Organization, error) { var ( nodes = []*Organization{} - _spec = oq.querySpec() + _spec = _q.querySpec() loadedTypes = [8]bool{ - oq.withMemberships != nil, - oq.withWorkflowContracts != nil, - oq.withWorkflows != nil, - oq.withCasBackends != nil, - oq.withIntegrations != nil, - oq.withAPITokens != nil, - oq.withProjects != nil, - oq.withGroups != nil, + _q.withMemberships != nil, + _q.withWorkflowContracts != nil, + _q.withWorkflows != nil, + _q.withCasBackends != nil, + _q.withIntegrations != nil, + _q.withAPITokens != nil, + _q.withProjects != nil, + _q.withGroups != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*Organization).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := 
&Organization{config: oq.config} + node := &Organization{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(oq.modifiers) > 0 { - _spec.Modifiers = oq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, oq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := oq.withMemberships; query != nil { - if err := oq.loadMemberships(ctx, query, nodes, + if query := _q.withMemberships; query != nil { + if err := _q.loadMemberships(ctx, query, nodes, func(n *Organization) { n.Edges.Memberships = []*Membership{} }, func(n *Organization, e *Membership) { n.Edges.Memberships = append(n.Edges.Memberships, e) }); err != nil { return nil, err } } - if query := oq.withWorkflowContracts; query != nil { - if err := oq.loadWorkflowContracts(ctx, query, nodes, + if query := _q.withWorkflowContracts; query != nil { + if err := _q.loadWorkflowContracts(ctx, query, nodes, func(n *Organization) { n.Edges.WorkflowContracts = []*WorkflowContract{} }, func(n *Organization, e *WorkflowContract) { n.Edges.WorkflowContracts = append(n.Edges.WorkflowContracts, e) @@ -675,43 +675,43 @@ func (oq *OrganizationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([] return nil, err } } - if query := oq.withWorkflows; query != nil { - if err := oq.loadWorkflows(ctx, query, nodes, + if query := _q.withWorkflows; query != nil { + if err := _q.loadWorkflows(ctx, query, nodes, func(n *Organization) { n.Edges.Workflows = []*Workflow{} }, func(n *Organization, e *Workflow) { n.Edges.Workflows = append(n.Edges.Workflows, e) }); err != nil { return nil, err } } - if query := oq.withCasBackends; query != nil { - if err := oq.loadCasBackends(ctx, query, nodes, + if query := _q.withCasBackends; query != nil { + if err := _q.loadCasBackends(ctx, query, nodes, func(n *Organization) { n.Edges.CasBackends = []*CASBackend{} }, func(n *Organization, e *CASBackend) { n.Edges.CasBackends = append(n.Edges.CasBackends, e) }); err != nil { return nil, err } } - if query := oq.withIntegrations; query != nil { - if err := oq.loadIntegrations(ctx, query, nodes, + if query := _q.withIntegrations; query != nil { + if err := _q.loadIntegrations(ctx, query, nodes, func(n *Organization) { n.Edges.Integrations = []*Integration{} }, func(n *Organization, e *Integration) { n.Edges.Integrations = append(n.Edges.Integrations, e) }); err != nil { return nil, err } } - if query := oq.withAPITokens; query != nil { - if err := oq.loadAPITokens(ctx, query, nodes, + if query := _q.withAPITokens; query != nil { + if err := _q.loadAPITokens(ctx, query, nodes, func(n *Organization) { n.Edges.APITokens = []*APIToken{} }, func(n *Organization, e *APIToken) { n.Edges.APITokens = append(n.Edges.APITokens, e) }); err != nil { return nil, err } } - if query := oq.withProjects; query != nil { - if err := oq.loadProjects(ctx, query, nodes, + if query := _q.withProjects; query != nil { + if err := _q.loadProjects(ctx, query, nodes, func(n *Organization) { n.Edges.Projects = []*Project{} }, func(n *Organization, e *Project) { n.Edges.Projects = append(n.Edges.Projects, e) }); err != nil { return nil, err } } - if query := oq.withGroups; query != nil { - if err := oq.loadGroups(ctx, query, nodes, + if query := _q.withGroups; query != nil { + if err := _q.loadGroups(ctx, query, 
nodes, func(n *Organization) { n.Edges.Groups = []*Group{} }, func(n *Organization, e *Group) { n.Edges.Groups = append(n.Edges.Groups, e) }); err != nil { return nil, err @@ -720,7 +720,7 @@ func (oq *OrganizationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([] return nodes, nil } -func (oq *OrganizationQuery) loadMemberships(ctx context.Context, query *MembershipQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Membership)) error { +func (_q *OrganizationQuery) loadMemberships(ctx context.Context, query *MembershipQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Membership)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Organization) for i := range nodes { @@ -751,7 +751,7 @@ func (oq *OrganizationQuery) loadMemberships(ctx context.Context, query *Members } return nil } -func (oq *OrganizationQuery) loadWorkflowContracts(ctx context.Context, query *WorkflowContractQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *WorkflowContract)) error { +func (_q *OrganizationQuery) loadWorkflowContracts(ctx context.Context, query *WorkflowContractQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *WorkflowContract)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Organization) for i := range nodes { @@ -782,7 +782,7 @@ func (oq *OrganizationQuery) loadWorkflowContracts(ctx context.Context, query *W } return nil } -func (oq *OrganizationQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Workflow)) error { +func (_q *OrganizationQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Workflow)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Organization) for i := range nodes { @@ -813,7 +813,7 @@ func (oq *OrganizationQuery) loadWorkflows(ctx context.Context, query *WorkflowQ } return nil } -func (oq *OrganizationQuery) loadCasBackends(ctx context.Context, query *CASBackendQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *CASBackend)) error { +func (_q *OrganizationQuery) loadCasBackends(ctx context.Context, query *CASBackendQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *CASBackend)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Organization) for i := range nodes { @@ -844,7 +844,7 @@ func (oq *OrganizationQuery) loadCasBackends(ctx context.Context, query *CASBack } return nil } -func (oq *OrganizationQuery) loadIntegrations(ctx context.Context, query *IntegrationQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Integration)) error { +func (_q *OrganizationQuery) loadIntegrations(ctx context.Context, query *IntegrationQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Integration)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Organization) for i := range nodes { @@ -875,7 +875,7 @@ func (oq *OrganizationQuery) loadIntegrations(ctx context.Context, query *Integr } return nil } -func (oq *OrganizationQuery) loadAPITokens(ctx context.Context, query *APITokenQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *APIToken)) error { +func (_q 
*OrganizationQuery) loadAPITokens(ctx context.Context, query *APITokenQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *APIToken)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Organization) for i := range nodes { @@ -905,7 +905,7 @@ func (oq *OrganizationQuery) loadAPITokens(ctx context.Context, query *APITokenQ } return nil } -func (oq *OrganizationQuery) loadProjects(ctx context.Context, query *ProjectQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Project)) error { +func (_q *OrganizationQuery) loadProjects(ctx context.Context, query *ProjectQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Project)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Organization) for i := range nodes { @@ -935,7 +935,7 @@ func (oq *OrganizationQuery) loadProjects(ctx context.Context, query *ProjectQue } return nil } -func (oq *OrganizationQuery) loadGroups(ctx context.Context, query *GroupQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Group)) error { +func (_q *OrganizationQuery) loadGroups(ctx context.Context, query *GroupQuery, nodes []*Organization, init func(*Organization), assign func(*Organization, *Group)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Organization) for i := range nodes { @@ -966,27 +966,27 @@ func (oq *OrganizationQuery) loadGroups(ctx context.Context, query *GroupQuery, return nil } -func (oq *OrganizationQuery) sqlCount(ctx context.Context) (int, error) { - _spec := oq.querySpec() - if len(oq.modifiers) > 0 { - _spec.Modifiers = oq.modifiers +func (_q *OrganizationQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = oq.ctx.Fields - if len(oq.ctx.Fields) > 0 { - _spec.Unique = oq.ctx.Unique != nil && *oq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, oq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (oq *OrganizationQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *OrganizationQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(organization.Table, organization.Columns, sqlgraph.NewFieldSpec(organization.FieldID, field.TypeUUID)) - _spec.From = oq.sql - if unique := oq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if oq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := oq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, organization.FieldID) for i := range fields { @@ -995,20 +995,20 @@ func (oq *OrganizationQuery) querySpec() *sqlgraph.QuerySpec { } } } - if ps := oq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := oq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := oq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := oq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { 
_spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -1018,36 +1018,36 @@ func (oq *OrganizationQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (oq *OrganizationQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(oq.driver.Dialect()) +func (_q *OrganizationQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(organization.Table) - columns := oq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = organization.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if oq.sql != nil { - selector = oq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if oq.ctx.Unique != nil && *oq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range oq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range oq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range oq.order { + for _, p := range _q.order { p(selector) } - if offset := oq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := oq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -1056,33 +1056,33 @@ func (oq *OrganizationQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (oq *OrganizationQuery) ForUpdate(opts ...sql.LockOption) *OrganizationQuery { - if oq.driver.Dialect() == dialect.Postgres { - oq.Unique(false) +func (_q *OrganizationQuery) ForUpdate(opts ...sql.LockOption) *OrganizationQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - oq.modifiers = append(oq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return oq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (oq *OrganizationQuery) ForShare(opts ...sql.LockOption) *OrganizationQuery { - if oq.driver.Dialect() == dialect.Postgres { - oq.Unique(false) +func (_q *OrganizationQuery) ForShare(opts ...sql.LockOption) *OrganizationQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - oq.modifiers = append(oq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return oq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (oq *OrganizationQuery) Modify(modifiers ...func(s *sql.Selector)) *OrganizationSelect { - oq.modifiers = append(oq.modifiers, modifiers...) - return oq.Select() +func (_q *OrganizationQuery) Modify(modifiers ...func(s *sql.Selector)) *OrganizationSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // OrganizationGroupBy is the group-by builder for Organization entities. 
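ForUpdate, ForShare and Modify above likewise change only the receiver name. For context, a hedged sketch of how the generated row-locking modifier is typically combined with a transaction — the transaction wiring and the rollback helper are assumptions for illustration, not part of this change:

package example

import (
	"context"
	"fmt"

	"github.com/chainloop-dev/chainloop/app/controlplane/pkg/data/ent"
	"github.com/chainloop-dev/chainloop/app/controlplane/pkg/data/ent/organization"
	"github.com/google/uuid"
)

// renameOrganizationLocked selects the row FOR UPDATE inside a transaction so that
// concurrent writers block until the transaction commits or rolls back.
func renameOrganizationLocked(ctx context.Context, client *ent.Client, id uuid.UUID, newName string) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	org, err := tx.Organization.Query().
		Where(organization.ID(id)).
		ForUpdate(). // generated locking modifier renamed above
		Only(ctx)
	if err != nil {
		return rollback(tx, err)
	}
	if _, err := tx.Organization.UpdateOneID(org.ID).SetName(newName).Save(ctx); err != nil {
		return rollback(tx, err)
	}
	return tx.Commit()
}

// rollback is a small illustrative helper that preserves the original error.
func rollback(tx *ent.Tx, err error) error {
	if rerr := tx.Rollback(); rerr != nil {
		err = fmt.Errorf("%w: rolling back transaction: %v", err, rerr)
	}
	return err
}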
@@ -1092,41 +1092,41 @@ type OrganizationGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (ogb *OrganizationGroupBy) Aggregate(fns ...AggregateFunc) *OrganizationGroupBy { - ogb.fns = append(ogb.fns, fns...) - return ogb +func (_g *OrganizationGroupBy) Aggregate(fns ...AggregateFunc) *OrganizationGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (ogb *OrganizationGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ogb.build.ctx, ent.OpQueryGroupBy) - if err := ogb.build.prepareQuery(ctx); err != nil { +func (_g *OrganizationGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*OrganizationQuery, *OrganizationGroupBy](ctx, ogb.build, ogb, ogb.build.inters, v) + return scanWithInterceptors[*OrganizationQuery, *OrganizationGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (ogb *OrganizationGroupBy) sqlScan(ctx context.Context, root *OrganizationQuery, v any) error { +func (_g *OrganizationGroupBy) sqlScan(ctx context.Context, root *OrganizationQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(ogb.fns)) - for _, fn := range ogb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*ogb.flds)+len(ogb.fns)) - for _, f := range *ogb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*ogb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := ogb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -1140,27 +1140,27 @@ type OrganizationSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (os *OrganizationSelect) Aggregate(fns ...AggregateFunc) *OrganizationSelect { - os.fns = append(os.fns, fns...) - return os +func (_s *OrganizationSelect) Aggregate(fns ...AggregateFunc) *OrganizationSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. 
-func (os *OrganizationSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, os.ctx, ent.OpQuerySelect) - if err := os.prepareQuery(ctx); err != nil { +func (_s *OrganizationSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*OrganizationQuery, *OrganizationSelect](ctx, os.OrganizationQuery, os, os.inters, v) + return scanWithInterceptors[*OrganizationQuery, *OrganizationSelect](ctx, _s.OrganizationQuery, _s, _s.inters, v) } -func (os *OrganizationSelect) sqlScan(ctx context.Context, root *OrganizationQuery, v any) error { +func (_s *OrganizationSelect) sqlScan(ctx context.Context, root *OrganizationQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(os.fns)) - for _, fn := range os.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*os.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -1168,7 +1168,7 @@ func (os *OrganizationSelect) sqlScan(ctx context.Context, root *OrganizationQue } rows := &sql.Rows{} query, args := selector.Query() - if err := os.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -1176,7 +1176,7 @@ func (os *OrganizationSelect) sqlScan(ctx context.Context, root *OrganizationQue } // Modify adds a query modifier for attaching custom logic to queries. -func (os *OrganizationSelect) Modify(modifiers ...func(s *sql.Selector)) *OrganizationSelect { - os.modifiers = append(os.modifiers, modifiers...) - return os +func (_s *OrganizationSelect) Modify(modifiers ...func(s *sql.Selector)) *OrganizationSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/organization_update.go b/app/controlplane/pkg/data/ent/organization_update.go index 32ae1214c..0f3ba2d96 100644 --- a/app/controlplane/pkg/data/ent/organization_update.go +++ b/app/controlplane/pkg/data/ent/organization_update.go @@ -34,420 +34,420 @@ type OrganizationUpdate struct { } // Where appends a list predicates to the OrganizationUpdate builder. -func (ou *OrganizationUpdate) Where(ps ...predicate.Organization) *OrganizationUpdate { - ou.mutation.Where(ps...) - return ou +func (_u *OrganizationUpdate) Where(ps ...predicate.Organization) *OrganizationUpdate { + _u.mutation.Where(ps...) + return _u } // SetName sets the "name" field. -func (ou *OrganizationUpdate) SetName(s string) *OrganizationUpdate { - ou.mutation.SetName(s) - return ou +func (_u *OrganizationUpdate) SetName(v string) *OrganizationUpdate { + _u.mutation.SetName(v) + return _u } // SetNillableName sets the "name" field if the given value is not nil. -func (ou *OrganizationUpdate) SetNillableName(s *string) *OrganizationUpdate { - if s != nil { - ou.SetName(*s) +func (_u *OrganizationUpdate) SetNillableName(v *string) *OrganizationUpdate { + if v != nil { + _u.SetName(*v) } - return ou + return _u } // SetUpdatedAt sets the "updated_at" field. 
-func (ou *OrganizationUpdate) SetUpdatedAt(t time.Time) *OrganizationUpdate { - ou.mutation.SetUpdatedAt(t) - return ou +func (_u *OrganizationUpdate) SetUpdatedAt(v time.Time) *OrganizationUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (ou *OrganizationUpdate) SetNillableUpdatedAt(t *time.Time) *OrganizationUpdate { - if t != nil { - ou.SetUpdatedAt(*t) +func (_u *OrganizationUpdate) SetNillableUpdatedAt(v *time.Time) *OrganizationUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return ou + return _u } // SetDeletedAt sets the "deleted_at" field. -func (ou *OrganizationUpdate) SetDeletedAt(t time.Time) *OrganizationUpdate { - ou.mutation.SetDeletedAt(t) - return ou +func (_u *OrganizationUpdate) SetDeletedAt(v time.Time) *OrganizationUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (ou *OrganizationUpdate) SetNillableDeletedAt(t *time.Time) *OrganizationUpdate { - if t != nil { - ou.SetDeletedAt(*t) +func (_u *OrganizationUpdate) SetNillableDeletedAt(v *time.Time) *OrganizationUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return ou + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (ou *OrganizationUpdate) ClearDeletedAt() *OrganizationUpdate { - ou.mutation.ClearDeletedAt() - return ou +func (_u *OrganizationUpdate) ClearDeletedAt() *OrganizationUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetBlockOnPolicyViolation sets the "block_on_policy_violation" field. -func (ou *OrganizationUpdate) SetBlockOnPolicyViolation(b bool) *OrganizationUpdate { - ou.mutation.SetBlockOnPolicyViolation(b) - return ou +func (_u *OrganizationUpdate) SetBlockOnPolicyViolation(v bool) *OrganizationUpdate { + _u.mutation.SetBlockOnPolicyViolation(v) + return _u } // SetNillableBlockOnPolicyViolation sets the "block_on_policy_violation" field if the given value is not nil. -func (ou *OrganizationUpdate) SetNillableBlockOnPolicyViolation(b *bool) *OrganizationUpdate { - if b != nil { - ou.SetBlockOnPolicyViolation(*b) +func (_u *OrganizationUpdate) SetNillableBlockOnPolicyViolation(v *bool) *OrganizationUpdate { + if v != nil { + _u.SetBlockOnPolicyViolation(*v) } - return ou + return _u } // SetPoliciesAllowedHostnames sets the "policies_allowed_hostnames" field. -func (ou *OrganizationUpdate) SetPoliciesAllowedHostnames(s []string) *OrganizationUpdate { - ou.mutation.SetPoliciesAllowedHostnames(s) - return ou +func (_u *OrganizationUpdate) SetPoliciesAllowedHostnames(v []string) *OrganizationUpdate { + _u.mutation.SetPoliciesAllowedHostnames(v) + return _u } -// AppendPoliciesAllowedHostnames appends s to the "policies_allowed_hostnames" field. -func (ou *OrganizationUpdate) AppendPoliciesAllowedHostnames(s []string) *OrganizationUpdate { - ou.mutation.AppendPoliciesAllowedHostnames(s) - return ou +// AppendPoliciesAllowedHostnames appends value to the "policies_allowed_hostnames" field. +func (_u *OrganizationUpdate) AppendPoliciesAllowedHostnames(v []string) *OrganizationUpdate { + _u.mutation.AppendPoliciesAllowedHostnames(v) + return _u } // ClearPoliciesAllowedHostnames clears the value of the "policies_allowed_hostnames" field. 
-func (ou *OrganizationUpdate) ClearPoliciesAllowedHostnames() *OrganizationUpdate { - ou.mutation.ClearPoliciesAllowedHostnames() - return ou +func (_u *OrganizationUpdate) ClearPoliciesAllowedHostnames() *OrganizationUpdate { + _u.mutation.ClearPoliciesAllowedHostnames() + return _u } // SetPreventImplicitWorkflowCreation sets the "prevent_implicit_workflow_creation" field. -func (ou *OrganizationUpdate) SetPreventImplicitWorkflowCreation(b bool) *OrganizationUpdate { - ou.mutation.SetPreventImplicitWorkflowCreation(b) - return ou +func (_u *OrganizationUpdate) SetPreventImplicitWorkflowCreation(v bool) *OrganizationUpdate { + _u.mutation.SetPreventImplicitWorkflowCreation(v) + return _u } // SetNillablePreventImplicitWorkflowCreation sets the "prevent_implicit_workflow_creation" field if the given value is not nil. -func (ou *OrganizationUpdate) SetNillablePreventImplicitWorkflowCreation(b *bool) *OrganizationUpdate { - if b != nil { - ou.SetPreventImplicitWorkflowCreation(*b) +func (_u *OrganizationUpdate) SetNillablePreventImplicitWorkflowCreation(v *bool) *OrganizationUpdate { + if v != nil { + _u.SetPreventImplicitWorkflowCreation(*v) } - return ou + return _u } // SetRestrictContractCreationToOrgAdmins sets the "restrict_contract_creation_to_org_admins" field. -func (ou *OrganizationUpdate) SetRestrictContractCreationToOrgAdmins(b bool) *OrganizationUpdate { - ou.mutation.SetRestrictContractCreationToOrgAdmins(b) - return ou +func (_u *OrganizationUpdate) SetRestrictContractCreationToOrgAdmins(v bool) *OrganizationUpdate { + _u.mutation.SetRestrictContractCreationToOrgAdmins(v) + return _u } // SetNillableRestrictContractCreationToOrgAdmins sets the "restrict_contract_creation_to_org_admins" field if the given value is not nil. -func (ou *OrganizationUpdate) SetNillableRestrictContractCreationToOrgAdmins(b *bool) *OrganizationUpdate { - if b != nil { - ou.SetRestrictContractCreationToOrgAdmins(*b) +func (_u *OrganizationUpdate) SetNillableRestrictContractCreationToOrgAdmins(v *bool) *OrganizationUpdate { + if v != nil { + _u.SetRestrictContractCreationToOrgAdmins(*v) } - return ou + return _u } // AddMembershipIDs adds the "memberships" edge to the Membership entity by IDs. -func (ou *OrganizationUpdate) AddMembershipIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.AddMembershipIDs(ids...) - return ou +func (_u *OrganizationUpdate) AddMembershipIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.AddMembershipIDs(ids...) + return _u } // AddMemberships adds the "memberships" edges to the Membership entity. -func (ou *OrganizationUpdate) AddMemberships(m ...*Membership) *OrganizationUpdate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *OrganizationUpdate) AddMemberships(v ...*Membership) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.AddMembershipIDs(ids...) + return _u.AddMembershipIDs(ids...) } // AddWorkflowContractIDs adds the "workflow_contracts" edge to the WorkflowContract entity by IDs. -func (ou *OrganizationUpdate) AddWorkflowContractIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.AddWorkflowContractIDs(ids...) - return ou +func (_u *OrganizationUpdate) AddWorkflowContractIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.AddWorkflowContractIDs(ids...) + return _u } // AddWorkflowContracts adds the "workflow_contracts" edges to the WorkflowContract entity. 
-func (ou *OrganizationUpdate) AddWorkflowContracts(w ...*WorkflowContract) *OrganizationUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *OrganizationUpdate) AddWorkflowContracts(v ...*WorkflowContract) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.AddWorkflowContractIDs(ids...) + return _u.AddWorkflowContractIDs(ids...) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (ou *OrganizationUpdate) AddWorkflowIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.AddWorkflowIDs(ids...) - return ou +func (_u *OrganizationUpdate) AddWorkflowIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.AddWorkflowIDs(ids...) + return _u } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (ou *OrganizationUpdate) AddWorkflows(w ...*Workflow) *OrganizationUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *OrganizationUpdate) AddWorkflows(v ...*Workflow) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.AddWorkflowIDs(ids...) + return _u.AddWorkflowIDs(ids...) } // AddCasBackendIDs adds the "cas_backends" edge to the CASBackend entity by IDs. -func (ou *OrganizationUpdate) AddCasBackendIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.AddCasBackendIDs(ids...) - return ou +func (_u *OrganizationUpdate) AddCasBackendIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.AddCasBackendIDs(ids...) + return _u } // AddCasBackends adds the "cas_backends" edges to the CASBackend entity. -func (ou *OrganizationUpdate) AddCasBackends(c ...*CASBackend) *OrganizationUpdate { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_u *OrganizationUpdate) AddCasBackends(v ...*CASBackend) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.AddCasBackendIDs(ids...) + return _u.AddCasBackendIDs(ids...) } // AddIntegrationIDs adds the "integrations" edge to the Integration entity by IDs. -func (ou *OrganizationUpdate) AddIntegrationIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.AddIntegrationIDs(ids...) - return ou +func (_u *OrganizationUpdate) AddIntegrationIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.AddIntegrationIDs(ids...) + return _u } // AddIntegrations adds the "integrations" edges to the Integration entity. -func (ou *OrganizationUpdate) AddIntegrations(i ...*Integration) *OrganizationUpdate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *OrganizationUpdate) AddIntegrations(v ...*Integration) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.AddIntegrationIDs(ids...) + return _u.AddIntegrationIDs(ids...) } // AddAPITokenIDs adds the "api_tokens" edge to the APIToken entity by IDs. -func (ou *OrganizationUpdate) AddAPITokenIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.AddAPITokenIDs(ids...) - return ou +func (_u *OrganizationUpdate) AddAPITokenIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.AddAPITokenIDs(ids...) + return _u } // AddAPITokens adds the "api_tokens" edges to the APIToken entity. 
-func (ou *OrganizationUpdate) AddAPITokens(a ...*APIToken) *OrganizationUpdate { - ids := make([]uuid.UUID, len(a)) - for i := range a { - ids[i] = a[i].ID +func (_u *OrganizationUpdate) AddAPITokens(v ...*APIToken) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.AddAPITokenIDs(ids...) + return _u.AddAPITokenIDs(ids...) } // AddProjectIDs adds the "projects" edge to the Project entity by IDs. -func (ou *OrganizationUpdate) AddProjectIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.AddProjectIDs(ids...) - return ou +func (_u *OrganizationUpdate) AddProjectIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.AddProjectIDs(ids...) + return _u } // AddProjects adds the "projects" edges to the Project entity. -func (ou *OrganizationUpdate) AddProjects(p ...*Project) *OrganizationUpdate { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_u *OrganizationUpdate) AddProjects(v ...*Project) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.AddProjectIDs(ids...) + return _u.AddProjectIDs(ids...) } // AddGroupIDs adds the "groups" edge to the Group entity by IDs. -func (ou *OrganizationUpdate) AddGroupIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.AddGroupIDs(ids...) - return ou +func (_u *OrganizationUpdate) AddGroupIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.AddGroupIDs(ids...) + return _u } // AddGroups adds the "groups" edges to the Group entity. -func (ou *OrganizationUpdate) AddGroups(g ...*Group) *OrganizationUpdate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *OrganizationUpdate) AddGroups(v ...*Group) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.AddGroupIDs(ids...) + return _u.AddGroupIDs(ids...) } // Mutation returns the OrganizationMutation object of the builder. -func (ou *OrganizationUpdate) Mutation() *OrganizationMutation { - return ou.mutation +func (_u *OrganizationUpdate) Mutation() *OrganizationMutation { + return _u.mutation } // ClearMemberships clears all "memberships" edges to the Membership entity. -func (ou *OrganizationUpdate) ClearMemberships() *OrganizationUpdate { - ou.mutation.ClearMemberships() - return ou +func (_u *OrganizationUpdate) ClearMemberships() *OrganizationUpdate { + _u.mutation.ClearMemberships() + return _u } // RemoveMembershipIDs removes the "memberships" edge to Membership entities by IDs. -func (ou *OrganizationUpdate) RemoveMembershipIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.RemoveMembershipIDs(ids...) - return ou +func (_u *OrganizationUpdate) RemoveMembershipIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.RemoveMembershipIDs(ids...) + return _u } // RemoveMemberships removes "memberships" edges to Membership entities. -func (ou *OrganizationUpdate) RemoveMemberships(m ...*Membership) *OrganizationUpdate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *OrganizationUpdate) RemoveMemberships(v ...*Membership) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.RemoveMembershipIDs(ids...) + return _u.RemoveMembershipIDs(ids...) } // ClearWorkflowContracts clears all "workflow_contracts" edges to the WorkflowContract entity. 
-func (ou *OrganizationUpdate) ClearWorkflowContracts() *OrganizationUpdate { - ou.mutation.ClearWorkflowContracts() - return ou +func (_u *OrganizationUpdate) ClearWorkflowContracts() *OrganizationUpdate { + _u.mutation.ClearWorkflowContracts() + return _u } // RemoveWorkflowContractIDs removes the "workflow_contracts" edge to WorkflowContract entities by IDs. -func (ou *OrganizationUpdate) RemoveWorkflowContractIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.RemoveWorkflowContractIDs(ids...) - return ou +func (_u *OrganizationUpdate) RemoveWorkflowContractIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.RemoveWorkflowContractIDs(ids...) + return _u } // RemoveWorkflowContracts removes "workflow_contracts" edges to WorkflowContract entities. -func (ou *OrganizationUpdate) RemoveWorkflowContracts(w ...*WorkflowContract) *OrganizationUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *OrganizationUpdate) RemoveWorkflowContracts(v ...*WorkflowContract) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.RemoveWorkflowContractIDs(ids...) + return _u.RemoveWorkflowContractIDs(ids...) } // ClearWorkflows clears all "workflows" edges to the Workflow entity. -func (ou *OrganizationUpdate) ClearWorkflows() *OrganizationUpdate { - ou.mutation.ClearWorkflows() - return ou +func (_u *OrganizationUpdate) ClearWorkflows() *OrganizationUpdate { + _u.mutation.ClearWorkflows() + return _u } // RemoveWorkflowIDs removes the "workflows" edge to Workflow entities by IDs. -func (ou *OrganizationUpdate) RemoveWorkflowIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.RemoveWorkflowIDs(ids...) - return ou +func (_u *OrganizationUpdate) RemoveWorkflowIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.RemoveWorkflowIDs(ids...) + return _u } // RemoveWorkflows removes "workflows" edges to Workflow entities. -func (ou *OrganizationUpdate) RemoveWorkflows(w ...*Workflow) *OrganizationUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *OrganizationUpdate) RemoveWorkflows(v ...*Workflow) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.RemoveWorkflowIDs(ids...) + return _u.RemoveWorkflowIDs(ids...) } // ClearCasBackends clears all "cas_backends" edges to the CASBackend entity. -func (ou *OrganizationUpdate) ClearCasBackends() *OrganizationUpdate { - ou.mutation.ClearCasBackends() - return ou +func (_u *OrganizationUpdate) ClearCasBackends() *OrganizationUpdate { + _u.mutation.ClearCasBackends() + return _u } // RemoveCasBackendIDs removes the "cas_backends" edge to CASBackend entities by IDs. -func (ou *OrganizationUpdate) RemoveCasBackendIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.RemoveCasBackendIDs(ids...) - return ou +func (_u *OrganizationUpdate) RemoveCasBackendIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.RemoveCasBackendIDs(ids...) + return _u } // RemoveCasBackends removes "cas_backends" edges to CASBackend entities. -func (ou *OrganizationUpdate) RemoveCasBackends(c ...*CASBackend) *OrganizationUpdate { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_u *OrganizationUpdate) RemoveCasBackends(v ...*CASBackend) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.RemoveCasBackendIDs(ids...) + return _u.RemoveCasBackendIDs(ids...) 
} // ClearIntegrations clears all "integrations" edges to the Integration entity. -func (ou *OrganizationUpdate) ClearIntegrations() *OrganizationUpdate { - ou.mutation.ClearIntegrations() - return ou +func (_u *OrganizationUpdate) ClearIntegrations() *OrganizationUpdate { + _u.mutation.ClearIntegrations() + return _u } // RemoveIntegrationIDs removes the "integrations" edge to Integration entities by IDs. -func (ou *OrganizationUpdate) RemoveIntegrationIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.RemoveIntegrationIDs(ids...) - return ou +func (_u *OrganizationUpdate) RemoveIntegrationIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.RemoveIntegrationIDs(ids...) + return _u } // RemoveIntegrations removes "integrations" edges to Integration entities. -func (ou *OrganizationUpdate) RemoveIntegrations(i ...*Integration) *OrganizationUpdate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *OrganizationUpdate) RemoveIntegrations(v ...*Integration) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.RemoveIntegrationIDs(ids...) + return _u.RemoveIntegrationIDs(ids...) } // ClearAPITokens clears all "api_tokens" edges to the APIToken entity. -func (ou *OrganizationUpdate) ClearAPITokens() *OrganizationUpdate { - ou.mutation.ClearAPITokens() - return ou +func (_u *OrganizationUpdate) ClearAPITokens() *OrganizationUpdate { + _u.mutation.ClearAPITokens() + return _u } // RemoveAPITokenIDs removes the "api_tokens" edge to APIToken entities by IDs. -func (ou *OrganizationUpdate) RemoveAPITokenIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.RemoveAPITokenIDs(ids...) - return ou +func (_u *OrganizationUpdate) RemoveAPITokenIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.RemoveAPITokenIDs(ids...) + return _u } // RemoveAPITokens removes "api_tokens" edges to APIToken entities. -func (ou *OrganizationUpdate) RemoveAPITokens(a ...*APIToken) *OrganizationUpdate { - ids := make([]uuid.UUID, len(a)) - for i := range a { - ids[i] = a[i].ID +func (_u *OrganizationUpdate) RemoveAPITokens(v ...*APIToken) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.RemoveAPITokenIDs(ids...) + return _u.RemoveAPITokenIDs(ids...) } // ClearProjects clears all "projects" edges to the Project entity. -func (ou *OrganizationUpdate) ClearProjects() *OrganizationUpdate { - ou.mutation.ClearProjects() - return ou +func (_u *OrganizationUpdate) ClearProjects() *OrganizationUpdate { + _u.mutation.ClearProjects() + return _u } // RemoveProjectIDs removes the "projects" edge to Project entities by IDs. -func (ou *OrganizationUpdate) RemoveProjectIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.RemoveProjectIDs(ids...) - return ou +func (_u *OrganizationUpdate) RemoveProjectIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.RemoveProjectIDs(ids...) + return _u } // RemoveProjects removes "projects" edges to Project entities. -func (ou *OrganizationUpdate) RemoveProjects(p ...*Project) *OrganizationUpdate { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_u *OrganizationUpdate) RemoveProjects(v ...*Project) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.RemoveProjectIDs(ids...) + return _u.RemoveProjectIDs(ids...) } // ClearGroups clears all "groups" edges to the Group entity. 
-func (ou *OrganizationUpdate) ClearGroups() *OrganizationUpdate { - ou.mutation.ClearGroups() - return ou +func (_u *OrganizationUpdate) ClearGroups() *OrganizationUpdate { + _u.mutation.ClearGroups() + return _u } // RemoveGroupIDs removes the "groups" edge to Group entities by IDs. -func (ou *OrganizationUpdate) RemoveGroupIDs(ids ...uuid.UUID) *OrganizationUpdate { - ou.mutation.RemoveGroupIDs(ids...) - return ou +func (_u *OrganizationUpdate) RemoveGroupIDs(ids ...uuid.UUID) *OrganizationUpdate { + _u.mutation.RemoveGroupIDs(ids...) + return _u } // RemoveGroups removes "groups" edges to Group entities. -func (ou *OrganizationUpdate) RemoveGroups(g ...*Group) *OrganizationUpdate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *OrganizationUpdate) RemoveGroups(v ...*Group) *OrganizationUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ou.RemoveGroupIDs(ids...) + return _u.RemoveGroupIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (ou *OrganizationUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, ou.sqlSave, ou.mutation, ou.hooks) +func (_u *OrganizationUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (ou *OrganizationUpdate) SaveX(ctx context.Context) int { - affected, err := ou.Save(ctx) +func (_u *OrganizationUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -455,66 +455,66 @@ func (ou *OrganizationUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (ou *OrganizationUpdate) Exec(ctx context.Context) error { - _, err := ou.Save(ctx) +func (_u *OrganizationUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (ou *OrganizationUpdate) ExecX(ctx context.Context) { - if err := ou.Exec(ctx); err != nil { +func (_u *OrganizationUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (ou *OrganizationUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *OrganizationUpdate { - ou.modifiers = append(ou.modifiers, modifiers...) - return ou +func (_u *OrganizationUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *OrganizationUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { +func (_u *OrganizationUpdate) sqlSave(ctx context.Context) (_node int, err error) { _spec := sqlgraph.NewUpdateSpec(organization.Table, organization.Columns, sqlgraph.NewFieldSpec(organization.FieldID, field.TypeUUID)) - if ps := ou.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := ou.mutation.Name(); ok { + if value, ok := _u.mutation.Name(); ok { _spec.SetField(organization.FieldName, field.TypeString, value) } - if value, ok := ou.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(organization.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := ou.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(organization.FieldDeletedAt, field.TypeTime, value) } - if ou.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(organization.FieldDeletedAt, field.TypeTime) } - if value, ok := ou.mutation.BlockOnPolicyViolation(); ok { + if value, ok := _u.mutation.BlockOnPolicyViolation(); ok { _spec.SetField(organization.FieldBlockOnPolicyViolation, field.TypeBool, value) } - if value, ok := ou.mutation.PoliciesAllowedHostnames(); ok { + if value, ok := _u.mutation.PoliciesAllowedHostnames(); ok { _spec.SetField(organization.FieldPoliciesAllowedHostnames, field.TypeJSON, value) } - if value, ok := ou.mutation.AppendedPoliciesAllowedHostnames(); ok { + if value, ok := _u.mutation.AppendedPoliciesAllowedHostnames(); ok { _spec.AddModifier(func(u *sql.UpdateBuilder) { sqljson.Append(u, organization.FieldPoliciesAllowedHostnames, value) }) } - if ou.mutation.PoliciesAllowedHostnamesCleared() { + if _u.mutation.PoliciesAllowedHostnamesCleared() { _spec.ClearField(organization.FieldPoliciesAllowedHostnames, field.TypeJSON) } - if value, ok := ou.mutation.PreventImplicitWorkflowCreation(); ok { + if value, ok := _u.mutation.PreventImplicitWorkflowCreation(); ok { _spec.SetField(organization.FieldPreventImplicitWorkflowCreation, field.TypeBool, value) } - if value, ok := ou.mutation.RestrictContractCreationToOrgAdmins(); ok { + if value, ok := _u.mutation.RestrictContractCreationToOrgAdmins(); ok { _spec.SetField(organization.FieldRestrictContractCreationToOrgAdmins, field.TypeBool, value) } - if ou.mutation.MembershipsCleared() { + if _u.mutation.MembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -527,7 +527,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.RemovedMembershipsIDs(); len(nodes) > 0 && !ou.mutation.MembershipsCleared() { + if nodes := _u.mutation.RemovedMembershipsIDs(); len(nodes) > 0 && !_u.mutation.MembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -543,7 +543,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.MembershipsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.MembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -559,7 +559,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if 
ou.mutation.WorkflowContractsCleared() { + if _u.mutation.WorkflowContractsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -572,7 +572,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.RemovedWorkflowContractsIDs(); len(nodes) > 0 && !ou.mutation.WorkflowContractsCleared() { + if nodes := _u.mutation.RemovedWorkflowContractsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowContractsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -588,7 +588,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.WorkflowContractsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowContractsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -604,7 +604,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ou.mutation.WorkflowsCleared() { + if _u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -617,7 +617,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !ou.mutation.WorkflowsCleared() { + if nodes := _u.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -633,7 +633,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -649,7 +649,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ou.mutation.CasBackendsCleared() { + if _u.mutation.CasBackendsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -662,7 +662,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.RemovedCasBackendsIDs(); len(nodes) > 0 && !ou.mutation.CasBackendsCleared() { + if nodes := _u.mutation.RemovedCasBackendsIDs(); len(nodes) > 0 && !_u.mutation.CasBackendsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -678,7 +678,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.CasBackendsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.CasBackendsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -694,7 +694,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ou.mutation.IntegrationsCleared() { + if _u.mutation.IntegrationsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -707,7 +707,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := 
ou.mutation.RemovedIntegrationsIDs(); len(nodes) > 0 && !ou.mutation.IntegrationsCleared() { + if nodes := _u.mutation.RemovedIntegrationsIDs(); len(nodes) > 0 && !_u.mutation.IntegrationsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -723,7 +723,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.IntegrationsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.IntegrationsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -739,7 +739,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ou.mutation.APITokensCleared() { + if _u.mutation.APITokensCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -752,7 +752,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.RemovedAPITokensIDs(); len(nodes) > 0 && !ou.mutation.APITokensCleared() { + if nodes := _u.mutation.RemovedAPITokensIDs(); len(nodes) > 0 && !_u.mutation.APITokensCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -768,7 +768,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.APITokensIDs(); len(nodes) > 0 { + if nodes := _u.mutation.APITokensIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -784,7 +784,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ou.mutation.ProjectsCleared() { + if _u.mutation.ProjectsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -797,7 +797,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.RemovedProjectsIDs(); len(nodes) > 0 && !ou.mutation.ProjectsCleared() { + if nodes := _u.mutation.RemovedProjectsIDs(); len(nodes) > 0 && !_u.mutation.ProjectsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -813,7 +813,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.ProjectsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ProjectsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -829,7 +829,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ou.mutation.GroupsCleared() { + if _u.mutation.GroupsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -842,7 +842,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ou.mutation.RemovedGroupsIDs(); len(nodes) > 0 && !ou.mutation.GroupsCleared() { + if nodes := _u.mutation.RemovedGroupsIDs(); len(nodes) > 0 && !_u.mutation.GroupsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -858,7 +858,7 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if 
nodes := ou.mutation.GroupsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.GroupsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -874,8 +874,8 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(ou.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, ou.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{organization.Label} } else if sqlgraph.IsConstraintError(err) { @@ -883,8 +883,8 @@ func (ou *OrganizationUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - ou.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // OrganizationUpdateOne is the builder for updating a single Organization entity. @@ -897,427 +897,427 @@ type OrganizationUpdateOne struct { } // SetName sets the "name" field. -func (ouo *OrganizationUpdateOne) SetName(s string) *OrganizationUpdateOne { - ouo.mutation.SetName(s) - return ouo +func (_u *OrganizationUpdateOne) SetName(v string) *OrganizationUpdateOne { + _u.mutation.SetName(v) + return _u } // SetNillableName sets the "name" field if the given value is not nil. -func (ouo *OrganizationUpdateOne) SetNillableName(s *string) *OrganizationUpdateOne { - if s != nil { - ouo.SetName(*s) +func (_u *OrganizationUpdateOne) SetNillableName(v *string) *OrganizationUpdateOne { + if v != nil { + _u.SetName(*v) } - return ouo + return _u } // SetUpdatedAt sets the "updated_at" field. -func (ouo *OrganizationUpdateOne) SetUpdatedAt(t time.Time) *OrganizationUpdateOne { - ouo.mutation.SetUpdatedAt(t) - return ouo +func (_u *OrganizationUpdateOne) SetUpdatedAt(v time.Time) *OrganizationUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (ouo *OrganizationUpdateOne) SetNillableUpdatedAt(t *time.Time) *OrganizationUpdateOne { - if t != nil { - ouo.SetUpdatedAt(*t) +func (_u *OrganizationUpdateOne) SetNillableUpdatedAt(v *time.Time) *OrganizationUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return ouo + return _u } // SetDeletedAt sets the "deleted_at" field. -func (ouo *OrganizationUpdateOne) SetDeletedAt(t time.Time) *OrganizationUpdateOne { - ouo.mutation.SetDeletedAt(t) - return ouo +func (_u *OrganizationUpdateOne) SetDeletedAt(v time.Time) *OrganizationUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (ouo *OrganizationUpdateOne) SetNillableDeletedAt(t *time.Time) *OrganizationUpdateOne { - if t != nil { - ouo.SetDeletedAt(*t) +func (_u *OrganizationUpdateOne) SetNillableDeletedAt(v *time.Time) *OrganizationUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return ouo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (ouo *OrganizationUpdateOne) ClearDeletedAt() *OrganizationUpdateOne { - ouo.mutation.ClearDeletedAt() - return ouo +func (_u *OrganizationUpdateOne) ClearDeletedAt() *OrganizationUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetBlockOnPolicyViolation sets the "block_on_policy_violation" field. 
-func (ouo *OrganizationUpdateOne) SetBlockOnPolicyViolation(b bool) *OrganizationUpdateOne { - ouo.mutation.SetBlockOnPolicyViolation(b) - return ouo +func (_u *OrganizationUpdateOne) SetBlockOnPolicyViolation(v bool) *OrganizationUpdateOne { + _u.mutation.SetBlockOnPolicyViolation(v) + return _u } // SetNillableBlockOnPolicyViolation sets the "block_on_policy_violation" field if the given value is not nil. -func (ouo *OrganizationUpdateOne) SetNillableBlockOnPolicyViolation(b *bool) *OrganizationUpdateOne { - if b != nil { - ouo.SetBlockOnPolicyViolation(*b) +func (_u *OrganizationUpdateOne) SetNillableBlockOnPolicyViolation(v *bool) *OrganizationUpdateOne { + if v != nil { + _u.SetBlockOnPolicyViolation(*v) } - return ouo + return _u } // SetPoliciesAllowedHostnames sets the "policies_allowed_hostnames" field. -func (ouo *OrganizationUpdateOne) SetPoliciesAllowedHostnames(s []string) *OrganizationUpdateOne { - ouo.mutation.SetPoliciesAllowedHostnames(s) - return ouo +func (_u *OrganizationUpdateOne) SetPoliciesAllowedHostnames(v []string) *OrganizationUpdateOne { + _u.mutation.SetPoliciesAllowedHostnames(v) + return _u } -// AppendPoliciesAllowedHostnames appends s to the "policies_allowed_hostnames" field. -func (ouo *OrganizationUpdateOne) AppendPoliciesAllowedHostnames(s []string) *OrganizationUpdateOne { - ouo.mutation.AppendPoliciesAllowedHostnames(s) - return ouo +// AppendPoliciesAllowedHostnames appends value to the "policies_allowed_hostnames" field. +func (_u *OrganizationUpdateOne) AppendPoliciesAllowedHostnames(v []string) *OrganizationUpdateOne { + _u.mutation.AppendPoliciesAllowedHostnames(v) + return _u } // ClearPoliciesAllowedHostnames clears the value of the "policies_allowed_hostnames" field. -func (ouo *OrganizationUpdateOne) ClearPoliciesAllowedHostnames() *OrganizationUpdateOne { - ouo.mutation.ClearPoliciesAllowedHostnames() - return ouo +func (_u *OrganizationUpdateOne) ClearPoliciesAllowedHostnames() *OrganizationUpdateOne { + _u.mutation.ClearPoliciesAllowedHostnames() + return _u } // SetPreventImplicitWorkflowCreation sets the "prevent_implicit_workflow_creation" field. -func (ouo *OrganizationUpdateOne) SetPreventImplicitWorkflowCreation(b bool) *OrganizationUpdateOne { - ouo.mutation.SetPreventImplicitWorkflowCreation(b) - return ouo +func (_u *OrganizationUpdateOne) SetPreventImplicitWorkflowCreation(v bool) *OrganizationUpdateOne { + _u.mutation.SetPreventImplicitWorkflowCreation(v) + return _u } // SetNillablePreventImplicitWorkflowCreation sets the "prevent_implicit_workflow_creation" field if the given value is not nil. -func (ouo *OrganizationUpdateOne) SetNillablePreventImplicitWorkflowCreation(b *bool) *OrganizationUpdateOne { - if b != nil { - ouo.SetPreventImplicitWorkflowCreation(*b) +func (_u *OrganizationUpdateOne) SetNillablePreventImplicitWorkflowCreation(v *bool) *OrganizationUpdateOne { + if v != nil { + _u.SetPreventImplicitWorkflowCreation(*v) } - return ouo + return _u } // SetRestrictContractCreationToOrgAdmins sets the "restrict_contract_creation_to_org_admins" field. 
-func (ouo *OrganizationUpdateOne) SetRestrictContractCreationToOrgAdmins(b bool) *OrganizationUpdateOne { - ouo.mutation.SetRestrictContractCreationToOrgAdmins(b) - return ouo +func (_u *OrganizationUpdateOne) SetRestrictContractCreationToOrgAdmins(v bool) *OrganizationUpdateOne { + _u.mutation.SetRestrictContractCreationToOrgAdmins(v) + return _u } // SetNillableRestrictContractCreationToOrgAdmins sets the "restrict_contract_creation_to_org_admins" field if the given value is not nil. -func (ouo *OrganizationUpdateOne) SetNillableRestrictContractCreationToOrgAdmins(b *bool) *OrganizationUpdateOne { - if b != nil { - ouo.SetRestrictContractCreationToOrgAdmins(*b) +func (_u *OrganizationUpdateOne) SetNillableRestrictContractCreationToOrgAdmins(v *bool) *OrganizationUpdateOne { + if v != nil { + _u.SetRestrictContractCreationToOrgAdmins(*v) } - return ouo + return _u } // AddMembershipIDs adds the "memberships" edge to the Membership entity by IDs. -func (ouo *OrganizationUpdateOne) AddMembershipIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.AddMembershipIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) AddMembershipIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.AddMembershipIDs(ids...) + return _u } // AddMemberships adds the "memberships" edges to the Membership entity. -func (ouo *OrganizationUpdateOne) AddMemberships(m ...*Membership) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *OrganizationUpdateOne) AddMemberships(v ...*Membership) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.AddMembershipIDs(ids...) + return _u.AddMembershipIDs(ids...) } // AddWorkflowContractIDs adds the "workflow_contracts" edge to the WorkflowContract entity by IDs. -func (ouo *OrganizationUpdateOne) AddWorkflowContractIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.AddWorkflowContractIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) AddWorkflowContractIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.AddWorkflowContractIDs(ids...) + return _u } // AddWorkflowContracts adds the "workflow_contracts" edges to the WorkflowContract entity. -func (ouo *OrganizationUpdateOne) AddWorkflowContracts(w ...*WorkflowContract) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *OrganizationUpdateOne) AddWorkflowContracts(v ...*WorkflowContract) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.AddWorkflowContractIDs(ids...) + return _u.AddWorkflowContractIDs(ids...) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (ouo *OrganizationUpdateOne) AddWorkflowIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.AddWorkflowIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) AddWorkflowIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.AddWorkflowIDs(ids...) + return _u } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (ouo *OrganizationUpdateOne) AddWorkflows(w ...*Workflow) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *OrganizationUpdateOne) AddWorkflows(v ...*Workflow) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.AddWorkflowIDs(ids...) + return _u.AddWorkflowIDs(ids...) 
} // AddCasBackendIDs adds the "cas_backends" edge to the CASBackend entity by IDs. -func (ouo *OrganizationUpdateOne) AddCasBackendIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.AddCasBackendIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) AddCasBackendIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.AddCasBackendIDs(ids...) + return _u } // AddCasBackends adds the "cas_backends" edges to the CASBackend entity. -func (ouo *OrganizationUpdateOne) AddCasBackends(c ...*CASBackend) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_u *OrganizationUpdateOne) AddCasBackends(v ...*CASBackend) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.AddCasBackendIDs(ids...) + return _u.AddCasBackendIDs(ids...) } // AddIntegrationIDs adds the "integrations" edge to the Integration entity by IDs. -func (ouo *OrganizationUpdateOne) AddIntegrationIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.AddIntegrationIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) AddIntegrationIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.AddIntegrationIDs(ids...) + return _u } // AddIntegrations adds the "integrations" edges to the Integration entity. -func (ouo *OrganizationUpdateOne) AddIntegrations(i ...*Integration) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *OrganizationUpdateOne) AddIntegrations(v ...*Integration) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.AddIntegrationIDs(ids...) + return _u.AddIntegrationIDs(ids...) } // AddAPITokenIDs adds the "api_tokens" edge to the APIToken entity by IDs. -func (ouo *OrganizationUpdateOne) AddAPITokenIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.AddAPITokenIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) AddAPITokenIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.AddAPITokenIDs(ids...) + return _u } // AddAPITokens adds the "api_tokens" edges to the APIToken entity. -func (ouo *OrganizationUpdateOne) AddAPITokens(a ...*APIToken) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(a)) - for i := range a { - ids[i] = a[i].ID +func (_u *OrganizationUpdateOne) AddAPITokens(v ...*APIToken) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.AddAPITokenIDs(ids...) + return _u.AddAPITokenIDs(ids...) } // AddProjectIDs adds the "projects" edge to the Project entity by IDs. -func (ouo *OrganizationUpdateOne) AddProjectIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.AddProjectIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) AddProjectIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.AddProjectIDs(ids...) + return _u } // AddProjects adds the "projects" edges to the Project entity. -func (ouo *OrganizationUpdateOne) AddProjects(p ...*Project) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_u *OrganizationUpdateOne) AddProjects(v ...*Project) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.AddProjectIDs(ids...) + return _u.AddProjectIDs(ids...) } // AddGroupIDs adds the "groups" edge to the Group entity by IDs. 
-func (ouo *OrganizationUpdateOne) AddGroupIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.AddGroupIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) AddGroupIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.AddGroupIDs(ids...) + return _u } // AddGroups adds the "groups" edges to the Group entity. -func (ouo *OrganizationUpdateOne) AddGroups(g ...*Group) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *OrganizationUpdateOne) AddGroups(v ...*Group) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.AddGroupIDs(ids...) + return _u.AddGroupIDs(ids...) } // Mutation returns the OrganizationMutation object of the builder. -func (ouo *OrganizationUpdateOne) Mutation() *OrganizationMutation { - return ouo.mutation +func (_u *OrganizationUpdateOne) Mutation() *OrganizationMutation { + return _u.mutation } // ClearMemberships clears all "memberships" edges to the Membership entity. -func (ouo *OrganizationUpdateOne) ClearMemberships() *OrganizationUpdateOne { - ouo.mutation.ClearMemberships() - return ouo +func (_u *OrganizationUpdateOne) ClearMemberships() *OrganizationUpdateOne { + _u.mutation.ClearMemberships() + return _u } // RemoveMembershipIDs removes the "memberships" edge to Membership entities by IDs. -func (ouo *OrganizationUpdateOne) RemoveMembershipIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.RemoveMembershipIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) RemoveMembershipIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.RemoveMembershipIDs(ids...) + return _u } // RemoveMemberships removes "memberships" edges to Membership entities. -func (ouo *OrganizationUpdateOne) RemoveMemberships(m ...*Membership) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *OrganizationUpdateOne) RemoveMemberships(v ...*Membership) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.RemoveMembershipIDs(ids...) + return _u.RemoveMembershipIDs(ids...) } // ClearWorkflowContracts clears all "workflow_contracts" edges to the WorkflowContract entity. -func (ouo *OrganizationUpdateOne) ClearWorkflowContracts() *OrganizationUpdateOne { - ouo.mutation.ClearWorkflowContracts() - return ouo +func (_u *OrganizationUpdateOne) ClearWorkflowContracts() *OrganizationUpdateOne { + _u.mutation.ClearWorkflowContracts() + return _u } // RemoveWorkflowContractIDs removes the "workflow_contracts" edge to WorkflowContract entities by IDs. -func (ouo *OrganizationUpdateOne) RemoveWorkflowContractIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.RemoveWorkflowContractIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) RemoveWorkflowContractIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.RemoveWorkflowContractIDs(ids...) + return _u } // RemoveWorkflowContracts removes "workflow_contracts" edges to WorkflowContract entities. -func (ouo *OrganizationUpdateOne) RemoveWorkflowContracts(w ...*WorkflowContract) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *OrganizationUpdateOne) RemoveWorkflowContracts(v ...*WorkflowContract) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.RemoveWorkflowContractIDs(ids...) + return _u.RemoveWorkflowContractIDs(ids...) 
} // ClearWorkflows clears all "workflows" edges to the Workflow entity. -func (ouo *OrganizationUpdateOne) ClearWorkflows() *OrganizationUpdateOne { - ouo.mutation.ClearWorkflows() - return ouo +func (_u *OrganizationUpdateOne) ClearWorkflows() *OrganizationUpdateOne { + _u.mutation.ClearWorkflows() + return _u } // RemoveWorkflowIDs removes the "workflows" edge to Workflow entities by IDs. -func (ouo *OrganizationUpdateOne) RemoveWorkflowIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.RemoveWorkflowIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) RemoveWorkflowIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.RemoveWorkflowIDs(ids...) + return _u } // RemoveWorkflows removes "workflows" edges to Workflow entities. -func (ouo *OrganizationUpdateOne) RemoveWorkflows(w ...*Workflow) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *OrganizationUpdateOne) RemoveWorkflows(v ...*Workflow) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.RemoveWorkflowIDs(ids...) + return _u.RemoveWorkflowIDs(ids...) } // ClearCasBackends clears all "cas_backends" edges to the CASBackend entity. -func (ouo *OrganizationUpdateOne) ClearCasBackends() *OrganizationUpdateOne { - ouo.mutation.ClearCasBackends() - return ouo +func (_u *OrganizationUpdateOne) ClearCasBackends() *OrganizationUpdateOne { + _u.mutation.ClearCasBackends() + return _u } // RemoveCasBackendIDs removes the "cas_backends" edge to CASBackend entities by IDs. -func (ouo *OrganizationUpdateOne) RemoveCasBackendIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.RemoveCasBackendIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) RemoveCasBackendIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.RemoveCasBackendIDs(ids...) + return _u } // RemoveCasBackends removes "cas_backends" edges to CASBackend entities. -func (ouo *OrganizationUpdateOne) RemoveCasBackends(c ...*CASBackend) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_u *OrganizationUpdateOne) RemoveCasBackends(v ...*CASBackend) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.RemoveCasBackendIDs(ids...) + return _u.RemoveCasBackendIDs(ids...) } // ClearIntegrations clears all "integrations" edges to the Integration entity. -func (ouo *OrganizationUpdateOne) ClearIntegrations() *OrganizationUpdateOne { - ouo.mutation.ClearIntegrations() - return ouo +func (_u *OrganizationUpdateOne) ClearIntegrations() *OrganizationUpdateOne { + _u.mutation.ClearIntegrations() + return _u } // RemoveIntegrationIDs removes the "integrations" edge to Integration entities by IDs. -func (ouo *OrganizationUpdateOne) RemoveIntegrationIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.RemoveIntegrationIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) RemoveIntegrationIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.RemoveIntegrationIDs(ids...) + return _u } // RemoveIntegrations removes "integrations" edges to Integration entities. 
-func (ouo *OrganizationUpdateOne) RemoveIntegrations(i ...*Integration) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *OrganizationUpdateOne) RemoveIntegrations(v ...*Integration) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.RemoveIntegrationIDs(ids...) + return _u.RemoveIntegrationIDs(ids...) } // ClearAPITokens clears all "api_tokens" edges to the APIToken entity. -func (ouo *OrganizationUpdateOne) ClearAPITokens() *OrganizationUpdateOne { - ouo.mutation.ClearAPITokens() - return ouo +func (_u *OrganizationUpdateOne) ClearAPITokens() *OrganizationUpdateOne { + _u.mutation.ClearAPITokens() + return _u } // RemoveAPITokenIDs removes the "api_tokens" edge to APIToken entities by IDs. -func (ouo *OrganizationUpdateOne) RemoveAPITokenIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.RemoveAPITokenIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) RemoveAPITokenIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.RemoveAPITokenIDs(ids...) + return _u } // RemoveAPITokens removes "api_tokens" edges to APIToken entities. -func (ouo *OrganizationUpdateOne) RemoveAPITokens(a ...*APIToken) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(a)) - for i := range a { - ids[i] = a[i].ID +func (_u *OrganizationUpdateOne) RemoveAPITokens(v ...*APIToken) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.RemoveAPITokenIDs(ids...) + return _u.RemoveAPITokenIDs(ids...) } // ClearProjects clears all "projects" edges to the Project entity. -func (ouo *OrganizationUpdateOne) ClearProjects() *OrganizationUpdateOne { - ouo.mutation.ClearProjects() - return ouo +func (_u *OrganizationUpdateOne) ClearProjects() *OrganizationUpdateOne { + _u.mutation.ClearProjects() + return _u } // RemoveProjectIDs removes the "projects" edge to Project entities by IDs. -func (ouo *OrganizationUpdateOne) RemoveProjectIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.RemoveProjectIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) RemoveProjectIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.RemoveProjectIDs(ids...) + return _u } // RemoveProjects removes "projects" edges to Project entities. -func (ouo *OrganizationUpdateOne) RemoveProjects(p ...*Project) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_u *OrganizationUpdateOne) RemoveProjects(v ...*Project) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.RemoveProjectIDs(ids...) + return _u.RemoveProjectIDs(ids...) } // ClearGroups clears all "groups" edges to the Group entity. -func (ouo *OrganizationUpdateOne) ClearGroups() *OrganizationUpdateOne { - ouo.mutation.ClearGroups() - return ouo +func (_u *OrganizationUpdateOne) ClearGroups() *OrganizationUpdateOne { + _u.mutation.ClearGroups() + return _u } // RemoveGroupIDs removes the "groups" edge to Group entities by IDs. -func (ouo *OrganizationUpdateOne) RemoveGroupIDs(ids ...uuid.UUID) *OrganizationUpdateOne { - ouo.mutation.RemoveGroupIDs(ids...) - return ouo +func (_u *OrganizationUpdateOne) RemoveGroupIDs(ids ...uuid.UUID) *OrganizationUpdateOne { + _u.mutation.RemoveGroupIDs(ids...) + return _u } // RemoveGroups removes "groups" edges to Group entities. 
-func (ouo *OrganizationUpdateOne) RemoveGroups(g ...*Group) *OrganizationUpdateOne { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *OrganizationUpdateOne) RemoveGroups(v ...*Group) *OrganizationUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ouo.RemoveGroupIDs(ids...) + return _u.RemoveGroupIDs(ids...) } // Where appends a list predicates to the OrganizationUpdate builder. -func (ouo *OrganizationUpdateOne) Where(ps ...predicate.Organization) *OrganizationUpdateOne { - ouo.mutation.Where(ps...) - return ouo +func (_u *OrganizationUpdateOne) Where(ps ...predicate.Organization) *OrganizationUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (ouo *OrganizationUpdateOne) Select(field string, fields ...string) *OrganizationUpdateOne { - ouo.fields = append([]string{field}, fields...) - return ouo +func (_u *OrganizationUpdateOne) Select(field string, fields ...string) *OrganizationUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated Organization entity. -func (ouo *OrganizationUpdateOne) Save(ctx context.Context) (*Organization, error) { - return withHooks(ctx, ouo.sqlSave, ouo.mutation, ouo.hooks) +func (_u *OrganizationUpdateOne) Save(ctx context.Context) (*Organization, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (ouo *OrganizationUpdateOne) SaveX(ctx context.Context) *Organization { - node, err := ouo.Save(ctx) +func (_u *OrganizationUpdateOne) SaveX(ctx context.Context) *Organization { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -1325,32 +1325,32 @@ func (ouo *OrganizationUpdateOne) SaveX(ctx context.Context) *Organization { } // Exec executes the query on the entity. -func (ouo *OrganizationUpdateOne) Exec(ctx context.Context) error { - _, err := ouo.Save(ctx) +func (_u *OrganizationUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (ouo *OrganizationUpdateOne) ExecX(ctx context.Context) { - if err := ouo.Exec(ctx); err != nil { +func (_u *OrganizationUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (ouo *OrganizationUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *OrganizationUpdateOne { - ouo.modifiers = append(ouo.modifiers, modifiers...) - return ouo +func (_u *OrganizationUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *OrganizationUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organization, err error) { +func (_u *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organization, err error) { _spec := sqlgraph.NewUpdateSpec(organization.Table, organization.Columns, sqlgraph.NewFieldSpec(organization.FieldID, field.TypeUUID)) - id, ok := ouo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Organization.id" for update`)} } _spec.Node.ID.Value = id - if fields := ouo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, organization.FieldID) for _, f := range fields { @@ -1362,46 +1362,46 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } } } - if ps := ouo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := ouo.mutation.Name(); ok { + if value, ok := _u.mutation.Name(); ok { _spec.SetField(organization.FieldName, field.TypeString, value) } - if value, ok := ouo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(organization.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := ouo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(organization.FieldDeletedAt, field.TypeTime, value) } - if ouo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(organization.FieldDeletedAt, field.TypeTime) } - if value, ok := ouo.mutation.BlockOnPolicyViolation(); ok { + if value, ok := _u.mutation.BlockOnPolicyViolation(); ok { _spec.SetField(organization.FieldBlockOnPolicyViolation, field.TypeBool, value) } - if value, ok := ouo.mutation.PoliciesAllowedHostnames(); ok { + if value, ok := _u.mutation.PoliciesAllowedHostnames(); ok { _spec.SetField(organization.FieldPoliciesAllowedHostnames, field.TypeJSON, value) } - if value, ok := ouo.mutation.AppendedPoliciesAllowedHostnames(); ok { + if value, ok := _u.mutation.AppendedPoliciesAllowedHostnames(); ok { _spec.AddModifier(func(u *sql.UpdateBuilder) { sqljson.Append(u, organization.FieldPoliciesAllowedHostnames, value) }) } - if ouo.mutation.PoliciesAllowedHostnamesCleared() { + if _u.mutation.PoliciesAllowedHostnamesCleared() { _spec.ClearField(organization.FieldPoliciesAllowedHostnames, field.TypeJSON) } - if value, ok := ouo.mutation.PreventImplicitWorkflowCreation(); ok { + if value, ok := _u.mutation.PreventImplicitWorkflowCreation(); ok { _spec.SetField(organization.FieldPreventImplicitWorkflowCreation, field.TypeBool, value) } - if value, ok := ouo.mutation.RestrictContractCreationToOrgAdmins(); ok { + if value, ok := _u.mutation.RestrictContractCreationToOrgAdmins(); ok { _spec.SetField(organization.FieldRestrictContractCreationToOrgAdmins, field.TypeBool, value) } - if ouo.mutation.MembershipsCleared() { + if _u.mutation.MembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1414,7 +1414,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.RemovedMembershipsIDs(); len(nodes) > 0 && !ouo.mutation.MembershipsCleared() { + if nodes := _u.mutation.RemovedMembershipsIDs(); len(nodes) > 0 && 
!_u.mutation.MembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1430,7 +1430,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.MembershipsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.MembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1446,7 +1446,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ouo.mutation.WorkflowContractsCleared() { + if _u.mutation.WorkflowContractsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1459,7 +1459,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.RemovedWorkflowContractsIDs(); len(nodes) > 0 && !ouo.mutation.WorkflowContractsCleared() { + if nodes := _u.mutation.RemovedWorkflowContractsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowContractsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1475,7 +1475,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.WorkflowContractsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowContractsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1491,7 +1491,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ouo.mutation.WorkflowsCleared() { + if _u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1504,7 +1504,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !ouo.mutation.WorkflowsCleared() { + if nodes := _u.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1520,7 +1520,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1536,7 +1536,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ouo.mutation.CasBackendsCleared() { + if _u.mutation.CasBackendsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1549,7 +1549,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.RemovedCasBackendsIDs(); len(nodes) > 0 && !ouo.mutation.CasBackendsCleared() { + if nodes := _u.mutation.RemovedCasBackendsIDs(); len(nodes) > 0 && !_u.mutation.CasBackendsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1565,7 +1565,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := 
ouo.mutation.CasBackendsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.CasBackendsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1581,7 +1581,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ouo.mutation.IntegrationsCleared() { + if _u.mutation.IntegrationsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1594,7 +1594,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.RemovedIntegrationsIDs(); len(nodes) > 0 && !ouo.mutation.IntegrationsCleared() { + if nodes := _u.mutation.RemovedIntegrationsIDs(); len(nodes) > 0 && !_u.mutation.IntegrationsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1610,7 +1610,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.IntegrationsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.IntegrationsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1626,7 +1626,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ouo.mutation.APITokensCleared() { + if _u.mutation.APITokensCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1639,7 +1639,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.RemovedAPITokensIDs(); len(nodes) > 0 && !ouo.mutation.APITokensCleared() { + if nodes := _u.mutation.RemovedAPITokensIDs(); len(nodes) > 0 && !_u.mutation.APITokensCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1655,7 +1655,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.APITokensIDs(); len(nodes) > 0 { + if nodes := _u.mutation.APITokensIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1671,7 +1671,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ouo.mutation.ProjectsCleared() { + if _u.mutation.ProjectsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1684,7 +1684,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.RemovedProjectsIDs(); len(nodes) > 0 && !ouo.mutation.ProjectsCleared() { + if nodes := _u.mutation.RemovedProjectsIDs(); len(nodes) > 0 && !_u.mutation.ProjectsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1700,7 +1700,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.ProjectsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ProjectsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1716,7 +1716,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if 
ouo.mutation.GroupsCleared() { + if _u.mutation.GroupsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1729,7 +1729,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.RemovedGroupsIDs(); len(nodes) > 0 && !ouo.mutation.GroupsCleared() { + if nodes := _u.mutation.RemovedGroupsIDs(); len(nodes) > 0 && !_u.mutation.GroupsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1745,7 +1745,7 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ouo.mutation.GroupsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.GroupsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1761,11 +1761,11 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(ouo.modifiers...) - _node = &Organization{config: ouo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &Organization{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, ouo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{organization.Label} } else if sqlgraph.IsConstraintError(err) { @@ -1773,6 +1773,6 @@ func (ouo *OrganizationUpdateOne) sqlSave(ctx context.Context) (_node *Organizat } return nil, err } - ouo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/orginvitation.go b/app/controlplane/pkg/data/ent/orginvitation.go index c4f3009a0..01fd546d7 100644 --- a/app/controlplane/pkg/data/ent/orginvitation.go +++ b/app/controlplane/pkg/data/ent/orginvitation.go @@ -100,7 +100,7 @@ func (*OrgInvitation) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the OrgInvitation fields. 
-func (oi *OrgInvitation) assignValues(columns []string, values []any) error { +func (_m *OrgInvitation) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -110,60 +110,60 @@ func (oi *OrgInvitation) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - oi.ID = *value + _m.ID = *value } case orginvitation.FieldReceiverEmail: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field receiver_email", values[i]) } else if value.Valid { - oi.ReceiverEmail = value.String + _m.ReceiverEmail = value.String } case orginvitation.FieldStatus: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field status", values[i]) } else if value.Valid { - oi.Status = biz.OrgInvitationStatus(value.String) + _m.Status = biz.OrgInvitationStatus(value.String) } case orginvitation.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - oi.CreatedAt = value.Time + _m.CreatedAt = value.Time } case orginvitation.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - oi.DeletedAt = value.Time + _m.DeletedAt = value.Time } case orginvitation.FieldOrganizationID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field organization_id", values[i]) } else if value != nil { - oi.OrganizationID = *value + _m.OrganizationID = *value } case orginvitation.FieldSenderID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field sender_id", values[i]) } else if value != nil { - oi.SenderID = *value + _m.SenderID = *value } case orginvitation.FieldRole: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field role", values[i]) } else if value.Valid { - oi.Role = authz.Role(value.String) + _m.Role = authz.Role(value.String) } case orginvitation.FieldContext: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field context", values[i]) } else if value != nil && len(*value) > 0 { - if err := json.Unmarshal(*value, &oi.Context); err != nil { + if err := json.Unmarshal(*value, &_m.Context); err != nil { return fmt.Errorf("unmarshal field context: %w", err) } } default: - oi.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -171,66 +171,66 @@ func (oi *OrgInvitation) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the OrgInvitation. // This includes values selected through modifiers, order, etc. -func (oi *OrgInvitation) Value(name string) (ent.Value, error) { - return oi.selectValues.Get(name) +func (_m *OrgInvitation) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryOrganization queries the "organization" edge of the OrgInvitation entity. 
-func (oi *OrgInvitation) QueryOrganization() *OrganizationQuery { - return NewOrgInvitationClient(oi.config).QueryOrganization(oi) +func (_m *OrgInvitation) QueryOrganization() *OrganizationQuery { + return NewOrgInvitationClient(_m.config).QueryOrganization(_m) } // QuerySender queries the "sender" edge of the OrgInvitation entity. -func (oi *OrgInvitation) QuerySender() *UserQuery { - return NewOrgInvitationClient(oi.config).QuerySender(oi) +func (_m *OrgInvitation) QuerySender() *UserQuery { + return NewOrgInvitationClient(_m.config).QuerySender(_m) } // Update returns a builder for updating this OrgInvitation. // Note that you need to call OrgInvitation.Unwrap() before calling this method if this OrgInvitation // was returned from a transaction, and the transaction was committed or rolled back. -func (oi *OrgInvitation) Update() *OrgInvitationUpdateOne { - return NewOrgInvitationClient(oi.config).UpdateOne(oi) +func (_m *OrgInvitation) Update() *OrgInvitationUpdateOne { + return NewOrgInvitationClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the OrgInvitation entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (oi *OrgInvitation) Unwrap() *OrgInvitation { - _tx, ok := oi.config.driver.(*txDriver) +func (_m *OrgInvitation) Unwrap() *OrgInvitation { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: OrgInvitation is not a transactional entity") } - oi.config.driver = _tx.drv - return oi + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (oi *OrgInvitation) String() string { +func (_m *OrgInvitation) String() string { var builder strings.Builder builder.WriteString("OrgInvitation(") - builder.WriteString(fmt.Sprintf("id=%v, ", oi.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("receiver_email=") - builder.WriteString(oi.ReceiverEmail) + builder.WriteString(_m.ReceiverEmail) builder.WriteString(", ") builder.WriteString("status=") - builder.WriteString(fmt.Sprintf("%v", oi.Status)) + builder.WriteString(fmt.Sprintf("%v", _m.Status)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(oi.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(oi.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("organization_id=") - builder.WriteString(fmt.Sprintf("%v", oi.OrganizationID)) + builder.WriteString(fmt.Sprintf("%v", _m.OrganizationID)) builder.WriteString(", ") builder.WriteString("sender_id=") - builder.WriteString(fmt.Sprintf("%v", oi.SenderID)) + builder.WriteString(fmt.Sprintf("%v", _m.SenderID)) builder.WriteString(", ") builder.WriteString("role=") - builder.WriteString(fmt.Sprintf("%v", oi.Role)) + builder.WriteString(fmt.Sprintf("%v", _m.Role)) builder.WriteString(", ") builder.WriteString("context=") - builder.WriteString(fmt.Sprintf("%v", oi.Context)) + builder.WriteString(fmt.Sprintf("%v", _m.Context)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/orginvitation_create.go b/app/controlplane/pkg/data/ent/orginvitation_create.go index aa4f323a9..b7fb77850 100644 --- a/app/controlplane/pkg/data/ent/orginvitation_create.go +++ b/app/controlplane/pkg/data/ent/orginvitation_create.go @@ -29,131 +29,131 @@ type OrgInvitationCreate 
struct { } // SetReceiverEmail sets the "receiver_email" field. -func (oic *OrgInvitationCreate) SetReceiverEmail(s string) *OrgInvitationCreate { - oic.mutation.SetReceiverEmail(s) - return oic +func (_c *OrgInvitationCreate) SetReceiverEmail(v string) *OrgInvitationCreate { + _c.mutation.SetReceiverEmail(v) + return _c } // SetStatus sets the "status" field. -func (oic *OrgInvitationCreate) SetStatus(bis biz.OrgInvitationStatus) *OrgInvitationCreate { - oic.mutation.SetStatus(bis) - return oic +func (_c *OrgInvitationCreate) SetStatus(v biz.OrgInvitationStatus) *OrgInvitationCreate { + _c.mutation.SetStatus(v) + return _c } // SetNillableStatus sets the "status" field if the given value is not nil. -func (oic *OrgInvitationCreate) SetNillableStatus(bis *biz.OrgInvitationStatus) *OrgInvitationCreate { - if bis != nil { - oic.SetStatus(*bis) +func (_c *OrgInvitationCreate) SetNillableStatus(v *biz.OrgInvitationStatus) *OrgInvitationCreate { + if v != nil { + _c.SetStatus(*v) } - return oic + return _c } // SetCreatedAt sets the "created_at" field. -func (oic *OrgInvitationCreate) SetCreatedAt(t time.Time) *OrgInvitationCreate { - oic.mutation.SetCreatedAt(t) - return oic +func (_c *OrgInvitationCreate) SetCreatedAt(v time.Time) *OrgInvitationCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (oic *OrgInvitationCreate) SetNillableCreatedAt(t *time.Time) *OrgInvitationCreate { - if t != nil { - oic.SetCreatedAt(*t) +func (_c *OrgInvitationCreate) SetNillableCreatedAt(v *time.Time) *OrgInvitationCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return oic + return _c } // SetDeletedAt sets the "deleted_at" field. -func (oic *OrgInvitationCreate) SetDeletedAt(t time.Time) *OrgInvitationCreate { - oic.mutation.SetDeletedAt(t) - return oic +func (_c *OrgInvitationCreate) SetDeletedAt(v time.Time) *OrgInvitationCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (oic *OrgInvitationCreate) SetNillableDeletedAt(t *time.Time) *OrgInvitationCreate { - if t != nil { - oic.SetDeletedAt(*t) +func (_c *OrgInvitationCreate) SetNillableDeletedAt(v *time.Time) *OrgInvitationCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return oic + return _c } // SetOrganizationID sets the "organization_id" field. -func (oic *OrgInvitationCreate) SetOrganizationID(u uuid.UUID) *OrgInvitationCreate { - oic.mutation.SetOrganizationID(u) - return oic +func (_c *OrgInvitationCreate) SetOrganizationID(v uuid.UUID) *OrgInvitationCreate { + _c.mutation.SetOrganizationID(v) + return _c } // SetSenderID sets the "sender_id" field. -func (oic *OrgInvitationCreate) SetSenderID(u uuid.UUID) *OrgInvitationCreate { - oic.mutation.SetSenderID(u) - return oic +func (_c *OrgInvitationCreate) SetSenderID(v uuid.UUID) *OrgInvitationCreate { + _c.mutation.SetSenderID(v) + return _c } // SetRole sets the "role" field. -func (oic *OrgInvitationCreate) SetRole(a authz.Role) *OrgInvitationCreate { - oic.mutation.SetRole(a) - return oic +func (_c *OrgInvitationCreate) SetRole(v authz.Role) *OrgInvitationCreate { + _c.mutation.SetRole(v) + return _c } // SetNillableRole sets the "role" field if the given value is not nil. 
-func (oic *OrgInvitationCreate) SetNillableRole(a *authz.Role) *OrgInvitationCreate { - if a != nil { - oic.SetRole(*a) +func (_c *OrgInvitationCreate) SetNillableRole(v *authz.Role) *OrgInvitationCreate { + if v != nil { + _c.SetRole(*v) } - return oic + return _c } // SetContext sets the "context" field. -func (oic *OrgInvitationCreate) SetContext(bic biz.OrgInvitationContext) *OrgInvitationCreate { - oic.mutation.SetContext(bic) - return oic +func (_c *OrgInvitationCreate) SetContext(v biz.OrgInvitationContext) *OrgInvitationCreate { + _c.mutation.SetContext(v) + return _c } // SetNillableContext sets the "context" field if the given value is not nil. -func (oic *OrgInvitationCreate) SetNillableContext(bic *biz.OrgInvitationContext) *OrgInvitationCreate { - if bic != nil { - oic.SetContext(*bic) +func (_c *OrgInvitationCreate) SetNillableContext(v *biz.OrgInvitationContext) *OrgInvitationCreate { + if v != nil { + _c.SetContext(*v) } - return oic + return _c } // SetID sets the "id" field. -func (oic *OrgInvitationCreate) SetID(u uuid.UUID) *OrgInvitationCreate { - oic.mutation.SetID(u) - return oic +func (_c *OrgInvitationCreate) SetID(v uuid.UUID) *OrgInvitationCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (oic *OrgInvitationCreate) SetNillableID(u *uuid.UUID) *OrgInvitationCreate { - if u != nil { - oic.SetID(*u) +func (_c *OrgInvitationCreate) SetNillableID(v *uuid.UUID) *OrgInvitationCreate { + if v != nil { + _c.SetID(*v) } - return oic + return _c } // SetOrganization sets the "organization" edge to the Organization entity. -func (oic *OrgInvitationCreate) SetOrganization(o *Organization) *OrgInvitationCreate { - return oic.SetOrganizationID(o.ID) +func (_c *OrgInvitationCreate) SetOrganization(v *Organization) *OrgInvitationCreate { + return _c.SetOrganizationID(v.ID) } // SetSender sets the "sender" edge to the User entity. -func (oic *OrgInvitationCreate) SetSender(u *User) *OrgInvitationCreate { - return oic.SetSenderID(u.ID) +func (_c *OrgInvitationCreate) SetSender(v *User) *OrgInvitationCreate { + return _c.SetSenderID(v.ID) } // Mutation returns the OrgInvitationMutation object of the builder. -func (oic *OrgInvitationCreate) Mutation() *OrgInvitationMutation { - return oic.mutation +func (_c *OrgInvitationCreate) Mutation() *OrgInvitationMutation { + return _c.mutation } // Save creates the OrgInvitation in the database. -func (oic *OrgInvitationCreate) Save(ctx context.Context) (*OrgInvitation, error) { - oic.defaults() - return withHooks(ctx, oic.sqlSave, oic.mutation, oic.hooks) +func (_c *OrgInvitationCreate) Save(ctx context.Context) (*OrgInvitation, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (oic *OrgInvitationCreate) SaveX(ctx context.Context) *OrgInvitation { - v, err := oic.Save(ctx) +func (_c *OrgInvitationCreate) SaveX(ctx context.Context) *OrgInvitation { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -161,76 +161,76 @@ func (oic *OrgInvitationCreate) SaveX(ctx context.Context) *OrgInvitation { } // Exec executes the query. -func (oic *OrgInvitationCreate) Exec(ctx context.Context) error { - _, err := oic.Save(ctx) +func (_c *OrgInvitationCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. 
-func (oic *OrgInvitationCreate) ExecX(ctx context.Context) { - if err := oic.Exec(ctx); err != nil { +func (_c *OrgInvitationCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (oic *OrgInvitationCreate) defaults() { - if _, ok := oic.mutation.Status(); !ok { +func (_c *OrgInvitationCreate) defaults() { + if _, ok := _c.mutation.Status(); !ok { v := orginvitation.DefaultStatus - oic.mutation.SetStatus(v) + _c.mutation.SetStatus(v) } - if _, ok := oic.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { v := orginvitation.DefaultCreatedAt() - oic.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := oic.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := orginvitation.DefaultID() - oic.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. -func (oic *OrgInvitationCreate) check() error { - if _, ok := oic.mutation.ReceiverEmail(); !ok { +func (_c *OrgInvitationCreate) check() error { + if _, ok := _c.mutation.ReceiverEmail(); !ok { return &ValidationError{Name: "receiver_email", err: errors.New(`ent: missing required field "OrgInvitation.receiver_email"`)} } - if _, ok := oic.mutation.Status(); !ok { + if _, ok := _c.mutation.Status(); !ok { return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "OrgInvitation.status"`)} } - if v, ok := oic.mutation.Status(); ok { + if v, ok := _c.mutation.Status(); ok { if err := orginvitation.StatusValidator(v); err != nil { return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "OrgInvitation.status": %w`, err)} } } - if _, ok := oic.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "OrgInvitation.created_at"`)} } - if _, ok := oic.mutation.OrganizationID(); !ok { + if _, ok := _c.mutation.OrganizationID(); !ok { return &ValidationError{Name: "organization_id", err: errors.New(`ent: missing required field "OrgInvitation.organization_id"`)} } - if _, ok := oic.mutation.SenderID(); !ok { + if _, ok := _c.mutation.SenderID(); !ok { return &ValidationError{Name: "sender_id", err: errors.New(`ent: missing required field "OrgInvitation.sender_id"`)} } - if v, ok := oic.mutation.Role(); ok { + if v, ok := _c.mutation.Role(); ok { if err := orginvitation.RoleValidator(v); err != nil { return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "OrgInvitation.role": %w`, err)} } } - if len(oic.mutation.OrganizationIDs()) == 0 { + if len(_c.mutation.OrganizationIDs()) == 0 { return &ValidationError{Name: "organization", err: errors.New(`ent: missing required edge "OrgInvitation.organization"`)} } - if len(oic.mutation.SenderIDs()) == 0 { + if len(_c.mutation.SenderIDs()) == 0 { return &ValidationError{Name: "sender", err: errors.New(`ent: missing required edge "OrgInvitation.sender"`)} } return nil } -func (oic *OrgInvitationCreate) sqlSave(ctx context.Context) (*OrgInvitation, error) { - if err := oic.check(); err != nil { +func (_c *OrgInvitationCreate) sqlSave(ctx context.Context) (*OrgInvitation, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := oic.createSpec() - if err := sqlgraph.CreateNode(ctx, oic.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, 
_spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -243,46 +243,46 @@ func (oic *OrgInvitationCreate) sqlSave(ctx context.Context) (*OrgInvitation, er return nil, err } } - oic.mutation.id = &_node.ID - oic.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (oic *OrgInvitationCreate) createSpec() (*OrgInvitation, *sqlgraph.CreateSpec) { +func (_c *OrgInvitationCreate) createSpec() (*OrgInvitation, *sqlgraph.CreateSpec) { var ( - _node = &OrgInvitation{config: oic.config} + _node = &OrgInvitation{config: _c.config} _spec = sqlgraph.NewCreateSpec(orginvitation.Table, sqlgraph.NewFieldSpec(orginvitation.FieldID, field.TypeUUID)) ) - _spec.OnConflict = oic.conflict - if id, ok := oic.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := oic.mutation.ReceiverEmail(); ok { + if value, ok := _c.mutation.ReceiverEmail(); ok { _spec.SetField(orginvitation.FieldReceiverEmail, field.TypeString, value) _node.ReceiverEmail = value } - if value, ok := oic.mutation.Status(); ok { + if value, ok := _c.mutation.Status(); ok { _spec.SetField(orginvitation.FieldStatus, field.TypeEnum, value) _node.Status = value } - if value, ok := oic.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(orginvitation.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := oic.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(orginvitation.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if value, ok := oic.mutation.Role(); ok { + if value, ok := _c.mutation.Role(); ok { _spec.SetField(orginvitation.FieldRole, field.TypeEnum, value) _node.Role = value } - if value, ok := oic.mutation.Context(); ok { + if value, ok := _c.mutation.Context(); ok { _spec.SetField(orginvitation.FieldContext, field.TypeJSON, value) _node.Context = value } - if nodes := oic.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -299,7 +299,7 @@ func (oic *OrgInvitationCreate) createSpec() (*OrgInvitation, *sqlgraph.CreateSp _node.OrganizationID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := oic.mutation.SenderIDs(); len(nodes) > 0 { + if nodes := _c.mutation.SenderIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -335,10 +335,10 @@ func (oic *OrgInvitationCreate) createSpec() (*OrgInvitation, *sqlgraph.CreateSp // SetReceiverEmail(v+v). // }). // Exec(ctx) -func (oic *OrgInvitationCreate) OnConflict(opts ...sql.ConflictOption) *OrgInvitationUpsertOne { - oic.conflict = opts +func (_c *OrgInvitationCreate) OnConflict(opts ...sql.ConflictOption) *OrgInvitationUpsertOne { + _c.conflict = opts return &OrgInvitationUpsertOne{ - create: oic, + create: _c, } } @@ -348,10 +348,10 @@ func (oic *OrgInvitationCreate) OnConflict(opts ...sql.ConflictOption) *OrgInvit // client.OrgInvitation.Create(). // OnConflict(sql.ConflictColumns(columns...)). 
// Exec(ctx) -func (oic *OrgInvitationCreate) OnConflictColumns(columns ...string) *OrgInvitationUpsertOne { - oic.conflict = append(oic.conflict, sql.ConflictColumns(columns...)) +func (_c *OrgInvitationCreate) OnConflictColumns(columns ...string) *OrgInvitationUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &OrgInvitationUpsertOne{ - create: oic, + create: _c, } } @@ -664,16 +664,16 @@ type OrgInvitationCreateBulk struct { } // Save creates the OrgInvitation entities in the database. -func (oicb *OrgInvitationCreateBulk) Save(ctx context.Context) ([]*OrgInvitation, error) { - if oicb.err != nil { - return nil, oicb.err - } - specs := make([]*sqlgraph.CreateSpec, len(oicb.builders)) - nodes := make([]*OrgInvitation, len(oicb.builders)) - mutators := make([]Mutator, len(oicb.builders)) - for i := range oicb.builders { +func (_c *OrgInvitationCreateBulk) Save(ctx context.Context) ([]*OrgInvitation, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*OrgInvitation, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := oicb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*OrgInvitationMutation) @@ -687,12 +687,12 @@ func (oicb *OrgInvitationCreateBulk) Save(ctx context.Context) ([]*OrgInvitation var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, oicb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = oicb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, oicb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -712,7 +712,7 @@ func (oicb *OrgInvitationCreateBulk) Save(ctx context.Context) ([]*OrgInvitation }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, oicb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -720,8 +720,8 @@ func (oicb *OrgInvitationCreateBulk) Save(ctx context.Context) ([]*OrgInvitation } // SaveX is like Save, but panics if an error occurs. -func (oicb *OrgInvitationCreateBulk) SaveX(ctx context.Context) []*OrgInvitation { - v, err := oicb.Save(ctx) +func (_c *OrgInvitationCreateBulk) SaveX(ctx context.Context) []*OrgInvitation { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -729,14 +729,14 @@ func (oicb *OrgInvitationCreateBulk) SaveX(ctx context.Context) []*OrgInvitation } // Exec executes the query. -func (oicb *OrgInvitationCreateBulk) Exec(ctx context.Context) error { - _, err := oicb.Save(ctx) +func (_c *OrgInvitationCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. 
-func (oicb *OrgInvitationCreateBulk) ExecX(ctx context.Context) { - if err := oicb.Exec(ctx); err != nil { +func (_c *OrgInvitationCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -756,10 +756,10 @@ func (oicb *OrgInvitationCreateBulk) ExecX(ctx context.Context) { // SetReceiverEmail(v+v). // }). // Exec(ctx) -func (oicb *OrgInvitationCreateBulk) OnConflict(opts ...sql.ConflictOption) *OrgInvitationUpsertBulk { - oicb.conflict = opts +func (_c *OrgInvitationCreateBulk) OnConflict(opts ...sql.ConflictOption) *OrgInvitationUpsertBulk { + _c.conflict = opts return &OrgInvitationUpsertBulk{ - create: oicb, + create: _c, } } @@ -769,10 +769,10 @@ func (oicb *OrgInvitationCreateBulk) OnConflict(opts ...sql.ConflictOption) *Org // client.OrgInvitation.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (oicb *OrgInvitationCreateBulk) OnConflictColumns(columns ...string) *OrgInvitationUpsertBulk { - oicb.conflict = append(oicb.conflict, sql.ConflictColumns(columns...)) +func (_c *OrgInvitationCreateBulk) OnConflictColumns(columns ...string) *OrgInvitationUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &OrgInvitationUpsertBulk{ - create: oicb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/orginvitation_delete.go b/app/controlplane/pkg/data/ent/orginvitation_delete.go index bca1bfbeb..5d84d7c08 100644 --- a/app/controlplane/pkg/data/ent/orginvitation_delete.go +++ b/app/controlplane/pkg/data/ent/orginvitation_delete.go @@ -20,56 +20,56 @@ type OrgInvitationDelete struct { } // Where appends a list predicates to the OrgInvitationDelete builder. -func (oid *OrgInvitationDelete) Where(ps ...predicate.OrgInvitation) *OrgInvitationDelete { - oid.mutation.Where(ps...) - return oid +func (_d *OrgInvitationDelete) Where(ps ...predicate.OrgInvitation) *OrgInvitationDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (oid *OrgInvitationDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, oid.sqlExec, oid.mutation, oid.hooks) +func (_d *OrgInvitationDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (oid *OrgInvitationDelete) ExecX(ctx context.Context) int { - n, err := oid.Exec(ctx) +func (_d *OrgInvitationDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (oid *OrgInvitationDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *OrgInvitationDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(orginvitation.Table, sqlgraph.NewFieldSpec(orginvitation.FieldID, field.TypeUUID)) - if ps := oid.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, oid.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - oid.mutation.done = true + _d.mutation.done = true return affected, err } // OrgInvitationDeleteOne is the builder for deleting a single OrgInvitation entity. 
type OrgInvitationDeleteOne struct { - oid *OrgInvitationDelete + _d *OrgInvitationDelete } // Where appends a list predicates to the OrgInvitationDelete builder. -func (oido *OrgInvitationDeleteOne) Where(ps ...predicate.OrgInvitation) *OrgInvitationDeleteOne { - oido.oid.mutation.Where(ps...) - return oido +func (_d *OrgInvitationDeleteOne) Where(ps ...predicate.OrgInvitation) *OrgInvitationDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (oido *OrgInvitationDeleteOne) Exec(ctx context.Context) error { - n, err := oido.oid.Exec(ctx) +func (_d *OrgInvitationDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (oido *OrgInvitationDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (oido *OrgInvitationDeleteOne) ExecX(ctx context.Context) { - if err := oido.Exec(ctx); err != nil { +func (_d *OrgInvitationDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/orginvitation_query.go b/app/controlplane/pkg/data/ent/orginvitation_query.go index 1a351b610..0763cb2e8 100644 --- a/app/controlplane/pkg/data/ent/orginvitation_query.go +++ b/app/controlplane/pkg/data/ent/orginvitation_query.go @@ -35,44 +35,44 @@ type OrgInvitationQuery struct { } // Where adds a new predicate for the OrgInvitationQuery builder. -func (oiq *OrgInvitationQuery) Where(ps ...predicate.OrgInvitation) *OrgInvitationQuery { - oiq.predicates = append(oiq.predicates, ps...) - return oiq +func (_q *OrgInvitationQuery) Where(ps ...predicate.OrgInvitation) *OrgInvitationQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (oiq *OrgInvitationQuery) Limit(limit int) *OrgInvitationQuery { - oiq.ctx.Limit = &limit - return oiq +func (_q *OrgInvitationQuery) Limit(limit int) *OrgInvitationQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (oiq *OrgInvitationQuery) Offset(offset int) *OrgInvitationQuery { - oiq.ctx.Offset = &offset - return oiq +func (_q *OrgInvitationQuery) Offset(offset int) *OrgInvitationQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (oiq *OrgInvitationQuery) Unique(unique bool) *OrgInvitationQuery { - oiq.ctx.Unique = &unique - return oiq +func (_q *OrgInvitationQuery) Unique(unique bool) *OrgInvitationQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (oiq *OrgInvitationQuery) Order(o ...orginvitation.OrderOption) *OrgInvitationQuery { - oiq.order = append(oiq.order, o...) - return oiq +func (_q *OrgInvitationQuery) Order(o ...orginvitation.OrderOption) *OrgInvitationQuery { + _q.order = append(_q.order, o...) + return _q } // QueryOrganization chains the current query on the "organization" edge. 
-func (oiq *OrgInvitationQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: oiq.config}).Query() +func (_q *OrgInvitationQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oiq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oiq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -81,20 +81,20 @@ func (oiq *OrgInvitationQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, orginvitation.OrganizationTable, orginvitation.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(oiq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QuerySender chains the current query on the "sender" edge. -func (oiq *OrgInvitationQuery) QuerySender() *UserQuery { - query := (&UserClient{config: oiq.config}).Query() +func (_q *OrgInvitationQuery) QuerySender() *UserQuery { + query := (&UserClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := oiq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := oiq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -103,7 +103,7 @@ func (oiq *OrgInvitationQuery) QuerySender() *UserQuery { sqlgraph.To(user.Table, user.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, orginvitation.SenderTable, orginvitation.SenderColumn), ) - fromU = sqlgraph.SetNeighbors(oiq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -111,8 +111,8 @@ func (oiq *OrgInvitationQuery) QuerySender() *UserQuery { // First returns the first OrgInvitation entity from the query. // Returns a *NotFoundError when no OrgInvitation was found. -func (oiq *OrgInvitationQuery) First(ctx context.Context) (*OrgInvitation, error) { - nodes, err := oiq.Limit(1).All(setContextOp(ctx, oiq.ctx, ent.OpQueryFirst)) +func (_q *OrgInvitationQuery) First(ctx context.Context) (*OrgInvitation, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -123,8 +123,8 @@ func (oiq *OrgInvitationQuery) First(ctx context.Context) (*OrgInvitation, error } // FirstX is like First, but panics if an error occurs. -func (oiq *OrgInvitationQuery) FirstX(ctx context.Context) *OrgInvitation { - node, err := oiq.First(ctx) +func (_q *OrgInvitationQuery) FirstX(ctx context.Context) *OrgInvitation { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -133,9 +133,9 @@ func (oiq *OrgInvitationQuery) FirstX(ctx context.Context) *OrgInvitation { // FirstID returns the first OrgInvitation ID from the query. // Returns a *NotFoundError when no OrgInvitation ID was found. 
-func (oiq *OrgInvitationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *OrgInvitationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = oiq.Limit(1).IDs(setContextOp(ctx, oiq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -146,8 +146,8 @@ func (oiq *OrgInvitationQuery) FirstID(ctx context.Context) (id uuid.UUID, err e } // FirstIDX is like FirstID, but panics if an error occurs. -func (oiq *OrgInvitationQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := oiq.FirstID(ctx) +func (_q *OrgInvitationQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -157,8 +157,8 @@ func (oiq *OrgInvitationQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single OrgInvitation entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one OrgInvitation entity is found. // Returns a *NotFoundError when no OrgInvitation entities are found. -func (oiq *OrgInvitationQuery) Only(ctx context.Context) (*OrgInvitation, error) { - nodes, err := oiq.Limit(2).All(setContextOp(ctx, oiq.ctx, ent.OpQueryOnly)) +func (_q *OrgInvitationQuery) Only(ctx context.Context) (*OrgInvitation, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -173,8 +173,8 @@ func (oiq *OrgInvitationQuery) Only(ctx context.Context) (*OrgInvitation, error) } // OnlyX is like Only, but panics if an error occurs. -func (oiq *OrgInvitationQuery) OnlyX(ctx context.Context) *OrgInvitation { - node, err := oiq.Only(ctx) +func (_q *OrgInvitationQuery) OnlyX(ctx context.Context) *OrgInvitation { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -184,9 +184,9 @@ func (oiq *OrgInvitationQuery) OnlyX(ctx context.Context) *OrgInvitation { // OnlyID is like Only, but returns the only OrgInvitation ID in the query. // Returns a *NotSingularError when more than one OrgInvitation ID is found. // Returns a *NotFoundError when no entities are found. -func (oiq *OrgInvitationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *OrgInvitationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = oiq.Limit(2).IDs(setContextOp(ctx, oiq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -201,8 +201,8 @@ func (oiq *OrgInvitationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err er } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (oiq *OrgInvitationQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := oiq.OnlyID(ctx) +func (_q *OrgInvitationQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -210,18 +210,18 @@ func (oiq *OrgInvitationQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of OrgInvitations. 
-func (oiq *OrgInvitationQuery) All(ctx context.Context) ([]*OrgInvitation, error) { - ctx = setContextOp(ctx, oiq.ctx, ent.OpQueryAll) - if err := oiq.prepareQuery(ctx); err != nil { +func (_q *OrgInvitationQuery) All(ctx context.Context) ([]*OrgInvitation, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*OrgInvitation, *OrgInvitationQuery]() - return withInterceptors[[]*OrgInvitation](ctx, oiq, qr, oiq.inters) + return withInterceptors[[]*OrgInvitation](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (oiq *OrgInvitationQuery) AllX(ctx context.Context) []*OrgInvitation { - nodes, err := oiq.All(ctx) +func (_q *OrgInvitationQuery) AllX(ctx context.Context) []*OrgInvitation { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -229,20 +229,20 @@ func (oiq *OrgInvitationQuery) AllX(ctx context.Context) []*OrgInvitation { } // IDs executes the query and returns a list of OrgInvitation IDs. -func (oiq *OrgInvitationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if oiq.ctx.Unique == nil && oiq.path != nil { - oiq.Unique(true) +func (_q *OrgInvitationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, oiq.ctx, ent.OpQueryIDs) - if err = oiq.Select(orginvitation.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(orginvitation.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (oiq *OrgInvitationQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := oiq.IDs(ctx) +func (_q *OrgInvitationQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -250,17 +250,17 @@ func (oiq *OrgInvitationQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (oiq *OrgInvitationQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, oiq.ctx, ent.OpQueryCount) - if err := oiq.prepareQuery(ctx); err != nil { +func (_q *OrgInvitationQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, oiq, querierCount[*OrgInvitationQuery](), oiq.inters) + return withInterceptors[int](ctx, _q, querierCount[*OrgInvitationQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (oiq *OrgInvitationQuery) CountX(ctx context.Context) int { - count, err := oiq.Count(ctx) +func (_q *OrgInvitationQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -268,9 +268,9 @@ func (oiq *OrgInvitationQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. 
-func (oiq *OrgInvitationQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, oiq.ctx, ent.OpQueryExist) - switch _, err := oiq.FirstID(ctx); { +func (_q *OrgInvitationQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -281,8 +281,8 @@ func (oiq *OrgInvitationQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (oiq *OrgInvitationQuery) ExistX(ctx context.Context) bool { - exist, err := oiq.Exist(ctx) +func (_q *OrgInvitationQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -291,45 +291,45 @@ func (oiq *OrgInvitationQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the OrgInvitationQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (oiq *OrgInvitationQuery) Clone() *OrgInvitationQuery { - if oiq == nil { +func (_q *OrgInvitationQuery) Clone() *OrgInvitationQuery { + if _q == nil { return nil } return &OrgInvitationQuery{ - config: oiq.config, - ctx: oiq.ctx.Clone(), - order: append([]orginvitation.OrderOption{}, oiq.order...), - inters: append([]Interceptor{}, oiq.inters...), - predicates: append([]predicate.OrgInvitation{}, oiq.predicates...), - withOrganization: oiq.withOrganization.Clone(), - withSender: oiq.withSender.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]orginvitation.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.OrgInvitation{}, _q.predicates...), + withOrganization: _q.withOrganization.Clone(), + withSender: _q.withSender.Clone(), // clone intermediate query. - sql: oiq.sql.Clone(), - path: oiq.path, - modifiers: append([]func(*sql.Selector){}, oiq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (oiq *OrgInvitationQuery) WithOrganization(opts ...func(*OrganizationQuery)) *OrgInvitationQuery { - query := (&OrganizationClient{config: oiq.config}).Query() +func (_q *OrgInvitationQuery) WithOrganization(opts ...func(*OrganizationQuery)) *OrgInvitationQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oiq.withOrganization = query - return oiq + _q.withOrganization = query + return _q } // WithSender tells the query-builder to eager-load the nodes that are connected to // the "sender" edge. The optional arguments are used to configure the query builder of the edge. -func (oiq *OrgInvitationQuery) WithSender(opts ...func(*UserQuery)) *OrgInvitationQuery { - query := (&UserClient{config: oiq.config}).Query() +func (_q *OrgInvitationQuery) WithSender(opts ...func(*UserQuery)) *OrgInvitationQuery { + query := (&UserClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - oiq.withSender = query - return oiq + _q.withSender = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. 
@@ -346,10 +346,10 @@ func (oiq *OrgInvitationQuery) WithSender(opts ...func(*UserQuery)) *OrgInvitati // GroupBy(orginvitation.FieldReceiverEmail). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (oiq *OrgInvitationQuery) GroupBy(field string, fields ...string) *OrgInvitationGroupBy { - oiq.ctx.Fields = append([]string{field}, fields...) - grbuild := &OrgInvitationGroupBy{build: oiq} - grbuild.flds = &oiq.ctx.Fields +func (_q *OrgInvitationQuery) GroupBy(field string, fields ...string) *OrgInvitationGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &OrgInvitationGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = orginvitation.Label grbuild.scan = grbuild.Scan return grbuild @@ -367,83 +367,83 @@ func (oiq *OrgInvitationQuery) GroupBy(field string, fields ...string) *OrgInvit // client.OrgInvitation.Query(). // Select(orginvitation.FieldReceiverEmail). // Scan(ctx, &v) -func (oiq *OrgInvitationQuery) Select(fields ...string) *OrgInvitationSelect { - oiq.ctx.Fields = append(oiq.ctx.Fields, fields...) - sbuild := &OrgInvitationSelect{OrgInvitationQuery: oiq} +func (_q *OrgInvitationQuery) Select(fields ...string) *OrgInvitationSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &OrgInvitationSelect{OrgInvitationQuery: _q} sbuild.label = orginvitation.Label - sbuild.flds, sbuild.scan = &oiq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a OrgInvitationSelect configured with the given aggregations. -func (oiq *OrgInvitationQuery) Aggregate(fns ...AggregateFunc) *OrgInvitationSelect { - return oiq.Select().Aggregate(fns...) +func (_q *OrgInvitationQuery) Aggregate(fns ...AggregateFunc) *OrgInvitationSelect { + return _q.Select().Aggregate(fns...) 
} -func (oiq *OrgInvitationQuery) prepareQuery(ctx context.Context) error { - for _, inter := range oiq.inters { +func (_q *OrgInvitationQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, oiq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range oiq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !orginvitation.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if oiq.path != nil { - prev, err := oiq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - oiq.sql = prev + _q.sql = prev } return nil } -func (oiq *OrgInvitationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*OrgInvitation, error) { +func (_q *OrgInvitationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*OrgInvitation, error) { var ( nodes = []*OrgInvitation{} - _spec = oiq.querySpec() + _spec = _q.querySpec() loadedTypes = [2]bool{ - oiq.withOrganization != nil, - oiq.withSender != nil, + _q.withOrganization != nil, + _q.withSender != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*OrgInvitation).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &OrgInvitation{config: oiq.config} + node := &OrgInvitation{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(oiq.modifiers) > 0 { - _spec.Modifiers = oiq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, oiq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := oiq.withOrganization; query != nil { - if err := oiq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *OrgInvitation, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } } - if query := oiq.withSender; query != nil { - if err := oiq.loadSender(ctx, query, nodes, nil, + if query := _q.withSender; query != nil { + if err := _q.loadSender(ctx, query, nodes, nil, func(n *OrgInvitation, e *User) { n.Edges.Sender = e }); err != nil { return nil, err } @@ -451,7 +451,7 @@ func (oiq *OrgInvitationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ( return nodes, nil } -func (oiq *OrgInvitationQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*OrgInvitation, init func(*OrgInvitation), assign func(*OrgInvitation, *Organization)) error { +func (_q *OrgInvitationQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*OrgInvitation, init func(*OrgInvitation), assign func(*OrgInvitation, *Organization)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*OrgInvitation) for i := range nodes { @@ -480,7 +480,7 @@ func (oiq *OrgInvitationQuery) loadOrganization(ctx context.Context, query *Orga } return nil } -func (oiq *OrgInvitationQuery) loadSender(ctx context.Context, query *UserQuery, nodes []*OrgInvitation, init func(*OrgInvitation), assign func(*OrgInvitation, *User)) error { 
+func (_q *OrgInvitationQuery) loadSender(ctx context.Context, query *UserQuery, nodes []*OrgInvitation, init func(*OrgInvitation), assign func(*OrgInvitation, *User)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*OrgInvitation) for i := range nodes { @@ -510,27 +510,27 @@ func (oiq *OrgInvitationQuery) loadSender(ctx context.Context, query *UserQuery, return nil } -func (oiq *OrgInvitationQuery) sqlCount(ctx context.Context) (int, error) { - _spec := oiq.querySpec() - if len(oiq.modifiers) > 0 { - _spec.Modifiers = oiq.modifiers +func (_q *OrgInvitationQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = oiq.ctx.Fields - if len(oiq.ctx.Fields) > 0 { - _spec.Unique = oiq.ctx.Unique != nil && *oiq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, oiq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (oiq *OrgInvitationQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *OrgInvitationQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(orginvitation.Table, orginvitation.Columns, sqlgraph.NewFieldSpec(orginvitation.FieldID, field.TypeUUID)) - _spec.From = oiq.sql - if unique := oiq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if oiq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := oiq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, orginvitation.FieldID) for i := range fields { @@ -538,27 +538,27 @@ func (oiq *OrgInvitationQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if oiq.withOrganization != nil { + if _q.withOrganization != nil { _spec.Node.AddColumnOnce(orginvitation.FieldOrganizationID) } - if oiq.withSender != nil { + if _q.withSender != nil { _spec.Node.AddColumnOnce(orginvitation.FieldSenderID) } } - if ps := oiq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := oiq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := oiq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := oiq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -568,36 +568,36 @@ func (oiq *OrgInvitationQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (oiq *OrgInvitationQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(oiq.driver.Dialect()) +func (_q *OrgInvitationQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(orginvitation.Table) - columns := oiq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = orginvitation.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if oiq.sql != nil { - selector = oiq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) 
} - if oiq.ctx.Unique != nil && *oiq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range oiq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range oiq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range oiq.order { + for _, p := range _q.order { p(selector) } - if offset := oiq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := oiq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -606,33 +606,33 @@ func (oiq *OrgInvitationQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (oiq *OrgInvitationQuery) ForUpdate(opts ...sql.LockOption) *OrgInvitationQuery { - if oiq.driver.Dialect() == dialect.Postgres { - oiq.Unique(false) +func (_q *OrgInvitationQuery) ForUpdate(opts ...sql.LockOption) *OrgInvitationQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - oiq.modifiers = append(oiq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return oiq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (oiq *OrgInvitationQuery) ForShare(opts ...sql.LockOption) *OrgInvitationQuery { - if oiq.driver.Dialect() == dialect.Postgres { - oiq.Unique(false) +func (_q *OrgInvitationQuery) ForShare(opts ...sql.LockOption) *OrgInvitationQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - oiq.modifiers = append(oiq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return oiq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (oiq *OrgInvitationQuery) Modify(modifiers ...func(s *sql.Selector)) *OrgInvitationSelect { - oiq.modifiers = append(oiq.modifiers, modifiers...) - return oiq.Select() +func (_q *OrgInvitationQuery) Modify(modifiers ...func(s *sql.Selector)) *OrgInvitationSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // OrgInvitationGroupBy is the group-by builder for OrgInvitation entities. @@ -642,41 +642,41 @@ type OrgInvitationGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (oigb *OrgInvitationGroupBy) Aggregate(fns ...AggregateFunc) *OrgInvitationGroupBy { - oigb.fns = append(oigb.fns, fns...) - return oigb +func (_g *OrgInvitationGroupBy) Aggregate(fns ...AggregateFunc) *OrgInvitationGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (oigb *OrgInvitationGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, oigb.build.ctx, ent.OpQueryGroupBy) - if err := oigb.build.prepareQuery(ctx); err != nil { +func (_g *OrgInvitationGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*OrgInvitationQuery, *OrgInvitationGroupBy](ctx, oigb.build, oigb, oigb.build.inters, v) + return scanWithInterceptors[*OrgInvitationQuery, *OrgInvitationGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (oigb *OrgInvitationGroupBy) sqlScan(ctx context.Context, root *OrgInvitationQuery, v any) error { +func (_g *OrgInvitationGroupBy) sqlScan(ctx context.Context, root *OrgInvitationQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(oigb.fns)) - for _, fn := range oigb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*oigb.flds)+len(oigb.fns)) - for _, f := range *oigb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*oigb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := oigb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -690,27 +690,27 @@ type OrgInvitationSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (ois *OrgInvitationSelect) Aggregate(fns ...AggregateFunc) *OrgInvitationSelect { - ois.fns = append(ois.fns, fns...) - return ois +func (_s *OrgInvitationSelect) Aggregate(fns ...AggregateFunc) *OrgInvitationSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (ois *OrgInvitationSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ois.ctx, ent.OpQuerySelect) - if err := ois.prepareQuery(ctx); err != nil { +func (_s *OrgInvitationSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*OrgInvitationQuery, *OrgInvitationSelect](ctx, ois.OrgInvitationQuery, ois, ois.inters, v) + return scanWithInterceptors[*OrgInvitationQuery, *OrgInvitationSelect](ctx, _s.OrgInvitationQuery, _s, _s.inters, v) } -func (ois *OrgInvitationSelect) sqlScan(ctx context.Context, root *OrgInvitationQuery, v any) error { +func (_s *OrgInvitationSelect) sqlScan(ctx context.Context, root *OrgInvitationQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ois.fns)) - for _, fn := range ois.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*ois.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -718,7 +718,7 @@ func (ois *OrgInvitationSelect) sqlScan(ctx context.Context, root *OrgInvitation } rows := &sql.Rows{} query, args := selector.Query() - if err := ois.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -726,7 +726,7 @@ func (ois *OrgInvitationSelect) sqlScan(ctx context.Context, root *OrgInvitation } // Modify adds a query modifier for attaching custom logic to queries. -func (ois *OrgInvitationSelect) Modify(modifiers ...func(s *sql.Selector)) *OrgInvitationSelect { - ois.modifiers = append(ois.modifiers, modifiers...) - return ois +func (_s *OrgInvitationSelect) Modify(modifiers ...func(s *sql.Selector)) *OrgInvitationSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/orginvitation_update.go b/app/controlplane/pkg/data/ent/orginvitation_update.go index 6d8c3dd7d..5bee3b482 100644 --- a/app/controlplane/pkg/data/ent/orginvitation_update.go +++ b/app/controlplane/pkg/data/ent/orginvitation_update.go @@ -29,148 +29,148 @@ type OrgInvitationUpdate struct { } // Where appends a list predicates to the OrgInvitationUpdate builder. -func (oiu *OrgInvitationUpdate) Where(ps ...predicate.OrgInvitation) *OrgInvitationUpdate { - oiu.mutation.Where(ps...) - return oiu +func (_u *OrgInvitationUpdate) Where(ps ...predicate.OrgInvitation) *OrgInvitationUpdate { + _u.mutation.Where(ps...) + return _u } // SetStatus sets the "status" field. -func (oiu *OrgInvitationUpdate) SetStatus(bis biz.OrgInvitationStatus) *OrgInvitationUpdate { - oiu.mutation.SetStatus(bis) - return oiu +func (_u *OrgInvitationUpdate) SetStatus(v biz.OrgInvitationStatus) *OrgInvitationUpdate { + _u.mutation.SetStatus(v) + return _u } // SetNillableStatus sets the "status" field if the given value is not nil. -func (oiu *OrgInvitationUpdate) SetNillableStatus(bis *biz.OrgInvitationStatus) *OrgInvitationUpdate { - if bis != nil { - oiu.SetStatus(*bis) +func (_u *OrgInvitationUpdate) SetNillableStatus(v *biz.OrgInvitationStatus) *OrgInvitationUpdate { + if v != nil { + _u.SetStatus(*v) } - return oiu + return _u } // SetDeletedAt sets the "deleted_at" field. -func (oiu *OrgInvitationUpdate) SetDeletedAt(t time.Time) *OrgInvitationUpdate { - oiu.mutation.SetDeletedAt(t) - return oiu +func (_u *OrgInvitationUpdate) SetDeletedAt(v time.Time) *OrgInvitationUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (oiu *OrgInvitationUpdate) SetNillableDeletedAt(t *time.Time) *OrgInvitationUpdate { - if t != nil { - oiu.SetDeletedAt(*t) +func (_u *OrgInvitationUpdate) SetNillableDeletedAt(v *time.Time) *OrgInvitationUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return oiu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (oiu *OrgInvitationUpdate) ClearDeletedAt() *OrgInvitationUpdate { - oiu.mutation.ClearDeletedAt() - return oiu +func (_u *OrgInvitationUpdate) ClearDeletedAt() *OrgInvitationUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetOrganizationID sets the "organization_id" field. 
-func (oiu *OrgInvitationUpdate) SetOrganizationID(u uuid.UUID) *OrgInvitationUpdate { - oiu.mutation.SetOrganizationID(u) - return oiu +func (_u *OrgInvitationUpdate) SetOrganizationID(v uuid.UUID) *OrgInvitationUpdate { + _u.mutation.SetOrganizationID(v) + return _u } // SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. -func (oiu *OrgInvitationUpdate) SetNillableOrganizationID(u *uuid.UUID) *OrgInvitationUpdate { - if u != nil { - oiu.SetOrganizationID(*u) +func (_u *OrgInvitationUpdate) SetNillableOrganizationID(v *uuid.UUID) *OrgInvitationUpdate { + if v != nil { + _u.SetOrganizationID(*v) } - return oiu + return _u } // SetSenderID sets the "sender_id" field. -func (oiu *OrgInvitationUpdate) SetSenderID(u uuid.UUID) *OrgInvitationUpdate { - oiu.mutation.SetSenderID(u) - return oiu +func (_u *OrgInvitationUpdate) SetSenderID(v uuid.UUID) *OrgInvitationUpdate { + _u.mutation.SetSenderID(v) + return _u } // SetNillableSenderID sets the "sender_id" field if the given value is not nil. -func (oiu *OrgInvitationUpdate) SetNillableSenderID(u *uuid.UUID) *OrgInvitationUpdate { - if u != nil { - oiu.SetSenderID(*u) +func (_u *OrgInvitationUpdate) SetNillableSenderID(v *uuid.UUID) *OrgInvitationUpdate { + if v != nil { + _u.SetSenderID(*v) } - return oiu + return _u } // SetRole sets the "role" field. -func (oiu *OrgInvitationUpdate) SetRole(a authz.Role) *OrgInvitationUpdate { - oiu.mutation.SetRole(a) - return oiu +func (_u *OrgInvitationUpdate) SetRole(v authz.Role) *OrgInvitationUpdate { + _u.mutation.SetRole(v) + return _u } // SetNillableRole sets the "role" field if the given value is not nil. -func (oiu *OrgInvitationUpdate) SetNillableRole(a *authz.Role) *OrgInvitationUpdate { - if a != nil { - oiu.SetRole(*a) +func (_u *OrgInvitationUpdate) SetNillableRole(v *authz.Role) *OrgInvitationUpdate { + if v != nil { + _u.SetRole(*v) } - return oiu + return _u } // ClearRole clears the value of the "role" field. -func (oiu *OrgInvitationUpdate) ClearRole() *OrgInvitationUpdate { - oiu.mutation.ClearRole() - return oiu +func (_u *OrgInvitationUpdate) ClearRole() *OrgInvitationUpdate { + _u.mutation.ClearRole() + return _u } // SetContext sets the "context" field. -func (oiu *OrgInvitationUpdate) SetContext(bic biz.OrgInvitationContext) *OrgInvitationUpdate { - oiu.mutation.SetContext(bic) - return oiu +func (_u *OrgInvitationUpdate) SetContext(v biz.OrgInvitationContext) *OrgInvitationUpdate { + _u.mutation.SetContext(v) + return _u } // SetNillableContext sets the "context" field if the given value is not nil. -func (oiu *OrgInvitationUpdate) SetNillableContext(bic *biz.OrgInvitationContext) *OrgInvitationUpdate { - if bic != nil { - oiu.SetContext(*bic) +func (_u *OrgInvitationUpdate) SetNillableContext(v *biz.OrgInvitationContext) *OrgInvitationUpdate { + if v != nil { + _u.SetContext(*v) } - return oiu + return _u } // ClearContext clears the value of the "context" field. -func (oiu *OrgInvitationUpdate) ClearContext() *OrgInvitationUpdate { - oiu.mutation.ClearContext() - return oiu +func (_u *OrgInvitationUpdate) ClearContext() *OrgInvitationUpdate { + _u.mutation.ClearContext() + return _u } // SetOrganization sets the "organization" edge to the Organization entity. 
-func (oiu *OrgInvitationUpdate) SetOrganization(o *Organization) *OrgInvitationUpdate { - return oiu.SetOrganizationID(o.ID) +func (_u *OrgInvitationUpdate) SetOrganization(v *Organization) *OrgInvitationUpdate { + return _u.SetOrganizationID(v.ID) } // SetSender sets the "sender" edge to the User entity. -func (oiu *OrgInvitationUpdate) SetSender(u *User) *OrgInvitationUpdate { - return oiu.SetSenderID(u.ID) +func (_u *OrgInvitationUpdate) SetSender(v *User) *OrgInvitationUpdate { + return _u.SetSenderID(v.ID) } // Mutation returns the OrgInvitationMutation object of the builder. -func (oiu *OrgInvitationUpdate) Mutation() *OrgInvitationMutation { - return oiu.mutation +func (_u *OrgInvitationUpdate) Mutation() *OrgInvitationMutation { + return _u.mutation } // ClearOrganization clears the "organization" edge to the Organization entity. -func (oiu *OrgInvitationUpdate) ClearOrganization() *OrgInvitationUpdate { - oiu.mutation.ClearOrganization() - return oiu +func (_u *OrgInvitationUpdate) ClearOrganization() *OrgInvitationUpdate { + _u.mutation.ClearOrganization() + return _u } // ClearSender clears the "sender" edge to the User entity. -func (oiu *OrgInvitationUpdate) ClearSender() *OrgInvitationUpdate { - oiu.mutation.ClearSender() - return oiu +func (_u *OrgInvitationUpdate) ClearSender() *OrgInvitationUpdate { + _u.mutation.ClearSender() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. -func (oiu *OrgInvitationUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, oiu.sqlSave, oiu.mutation, oiu.hooks) +func (_u *OrgInvitationUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (oiu *OrgInvitationUpdate) SaveX(ctx context.Context) int { - affected, err := oiu.Save(ctx) +func (_u *OrgInvitationUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -178,79 +178,79 @@ func (oiu *OrgInvitationUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (oiu *OrgInvitationUpdate) Exec(ctx context.Context) error { - _, err := oiu.Save(ctx) +func (_u *OrgInvitationUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (oiu *OrgInvitationUpdate) ExecX(ctx context.Context) { - if err := oiu.Exec(ctx); err != nil { +func (_u *OrgInvitationUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. 
-func (oiu *OrgInvitationUpdate) check() error { - if v, ok := oiu.mutation.Status(); ok { +func (_u *OrgInvitationUpdate) check() error { + if v, ok := _u.mutation.Status(); ok { if err := orginvitation.StatusValidator(v); err != nil { return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "OrgInvitation.status": %w`, err)} } } - if v, ok := oiu.mutation.Role(); ok { + if v, ok := _u.mutation.Role(); ok { if err := orginvitation.RoleValidator(v); err != nil { return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "OrgInvitation.role": %w`, err)} } } - if oiu.mutation.OrganizationCleared() && len(oiu.mutation.OrganizationIDs()) > 0 { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "OrgInvitation.organization"`) } - if oiu.mutation.SenderCleared() && len(oiu.mutation.SenderIDs()) > 0 { + if _u.mutation.SenderCleared() && len(_u.mutation.SenderIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "OrgInvitation.sender"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (oiu *OrgInvitationUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *OrgInvitationUpdate { - oiu.modifiers = append(oiu.modifiers, modifiers...) - return oiu +func (_u *OrgInvitationUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *OrgInvitationUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (oiu *OrgInvitationUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := oiu.check(); err != nil { - return n, err +func (_u *OrgInvitationUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(orginvitation.Table, orginvitation.Columns, sqlgraph.NewFieldSpec(orginvitation.FieldID, field.TypeUUID)) - if ps := oiu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := oiu.mutation.Status(); ok { + if value, ok := _u.mutation.Status(); ok { _spec.SetField(orginvitation.FieldStatus, field.TypeEnum, value) } - if value, ok := oiu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(orginvitation.FieldDeletedAt, field.TypeTime, value) } - if oiu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(orginvitation.FieldDeletedAt, field.TypeTime) } - if value, ok := oiu.mutation.Role(); ok { + if value, ok := _u.mutation.Role(); ok { _spec.SetField(orginvitation.FieldRole, field.TypeEnum, value) } - if oiu.mutation.RoleCleared() { + if _u.mutation.RoleCleared() { _spec.ClearField(orginvitation.FieldRole, field.TypeEnum) } - if value, ok := oiu.mutation.Context(); ok { + if value, ok := _u.mutation.Context(); ok { _spec.SetField(orginvitation.FieldContext, field.TypeJSON, value) } - if oiu.mutation.ContextCleared() { + if _u.mutation.ContextCleared() { _spec.ClearField(orginvitation.FieldContext, field.TypeJSON) } - if oiu.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -263,7 +263,7 @@ func (oiu *OrgInvitationUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := oiu.mutation.OrganizationIDs(); 
len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -279,7 +279,7 @@ func (oiu *OrgInvitationUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if oiu.mutation.SenderCleared() { + if _u.mutation.SenderCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -292,7 +292,7 @@ func (oiu *OrgInvitationUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := oiu.mutation.SenderIDs(); len(nodes) > 0 { + if nodes := _u.mutation.SenderIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -308,8 +308,8 @@ func (oiu *OrgInvitationUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(oiu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, oiu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{orginvitation.Label} } else if sqlgraph.IsConstraintError(err) { @@ -317,8 +317,8 @@ func (oiu *OrgInvitationUpdate) sqlSave(ctx context.Context) (n int, err error) } return 0, err } - oiu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // OrgInvitationUpdateOne is the builder for updating a single OrgInvitation entity. @@ -331,155 +331,155 @@ type OrgInvitationUpdateOne struct { } // SetStatus sets the "status" field. -func (oiuo *OrgInvitationUpdateOne) SetStatus(bis biz.OrgInvitationStatus) *OrgInvitationUpdateOne { - oiuo.mutation.SetStatus(bis) - return oiuo +func (_u *OrgInvitationUpdateOne) SetStatus(v biz.OrgInvitationStatus) *OrgInvitationUpdateOne { + _u.mutation.SetStatus(v) + return _u } // SetNillableStatus sets the "status" field if the given value is not nil. -func (oiuo *OrgInvitationUpdateOne) SetNillableStatus(bis *biz.OrgInvitationStatus) *OrgInvitationUpdateOne { - if bis != nil { - oiuo.SetStatus(*bis) +func (_u *OrgInvitationUpdateOne) SetNillableStatus(v *biz.OrgInvitationStatus) *OrgInvitationUpdateOne { + if v != nil { + _u.SetStatus(*v) } - return oiuo + return _u } // SetDeletedAt sets the "deleted_at" field. -func (oiuo *OrgInvitationUpdateOne) SetDeletedAt(t time.Time) *OrgInvitationUpdateOne { - oiuo.mutation.SetDeletedAt(t) - return oiuo +func (_u *OrgInvitationUpdateOne) SetDeletedAt(v time.Time) *OrgInvitationUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (oiuo *OrgInvitationUpdateOne) SetNillableDeletedAt(t *time.Time) *OrgInvitationUpdateOne { - if t != nil { - oiuo.SetDeletedAt(*t) +func (_u *OrgInvitationUpdateOne) SetNillableDeletedAt(v *time.Time) *OrgInvitationUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return oiuo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (oiuo *OrgInvitationUpdateOne) ClearDeletedAt() *OrgInvitationUpdateOne { - oiuo.mutation.ClearDeletedAt() - return oiuo +func (_u *OrgInvitationUpdateOne) ClearDeletedAt() *OrgInvitationUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetOrganizationID sets the "organization_id" field. 
-func (oiuo *OrgInvitationUpdateOne) SetOrganizationID(u uuid.UUID) *OrgInvitationUpdateOne { - oiuo.mutation.SetOrganizationID(u) - return oiuo +func (_u *OrgInvitationUpdateOne) SetOrganizationID(v uuid.UUID) *OrgInvitationUpdateOne { + _u.mutation.SetOrganizationID(v) + return _u } // SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. -func (oiuo *OrgInvitationUpdateOne) SetNillableOrganizationID(u *uuid.UUID) *OrgInvitationUpdateOne { - if u != nil { - oiuo.SetOrganizationID(*u) +func (_u *OrgInvitationUpdateOne) SetNillableOrganizationID(v *uuid.UUID) *OrgInvitationUpdateOne { + if v != nil { + _u.SetOrganizationID(*v) } - return oiuo + return _u } // SetSenderID sets the "sender_id" field. -func (oiuo *OrgInvitationUpdateOne) SetSenderID(u uuid.UUID) *OrgInvitationUpdateOne { - oiuo.mutation.SetSenderID(u) - return oiuo +func (_u *OrgInvitationUpdateOne) SetSenderID(v uuid.UUID) *OrgInvitationUpdateOne { + _u.mutation.SetSenderID(v) + return _u } // SetNillableSenderID sets the "sender_id" field if the given value is not nil. -func (oiuo *OrgInvitationUpdateOne) SetNillableSenderID(u *uuid.UUID) *OrgInvitationUpdateOne { - if u != nil { - oiuo.SetSenderID(*u) +func (_u *OrgInvitationUpdateOne) SetNillableSenderID(v *uuid.UUID) *OrgInvitationUpdateOne { + if v != nil { + _u.SetSenderID(*v) } - return oiuo + return _u } // SetRole sets the "role" field. -func (oiuo *OrgInvitationUpdateOne) SetRole(a authz.Role) *OrgInvitationUpdateOne { - oiuo.mutation.SetRole(a) - return oiuo +func (_u *OrgInvitationUpdateOne) SetRole(v authz.Role) *OrgInvitationUpdateOne { + _u.mutation.SetRole(v) + return _u } // SetNillableRole sets the "role" field if the given value is not nil. -func (oiuo *OrgInvitationUpdateOne) SetNillableRole(a *authz.Role) *OrgInvitationUpdateOne { - if a != nil { - oiuo.SetRole(*a) +func (_u *OrgInvitationUpdateOne) SetNillableRole(v *authz.Role) *OrgInvitationUpdateOne { + if v != nil { + _u.SetRole(*v) } - return oiuo + return _u } // ClearRole clears the value of the "role" field. -func (oiuo *OrgInvitationUpdateOne) ClearRole() *OrgInvitationUpdateOne { - oiuo.mutation.ClearRole() - return oiuo +func (_u *OrgInvitationUpdateOne) ClearRole() *OrgInvitationUpdateOne { + _u.mutation.ClearRole() + return _u } // SetContext sets the "context" field. -func (oiuo *OrgInvitationUpdateOne) SetContext(bic biz.OrgInvitationContext) *OrgInvitationUpdateOne { - oiuo.mutation.SetContext(bic) - return oiuo +func (_u *OrgInvitationUpdateOne) SetContext(v biz.OrgInvitationContext) *OrgInvitationUpdateOne { + _u.mutation.SetContext(v) + return _u } // SetNillableContext sets the "context" field if the given value is not nil. -func (oiuo *OrgInvitationUpdateOne) SetNillableContext(bic *biz.OrgInvitationContext) *OrgInvitationUpdateOne { - if bic != nil { - oiuo.SetContext(*bic) +func (_u *OrgInvitationUpdateOne) SetNillableContext(v *biz.OrgInvitationContext) *OrgInvitationUpdateOne { + if v != nil { + _u.SetContext(*v) } - return oiuo + return _u } // ClearContext clears the value of the "context" field. -func (oiuo *OrgInvitationUpdateOne) ClearContext() *OrgInvitationUpdateOne { - oiuo.mutation.ClearContext() - return oiuo +func (_u *OrgInvitationUpdateOne) ClearContext() *OrgInvitationUpdateOne { + _u.mutation.ClearContext() + return _u } // SetOrganization sets the "organization" edge to the Organization entity. 
-func (oiuo *OrgInvitationUpdateOne) SetOrganization(o *Organization) *OrgInvitationUpdateOne { - return oiuo.SetOrganizationID(o.ID) +func (_u *OrgInvitationUpdateOne) SetOrganization(v *Organization) *OrgInvitationUpdateOne { + return _u.SetOrganizationID(v.ID) } // SetSender sets the "sender" edge to the User entity. -func (oiuo *OrgInvitationUpdateOne) SetSender(u *User) *OrgInvitationUpdateOne { - return oiuo.SetSenderID(u.ID) +func (_u *OrgInvitationUpdateOne) SetSender(v *User) *OrgInvitationUpdateOne { + return _u.SetSenderID(v.ID) } // Mutation returns the OrgInvitationMutation object of the builder. -func (oiuo *OrgInvitationUpdateOne) Mutation() *OrgInvitationMutation { - return oiuo.mutation +func (_u *OrgInvitationUpdateOne) Mutation() *OrgInvitationMutation { + return _u.mutation } // ClearOrganization clears the "organization" edge to the Organization entity. -func (oiuo *OrgInvitationUpdateOne) ClearOrganization() *OrgInvitationUpdateOne { - oiuo.mutation.ClearOrganization() - return oiuo +func (_u *OrgInvitationUpdateOne) ClearOrganization() *OrgInvitationUpdateOne { + _u.mutation.ClearOrganization() + return _u } // ClearSender clears the "sender" edge to the User entity. -func (oiuo *OrgInvitationUpdateOne) ClearSender() *OrgInvitationUpdateOne { - oiuo.mutation.ClearSender() - return oiuo +func (_u *OrgInvitationUpdateOne) ClearSender() *OrgInvitationUpdateOne { + _u.mutation.ClearSender() + return _u } // Where appends a list predicates to the OrgInvitationUpdate builder. -func (oiuo *OrgInvitationUpdateOne) Where(ps ...predicate.OrgInvitation) *OrgInvitationUpdateOne { - oiuo.mutation.Where(ps...) - return oiuo +func (_u *OrgInvitationUpdateOne) Where(ps ...predicate.OrgInvitation) *OrgInvitationUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (oiuo *OrgInvitationUpdateOne) Select(field string, fields ...string) *OrgInvitationUpdateOne { - oiuo.fields = append([]string{field}, fields...) - return oiuo +func (_u *OrgInvitationUpdateOne) Select(field string, fields ...string) *OrgInvitationUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated OrgInvitation entity. -func (oiuo *OrgInvitationUpdateOne) Save(ctx context.Context) (*OrgInvitation, error) { - return withHooks(ctx, oiuo.sqlSave, oiuo.mutation, oiuo.hooks) +func (_u *OrgInvitationUpdateOne) Save(ctx context.Context) (*OrgInvitation, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (oiuo *OrgInvitationUpdateOne) SaveX(ctx context.Context) *OrgInvitation { - node, err := oiuo.Save(ctx) +func (_u *OrgInvitationUpdateOne) SaveX(ctx context.Context) *OrgInvitation { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -487,56 +487,56 @@ func (oiuo *OrgInvitationUpdateOne) SaveX(ctx context.Context) *OrgInvitation { } // Exec executes the query on the entity. -func (oiuo *OrgInvitationUpdateOne) Exec(ctx context.Context) error { - _, err := oiuo.Save(ctx) +func (_u *OrgInvitationUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. 
-func (oiuo *OrgInvitationUpdateOne) ExecX(ctx context.Context) { - if err := oiuo.Exec(ctx); err != nil { +func (_u *OrgInvitationUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (oiuo *OrgInvitationUpdateOne) check() error { - if v, ok := oiuo.mutation.Status(); ok { +func (_u *OrgInvitationUpdateOne) check() error { + if v, ok := _u.mutation.Status(); ok { if err := orginvitation.StatusValidator(v); err != nil { return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "OrgInvitation.status": %w`, err)} } } - if v, ok := oiuo.mutation.Role(); ok { + if v, ok := _u.mutation.Role(); ok { if err := orginvitation.RoleValidator(v); err != nil { return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "OrgInvitation.role": %w`, err)} } } - if oiuo.mutation.OrganizationCleared() && len(oiuo.mutation.OrganizationIDs()) > 0 { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "OrgInvitation.organization"`) } - if oiuo.mutation.SenderCleared() && len(oiuo.mutation.SenderIDs()) > 0 { + if _u.mutation.SenderCleared() && len(_u.mutation.SenderIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "OrgInvitation.sender"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (oiuo *OrgInvitationUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *OrgInvitationUpdateOne { - oiuo.modifiers = append(oiuo.modifiers, modifiers...) - return oiuo +func (_u *OrgInvitationUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *OrgInvitationUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (oiuo *OrgInvitationUpdateOne) sqlSave(ctx context.Context) (_node *OrgInvitation, err error) { - if err := oiuo.check(); err != nil { +func (_u *OrgInvitationUpdateOne) sqlSave(ctx context.Context) (_node *OrgInvitation, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(orginvitation.Table, orginvitation.Columns, sqlgraph.NewFieldSpec(orginvitation.FieldID, field.TypeUUID)) - id, ok := oiuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "OrgInvitation.id" for update`)} } _spec.Node.ID.Value = id - if fields := oiuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, orginvitation.FieldID) for _, f := range fields { @@ -548,35 +548,35 @@ func (oiuo *OrgInvitationUpdateOne) sqlSave(ctx context.Context) (_node *OrgInvi } } } - if ps := oiuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := oiuo.mutation.Status(); ok { + if value, ok := _u.mutation.Status(); ok { _spec.SetField(orginvitation.FieldStatus, field.TypeEnum, value) } - if value, ok := oiuo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(orginvitation.FieldDeletedAt, field.TypeTime, value) } - if oiuo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(orginvitation.FieldDeletedAt, field.TypeTime) } - if value, ok := oiuo.mutation.Role(); ok { + if value, ok := _u.mutation.Role(); ok { _spec.SetField(orginvitation.FieldRole, field.TypeEnum, value) } - if oiuo.mutation.RoleCleared() { + if _u.mutation.RoleCleared() { _spec.ClearField(orginvitation.FieldRole, field.TypeEnum) } - if value, ok := oiuo.mutation.Context(); ok { + if value, ok := _u.mutation.Context(); ok { _spec.SetField(orginvitation.FieldContext, field.TypeJSON, value) } - if oiuo.mutation.ContextCleared() { + if _u.mutation.ContextCleared() { _spec.ClearField(orginvitation.FieldContext, field.TypeJSON) } - if oiuo.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -589,7 +589,7 @@ func (oiuo *OrgInvitationUpdateOne) sqlSave(ctx context.Context) (_node *OrgInvi } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := oiuo.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -605,7 +605,7 @@ func (oiuo *OrgInvitationUpdateOne) sqlSave(ctx context.Context) (_node *OrgInvi } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if oiuo.mutation.SenderCleared() { + if _u.mutation.SenderCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -618,7 +618,7 @@ func (oiuo *OrgInvitationUpdateOne) sqlSave(ctx context.Context) (_node *OrgInvi } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := oiuo.mutation.SenderIDs(); len(nodes) > 0 { + if nodes := _u.mutation.SenderIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -634,11 +634,11 @@ func (oiuo *OrgInvitationUpdateOne) sqlSave(ctx context.Context) (_node *OrgInvi } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(oiuo.modifiers...) 
- _node = &OrgInvitation{config: oiuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &OrgInvitation{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, oiuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{orginvitation.Label} } else if sqlgraph.IsConstraintError(err) { @@ -646,6 +646,6 @@ func (oiuo *OrgInvitationUpdateOne) sqlSave(ctx context.Context) (_node *OrgInvi } return nil, err } - oiuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/project.go b/app/controlplane/pkg/data/ent/project.go index 2e1474596..9563ef6f7 100644 --- a/app/controlplane/pkg/data/ent/project.go +++ b/app/controlplane/pkg/data/ent/project.go @@ -99,7 +99,7 @@ func (*Project) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Project fields. -func (pr *Project) assignValues(columns []string, values []any) error { +func (_m *Project) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -109,46 +109,46 @@ func (pr *Project) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - pr.ID = *value + _m.ID = *value } case project.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - pr.Name = value.String + _m.Name = value.String } case project.FieldDescription: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field description", values[i]) } else if value.Valid { - pr.Description = value.String + _m.Description = value.String } case project.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - pr.CreatedAt = value.Time + _m.CreatedAt = value.Time } case project.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - pr.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case project.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - pr.DeletedAt = value.Time + _m.DeletedAt = value.Time } case project.FieldOrganizationID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field organization_id", values[i]) } else if value != nil { - pr.OrganizationID = *value + _m.OrganizationID = *value } default: - pr.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -156,65 +156,65 @@ func (pr *Project) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the Project. // This includes values selected through modifiers, order, etc. 
-func (pr *Project) Value(name string) (ent.Value, error) { - return pr.selectValues.Get(name) +func (_m *Project) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryOrganization queries the "organization" edge of the Project entity. -func (pr *Project) QueryOrganization() *OrganizationQuery { - return NewProjectClient(pr.config).QueryOrganization(pr) +func (_m *Project) QueryOrganization() *OrganizationQuery { + return NewProjectClient(_m.config).QueryOrganization(_m) } // QueryWorkflows queries the "workflows" edge of the Project entity. -func (pr *Project) QueryWorkflows() *WorkflowQuery { - return NewProjectClient(pr.config).QueryWorkflows(pr) +func (_m *Project) QueryWorkflows() *WorkflowQuery { + return NewProjectClient(_m.config).QueryWorkflows(_m) } // QueryVersions queries the "versions" edge of the Project entity. -func (pr *Project) QueryVersions() *ProjectVersionQuery { - return NewProjectClient(pr.config).QueryVersions(pr) +func (_m *Project) QueryVersions() *ProjectVersionQuery { + return NewProjectClient(_m.config).QueryVersions(_m) } // Update returns a builder for updating this Project. // Note that you need to call Project.Unwrap() before calling this method if this Project // was returned from a transaction, and the transaction was committed or rolled back. -func (pr *Project) Update() *ProjectUpdateOne { - return NewProjectClient(pr.config).UpdateOne(pr) +func (_m *Project) Update() *ProjectUpdateOne { + return NewProjectClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the Project entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (pr *Project) Unwrap() *Project { - _tx, ok := pr.config.driver.(*txDriver) +func (_m *Project) Unwrap() *Project { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: Project is not a transactional entity") } - pr.config.driver = _tx.drv - return pr + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. 
-func (pr *Project) String() string { +func (_m *Project) String() string { var builder strings.Builder builder.WriteString("Project(") - builder.WriteString(fmt.Sprintf("id=%v, ", pr.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("name=") - builder.WriteString(pr.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("description=") - builder.WriteString(pr.Description) + builder.WriteString(_m.Description) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(pr.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(pr.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(pr.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("organization_id=") - builder.WriteString(fmt.Sprintf("%v", pr.OrganizationID)) + builder.WriteString(fmt.Sprintf("%v", _m.OrganizationID)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/project_create.go b/app/controlplane/pkg/data/ent/project_create.go index 8bf5da8b1..1b343c97b 100644 --- a/app/controlplane/pkg/data/ent/project_create.go +++ b/app/controlplane/pkg/data/ent/project_create.go @@ -28,136 +28,136 @@ type ProjectCreate struct { } // SetName sets the "name" field. -func (pc *ProjectCreate) SetName(s string) *ProjectCreate { - pc.mutation.SetName(s) - return pc +func (_c *ProjectCreate) SetName(v string) *ProjectCreate { + _c.mutation.SetName(v) + return _c } // SetDescription sets the "description" field. -func (pc *ProjectCreate) SetDescription(s string) *ProjectCreate { - pc.mutation.SetDescription(s) - return pc +func (_c *ProjectCreate) SetDescription(v string) *ProjectCreate { + _c.mutation.SetDescription(v) + return _c } // SetNillableDescription sets the "description" field if the given value is not nil. -func (pc *ProjectCreate) SetNillableDescription(s *string) *ProjectCreate { - if s != nil { - pc.SetDescription(*s) +func (_c *ProjectCreate) SetNillableDescription(v *string) *ProjectCreate { + if v != nil { + _c.SetDescription(*v) } - return pc + return _c } // SetCreatedAt sets the "created_at" field. -func (pc *ProjectCreate) SetCreatedAt(t time.Time) *ProjectCreate { - pc.mutation.SetCreatedAt(t) - return pc +func (_c *ProjectCreate) SetCreatedAt(v time.Time) *ProjectCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (pc *ProjectCreate) SetNillableCreatedAt(t *time.Time) *ProjectCreate { - if t != nil { - pc.SetCreatedAt(*t) +func (_c *ProjectCreate) SetNillableCreatedAt(v *time.Time) *ProjectCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return pc + return _c } // SetUpdatedAt sets the "updated_at" field. -func (pc *ProjectCreate) SetUpdatedAt(t time.Time) *ProjectCreate { - pc.mutation.SetUpdatedAt(t) - return pc +func (_c *ProjectCreate) SetUpdatedAt(v time.Time) *ProjectCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. 
-func (pc *ProjectCreate) SetNillableUpdatedAt(t *time.Time) *ProjectCreate { - if t != nil { - pc.SetUpdatedAt(*t) +func (_c *ProjectCreate) SetNillableUpdatedAt(v *time.Time) *ProjectCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return pc + return _c } // SetDeletedAt sets the "deleted_at" field. -func (pc *ProjectCreate) SetDeletedAt(t time.Time) *ProjectCreate { - pc.mutation.SetDeletedAt(t) - return pc +func (_c *ProjectCreate) SetDeletedAt(v time.Time) *ProjectCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (pc *ProjectCreate) SetNillableDeletedAt(t *time.Time) *ProjectCreate { - if t != nil { - pc.SetDeletedAt(*t) +func (_c *ProjectCreate) SetNillableDeletedAt(v *time.Time) *ProjectCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return pc + return _c } // SetOrganizationID sets the "organization_id" field. -func (pc *ProjectCreate) SetOrganizationID(u uuid.UUID) *ProjectCreate { - pc.mutation.SetOrganizationID(u) - return pc +func (_c *ProjectCreate) SetOrganizationID(v uuid.UUID) *ProjectCreate { + _c.mutation.SetOrganizationID(v) + return _c } // SetID sets the "id" field. -func (pc *ProjectCreate) SetID(u uuid.UUID) *ProjectCreate { - pc.mutation.SetID(u) - return pc +func (_c *ProjectCreate) SetID(v uuid.UUID) *ProjectCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (pc *ProjectCreate) SetNillableID(u *uuid.UUID) *ProjectCreate { - if u != nil { - pc.SetID(*u) +func (_c *ProjectCreate) SetNillableID(v *uuid.UUID) *ProjectCreate { + if v != nil { + _c.SetID(*v) } - return pc + return _c } // SetOrganization sets the "organization" edge to the Organization entity. -func (pc *ProjectCreate) SetOrganization(o *Organization) *ProjectCreate { - return pc.SetOrganizationID(o.ID) +func (_c *ProjectCreate) SetOrganization(v *Organization) *ProjectCreate { + return _c.SetOrganizationID(v.ID) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (pc *ProjectCreate) AddWorkflowIDs(ids ...uuid.UUID) *ProjectCreate { - pc.mutation.AddWorkflowIDs(ids...) - return pc +func (_c *ProjectCreate) AddWorkflowIDs(ids ...uuid.UUID) *ProjectCreate { + _c.mutation.AddWorkflowIDs(ids...) + return _c } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (pc *ProjectCreate) AddWorkflows(w ...*Workflow) *ProjectCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *ProjectCreate) AddWorkflows(v ...*Workflow) *ProjectCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pc.AddWorkflowIDs(ids...) + return _c.AddWorkflowIDs(ids...) } // AddVersionIDs adds the "versions" edge to the ProjectVersion entity by IDs. -func (pc *ProjectCreate) AddVersionIDs(ids ...uuid.UUID) *ProjectCreate { - pc.mutation.AddVersionIDs(ids...) - return pc +func (_c *ProjectCreate) AddVersionIDs(ids ...uuid.UUID) *ProjectCreate { + _c.mutation.AddVersionIDs(ids...) + return _c } // AddVersions adds the "versions" edges to the ProjectVersion entity. -func (pc *ProjectCreate) AddVersions(p ...*ProjectVersion) *ProjectCreate { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_c *ProjectCreate) AddVersions(v ...*ProjectVersion) *ProjectCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pc.AddVersionIDs(ids...) + return _c.AddVersionIDs(ids...) 
} // Mutation returns the ProjectMutation object of the builder. -func (pc *ProjectCreate) Mutation() *ProjectMutation { - return pc.mutation +func (_c *ProjectCreate) Mutation() *ProjectMutation { + return _c.mutation } // Save creates the Project in the database. -func (pc *ProjectCreate) Save(ctx context.Context) (*Project, error) { - pc.defaults() - return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks) +func (_c *ProjectCreate) Save(ctx context.Context) (*Project, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (pc *ProjectCreate) SaveX(ctx context.Context) *Project { - v, err := pc.Save(ctx) +func (_c *ProjectCreate) SaveX(ctx context.Context) *Project { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -165,65 +165,65 @@ func (pc *ProjectCreate) SaveX(ctx context.Context) *Project { } // Exec executes the query. -func (pc *ProjectCreate) Exec(ctx context.Context) error { - _, err := pc.Save(ctx) +func (_c *ProjectCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (pc *ProjectCreate) ExecX(ctx context.Context) { - if err := pc.Exec(ctx); err != nil { +func (_c *ProjectCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (pc *ProjectCreate) defaults() { - if _, ok := pc.mutation.CreatedAt(); !ok { +func (_c *ProjectCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := project.DefaultCreatedAt() - pc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := pc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := project.DefaultUpdatedAt() - pc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := pc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := project.DefaultID() - pc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (pc *ProjectCreate) check() error { - if _, ok := pc.mutation.Name(); !ok { +func (_c *ProjectCreate) check() error { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Project.name"`)} } - if v, ok := pc.mutation.Name(); ok { + if v, ok := _c.mutation.Name(); ok { if err := project.NameValidator(v); err != nil { return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Project.name": %w`, err)} } } - if _, ok := pc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Project.created_at"`)} } - if _, ok := pc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Project.updated_at"`)} } - if _, ok := pc.mutation.OrganizationID(); !ok { + if _, ok := _c.mutation.OrganizationID(); !ok { return &ValidationError{Name: "organization_id", err: errors.New(`ent: missing required field "Project.organization_id"`)} } - if len(pc.mutation.OrganizationIDs()) == 0 { + if len(_c.mutation.OrganizationIDs()) == 0 { return &ValidationError{Name: "organization", err: errors.New(`ent: missing required edge "Project.organization"`)} } return nil } -func (pc *ProjectCreate) sqlSave(ctx context.Context) (*Project, error) { - if err := pc.check(); err != nil { +func (_c *ProjectCreate) sqlSave(ctx context.Context) (*Project, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := pc.createSpec() - if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -236,42 +236,42 @@ func (pc *ProjectCreate) sqlSave(ctx context.Context) (*Project, error) { return nil, err } } - pc.mutation.id = &_node.ID - pc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (pc *ProjectCreate) createSpec() (*Project, *sqlgraph.CreateSpec) { +func (_c *ProjectCreate) createSpec() (*Project, *sqlgraph.CreateSpec) { var ( - _node = &Project{config: pc.config} + _node = &Project{config: _c.config} _spec = sqlgraph.NewCreateSpec(project.Table, sqlgraph.NewFieldSpec(project.FieldID, field.TypeUUID)) ) - _spec.OnConflict = pc.conflict - if id, ok := pc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := pc.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { _spec.SetField(project.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := pc.mutation.Description(); ok { + if value, ok := _c.mutation.Description(); ok { _spec.SetField(project.FieldDescription, field.TypeString, value) _node.Description = value } - if value, ok := pc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(project.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := pc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(project.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := pc.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(project.FieldDeletedAt, field.TypeTime, 
value) _node.DeletedAt = value } - if nodes := pc.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -288,7 +288,7 @@ func (pc *ProjectCreate) createSpec() (*Project, *sqlgraph.CreateSpec) { _node.OrganizationID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := pc.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -304,7 +304,7 @@ func (pc *ProjectCreate) createSpec() (*Project, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := pc.mutation.VersionsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.VersionsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -339,10 +339,10 @@ func (pc *ProjectCreate) createSpec() (*Project, *sqlgraph.CreateSpec) { // SetName(v+v). // }). // Exec(ctx) -func (pc *ProjectCreate) OnConflict(opts ...sql.ConflictOption) *ProjectUpsertOne { - pc.conflict = opts +func (_c *ProjectCreate) OnConflict(opts ...sql.ConflictOption) *ProjectUpsertOne { + _c.conflict = opts return &ProjectUpsertOne{ - create: pc, + create: _c, } } @@ -352,10 +352,10 @@ func (pc *ProjectCreate) OnConflict(opts ...sql.ConflictOption) *ProjectUpsertOn // client.Project.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (pc *ProjectCreate) OnConflictColumns(columns ...string) *ProjectUpsertOne { - pc.conflict = append(pc.conflict, sql.ConflictColumns(columns...)) +func (_c *ProjectCreate) OnConflictColumns(columns ...string) *ProjectUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &ProjectUpsertOne{ - create: pc, + create: _c, } } @@ -580,16 +580,16 @@ type ProjectCreateBulk struct { } // Save creates the Project entities in the database. -func (pcb *ProjectCreateBulk) Save(ctx context.Context) ([]*Project, error) { - if pcb.err != nil { - return nil, pcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(pcb.builders)) - nodes := make([]*Project, len(pcb.builders)) - mutators := make([]Mutator, len(pcb.builders)) - for i := range pcb.builders { +func (_c *ProjectCreateBulk) Save(ctx context.Context) ([]*Project, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Project, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := pcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*ProjectMutation) @@ -603,12 +603,12 @@ func (pcb *ProjectCreateBulk) Save(ctx context.Context) ([]*Project, error) { var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = pcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -628,7 +628,7 @@ func (pcb *ProjectCreateBulk) Save(ctx context.Context) ([]*Project, error) { }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, pcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -636,8 +636,8 @@ func (pcb *ProjectCreateBulk) Save(ctx context.Context) ([]*Project, error) { } // SaveX is like Save, but panics if an error occurs. -func (pcb *ProjectCreateBulk) SaveX(ctx context.Context) []*Project { - v, err := pcb.Save(ctx) +func (_c *ProjectCreateBulk) SaveX(ctx context.Context) []*Project { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -645,14 +645,14 @@ func (pcb *ProjectCreateBulk) SaveX(ctx context.Context) []*Project { } // Exec executes the query. -func (pcb *ProjectCreateBulk) Exec(ctx context.Context) error { - _, err := pcb.Save(ctx) +func (_c *ProjectCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (pcb *ProjectCreateBulk) ExecX(ctx context.Context) { - if err := pcb.Exec(ctx); err != nil { +func (_c *ProjectCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -672,10 +672,10 @@ func (pcb *ProjectCreateBulk) ExecX(ctx context.Context) { // SetName(v+v). // }). // Exec(ctx) -func (pcb *ProjectCreateBulk) OnConflict(opts ...sql.ConflictOption) *ProjectUpsertBulk { - pcb.conflict = opts +func (_c *ProjectCreateBulk) OnConflict(opts ...sql.ConflictOption) *ProjectUpsertBulk { + _c.conflict = opts return &ProjectUpsertBulk{ - create: pcb, + create: _c, } } @@ -685,10 +685,10 @@ func (pcb *ProjectCreateBulk) OnConflict(opts ...sql.ConflictOption) *ProjectUps // client.Project.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (pcb *ProjectCreateBulk) OnConflictColumns(columns ...string) *ProjectUpsertBulk { - pcb.conflict = append(pcb.conflict, sql.ConflictColumns(columns...)) +func (_c *ProjectCreateBulk) OnConflictColumns(columns ...string) *ProjectUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &ProjectUpsertBulk{ - create: pcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/project_delete.go b/app/controlplane/pkg/data/ent/project_delete.go index a9a9f147e..893449040 100644 --- a/app/controlplane/pkg/data/ent/project_delete.go +++ b/app/controlplane/pkg/data/ent/project_delete.go @@ -20,56 +20,56 @@ type ProjectDelete struct { } // Where appends a list predicates to the ProjectDelete builder. -func (pd *ProjectDelete) Where(ps ...predicate.Project) *ProjectDelete { - pd.mutation.Where(ps...) - return pd +func (_d *ProjectDelete) Where(ps ...predicate.Project) *ProjectDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (pd *ProjectDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks) +func (_d *ProjectDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. 
-func (pd *ProjectDelete) ExecX(ctx context.Context) int { - n, err := pd.Exec(ctx) +func (_d *ProjectDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (pd *ProjectDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *ProjectDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(project.Table, sqlgraph.NewFieldSpec(project.FieldID, field.TypeUUID)) - if ps := pd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - pd.mutation.done = true + _d.mutation.done = true return affected, err } // ProjectDeleteOne is the builder for deleting a single Project entity. type ProjectDeleteOne struct { - pd *ProjectDelete + _d *ProjectDelete } // Where appends a list predicates to the ProjectDelete builder. -func (pdo *ProjectDeleteOne) Where(ps ...predicate.Project) *ProjectDeleteOne { - pdo.pd.mutation.Where(ps...) - return pdo +func (_d *ProjectDeleteOne) Where(ps ...predicate.Project) *ProjectDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (pdo *ProjectDeleteOne) Exec(ctx context.Context) error { - n, err := pdo.pd.Exec(ctx) +func (_d *ProjectDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (pdo *ProjectDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (pdo *ProjectDeleteOne) ExecX(ctx context.Context) { - if err := pdo.Exec(ctx); err != nil { +func (_d *ProjectDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/project_query.go b/app/controlplane/pkg/data/ent/project_query.go index 9c7dc370a..bc07b1a7e 100644 --- a/app/controlplane/pkg/data/ent/project_query.go +++ b/app/controlplane/pkg/data/ent/project_query.go @@ -38,44 +38,44 @@ type ProjectQuery struct { } // Where adds a new predicate for the ProjectQuery builder. -func (pq *ProjectQuery) Where(ps ...predicate.Project) *ProjectQuery { - pq.predicates = append(pq.predicates, ps...) - return pq +func (_q *ProjectQuery) Where(ps ...predicate.Project) *ProjectQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (pq *ProjectQuery) Limit(limit int) *ProjectQuery { - pq.ctx.Limit = &limit - return pq +func (_q *ProjectQuery) Limit(limit int) *ProjectQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (pq *ProjectQuery) Offset(offset int) *ProjectQuery { - pq.ctx.Offset = &offset - return pq +func (_q *ProjectQuery) Offset(offset int) *ProjectQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (pq *ProjectQuery) Unique(unique bool) *ProjectQuery { - pq.ctx.Unique = &unique - return pq +func (_q *ProjectQuery) Unique(unique bool) *ProjectQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. 
-func (pq *ProjectQuery) Order(o ...project.OrderOption) *ProjectQuery { - pq.order = append(pq.order, o...) - return pq +func (_q *ProjectQuery) Order(o ...project.OrderOption) *ProjectQuery { + _q.order = append(_q.order, o...) + return _q } // QueryOrganization chains the current query on the "organization" edge. -func (pq *ProjectQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: pq.config}).Query() +func (_q *ProjectQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := pq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := pq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -84,20 +84,20 @@ func (pq *ProjectQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, project.OrganizationTable, project.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryWorkflows chains the current query on the "workflows" edge. -func (pq *ProjectQuery) QueryWorkflows() *WorkflowQuery { - query := (&WorkflowClient{config: pq.config}).Query() +func (_q *ProjectQuery) QueryWorkflows() *WorkflowQuery { + query := (&WorkflowClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := pq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := pq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -106,20 +106,20 @@ func (pq *ProjectQuery) QueryWorkflows() *WorkflowQuery { sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, project.WorkflowsTable, project.WorkflowsColumn), ) - fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryVersions chains the current query on the "versions" edge. -func (pq *ProjectQuery) QueryVersions() *ProjectVersionQuery { - query := (&ProjectVersionClient{config: pq.config}).Query() +func (_q *ProjectQuery) QueryVersions() *ProjectVersionQuery { + query := (&ProjectVersionClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := pq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := pq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -128,7 +128,7 @@ func (pq *ProjectQuery) QueryVersions() *ProjectVersionQuery { sqlgraph.To(projectversion.Table, projectversion.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, project.VersionsTable, project.VersionsColumn), ) - fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -136,8 +136,8 @@ func (pq *ProjectQuery) QueryVersions() *ProjectVersionQuery { // First returns the first Project entity from the query. // Returns a *NotFoundError when no Project was found. 
-func (pq *ProjectQuery) First(ctx context.Context) (*Project, error) { - nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, ent.OpQueryFirst)) +func (_q *ProjectQuery) First(ctx context.Context) (*Project, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -148,8 +148,8 @@ func (pq *ProjectQuery) First(ctx context.Context) (*Project, error) { } // FirstX is like First, but panics if an error occurs. -func (pq *ProjectQuery) FirstX(ctx context.Context) *Project { - node, err := pq.First(ctx) +func (_q *ProjectQuery) FirstX(ctx context.Context) *Project { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -158,9 +158,9 @@ func (pq *ProjectQuery) FirstX(ctx context.Context) *Project { // FirstID returns the first Project ID from the query. // Returns a *NotFoundError when no Project ID was found. -func (pq *ProjectQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *ProjectQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -171,8 +171,8 @@ func (pq *ProjectQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { } // FirstIDX is like FirstID, but panics if an error occurs. -func (pq *ProjectQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := pq.FirstID(ctx) +func (_q *ProjectQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -182,8 +182,8 @@ func (pq *ProjectQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single Project entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one Project entity is found. // Returns a *NotFoundError when no Project entities are found. -func (pq *ProjectQuery) Only(ctx context.Context) (*Project, error) { - nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, ent.OpQueryOnly)) +func (_q *ProjectQuery) Only(ctx context.Context) (*Project, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -198,8 +198,8 @@ func (pq *ProjectQuery) Only(ctx context.Context) (*Project, error) { } // OnlyX is like Only, but panics if an error occurs. -func (pq *ProjectQuery) OnlyX(ctx context.Context) *Project { - node, err := pq.Only(ctx) +func (_q *ProjectQuery) OnlyX(ctx context.Context) *Project { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -209,9 +209,9 @@ func (pq *ProjectQuery) OnlyX(ctx context.Context) *Project { // OnlyID is like Only, but returns the only Project ID in the query. // Returns a *NotSingularError when more than one Project ID is found. // Returns a *NotFoundError when no entities are found. -func (pq *ProjectQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *ProjectQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -226,8 +226,8 @@ func (pq *ProjectQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { } // OnlyIDX is like OnlyID, but panics if an error occurs. 
-func (pq *ProjectQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := pq.OnlyID(ctx) +func (_q *ProjectQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -235,18 +235,18 @@ func (pq *ProjectQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Projects. -func (pq *ProjectQuery) All(ctx context.Context) ([]*Project, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryAll) - if err := pq.prepareQuery(ctx); err != nil { +func (_q *ProjectQuery) All(ctx context.Context) ([]*Project, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*Project, *ProjectQuery]() - return withInterceptors[[]*Project](ctx, pq, qr, pq.inters) + return withInterceptors[[]*Project](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (pq *ProjectQuery) AllX(ctx context.Context) []*Project { - nodes, err := pq.All(ctx) +func (_q *ProjectQuery) AllX(ctx context.Context) []*Project { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -254,20 +254,20 @@ func (pq *ProjectQuery) AllX(ctx context.Context) []*Project { } // IDs executes the query and returns a list of Project IDs. -func (pq *ProjectQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if pq.ctx.Unique == nil && pq.path != nil { - pq.Unique(true) +func (_q *ProjectQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryIDs) - if err = pq.Select(project.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(project.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (pq *ProjectQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := pq.IDs(ctx) +func (_q *ProjectQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -275,17 +275,17 @@ func (pq *ProjectQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (pq *ProjectQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryCount) - if err := pq.prepareQuery(ctx); err != nil { +func (_q *ProjectQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, pq, querierCount[*ProjectQuery](), pq.inters) + return withInterceptors[int](ctx, _q, querierCount[*ProjectQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (pq *ProjectQuery) CountX(ctx context.Context) int { - count, err := pq.Count(ctx) +func (_q *ProjectQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -293,9 +293,9 @@ func (pq *ProjectQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. 
-func (pq *ProjectQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryExist) - switch _, err := pq.FirstID(ctx); { +func (_q *ProjectQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -306,8 +306,8 @@ func (pq *ProjectQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (pq *ProjectQuery) ExistX(ctx context.Context) bool { - exist, err := pq.Exist(ctx) +func (_q *ProjectQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -316,57 +316,57 @@ func (pq *ProjectQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the ProjectQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (pq *ProjectQuery) Clone() *ProjectQuery { - if pq == nil { +func (_q *ProjectQuery) Clone() *ProjectQuery { + if _q == nil { return nil } return &ProjectQuery{ - config: pq.config, - ctx: pq.ctx.Clone(), - order: append([]project.OrderOption{}, pq.order...), - inters: append([]Interceptor{}, pq.inters...), - predicates: append([]predicate.Project{}, pq.predicates...), - withOrganization: pq.withOrganization.Clone(), - withWorkflows: pq.withWorkflows.Clone(), - withVersions: pq.withVersions.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]project.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Project{}, _q.predicates...), + withOrganization: _q.withOrganization.Clone(), + withWorkflows: _q.withWorkflows.Clone(), + withVersions: _q.withVersions.Clone(), // clone intermediate query. - sql: pq.sql.Clone(), - path: pq.path, - modifiers: append([]func(*sql.Selector){}, pq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (pq *ProjectQuery) WithOrganization(opts ...func(*OrganizationQuery)) *ProjectQuery { - query := (&OrganizationClient{config: pq.config}).Query() +func (_q *ProjectQuery) WithOrganization(opts ...func(*OrganizationQuery)) *ProjectQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - pq.withOrganization = query - return pq + _q.withOrganization = query + return _q } // WithWorkflows tells the query-builder to eager-load the nodes that are connected to // the "workflows" edge. The optional arguments are used to configure the query builder of the edge. -func (pq *ProjectQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *ProjectQuery { - query := (&WorkflowClient{config: pq.config}).Query() +func (_q *ProjectQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *ProjectQuery { + query := (&WorkflowClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - pq.withWorkflows = query - return pq + _q.withWorkflows = query + return _q } // WithVersions tells the query-builder to eager-load the nodes that are connected to // the "versions" edge. The optional arguments are used to configure the query builder of the edge. 
-func (pq *ProjectQuery) WithVersions(opts ...func(*ProjectVersionQuery)) *ProjectQuery { - query := (&ProjectVersionClient{config: pq.config}).Query() +func (_q *ProjectQuery) WithVersions(opts ...func(*ProjectVersionQuery)) *ProjectQuery { + query := (&ProjectVersionClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - pq.withVersions = query - return pq + _q.withVersions = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -383,10 +383,10 @@ func (pq *ProjectQuery) WithVersions(opts ...func(*ProjectVersionQuery)) *Projec // GroupBy(project.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (pq *ProjectQuery) GroupBy(field string, fields ...string) *ProjectGroupBy { - pq.ctx.Fields = append([]string{field}, fields...) - grbuild := &ProjectGroupBy{build: pq} - grbuild.flds = &pq.ctx.Fields +func (_q *ProjectQuery) GroupBy(field string, fields ...string) *ProjectGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ProjectGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = project.Label grbuild.scan = grbuild.Scan return grbuild @@ -404,91 +404,91 @@ func (pq *ProjectQuery) GroupBy(field string, fields ...string) *ProjectGroupBy // client.Project.Query(). // Select(project.FieldName). // Scan(ctx, &v) -func (pq *ProjectQuery) Select(fields ...string) *ProjectSelect { - pq.ctx.Fields = append(pq.ctx.Fields, fields...) - sbuild := &ProjectSelect{ProjectQuery: pq} +func (_q *ProjectQuery) Select(fields ...string) *ProjectSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ProjectSelect{ProjectQuery: _q} sbuild.label = project.Label - sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a ProjectSelect configured with the given aggregations. -func (pq *ProjectQuery) Aggregate(fns ...AggregateFunc) *ProjectSelect { - return pq.Select().Aggregate(fns...) +func (_q *ProjectQuery) Aggregate(fns ...AggregateFunc) *ProjectSelect { + return _q.Select().Aggregate(fns...) 
} -func (pq *ProjectQuery) prepareQuery(ctx context.Context) error { - for _, inter := range pq.inters { +func (_q *ProjectQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, pq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range pq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !project.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if pq.path != nil { - prev, err := pq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - pq.sql = prev + _q.sql = prev } return nil } -func (pq *ProjectQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Project, error) { +func (_q *ProjectQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Project, error) { var ( nodes = []*Project{} - _spec = pq.querySpec() + _spec = _q.querySpec() loadedTypes = [3]bool{ - pq.withOrganization != nil, - pq.withWorkflows != nil, - pq.withVersions != nil, + _q.withOrganization != nil, + _q.withWorkflows != nil, + _q.withVersions != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*Project).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &Project{config: pq.config} + node := &Project{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(pq.modifiers) > 0 { - _spec.Modifiers = pq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := pq.withOrganization; query != nil { - if err := pq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *Project, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } } - if query := pq.withWorkflows; query != nil { - if err := pq.loadWorkflows(ctx, query, nodes, + if query := _q.withWorkflows; query != nil { + if err := _q.loadWorkflows(ctx, query, nodes, func(n *Project) { n.Edges.Workflows = []*Workflow{} }, func(n *Project, e *Workflow) { n.Edges.Workflows = append(n.Edges.Workflows, e) }); err != nil { return nil, err } } - if query := pq.withVersions; query != nil { - if err := pq.loadVersions(ctx, query, nodes, + if query := _q.withVersions; query != nil { + if err := _q.loadVersions(ctx, query, nodes, func(n *Project) { n.Edges.Versions = []*ProjectVersion{} }, func(n *Project, e *ProjectVersion) { n.Edges.Versions = append(n.Edges.Versions, e) }); err != nil { return nil, err @@ -497,7 +497,7 @@ func (pq *ProjectQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Proj return nodes, nil } -func (pq *ProjectQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Project, init func(*Project), assign func(*Project, *Organization)) error { +func (_q *ProjectQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Project, init func(*Project), assign func(*Project, *Organization)) error { ids := make([]uuid.UUID, 0, 
len(nodes)) nodeids := make(map[uuid.UUID][]*Project) for i := range nodes { @@ -526,7 +526,7 @@ func (pq *ProjectQuery) loadOrganization(ctx context.Context, query *Organizatio } return nil } -func (pq *ProjectQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, nodes []*Project, init func(*Project), assign func(*Project, *Workflow)) error { +func (_q *ProjectQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, nodes []*Project, init func(*Project), assign func(*Project, *Workflow)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Project) for i := range nodes { @@ -557,7 +557,7 @@ func (pq *ProjectQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, } return nil } -func (pq *ProjectQuery) loadVersions(ctx context.Context, query *ProjectVersionQuery, nodes []*Project, init func(*Project), assign func(*Project, *ProjectVersion)) error { +func (_q *ProjectQuery) loadVersions(ctx context.Context, query *ProjectVersionQuery, nodes []*Project, init func(*Project), assign func(*Project, *ProjectVersion)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Project) for i := range nodes { @@ -588,27 +588,27 @@ func (pq *ProjectQuery) loadVersions(ctx context.Context, query *ProjectVersionQ return nil } -func (pq *ProjectQuery) sqlCount(ctx context.Context) (int, error) { - _spec := pq.querySpec() - if len(pq.modifiers) > 0 { - _spec.Modifiers = pq.modifiers +func (_q *ProjectQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = pq.ctx.Fields - if len(pq.ctx.Fields) > 0 { - _spec.Unique = pq.ctx.Unique != nil && *pq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, pq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (pq *ProjectQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *ProjectQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(project.Table, project.Columns, sqlgraph.NewFieldSpec(project.FieldID, field.TypeUUID)) - _spec.From = pq.sql - if unique := pq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if pq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := pq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, project.FieldID) for i := range fields { @@ -616,24 +616,24 @@ func (pq *ProjectQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if pq.withOrganization != nil { + if _q.withOrganization != nil { _spec.Node.AddColumnOnce(project.FieldOrganizationID) } } - if ps := pq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := pq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := pq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := pq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -643,36 +643,36 @@ func (pq 
*ProjectQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (pq *ProjectQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(pq.driver.Dialect()) +func (_q *ProjectQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(project.Table) - columns := pq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = project.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if pq.sql != nil { - selector = pq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if pq.ctx.Unique != nil && *pq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range pq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range pq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range pq.order { + for _, p := range _q.order { p(selector) } - if offset := pq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := pq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -681,33 +681,33 @@ func (pq *ProjectQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (pq *ProjectQuery) ForUpdate(opts ...sql.LockOption) *ProjectQuery { - if pq.driver.Dialect() == dialect.Postgres { - pq.Unique(false) +func (_q *ProjectQuery) ForUpdate(opts ...sql.LockOption) *ProjectQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - pq.modifiers = append(pq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return pq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (pq *ProjectQuery) ForShare(opts ...sql.LockOption) *ProjectQuery { - if pq.driver.Dialect() == dialect.Postgres { - pq.Unique(false) +func (_q *ProjectQuery) ForShare(opts ...sql.LockOption) *ProjectQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - pq.modifiers = append(pq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return pq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (pq *ProjectQuery) Modify(modifiers ...func(s *sql.Selector)) *ProjectSelect { - pq.modifiers = append(pq.modifiers, modifiers...) - return pq.Select() +func (_q *ProjectQuery) Modify(modifiers ...func(s *sql.Selector)) *ProjectSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // ProjectGroupBy is the group-by builder for Project entities. @@ -717,41 +717,41 @@ type ProjectGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (pgb *ProjectGroupBy) Aggregate(fns ...AggregateFunc) *ProjectGroupBy { - pgb.fns = append(pgb.fns, fns...) 
- return pgb +func (_g *ProjectGroupBy) Aggregate(fns ...AggregateFunc) *ProjectGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (pgb *ProjectGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, pgb.build.ctx, ent.OpQueryGroupBy) - if err := pgb.build.prepareQuery(ctx); err != nil { +func (_g *ProjectGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*ProjectQuery, *ProjectGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v) + return scanWithInterceptors[*ProjectQuery, *ProjectGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (pgb *ProjectGroupBy) sqlScan(ctx context.Context, root *ProjectQuery, v any) error { +func (_g *ProjectGroupBy) sqlScan(ctx context.Context, root *ProjectQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(pgb.fns)) - for _, fn := range pgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns)) - for _, f := range *pgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*pgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -765,27 +765,27 @@ type ProjectSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (ps *ProjectSelect) Aggregate(fns ...AggregateFunc) *ProjectSelect { - ps.fns = append(ps.fns, fns...) - return ps +func (_s *ProjectSelect) Aggregate(fns ...AggregateFunc) *ProjectSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (ps *ProjectSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ps.ctx, ent.OpQuerySelect) - if err := ps.prepareQuery(ctx); err != nil { +func (_s *ProjectSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*ProjectQuery, *ProjectSelect](ctx, ps.ProjectQuery, ps, ps.inters, v) + return scanWithInterceptors[*ProjectQuery, *ProjectSelect](ctx, _s.ProjectQuery, _s, _s.inters, v) } -func (ps *ProjectSelect) sqlScan(ctx context.Context, root *ProjectQuery, v any) error { +func (_s *ProjectSelect) sqlScan(ctx context.Context, root *ProjectQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ps.fns)) - for _, fn := range ps.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*ps.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -793,7 +793,7 @@ func (ps *ProjectSelect) sqlScan(ctx context.Context, root *ProjectQuery, v any) } rows := &sql.Rows{} query, args := selector.Query() - if err := ps.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -801,7 +801,7 @@ func (ps *ProjectSelect) sqlScan(ctx context.Context, root *ProjectQuery, v any) } // Modify adds a query modifier for attaching custom logic to queries. -func (ps *ProjectSelect) Modify(modifiers ...func(s *sql.Selector)) *ProjectSelect { - ps.modifiers = append(ps.modifiers, modifiers...) - return ps +func (_s *ProjectSelect) Modify(modifiers ...func(s *sql.Selector)) *ProjectSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/project_update.go b/app/controlplane/pkg/data/ent/project_update.go index f563ea521..871a27172 100644 --- a/app/controlplane/pkg/data/ent/project_update.go +++ b/app/controlplane/pkg/data/ent/project_update.go @@ -27,150 +27,150 @@ type ProjectUpdate struct { } // Where appends a list predicates to the ProjectUpdate builder. -func (pu *ProjectUpdate) Where(ps ...predicate.Project) *ProjectUpdate { - pu.mutation.Where(ps...) - return pu +func (_u *ProjectUpdate) Where(ps ...predicate.Project) *ProjectUpdate { + _u.mutation.Where(ps...) + return _u } // SetDescription sets the "description" field. -func (pu *ProjectUpdate) SetDescription(s string) *ProjectUpdate { - pu.mutation.SetDescription(s) - return pu +func (_u *ProjectUpdate) SetDescription(v string) *ProjectUpdate { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (pu *ProjectUpdate) SetNillableDescription(s *string) *ProjectUpdate { - if s != nil { - pu.SetDescription(*s) +func (_u *ProjectUpdate) SetNillableDescription(v *string) *ProjectUpdate { + if v != nil { + _u.SetDescription(*v) } - return pu + return _u } // ClearDescription clears the value of the "description" field. -func (pu *ProjectUpdate) ClearDescription() *ProjectUpdate { - pu.mutation.ClearDescription() - return pu +func (_u *ProjectUpdate) ClearDescription() *ProjectUpdate { + _u.mutation.ClearDescription() + return _u } // SetUpdatedAt sets the "updated_at" field. -func (pu *ProjectUpdate) SetUpdatedAt(t time.Time) *ProjectUpdate { - pu.mutation.SetUpdatedAt(t) - return pu +func (_u *ProjectUpdate) SetUpdatedAt(v time.Time) *ProjectUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (pu *ProjectUpdate) SetNillableUpdatedAt(t *time.Time) *ProjectUpdate { - if t != nil { - pu.SetUpdatedAt(*t) +func (_u *ProjectUpdate) SetNillableUpdatedAt(v *time.Time) *ProjectUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return pu + return _u } // SetDeletedAt sets the "deleted_at" field. -func (pu *ProjectUpdate) SetDeletedAt(t time.Time) *ProjectUpdate { - pu.mutation.SetDeletedAt(t) - return pu +func (_u *ProjectUpdate) SetDeletedAt(v time.Time) *ProjectUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. 
-func (pu *ProjectUpdate) SetNillableDeletedAt(t *time.Time) *ProjectUpdate { - if t != nil { - pu.SetDeletedAt(*t) +func (_u *ProjectUpdate) SetNillableDeletedAt(v *time.Time) *ProjectUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return pu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (pu *ProjectUpdate) ClearDeletedAt() *ProjectUpdate { - pu.mutation.ClearDeletedAt() - return pu +func (_u *ProjectUpdate) ClearDeletedAt() *ProjectUpdate { + _u.mutation.ClearDeletedAt() + return _u } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (pu *ProjectUpdate) AddWorkflowIDs(ids ...uuid.UUID) *ProjectUpdate { - pu.mutation.AddWorkflowIDs(ids...) - return pu +func (_u *ProjectUpdate) AddWorkflowIDs(ids ...uuid.UUID) *ProjectUpdate { + _u.mutation.AddWorkflowIDs(ids...) + return _u } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (pu *ProjectUpdate) AddWorkflows(w ...*Workflow) *ProjectUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ProjectUpdate) AddWorkflows(v ...*Workflow) *ProjectUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pu.AddWorkflowIDs(ids...) + return _u.AddWorkflowIDs(ids...) } // AddVersionIDs adds the "versions" edge to the ProjectVersion entity by IDs. -func (pu *ProjectUpdate) AddVersionIDs(ids ...uuid.UUID) *ProjectUpdate { - pu.mutation.AddVersionIDs(ids...) - return pu +func (_u *ProjectUpdate) AddVersionIDs(ids ...uuid.UUID) *ProjectUpdate { + _u.mutation.AddVersionIDs(ids...) + return _u } // AddVersions adds the "versions" edges to the ProjectVersion entity. -func (pu *ProjectUpdate) AddVersions(p ...*ProjectVersion) *ProjectUpdate { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_u *ProjectUpdate) AddVersions(v ...*ProjectVersion) *ProjectUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pu.AddVersionIDs(ids...) + return _u.AddVersionIDs(ids...) } // Mutation returns the ProjectMutation object of the builder. -func (pu *ProjectUpdate) Mutation() *ProjectMutation { - return pu.mutation +func (_u *ProjectUpdate) Mutation() *ProjectMutation { + return _u.mutation } // ClearWorkflows clears all "workflows" edges to the Workflow entity. -func (pu *ProjectUpdate) ClearWorkflows() *ProjectUpdate { - pu.mutation.ClearWorkflows() - return pu +func (_u *ProjectUpdate) ClearWorkflows() *ProjectUpdate { + _u.mutation.ClearWorkflows() + return _u } // RemoveWorkflowIDs removes the "workflows" edge to Workflow entities by IDs. -func (pu *ProjectUpdate) RemoveWorkflowIDs(ids ...uuid.UUID) *ProjectUpdate { - pu.mutation.RemoveWorkflowIDs(ids...) - return pu +func (_u *ProjectUpdate) RemoveWorkflowIDs(ids ...uuid.UUID) *ProjectUpdate { + _u.mutation.RemoveWorkflowIDs(ids...) + return _u } // RemoveWorkflows removes "workflows" edges to Workflow entities. -func (pu *ProjectUpdate) RemoveWorkflows(w ...*Workflow) *ProjectUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ProjectUpdate) RemoveWorkflows(v ...*Workflow) *ProjectUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pu.RemoveWorkflowIDs(ids...) + return _u.RemoveWorkflowIDs(ids...) } // ClearVersions clears all "versions" edges to the ProjectVersion entity. 
-func (pu *ProjectUpdate) ClearVersions() *ProjectUpdate { - pu.mutation.ClearVersions() - return pu +func (_u *ProjectUpdate) ClearVersions() *ProjectUpdate { + _u.mutation.ClearVersions() + return _u } // RemoveVersionIDs removes the "versions" edge to ProjectVersion entities by IDs. -func (pu *ProjectUpdate) RemoveVersionIDs(ids ...uuid.UUID) *ProjectUpdate { - pu.mutation.RemoveVersionIDs(ids...) - return pu +func (_u *ProjectUpdate) RemoveVersionIDs(ids ...uuid.UUID) *ProjectUpdate { + _u.mutation.RemoveVersionIDs(ids...) + return _u } // RemoveVersions removes "versions" edges to ProjectVersion entities. -func (pu *ProjectUpdate) RemoveVersions(p ...*ProjectVersion) *ProjectUpdate { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_u *ProjectUpdate) RemoveVersions(v ...*ProjectVersion) *ProjectUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pu.RemoveVersionIDs(ids...) + return _u.RemoveVersionIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (pu *ProjectUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks) +func (_u *ProjectUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (pu *ProjectUpdate) SaveX(ctx context.Context) int { - affected, err := pu.Save(ctx) +func (_u *ProjectUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -178,60 +178,60 @@ func (pu *ProjectUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (pu *ProjectUpdate) Exec(ctx context.Context) error { - _, err := pu.Save(ctx) +func (_u *ProjectUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (pu *ProjectUpdate) ExecX(ctx context.Context) { - if err := pu.Exec(ctx); err != nil { +func (_u *ProjectUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (pu *ProjectUpdate) check() error { - if pu.mutation.OrganizationCleared() && len(pu.mutation.OrganizationIDs()) > 0 { +func (_u *ProjectUpdate) check() error { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Project.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (pu *ProjectUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ProjectUpdate { - pu.modifiers = append(pu.modifiers, modifiers...) - return pu +func (_u *ProjectUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ProjectUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := pu.check(); err != nil { - return n, err +func (_u *ProjectUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(project.Table, project.Columns, sqlgraph.NewFieldSpec(project.FieldID, field.TypeUUID)) - if ps := pu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := pu.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(project.FieldDescription, field.TypeString, value) } - if pu.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(project.FieldDescription, field.TypeString) } - if value, ok := pu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(project.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := pu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(project.FieldDeletedAt, field.TypeTime, value) } - if pu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(project.FieldDeletedAt, field.TypeTime) } - if pu.mutation.WorkflowsCleared() { + if _u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -244,7 +244,7 @@ func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pu.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !pu.mutation.WorkflowsCleared() { + if nodes := _u.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -260,7 +260,7 @@ func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pu.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -276,7 +276,7 @@ func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if pu.mutation.VersionsCleared() { + if _u.mutation.VersionsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -289,7 +289,7 @@ func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pu.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !pu.mutation.VersionsCleared() { + if nodes := _u.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !_u.mutation.VersionsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -305,7 +305,7 @@ func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pu.mutation.VersionsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.VersionsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -321,8 +321,8 @@ func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(pu.modifiers...) 
- if n, err = sqlgraph.UpdateNodes(ctx, pu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{project.Label} } else if sqlgraph.IsConstraintError(err) { @@ -330,8 +330,8 @@ func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - pu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // ProjectUpdateOne is the builder for updating a single Project entity. @@ -344,157 +344,157 @@ type ProjectUpdateOne struct { } // SetDescription sets the "description" field. -func (puo *ProjectUpdateOne) SetDescription(s string) *ProjectUpdateOne { - puo.mutation.SetDescription(s) - return puo +func (_u *ProjectUpdateOne) SetDescription(v string) *ProjectUpdateOne { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (puo *ProjectUpdateOne) SetNillableDescription(s *string) *ProjectUpdateOne { - if s != nil { - puo.SetDescription(*s) +func (_u *ProjectUpdateOne) SetNillableDescription(v *string) *ProjectUpdateOne { + if v != nil { + _u.SetDescription(*v) } - return puo + return _u } // ClearDescription clears the value of the "description" field. -func (puo *ProjectUpdateOne) ClearDescription() *ProjectUpdateOne { - puo.mutation.ClearDescription() - return puo +func (_u *ProjectUpdateOne) ClearDescription() *ProjectUpdateOne { + _u.mutation.ClearDescription() + return _u } // SetUpdatedAt sets the "updated_at" field. -func (puo *ProjectUpdateOne) SetUpdatedAt(t time.Time) *ProjectUpdateOne { - puo.mutation.SetUpdatedAt(t) - return puo +func (_u *ProjectUpdateOne) SetUpdatedAt(v time.Time) *ProjectUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (puo *ProjectUpdateOne) SetNillableUpdatedAt(t *time.Time) *ProjectUpdateOne { - if t != nil { - puo.SetUpdatedAt(*t) +func (_u *ProjectUpdateOne) SetNillableUpdatedAt(v *time.Time) *ProjectUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return puo + return _u } // SetDeletedAt sets the "deleted_at" field. -func (puo *ProjectUpdateOne) SetDeletedAt(t time.Time) *ProjectUpdateOne { - puo.mutation.SetDeletedAt(t) - return puo +func (_u *ProjectUpdateOne) SetDeletedAt(v time.Time) *ProjectUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (puo *ProjectUpdateOne) SetNillableDeletedAt(t *time.Time) *ProjectUpdateOne { - if t != nil { - puo.SetDeletedAt(*t) +func (_u *ProjectUpdateOne) SetNillableDeletedAt(v *time.Time) *ProjectUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return puo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (puo *ProjectUpdateOne) ClearDeletedAt() *ProjectUpdateOne { - puo.mutation.ClearDeletedAt() - return puo +func (_u *ProjectUpdateOne) ClearDeletedAt() *ProjectUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (puo *ProjectUpdateOne) AddWorkflowIDs(ids ...uuid.UUID) *ProjectUpdateOne { - puo.mutation.AddWorkflowIDs(ids...) - return puo +func (_u *ProjectUpdateOne) AddWorkflowIDs(ids ...uuid.UUID) *ProjectUpdateOne { + _u.mutation.AddWorkflowIDs(ids...) 
+ return _u } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (puo *ProjectUpdateOne) AddWorkflows(w ...*Workflow) *ProjectUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ProjectUpdateOne) AddWorkflows(v ...*Workflow) *ProjectUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return puo.AddWorkflowIDs(ids...) + return _u.AddWorkflowIDs(ids...) } // AddVersionIDs adds the "versions" edge to the ProjectVersion entity by IDs. -func (puo *ProjectUpdateOne) AddVersionIDs(ids ...uuid.UUID) *ProjectUpdateOne { - puo.mutation.AddVersionIDs(ids...) - return puo +func (_u *ProjectUpdateOne) AddVersionIDs(ids ...uuid.UUID) *ProjectUpdateOne { + _u.mutation.AddVersionIDs(ids...) + return _u } // AddVersions adds the "versions" edges to the ProjectVersion entity. -func (puo *ProjectUpdateOne) AddVersions(p ...*ProjectVersion) *ProjectUpdateOne { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_u *ProjectUpdateOne) AddVersions(v ...*ProjectVersion) *ProjectUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return puo.AddVersionIDs(ids...) + return _u.AddVersionIDs(ids...) } // Mutation returns the ProjectMutation object of the builder. -func (puo *ProjectUpdateOne) Mutation() *ProjectMutation { - return puo.mutation +func (_u *ProjectUpdateOne) Mutation() *ProjectMutation { + return _u.mutation } // ClearWorkflows clears all "workflows" edges to the Workflow entity. -func (puo *ProjectUpdateOne) ClearWorkflows() *ProjectUpdateOne { - puo.mutation.ClearWorkflows() - return puo +func (_u *ProjectUpdateOne) ClearWorkflows() *ProjectUpdateOne { + _u.mutation.ClearWorkflows() + return _u } // RemoveWorkflowIDs removes the "workflows" edge to Workflow entities by IDs. -func (puo *ProjectUpdateOne) RemoveWorkflowIDs(ids ...uuid.UUID) *ProjectUpdateOne { - puo.mutation.RemoveWorkflowIDs(ids...) - return puo +func (_u *ProjectUpdateOne) RemoveWorkflowIDs(ids ...uuid.UUID) *ProjectUpdateOne { + _u.mutation.RemoveWorkflowIDs(ids...) + return _u } // RemoveWorkflows removes "workflows" edges to Workflow entities. -func (puo *ProjectUpdateOne) RemoveWorkflows(w ...*Workflow) *ProjectUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ProjectUpdateOne) RemoveWorkflows(v ...*Workflow) *ProjectUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return puo.RemoveWorkflowIDs(ids...) + return _u.RemoveWorkflowIDs(ids...) } // ClearVersions clears all "versions" edges to the ProjectVersion entity. -func (puo *ProjectUpdateOne) ClearVersions() *ProjectUpdateOne { - puo.mutation.ClearVersions() - return puo +func (_u *ProjectUpdateOne) ClearVersions() *ProjectUpdateOne { + _u.mutation.ClearVersions() + return _u } // RemoveVersionIDs removes the "versions" edge to ProjectVersion entities by IDs. -func (puo *ProjectUpdateOne) RemoveVersionIDs(ids ...uuid.UUID) *ProjectUpdateOne { - puo.mutation.RemoveVersionIDs(ids...) - return puo +func (_u *ProjectUpdateOne) RemoveVersionIDs(ids ...uuid.UUID) *ProjectUpdateOne { + _u.mutation.RemoveVersionIDs(ids...) + return _u } // RemoveVersions removes "versions" edges to ProjectVersion entities. 
-func (puo *ProjectUpdateOne) RemoveVersions(p ...*ProjectVersion) *ProjectUpdateOne { - ids := make([]uuid.UUID, len(p)) - for i := range p { - ids[i] = p[i].ID +func (_u *ProjectUpdateOne) RemoveVersions(v ...*ProjectVersion) *ProjectUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return puo.RemoveVersionIDs(ids...) + return _u.RemoveVersionIDs(ids...) } // Where appends a list predicates to the ProjectUpdate builder. -func (puo *ProjectUpdateOne) Where(ps ...predicate.Project) *ProjectUpdateOne { - puo.mutation.Where(ps...) - return puo +func (_u *ProjectUpdateOne) Where(ps ...predicate.Project) *ProjectUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (puo *ProjectUpdateOne) Select(field string, fields ...string) *ProjectUpdateOne { - puo.fields = append([]string{field}, fields...) - return puo +func (_u *ProjectUpdateOne) Select(field string, fields ...string) *ProjectUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated Project entity. -func (puo *ProjectUpdateOne) Save(ctx context.Context) (*Project, error) { - return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks) +func (_u *ProjectUpdateOne) Save(ctx context.Context) (*Project, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (puo *ProjectUpdateOne) SaveX(ctx context.Context) *Project { - node, err := puo.Save(ctx) +func (_u *ProjectUpdateOne) SaveX(ctx context.Context) *Project { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -502,43 +502,43 @@ func (puo *ProjectUpdateOne) SaveX(ctx context.Context) *Project { } // Exec executes the query on the entity. -func (puo *ProjectUpdateOne) Exec(ctx context.Context) error { - _, err := puo.Save(ctx) +func (_u *ProjectUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (puo *ProjectUpdateOne) ExecX(ctx context.Context) { - if err := puo.Exec(ctx); err != nil { +func (_u *ProjectUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (puo *ProjectUpdateOne) check() error { - if puo.mutation.OrganizationCleared() && len(puo.mutation.OrganizationIDs()) > 0 { +func (_u *ProjectUpdateOne) check() error { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Project.organization"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (puo *ProjectUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ProjectUpdateOne { - puo.modifiers = append(puo.modifiers, modifiers...) - return puo +func (_u *ProjectUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ProjectUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err error) { - if err := puo.check(); err != nil { +func (_u *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(project.Table, project.Columns, sqlgraph.NewFieldSpec(project.FieldID, field.TypeUUID)) - id, ok := puo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Project.id" for update`)} } _spec.Node.ID.Value = id - if fields := puo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, project.FieldID) for _, f := range fields { @@ -550,29 +550,29 @@ func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err e } } } - if ps := puo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := puo.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(project.FieldDescription, field.TypeString, value) } - if puo.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(project.FieldDescription, field.TypeString) } - if value, ok := puo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(project.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := puo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(project.FieldDeletedAt, field.TypeTime, value) } - if puo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(project.FieldDeletedAt, field.TypeTime) } - if puo.mutation.WorkflowsCleared() { + if _u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -585,7 +585,7 @@ func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err e } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := puo.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !puo.mutation.WorkflowsCleared() { + if nodes := _u.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -601,7 +601,7 @@ func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err e } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := puo.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -617,7 +617,7 @@ func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err e } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if puo.mutation.VersionsCleared() { + if _u.mutation.VersionsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -630,7 +630,7 @@ func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err e } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := puo.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !puo.mutation.VersionsCleared() { + if nodes := _u.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !_u.mutation.VersionsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -646,7 +646,7 @@ func (puo 
*ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err e } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := puo.mutation.VersionsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.VersionsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -662,11 +662,11 @@ func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err e } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(puo.modifiers...) - _node = &Project{config: puo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &Project{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, puo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{project.Label} } else if sqlgraph.IsConstraintError(err) { @@ -674,6 +674,6 @@ func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err e } return nil, err } - puo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/projectversion.go b/app/controlplane/pkg/data/ent/projectversion.go index fb1d007a6..79a08be3c 100644 --- a/app/controlplane/pkg/data/ent/projectversion.go +++ b/app/controlplane/pkg/data/ent/projectversion.go @@ -100,7 +100,7 @@ func (*ProjectVersion) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the ProjectVersion fields. -func (pv *ProjectVersion) assignValues(columns []string, values []any) error { +func (_m *ProjectVersion) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -110,70 +110,70 @@ func (pv *ProjectVersion) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - pv.ID = *value + _m.ID = *value } case projectversion.FieldVersion: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field version", values[i]) } else if value.Valid { - pv.Version = value.String + _m.Version = value.String } case projectversion.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - pv.CreatedAt = value.Time + _m.CreatedAt = value.Time } case projectversion.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - pv.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case projectversion.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - pv.DeletedAt = value.Time + _m.DeletedAt = value.Time } case projectversion.FieldProjectID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field project_id", values[i]) } else if value != nil { - pv.ProjectID = *value + _m.ProjectID = *value } case projectversion.FieldPrerelease: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field prerelease", values[i]) } else if value.Valid { - pv.Prerelease = value.Bool + 
_m.Prerelease = value.Bool } case projectversion.FieldWorkflowRunCount: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field workflow_run_count", values[i]) } else if value.Valid { - pv.WorkflowRunCount = int(value.Int64) + _m.WorkflowRunCount = int(value.Int64) } case projectversion.FieldReleasedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field released_at", values[i]) } else if value.Valid { - pv.ReleasedAt = value.Time + _m.ReleasedAt = value.Time } case projectversion.FieldLastRunAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field last_run_at", values[i]) } else if value.Valid { - pv.LastRunAt = value.Time + _m.LastRunAt = value.Time } case projectversion.FieldLatest: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field latest", values[i]) } else if value.Valid { - pv.Latest = value.Bool + _m.Latest = value.Bool } default: - pv.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -181,72 +181,72 @@ func (pv *ProjectVersion) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the ProjectVersion. // This includes values selected through modifiers, order, etc. -func (pv *ProjectVersion) Value(name string) (ent.Value, error) { - return pv.selectValues.Get(name) +func (_m *ProjectVersion) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryProject queries the "project" edge of the ProjectVersion entity. -func (pv *ProjectVersion) QueryProject() *ProjectQuery { - return NewProjectVersionClient(pv.config).QueryProject(pv) +func (_m *ProjectVersion) QueryProject() *ProjectQuery { + return NewProjectVersionClient(_m.config).QueryProject(_m) } // QueryRuns queries the "runs" edge of the ProjectVersion entity. -func (pv *ProjectVersion) QueryRuns() *WorkflowRunQuery { - return NewProjectVersionClient(pv.config).QueryRuns(pv) +func (_m *ProjectVersion) QueryRuns() *WorkflowRunQuery { + return NewProjectVersionClient(_m.config).QueryRuns(_m) } // Update returns a builder for updating this ProjectVersion. // Note that you need to call ProjectVersion.Unwrap() before calling this method if this ProjectVersion // was returned from a transaction, and the transaction was committed or rolled back. -func (pv *ProjectVersion) Update() *ProjectVersionUpdateOne { - return NewProjectVersionClient(pv.config).UpdateOne(pv) +func (_m *ProjectVersion) Update() *ProjectVersionUpdateOne { + return NewProjectVersionClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the ProjectVersion entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (pv *ProjectVersion) Unwrap() *ProjectVersion { - _tx, ok := pv.config.driver.(*txDriver) +func (_m *ProjectVersion) Unwrap() *ProjectVersion { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: ProjectVersion is not a transactional entity") } - pv.config.driver = _tx.drv - return pv + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. 
-func (pv *ProjectVersion) String() string { +func (_m *ProjectVersion) String() string { var builder strings.Builder builder.WriteString("ProjectVersion(") - builder.WriteString(fmt.Sprintf("id=%v, ", pv.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("version=") - builder.WriteString(pv.Version) + builder.WriteString(_m.Version) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(pv.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(pv.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(pv.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("project_id=") - builder.WriteString(fmt.Sprintf("%v", pv.ProjectID)) + builder.WriteString(fmt.Sprintf("%v", _m.ProjectID)) builder.WriteString(", ") builder.WriteString("prerelease=") - builder.WriteString(fmt.Sprintf("%v", pv.Prerelease)) + builder.WriteString(fmt.Sprintf("%v", _m.Prerelease)) builder.WriteString(", ") builder.WriteString("workflow_run_count=") - builder.WriteString(fmt.Sprintf("%v", pv.WorkflowRunCount)) + builder.WriteString(fmt.Sprintf("%v", _m.WorkflowRunCount)) builder.WriteString(", ") builder.WriteString("released_at=") - builder.WriteString(pv.ReleasedAt.Format(time.ANSIC)) + builder.WriteString(_m.ReleasedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("last_run_at=") - builder.WriteString(pv.LastRunAt.Format(time.ANSIC)) + builder.WriteString(_m.LastRunAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("latest=") - builder.WriteString(fmt.Sprintf("%v", pv.Latest)) + builder.WriteString(fmt.Sprintf("%v", _m.Latest)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/projectversion_create.go b/app/controlplane/pkg/data/ent/projectversion_create.go index 6ac416ffc..0a9193a3d 100644 --- a/app/controlplane/pkg/data/ent/projectversion_create.go +++ b/app/controlplane/pkg/data/ent/projectversion_create.go @@ -27,185 +27,185 @@ type ProjectVersionCreate struct { } // SetVersion sets the "version" field. -func (pvc *ProjectVersionCreate) SetVersion(s string) *ProjectVersionCreate { - pvc.mutation.SetVersion(s) - return pvc +func (_c *ProjectVersionCreate) SetVersion(v string) *ProjectVersionCreate { + _c.mutation.SetVersion(v) + return _c } // SetNillableVersion sets the "version" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillableVersion(s *string) *ProjectVersionCreate { - if s != nil { - pvc.SetVersion(*s) +func (_c *ProjectVersionCreate) SetNillableVersion(v *string) *ProjectVersionCreate { + if v != nil { + _c.SetVersion(*v) } - return pvc + return _c } // SetCreatedAt sets the "created_at" field. -func (pvc *ProjectVersionCreate) SetCreatedAt(t time.Time) *ProjectVersionCreate { - pvc.mutation.SetCreatedAt(t) - return pvc +func (_c *ProjectVersionCreate) SetCreatedAt(v time.Time) *ProjectVersionCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
-func (pvc *ProjectVersionCreate) SetNillableCreatedAt(t *time.Time) *ProjectVersionCreate { - if t != nil { - pvc.SetCreatedAt(*t) +func (_c *ProjectVersionCreate) SetNillableCreatedAt(v *time.Time) *ProjectVersionCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return pvc + return _c } // SetUpdatedAt sets the "updated_at" field. -func (pvc *ProjectVersionCreate) SetUpdatedAt(t time.Time) *ProjectVersionCreate { - pvc.mutation.SetUpdatedAt(t) - return pvc +func (_c *ProjectVersionCreate) SetUpdatedAt(v time.Time) *ProjectVersionCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillableUpdatedAt(t *time.Time) *ProjectVersionCreate { - if t != nil { - pvc.SetUpdatedAt(*t) +func (_c *ProjectVersionCreate) SetNillableUpdatedAt(v *time.Time) *ProjectVersionCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return pvc + return _c } // SetDeletedAt sets the "deleted_at" field. -func (pvc *ProjectVersionCreate) SetDeletedAt(t time.Time) *ProjectVersionCreate { - pvc.mutation.SetDeletedAt(t) - return pvc +func (_c *ProjectVersionCreate) SetDeletedAt(v time.Time) *ProjectVersionCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillableDeletedAt(t *time.Time) *ProjectVersionCreate { - if t != nil { - pvc.SetDeletedAt(*t) +func (_c *ProjectVersionCreate) SetNillableDeletedAt(v *time.Time) *ProjectVersionCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return pvc + return _c } // SetProjectID sets the "project_id" field. -func (pvc *ProjectVersionCreate) SetProjectID(u uuid.UUID) *ProjectVersionCreate { - pvc.mutation.SetProjectID(u) - return pvc +func (_c *ProjectVersionCreate) SetProjectID(v uuid.UUID) *ProjectVersionCreate { + _c.mutation.SetProjectID(v) + return _c } // SetPrerelease sets the "prerelease" field. -func (pvc *ProjectVersionCreate) SetPrerelease(b bool) *ProjectVersionCreate { - pvc.mutation.SetPrerelease(b) - return pvc +func (_c *ProjectVersionCreate) SetPrerelease(v bool) *ProjectVersionCreate { + _c.mutation.SetPrerelease(v) + return _c } // SetNillablePrerelease sets the "prerelease" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillablePrerelease(b *bool) *ProjectVersionCreate { - if b != nil { - pvc.SetPrerelease(*b) +func (_c *ProjectVersionCreate) SetNillablePrerelease(v *bool) *ProjectVersionCreate { + if v != nil { + _c.SetPrerelease(*v) } - return pvc + return _c } // SetWorkflowRunCount sets the "workflow_run_count" field. -func (pvc *ProjectVersionCreate) SetWorkflowRunCount(i int) *ProjectVersionCreate { - pvc.mutation.SetWorkflowRunCount(i) - return pvc +func (_c *ProjectVersionCreate) SetWorkflowRunCount(v int) *ProjectVersionCreate { + _c.mutation.SetWorkflowRunCount(v) + return _c } // SetNillableWorkflowRunCount sets the "workflow_run_count" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillableWorkflowRunCount(i *int) *ProjectVersionCreate { - if i != nil { - pvc.SetWorkflowRunCount(*i) +func (_c *ProjectVersionCreate) SetNillableWorkflowRunCount(v *int) *ProjectVersionCreate { + if v != nil { + _c.SetWorkflowRunCount(*v) } - return pvc + return _c } // SetReleasedAt sets the "released_at" field. 
-func (pvc *ProjectVersionCreate) SetReleasedAt(t time.Time) *ProjectVersionCreate { - pvc.mutation.SetReleasedAt(t) - return pvc +func (_c *ProjectVersionCreate) SetReleasedAt(v time.Time) *ProjectVersionCreate { + _c.mutation.SetReleasedAt(v) + return _c } // SetNillableReleasedAt sets the "released_at" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillableReleasedAt(t *time.Time) *ProjectVersionCreate { - if t != nil { - pvc.SetReleasedAt(*t) +func (_c *ProjectVersionCreate) SetNillableReleasedAt(v *time.Time) *ProjectVersionCreate { + if v != nil { + _c.SetReleasedAt(*v) } - return pvc + return _c } // SetLastRunAt sets the "last_run_at" field. -func (pvc *ProjectVersionCreate) SetLastRunAt(t time.Time) *ProjectVersionCreate { - pvc.mutation.SetLastRunAt(t) - return pvc +func (_c *ProjectVersionCreate) SetLastRunAt(v time.Time) *ProjectVersionCreate { + _c.mutation.SetLastRunAt(v) + return _c } // SetNillableLastRunAt sets the "last_run_at" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillableLastRunAt(t *time.Time) *ProjectVersionCreate { - if t != nil { - pvc.SetLastRunAt(*t) +func (_c *ProjectVersionCreate) SetNillableLastRunAt(v *time.Time) *ProjectVersionCreate { + if v != nil { + _c.SetLastRunAt(*v) } - return pvc + return _c } // SetLatest sets the "latest" field. -func (pvc *ProjectVersionCreate) SetLatest(b bool) *ProjectVersionCreate { - pvc.mutation.SetLatest(b) - return pvc +func (_c *ProjectVersionCreate) SetLatest(v bool) *ProjectVersionCreate { + _c.mutation.SetLatest(v) + return _c } // SetNillableLatest sets the "latest" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillableLatest(b *bool) *ProjectVersionCreate { - if b != nil { - pvc.SetLatest(*b) +func (_c *ProjectVersionCreate) SetNillableLatest(v *bool) *ProjectVersionCreate { + if v != nil { + _c.SetLatest(*v) } - return pvc + return _c } // SetID sets the "id" field. -func (pvc *ProjectVersionCreate) SetID(u uuid.UUID) *ProjectVersionCreate { - pvc.mutation.SetID(u) - return pvc +func (_c *ProjectVersionCreate) SetID(v uuid.UUID) *ProjectVersionCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (pvc *ProjectVersionCreate) SetNillableID(u *uuid.UUID) *ProjectVersionCreate { - if u != nil { - pvc.SetID(*u) +func (_c *ProjectVersionCreate) SetNillableID(v *uuid.UUID) *ProjectVersionCreate { + if v != nil { + _c.SetID(*v) } - return pvc + return _c } // SetProject sets the "project" edge to the Project entity. -func (pvc *ProjectVersionCreate) SetProject(p *Project) *ProjectVersionCreate { - return pvc.SetProjectID(p.ID) +func (_c *ProjectVersionCreate) SetProject(v *Project) *ProjectVersionCreate { + return _c.SetProjectID(v.ID) } // AddRunIDs adds the "runs" edge to the WorkflowRun entity by IDs. -func (pvc *ProjectVersionCreate) AddRunIDs(ids ...uuid.UUID) *ProjectVersionCreate { - pvc.mutation.AddRunIDs(ids...) - return pvc +func (_c *ProjectVersionCreate) AddRunIDs(ids ...uuid.UUID) *ProjectVersionCreate { + _c.mutation.AddRunIDs(ids...) + return _c } // AddRuns adds the "runs" edges to the WorkflowRun entity. -func (pvc *ProjectVersionCreate) AddRuns(w ...*WorkflowRun) *ProjectVersionCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *ProjectVersionCreate) AddRuns(v ...*WorkflowRun) *ProjectVersionCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pvc.AddRunIDs(ids...) 
+ return _c.AddRunIDs(ids...) } // Mutation returns the ProjectVersionMutation object of the builder. -func (pvc *ProjectVersionCreate) Mutation() *ProjectVersionMutation { - return pvc.mutation +func (_c *ProjectVersionCreate) Mutation() *ProjectVersionMutation { + return _c.mutation } // Save creates the ProjectVersion in the database. -func (pvc *ProjectVersionCreate) Save(ctx context.Context) (*ProjectVersion, error) { - pvc.defaults() - return withHooks(ctx, pvc.sqlSave, pvc.mutation, pvc.hooks) +func (_c *ProjectVersionCreate) Save(ctx context.Context) (*ProjectVersion, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (pvc *ProjectVersionCreate) SaveX(ctx context.Context) *ProjectVersion { - v, err := pvc.Save(ctx) +func (_c *ProjectVersionCreate) SaveX(ctx context.Context) *ProjectVersion { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -213,90 +213,90 @@ func (pvc *ProjectVersionCreate) SaveX(ctx context.Context) *ProjectVersion { } // Exec executes the query. -func (pvc *ProjectVersionCreate) Exec(ctx context.Context) error { - _, err := pvc.Save(ctx) +func (_c *ProjectVersionCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (pvc *ProjectVersionCreate) ExecX(ctx context.Context) { - if err := pvc.Exec(ctx); err != nil { +func (_c *ProjectVersionCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (pvc *ProjectVersionCreate) defaults() { - if _, ok := pvc.mutation.Version(); !ok { +func (_c *ProjectVersionCreate) defaults() { + if _, ok := _c.mutation.Version(); !ok { v := projectversion.DefaultVersion - pvc.mutation.SetVersion(v) + _c.mutation.SetVersion(v) } - if _, ok := pvc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { v := projectversion.DefaultCreatedAt() - pvc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := pvc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := projectversion.DefaultUpdatedAt() - pvc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := pvc.mutation.Prerelease(); !ok { + if _, ok := _c.mutation.Prerelease(); !ok { v := projectversion.DefaultPrerelease - pvc.mutation.SetPrerelease(v) + _c.mutation.SetPrerelease(v) } - if _, ok := pvc.mutation.WorkflowRunCount(); !ok { + if _, ok := _c.mutation.WorkflowRunCount(); !ok { v := projectversion.DefaultWorkflowRunCount - pvc.mutation.SetWorkflowRunCount(v) + _c.mutation.SetWorkflowRunCount(v) } - if _, ok := pvc.mutation.Latest(); !ok { + if _, ok := _c.mutation.Latest(); !ok { v := projectversion.DefaultLatest - pvc.mutation.SetLatest(v) + _c.mutation.SetLatest(v) } - if _, ok := pvc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := projectversion.DefaultID() - pvc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (pvc *ProjectVersionCreate) check() error { - if _, ok := pvc.mutation.Version(); !ok { +func (_c *ProjectVersionCreate) check() error { + if _, ok := _c.mutation.Version(); !ok { return &ValidationError{Name: "version", err: errors.New(`ent: missing required field "ProjectVersion.version"`)} } - if v, ok := pvc.mutation.Version(); ok { + if v, ok := _c.mutation.Version(); ok { if err := projectversion.VersionValidator(v); err != nil { return &ValidationError{Name: "version", err: fmt.Errorf(`ent: validator failed for field "ProjectVersion.version": %w`, err)} } } - if _, ok := pvc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ProjectVersion.created_at"`)} } - if _, ok := pvc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ProjectVersion.updated_at"`)} } - if _, ok := pvc.mutation.ProjectID(); !ok { + if _, ok := _c.mutation.ProjectID(); !ok { return &ValidationError{Name: "project_id", err: errors.New(`ent: missing required field "ProjectVersion.project_id"`)} } - if _, ok := pvc.mutation.Prerelease(); !ok { + if _, ok := _c.mutation.Prerelease(); !ok { return &ValidationError{Name: "prerelease", err: errors.New(`ent: missing required field "ProjectVersion.prerelease"`)} } - if _, ok := pvc.mutation.WorkflowRunCount(); !ok { + if _, ok := _c.mutation.WorkflowRunCount(); !ok { return &ValidationError{Name: "workflow_run_count", err: errors.New(`ent: missing required field "ProjectVersion.workflow_run_count"`)} } - if _, ok := pvc.mutation.Latest(); !ok { + if _, ok := _c.mutation.Latest(); !ok { return &ValidationError{Name: "latest", err: errors.New(`ent: missing required field "ProjectVersion.latest"`)} } - if len(pvc.mutation.ProjectIDs()) == 0 { + if len(_c.mutation.ProjectIDs()) == 0 { return &ValidationError{Name: "project", err: errors.New(`ent: missing required edge "ProjectVersion.project"`)} } return nil } -func (pvc *ProjectVersionCreate) sqlSave(ctx context.Context) (*ProjectVersion, error) { - if err := pvc.check(); err != nil { +func (_c *ProjectVersionCreate) sqlSave(ctx context.Context) (*ProjectVersion, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := pvc.createSpec() - if err := sqlgraph.CreateNode(ctx, pvc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -309,58 +309,58 @@ func (pvc *ProjectVersionCreate) sqlSave(ctx context.Context) (*ProjectVersion, return nil, err } } - pvc.mutation.id = &_node.ID - pvc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (pvc *ProjectVersionCreate) createSpec() (*ProjectVersion, *sqlgraph.CreateSpec) { +func (_c *ProjectVersionCreate) createSpec() (*ProjectVersion, *sqlgraph.CreateSpec) { var ( - _node = &ProjectVersion{config: pvc.config} + _node = &ProjectVersion{config: _c.config} _spec = sqlgraph.NewCreateSpec(projectversion.Table, sqlgraph.NewFieldSpec(projectversion.FieldID, field.TypeUUID)) ) - _spec.OnConflict = pvc.conflict - if id, ok := pvc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := pvc.mutation.Version(); ok { + if value, ok := 
_c.mutation.Version(); ok { _spec.SetField(projectversion.FieldVersion, field.TypeString, value) _node.Version = value } - if value, ok := pvc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(projectversion.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := pvc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(projectversion.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := pvc.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(projectversion.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if value, ok := pvc.mutation.Prerelease(); ok { + if value, ok := _c.mutation.Prerelease(); ok { _spec.SetField(projectversion.FieldPrerelease, field.TypeBool, value) _node.Prerelease = value } - if value, ok := pvc.mutation.WorkflowRunCount(); ok { + if value, ok := _c.mutation.WorkflowRunCount(); ok { _spec.SetField(projectversion.FieldWorkflowRunCount, field.TypeInt, value) _node.WorkflowRunCount = value } - if value, ok := pvc.mutation.ReleasedAt(); ok { + if value, ok := _c.mutation.ReleasedAt(); ok { _spec.SetField(projectversion.FieldReleasedAt, field.TypeTime, value) _node.ReleasedAt = value } - if value, ok := pvc.mutation.LastRunAt(); ok { + if value, ok := _c.mutation.LastRunAt(); ok { _spec.SetField(projectversion.FieldLastRunAt, field.TypeTime, value) _node.LastRunAt = value } - if value, ok := pvc.mutation.Latest(); ok { + if value, ok := _c.mutation.Latest(); ok { _spec.SetField(projectversion.FieldLatest, field.TypeBool, value) _node.Latest = value } - if nodes := pvc.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -377,7 +377,7 @@ func (pvc *ProjectVersionCreate) createSpec() (*ProjectVersion, *sqlgraph.Create _node.ProjectID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := pvc.mutation.RunsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.RunsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -412,10 +412,10 @@ func (pvc *ProjectVersionCreate) createSpec() (*ProjectVersion, *sqlgraph.Create // SetVersion(v+v). // }). // Exec(ctx) -func (pvc *ProjectVersionCreate) OnConflict(opts ...sql.ConflictOption) *ProjectVersionUpsertOne { - pvc.conflict = opts +func (_c *ProjectVersionCreate) OnConflict(opts ...sql.ConflictOption) *ProjectVersionUpsertOne { + _c.conflict = opts return &ProjectVersionUpsertOne{ - create: pvc, + create: _c, } } @@ -425,10 +425,10 @@ func (pvc *ProjectVersionCreate) OnConflict(opts ...sql.ConflictOption) *Project // client.ProjectVersion.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (pvc *ProjectVersionCreate) OnConflictColumns(columns ...string) *ProjectVersionUpsertOne { - pvc.conflict = append(pvc.conflict, sql.ConflictColumns(columns...)) +func (_c *ProjectVersionCreate) OnConflictColumns(columns ...string) *ProjectVersionUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &ProjectVersionUpsertOne{ - create: pvc, + create: _c, } } @@ -829,16 +829,16 @@ type ProjectVersionCreateBulk struct { } // Save creates the ProjectVersion entities in the database. 
-func (pvcb *ProjectVersionCreateBulk) Save(ctx context.Context) ([]*ProjectVersion, error) { - if pvcb.err != nil { - return nil, pvcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(pvcb.builders)) - nodes := make([]*ProjectVersion, len(pvcb.builders)) - mutators := make([]Mutator, len(pvcb.builders)) - for i := range pvcb.builders { +func (_c *ProjectVersionCreateBulk) Save(ctx context.Context) ([]*ProjectVersion, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*ProjectVersion, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := pvcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*ProjectVersionMutation) @@ -852,12 +852,12 @@ func (pvcb *ProjectVersionCreateBulk) Save(ctx context.Context) ([]*ProjectVersi var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, pvcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = pvcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, pvcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -877,7 +877,7 @@ func (pvcb *ProjectVersionCreateBulk) Save(ctx context.Context) ([]*ProjectVersi }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, pvcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -885,8 +885,8 @@ func (pvcb *ProjectVersionCreateBulk) Save(ctx context.Context) ([]*ProjectVersi } // SaveX is like Save, but panics if an error occurs. -func (pvcb *ProjectVersionCreateBulk) SaveX(ctx context.Context) []*ProjectVersion { - v, err := pvcb.Save(ctx) +func (_c *ProjectVersionCreateBulk) SaveX(ctx context.Context) []*ProjectVersion { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -894,14 +894,14 @@ func (pvcb *ProjectVersionCreateBulk) SaveX(ctx context.Context) []*ProjectVersi } // Exec executes the query. -func (pvcb *ProjectVersionCreateBulk) Exec(ctx context.Context) error { - _, err := pvcb.Save(ctx) +func (_c *ProjectVersionCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (pvcb *ProjectVersionCreateBulk) ExecX(ctx context.Context) { - if err := pvcb.Exec(ctx); err != nil { +func (_c *ProjectVersionCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -921,10 +921,10 @@ func (pvcb *ProjectVersionCreateBulk) ExecX(ctx context.Context) { // SetVersion(v+v). // }). 
// Exec(ctx) -func (pvcb *ProjectVersionCreateBulk) OnConflict(opts ...sql.ConflictOption) *ProjectVersionUpsertBulk { - pvcb.conflict = opts +func (_c *ProjectVersionCreateBulk) OnConflict(opts ...sql.ConflictOption) *ProjectVersionUpsertBulk { + _c.conflict = opts return &ProjectVersionUpsertBulk{ - create: pvcb, + create: _c, } } @@ -934,10 +934,10 @@ func (pvcb *ProjectVersionCreateBulk) OnConflict(opts ...sql.ConflictOption) *Pr // client.ProjectVersion.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (pvcb *ProjectVersionCreateBulk) OnConflictColumns(columns ...string) *ProjectVersionUpsertBulk { - pvcb.conflict = append(pvcb.conflict, sql.ConflictColumns(columns...)) +func (_c *ProjectVersionCreateBulk) OnConflictColumns(columns ...string) *ProjectVersionUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &ProjectVersionUpsertBulk{ - create: pvcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/projectversion_delete.go b/app/controlplane/pkg/data/ent/projectversion_delete.go index dc902eee7..c3112466e 100644 --- a/app/controlplane/pkg/data/ent/projectversion_delete.go +++ b/app/controlplane/pkg/data/ent/projectversion_delete.go @@ -20,56 +20,56 @@ type ProjectVersionDelete struct { } // Where appends a list predicates to the ProjectVersionDelete builder. -func (pvd *ProjectVersionDelete) Where(ps ...predicate.ProjectVersion) *ProjectVersionDelete { - pvd.mutation.Where(ps...) - return pvd +func (_d *ProjectVersionDelete) Where(ps ...predicate.ProjectVersion) *ProjectVersionDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (pvd *ProjectVersionDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, pvd.sqlExec, pvd.mutation, pvd.hooks) +func (_d *ProjectVersionDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (pvd *ProjectVersionDelete) ExecX(ctx context.Context) int { - n, err := pvd.Exec(ctx) +func (_d *ProjectVersionDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (pvd *ProjectVersionDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *ProjectVersionDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(projectversion.Table, sqlgraph.NewFieldSpec(projectversion.FieldID, field.TypeUUID)) - if ps := pvd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, pvd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - pvd.mutation.done = true + _d.mutation.done = true return affected, err } // ProjectVersionDeleteOne is the builder for deleting a single ProjectVersion entity. type ProjectVersionDeleteOne struct { - pvd *ProjectVersionDelete + _d *ProjectVersionDelete } // Where appends a list predicates to the ProjectVersionDelete builder. -func (pvdo *ProjectVersionDeleteOne) Where(ps ...predicate.ProjectVersion) *ProjectVersionDeleteOne { - pvdo.pvd.mutation.Where(ps...) 
- return pvdo +func (_d *ProjectVersionDeleteOne) Where(ps ...predicate.ProjectVersion) *ProjectVersionDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (pvdo *ProjectVersionDeleteOne) Exec(ctx context.Context) error { - n, err := pvdo.pvd.Exec(ctx) +func (_d *ProjectVersionDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (pvdo *ProjectVersionDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (pvdo *ProjectVersionDeleteOne) ExecX(ctx context.Context) { - if err := pvdo.Exec(ctx); err != nil { +func (_d *ProjectVersionDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/projectversion_query.go b/app/controlplane/pkg/data/ent/projectversion_query.go index 18cfa5e7a..4ae04b451 100644 --- a/app/controlplane/pkg/data/ent/projectversion_query.go +++ b/app/controlplane/pkg/data/ent/projectversion_query.go @@ -36,44 +36,44 @@ type ProjectVersionQuery struct { } // Where adds a new predicate for the ProjectVersionQuery builder. -func (pvq *ProjectVersionQuery) Where(ps ...predicate.ProjectVersion) *ProjectVersionQuery { - pvq.predicates = append(pvq.predicates, ps...) - return pvq +func (_q *ProjectVersionQuery) Where(ps ...predicate.ProjectVersion) *ProjectVersionQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (pvq *ProjectVersionQuery) Limit(limit int) *ProjectVersionQuery { - pvq.ctx.Limit = &limit - return pvq +func (_q *ProjectVersionQuery) Limit(limit int) *ProjectVersionQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (pvq *ProjectVersionQuery) Offset(offset int) *ProjectVersionQuery { - pvq.ctx.Offset = &offset - return pvq +func (_q *ProjectVersionQuery) Offset(offset int) *ProjectVersionQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (pvq *ProjectVersionQuery) Unique(unique bool) *ProjectVersionQuery { - pvq.ctx.Unique = &unique - return pvq +func (_q *ProjectVersionQuery) Unique(unique bool) *ProjectVersionQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (pvq *ProjectVersionQuery) Order(o ...projectversion.OrderOption) *ProjectVersionQuery { - pvq.order = append(pvq.order, o...) - return pvq +func (_q *ProjectVersionQuery) Order(o ...projectversion.OrderOption) *ProjectVersionQuery { + _q.order = append(_q.order, o...) + return _q } // QueryProject chains the current query on the "project" edge. 
-func (pvq *ProjectVersionQuery) QueryProject() *ProjectQuery { - query := (&ProjectClient{config: pvq.config}).Query() +func (_q *ProjectVersionQuery) QueryProject() *ProjectQuery { + query := (&ProjectClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := pvq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := pvq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -82,20 +82,20 @@ func (pvq *ProjectVersionQuery) QueryProject() *ProjectQuery { sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, projectversion.ProjectTable, projectversion.ProjectColumn), ) - fromU = sqlgraph.SetNeighbors(pvq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryRuns chains the current query on the "runs" edge. -func (pvq *ProjectVersionQuery) QueryRuns() *WorkflowRunQuery { - query := (&WorkflowRunClient{config: pvq.config}).Query() +func (_q *ProjectVersionQuery) QueryRuns() *WorkflowRunQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := pvq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := pvq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -104,7 +104,7 @@ func (pvq *ProjectVersionQuery) QueryRuns() *WorkflowRunQuery { sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, projectversion.RunsTable, projectversion.RunsColumn), ) - fromU = sqlgraph.SetNeighbors(pvq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -112,8 +112,8 @@ func (pvq *ProjectVersionQuery) QueryRuns() *WorkflowRunQuery { // First returns the first ProjectVersion entity from the query. // Returns a *NotFoundError when no ProjectVersion was found. -func (pvq *ProjectVersionQuery) First(ctx context.Context) (*ProjectVersion, error) { - nodes, err := pvq.Limit(1).All(setContextOp(ctx, pvq.ctx, ent.OpQueryFirst)) +func (_q *ProjectVersionQuery) First(ctx context.Context) (*ProjectVersion, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -124,8 +124,8 @@ func (pvq *ProjectVersionQuery) First(ctx context.Context) (*ProjectVersion, err } // FirstX is like First, but panics if an error occurs. -func (pvq *ProjectVersionQuery) FirstX(ctx context.Context) *ProjectVersion { - node, err := pvq.First(ctx) +func (_q *ProjectVersionQuery) FirstX(ctx context.Context) *ProjectVersion { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -134,9 +134,9 @@ func (pvq *ProjectVersionQuery) FirstX(ctx context.Context) *ProjectVersion { // FirstID returns the first ProjectVersion ID from the query. // Returns a *NotFoundError when no ProjectVersion ID was found. 
-func (pvq *ProjectVersionQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *ProjectVersionQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pvq.Limit(1).IDs(setContextOp(ctx, pvq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -147,8 +147,8 @@ func (pvq *ProjectVersionQuery) FirstID(ctx context.Context) (id uuid.UUID, err } // FirstIDX is like FirstID, but panics if an error occurs. -func (pvq *ProjectVersionQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := pvq.FirstID(ctx) +func (_q *ProjectVersionQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -158,8 +158,8 @@ func (pvq *ProjectVersionQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single ProjectVersion entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one ProjectVersion entity is found. // Returns a *NotFoundError when no ProjectVersion entities are found. -func (pvq *ProjectVersionQuery) Only(ctx context.Context) (*ProjectVersion, error) { - nodes, err := pvq.Limit(2).All(setContextOp(ctx, pvq.ctx, ent.OpQueryOnly)) +func (_q *ProjectVersionQuery) Only(ctx context.Context) (*ProjectVersion, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -174,8 +174,8 @@ func (pvq *ProjectVersionQuery) Only(ctx context.Context) (*ProjectVersion, erro } // OnlyX is like Only, but panics if an error occurs. -func (pvq *ProjectVersionQuery) OnlyX(ctx context.Context) *ProjectVersion { - node, err := pvq.Only(ctx) +func (_q *ProjectVersionQuery) OnlyX(ctx context.Context) *ProjectVersion { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -185,9 +185,9 @@ func (pvq *ProjectVersionQuery) OnlyX(ctx context.Context) *ProjectVersion { // OnlyID is like Only, but returns the only ProjectVersion ID in the query. // Returns a *NotSingularError when more than one ProjectVersion ID is found. // Returns a *NotFoundError when no entities are found. -func (pvq *ProjectVersionQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *ProjectVersionQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pvq.Limit(2).IDs(setContextOp(ctx, pvq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -202,8 +202,8 @@ func (pvq *ProjectVersionQuery) OnlyID(ctx context.Context) (id uuid.UUID, err e } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (pvq *ProjectVersionQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := pvq.OnlyID(ctx) +func (_q *ProjectVersionQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -211,18 +211,18 @@ func (pvq *ProjectVersionQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of ProjectVersions. 
-func (pvq *ProjectVersionQuery) All(ctx context.Context) ([]*ProjectVersion, error) { - ctx = setContextOp(ctx, pvq.ctx, ent.OpQueryAll) - if err := pvq.prepareQuery(ctx); err != nil { +func (_q *ProjectVersionQuery) All(ctx context.Context) ([]*ProjectVersion, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*ProjectVersion, *ProjectVersionQuery]() - return withInterceptors[[]*ProjectVersion](ctx, pvq, qr, pvq.inters) + return withInterceptors[[]*ProjectVersion](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (pvq *ProjectVersionQuery) AllX(ctx context.Context) []*ProjectVersion { - nodes, err := pvq.All(ctx) +func (_q *ProjectVersionQuery) AllX(ctx context.Context) []*ProjectVersion { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -230,20 +230,20 @@ func (pvq *ProjectVersionQuery) AllX(ctx context.Context) []*ProjectVersion { } // IDs executes the query and returns a list of ProjectVersion IDs. -func (pvq *ProjectVersionQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if pvq.ctx.Unique == nil && pvq.path != nil { - pvq.Unique(true) +func (_q *ProjectVersionQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, pvq.ctx, ent.OpQueryIDs) - if err = pvq.Select(projectversion.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(projectversion.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (pvq *ProjectVersionQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := pvq.IDs(ctx) +func (_q *ProjectVersionQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -251,17 +251,17 @@ func (pvq *ProjectVersionQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (pvq *ProjectVersionQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, pvq.ctx, ent.OpQueryCount) - if err := pvq.prepareQuery(ctx); err != nil { +func (_q *ProjectVersionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, pvq, querierCount[*ProjectVersionQuery](), pvq.inters) + return withInterceptors[int](ctx, _q, querierCount[*ProjectVersionQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (pvq *ProjectVersionQuery) CountX(ctx context.Context) int { - count, err := pvq.Count(ctx) +func (_q *ProjectVersionQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -269,9 +269,9 @@ func (pvq *ProjectVersionQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. 
-func (pvq *ProjectVersionQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, pvq.ctx, ent.OpQueryExist) - switch _, err := pvq.FirstID(ctx); { +func (_q *ProjectVersionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -282,8 +282,8 @@ func (pvq *ProjectVersionQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (pvq *ProjectVersionQuery) ExistX(ctx context.Context) bool { - exist, err := pvq.Exist(ctx) +func (_q *ProjectVersionQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -292,45 +292,45 @@ func (pvq *ProjectVersionQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the ProjectVersionQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (pvq *ProjectVersionQuery) Clone() *ProjectVersionQuery { - if pvq == nil { +func (_q *ProjectVersionQuery) Clone() *ProjectVersionQuery { + if _q == nil { return nil } return &ProjectVersionQuery{ - config: pvq.config, - ctx: pvq.ctx.Clone(), - order: append([]projectversion.OrderOption{}, pvq.order...), - inters: append([]Interceptor{}, pvq.inters...), - predicates: append([]predicate.ProjectVersion{}, pvq.predicates...), - withProject: pvq.withProject.Clone(), - withRuns: pvq.withRuns.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]projectversion.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.ProjectVersion{}, _q.predicates...), + withProject: _q.withProject.Clone(), + withRuns: _q.withRuns.Clone(), // clone intermediate query. - sql: pvq.sql.Clone(), - path: pvq.path, - modifiers: append([]func(*sql.Selector){}, pvq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithProject tells the query-builder to eager-load the nodes that are connected to // the "project" edge. The optional arguments are used to configure the query builder of the edge. -func (pvq *ProjectVersionQuery) WithProject(opts ...func(*ProjectQuery)) *ProjectVersionQuery { - query := (&ProjectClient{config: pvq.config}).Query() +func (_q *ProjectVersionQuery) WithProject(opts ...func(*ProjectQuery)) *ProjectVersionQuery { + query := (&ProjectClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - pvq.withProject = query - return pvq + _q.withProject = query + return _q } // WithRuns tells the query-builder to eager-load the nodes that are connected to // the "runs" edge. The optional arguments are used to configure the query builder of the edge. -func (pvq *ProjectVersionQuery) WithRuns(opts ...func(*WorkflowRunQuery)) *ProjectVersionQuery { - query := (&WorkflowRunClient{config: pvq.config}).Query() +func (_q *ProjectVersionQuery) WithRuns(opts ...func(*WorkflowRunQuery)) *ProjectVersionQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - pvq.withRuns = query - return pvq + _q.withRuns = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -347,10 +347,10 @@ func (pvq *ProjectVersionQuery) WithRuns(opts ...func(*WorkflowRunQuery)) *Proje // GroupBy(projectversion.FieldVersion). // Aggregate(ent.Count()). 
// Scan(ctx, &v) -func (pvq *ProjectVersionQuery) GroupBy(field string, fields ...string) *ProjectVersionGroupBy { - pvq.ctx.Fields = append([]string{field}, fields...) - grbuild := &ProjectVersionGroupBy{build: pvq} - grbuild.flds = &pvq.ctx.Fields +func (_q *ProjectVersionQuery) GroupBy(field string, fields ...string) *ProjectVersionGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ProjectVersionGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = projectversion.Label grbuild.scan = grbuild.Scan return grbuild @@ -368,83 +368,83 @@ func (pvq *ProjectVersionQuery) GroupBy(field string, fields ...string) *Project // client.ProjectVersion.Query(). // Select(projectversion.FieldVersion). // Scan(ctx, &v) -func (pvq *ProjectVersionQuery) Select(fields ...string) *ProjectVersionSelect { - pvq.ctx.Fields = append(pvq.ctx.Fields, fields...) - sbuild := &ProjectVersionSelect{ProjectVersionQuery: pvq} +func (_q *ProjectVersionQuery) Select(fields ...string) *ProjectVersionSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ProjectVersionSelect{ProjectVersionQuery: _q} sbuild.label = projectversion.Label - sbuild.flds, sbuild.scan = &pvq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a ProjectVersionSelect configured with the given aggregations. -func (pvq *ProjectVersionQuery) Aggregate(fns ...AggregateFunc) *ProjectVersionSelect { - return pvq.Select().Aggregate(fns...) +func (_q *ProjectVersionQuery) Aggregate(fns ...AggregateFunc) *ProjectVersionSelect { + return _q.Select().Aggregate(fns...) } -func (pvq *ProjectVersionQuery) prepareQuery(ctx context.Context) error { - for _, inter := range pvq.inters { +func (_q *ProjectVersionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, pvq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range pvq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !projectversion.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if pvq.path != nil { - prev, err := pvq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - pvq.sql = prev + _q.sql = prev } return nil } -func (pvq *ProjectVersionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ProjectVersion, error) { +func (_q *ProjectVersionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ProjectVersion, error) { var ( nodes = []*ProjectVersion{} - _spec = pvq.querySpec() + _spec = _q.querySpec() loadedTypes = [2]bool{ - pvq.withProject != nil, - pvq.withRuns != nil, + _q.withProject != nil, + _q.withRuns != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*ProjectVersion).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &ProjectVersion{config: pvq.config} + node := &ProjectVersion{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(pvq.modifiers) > 0 { - _spec.Modifiers = pvq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, pvq.driver, _spec); err != nil { + if 
err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := pvq.withProject; query != nil { - if err := pvq.loadProject(ctx, query, nodes, nil, + if query := _q.withProject; query != nil { + if err := _q.loadProject(ctx, query, nodes, nil, func(n *ProjectVersion, e *Project) { n.Edges.Project = e }); err != nil { return nil, err } } - if query := pvq.withRuns; query != nil { - if err := pvq.loadRuns(ctx, query, nodes, + if query := _q.withRuns; query != nil { + if err := _q.loadRuns(ctx, query, nodes, func(n *ProjectVersion) { n.Edges.Runs = []*WorkflowRun{} }, func(n *ProjectVersion, e *WorkflowRun) { n.Edges.Runs = append(n.Edges.Runs, e) }); err != nil { return nil, err @@ -453,7 +453,7 @@ func (pvq *ProjectVersionQuery) sqlAll(ctx context.Context, hooks ...queryHook) return nodes, nil } -func (pvq *ProjectVersionQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*ProjectVersion, init func(*ProjectVersion), assign func(*ProjectVersion, *Project)) error { +func (_q *ProjectVersionQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*ProjectVersion, init func(*ProjectVersion), assign func(*ProjectVersion, *Project)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*ProjectVersion) for i := range nodes { @@ -482,7 +482,7 @@ func (pvq *ProjectVersionQuery) loadProject(ctx context.Context, query *ProjectQ } return nil } -func (pvq *ProjectVersionQuery) loadRuns(ctx context.Context, query *WorkflowRunQuery, nodes []*ProjectVersion, init func(*ProjectVersion), assign func(*ProjectVersion, *WorkflowRun)) error { +func (_q *ProjectVersionQuery) loadRuns(ctx context.Context, query *WorkflowRunQuery, nodes []*ProjectVersion, init func(*ProjectVersion), assign func(*ProjectVersion, *WorkflowRun)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*ProjectVersion) for i := range nodes { @@ -514,27 +514,27 @@ func (pvq *ProjectVersionQuery) loadRuns(ctx context.Context, query *WorkflowRun return nil } -func (pvq *ProjectVersionQuery) sqlCount(ctx context.Context) (int, error) { - _spec := pvq.querySpec() - if len(pvq.modifiers) > 0 { - _spec.Modifiers = pvq.modifiers +func (_q *ProjectVersionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = pvq.ctx.Fields - if len(pvq.ctx.Fields) > 0 { - _spec.Unique = pvq.ctx.Unique != nil && *pvq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, pvq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (pvq *ProjectVersionQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *ProjectVersionQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(projectversion.Table, projectversion.Columns, sqlgraph.NewFieldSpec(projectversion.FieldID, field.TypeUUID)) - _spec.From = pvq.sql - if unique := pvq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if pvq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := pvq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, projectversion.FieldID) for i := range fields { @@ 
-542,24 +542,24 @@ func (pvq *ProjectVersionQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if pvq.withProject != nil { + if _q.withProject != nil { _spec.Node.AddColumnOnce(projectversion.FieldProjectID) } } - if ps := pvq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := pvq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := pvq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := pvq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -569,36 +569,36 @@ func (pvq *ProjectVersionQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (pvq *ProjectVersionQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(pvq.driver.Dialect()) +func (_q *ProjectVersionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(projectversion.Table) - columns := pvq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = projectversion.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if pvq.sql != nil { - selector = pvq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if pvq.ctx.Unique != nil && *pvq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range pvq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range pvq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range pvq.order { + for _, p := range _q.order { p(selector) } - if offset := pvq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := pvq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -607,33 +607,33 @@ func (pvq *ProjectVersionQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (pvq *ProjectVersionQuery) ForUpdate(opts ...sql.LockOption) *ProjectVersionQuery { - if pvq.driver.Dialect() == dialect.Postgres { - pvq.Unique(false) +func (_q *ProjectVersionQuery) ForUpdate(opts ...sql.LockOption) *ProjectVersionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - pvq.modifiers = append(pvq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return pvq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. 
-func (pvq *ProjectVersionQuery) ForShare(opts ...sql.LockOption) *ProjectVersionQuery { - if pvq.driver.Dialect() == dialect.Postgres { - pvq.Unique(false) +func (_q *ProjectVersionQuery) ForShare(opts ...sql.LockOption) *ProjectVersionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - pvq.modifiers = append(pvq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return pvq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (pvq *ProjectVersionQuery) Modify(modifiers ...func(s *sql.Selector)) *ProjectVersionSelect { - pvq.modifiers = append(pvq.modifiers, modifiers...) - return pvq.Select() +func (_q *ProjectVersionQuery) Modify(modifiers ...func(s *sql.Selector)) *ProjectVersionSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // ProjectVersionGroupBy is the group-by builder for ProjectVersion entities. @@ -643,41 +643,41 @@ type ProjectVersionGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (pvgb *ProjectVersionGroupBy) Aggregate(fns ...AggregateFunc) *ProjectVersionGroupBy { - pvgb.fns = append(pvgb.fns, fns...) - return pvgb +func (_g *ProjectVersionGroupBy) Aggregate(fns ...AggregateFunc) *ProjectVersionGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (pvgb *ProjectVersionGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, pvgb.build.ctx, ent.OpQueryGroupBy) - if err := pvgb.build.prepareQuery(ctx); err != nil { +func (_g *ProjectVersionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*ProjectVersionQuery, *ProjectVersionGroupBy](ctx, pvgb.build, pvgb, pvgb.build.inters, v) + return scanWithInterceptors[*ProjectVersionQuery, *ProjectVersionGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (pvgb *ProjectVersionGroupBy) sqlScan(ctx context.Context, root *ProjectVersionQuery, v any) error { +func (_g *ProjectVersionGroupBy) sqlScan(ctx context.Context, root *ProjectVersionQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(pvgb.fns)) - for _, fn := range pvgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*pvgb.flds)+len(pvgb.fns)) - for _, f := range *pvgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*pvgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := pvgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -691,27 +691,27 @@ type ProjectVersionSelect struct { } // Aggregate adds the given aggregation functions to the selector query. 
-func (pvs *ProjectVersionSelect) Aggregate(fns ...AggregateFunc) *ProjectVersionSelect { - pvs.fns = append(pvs.fns, fns...) - return pvs +func (_s *ProjectVersionSelect) Aggregate(fns ...AggregateFunc) *ProjectVersionSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (pvs *ProjectVersionSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, pvs.ctx, ent.OpQuerySelect) - if err := pvs.prepareQuery(ctx); err != nil { +func (_s *ProjectVersionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*ProjectVersionQuery, *ProjectVersionSelect](ctx, pvs.ProjectVersionQuery, pvs, pvs.inters, v) + return scanWithInterceptors[*ProjectVersionQuery, *ProjectVersionSelect](ctx, _s.ProjectVersionQuery, _s, _s.inters, v) } -func (pvs *ProjectVersionSelect) sqlScan(ctx context.Context, root *ProjectVersionQuery, v any) error { +func (_s *ProjectVersionSelect) sqlScan(ctx context.Context, root *ProjectVersionQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(pvs.fns)) - for _, fn := range pvs.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*pvs.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -719,7 +719,7 @@ func (pvs *ProjectVersionSelect) sqlScan(ctx context.Context, root *ProjectVersi } rows := &sql.Rows{} query, args := selector.Query() - if err := pvs.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -727,7 +727,7 @@ func (pvs *ProjectVersionSelect) sqlScan(ctx context.Context, root *ProjectVersi } // Modify adds a query modifier for attaching custom logic to queries. -func (pvs *ProjectVersionSelect) Modify(modifiers ...func(s *sql.Selector)) *ProjectVersionSelect { - pvs.modifiers = append(pvs.modifiers, modifiers...) - return pvs +func (_s *ProjectVersionSelect) Modify(modifiers ...func(s *sql.Selector)) *ProjectVersionSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/projectversion_update.go b/app/controlplane/pkg/data/ent/projectversion_update.go index 51b93a979..1d4bded24 100644 --- a/app/controlplane/pkg/data/ent/projectversion_update.go +++ b/app/controlplane/pkg/data/ent/projectversion_update.go @@ -27,222 +27,222 @@ type ProjectVersionUpdate struct { } // Where appends a list predicates to the ProjectVersionUpdate builder. -func (pvu *ProjectVersionUpdate) Where(ps ...predicate.ProjectVersion) *ProjectVersionUpdate { - pvu.mutation.Where(ps...) - return pvu +func (_u *ProjectVersionUpdate) Where(ps ...predicate.ProjectVersion) *ProjectVersionUpdate { + _u.mutation.Where(ps...) + return _u } // SetVersion sets the "version" field. -func (pvu *ProjectVersionUpdate) SetVersion(s string) *ProjectVersionUpdate { - pvu.mutation.SetVersion(s) - return pvu +func (_u *ProjectVersionUpdate) SetVersion(v string) *ProjectVersionUpdate { + _u.mutation.SetVersion(v) + return _u } // SetNillableVersion sets the "version" field if the given value is not nil. 
-func (pvu *ProjectVersionUpdate) SetNillableVersion(s *string) *ProjectVersionUpdate { - if s != nil { - pvu.SetVersion(*s) +func (_u *ProjectVersionUpdate) SetNillableVersion(v *string) *ProjectVersionUpdate { + if v != nil { + _u.SetVersion(*v) } - return pvu + return _u } // SetUpdatedAt sets the "updated_at" field. -func (pvu *ProjectVersionUpdate) SetUpdatedAt(t time.Time) *ProjectVersionUpdate { - pvu.mutation.SetUpdatedAt(t) - return pvu +func (_u *ProjectVersionUpdate) SetUpdatedAt(v time.Time) *ProjectVersionUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (pvu *ProjectVersionUpdate) SetNillableUpdatedAt(t *time.Time) *ProjectVersionUpdate { - if t != nil { - pvu.SetUpdatedAt(*t) +func (_u *ProjectVersionUpdate) SetNillableUpdatedAt(v *time.Time) *ProjectVersionUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return pvu + return _u } // SetDeletedAt sets the "deleted_at" field. -func (pvu *ProjectVersionUpdate) SetDeletedAt(t time.Time) *ProjectVersionUpdate { - pvu.mutation.SetDeletedAt(t) - return pvu +func (_u *ProjectVersionUpdate) SetDeletedAt(v time.Time) *ProjectVersionUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (pvu *ProjectVersionUpdate) SetNillableDeletedAt(t *time.Time) *ProjectVersionUpdate { - if t != nil { - pvu.SetDeletedAt(*t) +func (_u *ProjectVersionUpdate) SetNillableDeletedAt(v *time.Time) *ProjectVersionUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return pvu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (pvu *ProjectVersionUpdate) ClearDeletedAt() *ProjectVersionUpdate { - pvu.mutation.ClearDeletedAt() - return pvu +func (_u *ProjectVersionUpdate) ClearDeletedAt() *ProjectVersionUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetProjectID sets the "project_id" field. -func (pvu *ProjectVersionUpdate) SetProjectID(u uuid.UUID) *ProjectVersionUpdate { - pvu.mutation.SetProjectID(u) - return pvu +func (_u *ProjectVersionUpdate) SetProjectID(v uuid.UUID) *ProjectVersionUpdate { + _u.mutation.SetProjectID(v) + return _u } // SetNillableProjectID sets the "project_id" field if the given value is not nil. -func (pvu *ProjectVersionUpdate) SetNillableProjectID(u *uuid.UUID) *ProjectVersionUpdate { - if u != nil { - pvu.SetProjectID(*u) +func (_u *ProjectVersionUpdate) SetNillableProjectID(v *uuid.UUID) *ProjectVersionUpdate { + if v != nil { + _u.SetProjectID(*v) } - return pvu + return _u } // SetPrerelease sets the "prerelease" field. -func (pvu *ProjectVersionUpdate) SetPrerelease(b bool) *ProjectVersionUpdate { - pvu.mutation.SetPrerelease(b) - return pvu +func (_u *ProjectVersionUpdate) SetPrerelease(v bool) *ProjectVersionUpdate { + _u.mutation.SetPrerelease(v) + return _u } // SetNillablePrerelease sets the "prerelease" field if the given value is not nil. -func (pvu *ProjectVersionUpdate) SetNillablePrerelease(b *bool) *ProjectVersionUpdate { - if b != nil { - pvu.SetPrerelease(*b) +func (_u *ProjectVersionUpdate) SetNillablePrerelease(v *bool) *ProjectVersionUpdate { + if v != nil { + _u.SetPrerelease(*v) } - return pvu + return _u } // SetWorkflowRunCount sets the "workflow_run_count" field. 
-func (pvu *ProjectVersionUpdate) SetWorkflowRunCount(i int) *ProjectVersionUpdate { - pvu.mutation.ResetWorkflowRunCount() - pvu.mutation.SetWorkflowRunCount(i) - return pvu +func (_u *ProjectVersionUpdate) SetWorkflowRunCount(v int) *ProjectVersionUpdate { + _u.mutation.ResetWorkflowRunCount() + _u.mutation.SetWorkflowRunCount(v) + return _u } // SetNillableWorkflowRunCount sets the "workflow_run_count" field if the given value is not nil. -func (pvu *ProjectVersionUpdate) SetNillableWorkflowRunCount(i *int) *ProjectVersionUpdate { - if i != nil { - pvu.SetWorkflowRunCount(*i) +func (_u *ProjectVersionUpdate) SetNillableWorkflowRunCount(v *int) *ProjectVersionUpdate { + if v != nil { + _u.SetWorkflowRunCount(*v) } - return pvu + return _u } -// AddWorkflowRunCount adds i to the "workflow_run_count" field. -func (pvu *ProjectVersionUpdate) AddWorkflowRunCount(i int) *ProjectVersionUpdate { - pvu.mutation.AddWorkflowRunCount(i) - return pvu +// AddWorkflowRunCount adds value to the "workflow_run_count" field. +func (_u *ProjectVersionUpdate) AddWorkflowRunCount(v int) *ProjectVersionUpdate { + _u.mutation.AddWorkflowRunCount(v) + return _u } // SetReleasedAt sets the "released_at" field. -func (pvu *ProjectVersionUpdate) SetReleasedAt(t time.Time) *ProjectVersionUpdate { - pvu.mutation.SetReleasedAt(t) - return pvu +func (_u *ProjectVersionUpdate) SetReleasedAt(v time.Time) *ProjectVersionUpdate { + _u.mutation.SetReleasedAt(v) + return _u } // SetNillableReleasedAt sets the "released_at" field if the given value is not nil. -func (pvu *ProjectVersionUpdate) SetNillableReleasedAt(t *time.Time) *ProjectVersionUpdate { - if t != nil { - pvu.SetReleasedAt(*t) +func (_u *ProjectVersionUpdate) SetNillableReleasedAt(v *time.Time) *ProjectVersionUpdate { + if v != nil { + _u.SetReleasedAt(*v) } - return pvu + return _u } // ClearReleasedAt clears the value of the "released_at" field. -func (pvu *ProjectVersionUpdate) ClearReleasedAt() *ProjectVersionUpdate { - pvu.mutation.ClearReleasedAt() - return pvu +func (_u *ProjectVersionUpdate) ClearReleasedAt() *ProjectVersionUpdate { + _u.mutation.ClearReleasedAt() + return _u } // SetLastRunAt sets the "last_run_at" field. -func (pvu *ProjectVersionUpdate) SetLastRunAt(t time.Time) *ProjectVersionUpdate { - pvu.mutation.SetLastRunAt(t) - return pvu +func (_u *ProjectVersionUpdate) SetLastRunAt(v time.Time) *ProjectVersionUpdate { + _u.mutation.SetLastRunAt(v) + return _u } // SetNillableLastRunAt sets the "last_run_at" field if the given value is not nil. -func (pvu *ProjectVersionUpdate) SetNillableLastRunAt(t *time.Time) *ProjectVersionUpdate { - if t != nil { - pvu.SetLastRunAt(*t) +func (_u *ProjectVersionUpdate) SetNillableLastRunAt(v *time.Time) *ProjectVersionUpdate { + if v != nil { + _u.SetLastRunAt(*v) } - return pvu + return _u } // ClearLastRunAt clears the value of the "last_run_at" field. -func (pvu *ProjectVersionUpdate) ClearLastRunAt() *ProjectVersionUpdate { - pvu.mutation.ClearLastRunAt() - return pvu +func (_u *ProjectVersionUpdate) ClearLastRunAt() *ProjectVersionUpdate { + _u.mutation.ClearLastRunAt() + return _u } // SetLatest sets the "latest" field. -func (pvu *ProjectVersionUpdate) SetLatest(b bool) *ProjectVersionUpdate { - pvu.mutation.SetLatest(b) - return pvu +func (_u *ProjectVersionUpdate) SetLatest(v bool) *ProjectVersionUpdate { + _u.mutation.SetLatest(v) + return _u } // SetNillableLatest sets the "latest" field if the given value is not nil. 
-func (pvu *ProjectVersionUpdate) SetNillableLatest(b *bool) *ProjectVersionUpdate { - if b != nil { - pvu.SetLatest(*b) +func (_u *ProjectVersionUpdate) SetNillableLatest(v *bool) *ProjectVersionUpdate { + if v != nil { + _u.SetLatest(*v) } - return pvu + return _u } // SetProject sets the "project" edge to the Project entity. -func (pvu *ProjectVersionUpdate) SetProject(p *Project) *ProjectVersionUpdate { - return pvu.SetProjectID(p.ID) +func (_u *ProjectVersionUpdate) SetProject(v *Project) *ProjectVersionUpdate { + return _u.SetProjectID(v.ID) } // AddRunIDs adds the "runs" edge to the WorkflowRun entity by IDs. -func (pvu *ProjectVersionUpdate) AddRunIDs(ids ...uuid.UUID) *ProjectVersionUpdate { - pvu.mutation.AddRunIDs(ids...) - return pvu +func (_u *ProjectVersionUpdate) AddRunIDs(ids ...uuid.UUID) *ProjectVersionUpdate { + _u.mutation.AddRunIDs(ids...) + return _u } // AddRuns adds the "runs" edges to the WorkflowRun entity. -func (pvu *ProjectVersionUpdate) AddRuns(w ...*WorkflowRun) *ProjectVersionUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ProjectVersionUpdate) AddRuns(v ...*WorkflowRun) *ProjectVersionUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pvu.AddRunIDs(ids...) + return _u.AddRunIDs(ids...) } // Mutation returns the ProjectVersionMutation object of the builder. -func (pvu *ProjectVersionUpdate) Mutation() *ProjectVersionMutation { - return pvu.mutation +func (_u *ProjectVersionUpdate) Mutation() *ProjectVersionMutation { + return _u.mutation } // ClearProject clears the "project" edge to the Project entity. -func (pvu *ProjectVersionUpdate) ClearProject() *ProjectVersionUpdate { - pvu.mutation.ClearProject() - return pvu +func (_u *ProjectVersionUpdate) ClearProject() *ProjectVersionUpdate { + _u.mutation.ClearProject() + return _u } // ClearRuns clears all "runs" edges to the WorkflowRun entity. -func (pvu *ProjectVersionUpdate) ClearRuns() *ProjectVersionUpdate { - pvu.mutation.ClearRuns() - return pvu +func (_u *ProjectVersionUpdate) ClearRuns() *ProjectVersionUpdate { + _u.mutation.ClearRuns() + return _u } // RemoveRunIDs removes the "runs" edge to WorkflowRun entities by IDs. -func (pvu *ProjectVersionUpdate) RemoveRunIDs(ids ...uuid.UUID) *ProjectVersionUpdate { - pvu.mutation.RemoveRunIDs(ids...) - return pvu +func (_u *ProjectVersionUpdate) RemoveRunIDs(ids ...uuid.UUID) *ProjectVersionUpdate { + _u.mutation.RemoveRunIDs(ids...) + return _u } // RemoveRuns removes "runs" edges to WorkflowRun entities. -func (pvu *ProjectVersionUpdate) RemoveRuns(w ...*WorkflowRun) *ProjectVersionUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ProjectVersionUpdate) RemoveRuns(v ...*WorkflowRun) *ProjectVersionUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pvu.RemoveRunIDs(ids...) + return _u.RemoveRunIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (pvu *ProjectVersionUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, pvu.sqlSave, pvu.mutation, pvu.hooks) +func (_u *ProjectVersionUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. 
-func (pvu *ProjectVersionUpdate) SaveX(ctx context.Context) int { - affected, err := pvu.Save(ctx) +func (_u *ProjectVersionUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -250,86 +250,86 @@ func (pvu *ProjectVersionUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (pvu *ProjectVersionUpdate) Exec(ctx context.Context) error { - _, err := pvu.Save(ctx) +func (_u *ProjectVersionUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (pvu *ProjectVersionUpdate) ExecX(ctx context.Context) { - if err := pvu.Exec(ctx); err != nil { +func (_u *ProjectVersionUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (pvu *ProjectVersionUpdate) check() error { - if v, ok := pvu.mutation.Version(); ok { +func (_u *ProjectVersionUpdate) check() error { + if v, ok := _u.mutation.Version(); ok { if err := projectversion.VersionValidator(v); err != nil { return &ValidationError{Name: "version", err: fmt.Errorf(`ent: validator failed for field "ProjectVersion.version": %w`, err)} } } - if pvu.mutation.ProjectCleared() && len(pvu.mutation.ProjectIDs()) > 0 { + if _u.mutation.ProjectCleared() && len(_u.mutation.ProjectIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "ProjectVersion.project"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (pvu *ProjectVersionUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ProjectVersionUpdate { - pvu.modifiers = append(pvu.modifiers, modifiers...) - return pvu +func (_u *ProjectVersionUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ProjectVersionUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (pvu *ProjectVersionUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := pvu.check(); err != nil { - return n, err +func (_u *ProjectVersionUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(projectversion.Table, projectversion.Columns, sqlgraph.NewFieldSpec(projectversion.FieldID, field.TypeUUID)) - if ps := pvu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := pvu.mutation.Version(); ok { + if value, ok := _u.mutation.Version(); ok { _spec.SetField(projectversion.FieldVersion, field.TypeString, value) } - if value, ok := pvu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(projectversion.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := pvu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(projectversion.FieldDeletedAt, field.TypeTime, value) } - if pvu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(projectversion.FieldDeletedAt, field.TypeTime) } - if value, ok := pvu.mutation.Prerelease(); ok { + if value, ok := _u.mutation.Prerelease(); ok { _spec.SetField(projectversion.FieldPrerelease, field.TypeBool, value) } - if value, ok := pvu.mutation.WorkflowRunCount(); ok { + if value, ok := _u.mutation.WorkflowRunCount(); ok { _spec.SetField(projectversion.FieldWorkflowRunCount, field.TypeInt, value) } - if value, ok := pvu.mutation.AddedWorkflowRunCount(); ok { + if value, ok := _u.mutation.AddedWorkflowRunCount(); ok { _spec.AddField(projectversion.FieldWorkflowRunCount, field.TypeInt, value) } - if value, ok := pvu.mutation.ReleasedAt(); ok { + if value, ok := _u.mutation.ReleasedAt(); ok { _spec.SetField(projectversion.FieldReleasedAt, field.TypeTime, value) } - if pvu.mutation.ReleasedAtCleared() { + if _u.mutation.ReleasedAtCleared() { _spec.ClearField(projectversion.FieldReleasedAt, field.TypeTime) } - if value, ok := pvu.mutation.LastRunAt(); ok { + if value, ok := _u.mutation.LastRunAt(); ok { _spec.SetField(projectversion.FieldLastRunAt, field.TypeTime, value) } - if pvu.mutation.LastRunAtCleared() { + if _u.mutation.LastRunAtCleared() { _spec.ClearField(projectversion.FieldLastRunAt, field.TypeTime) } - if value, ok := pvu.mutation.Latest(); ok { + if value, ok := _u.mutation.Latest(); ok { _spec.SetField(projectversion.FieldLatest, field.TypeBool, value) } - if pvu.mutation.ProjectCleared() { + if _u.mutation.ProjectCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -342,7 +342,7 @@ func (pvu *ProjectVersionUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pvu.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -358,7 +358,7 @@ func (pvu *ProjectVersionUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if pvu.mutation.RunsCleared() { + if _u.mutation.RunsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -371,7 +371,7 @@ func (pvu *ProjectVersionUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := 
pvu.mutation.RemovedRunsIDs(); len(nodes) > 0 && !pvu.mutation.RunsCleared() { + if nodes := _u.mutation.RemovedRunsIDs(); len(nodes) > 0 && !_u.mutation.RunsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -387,7 +387,7 @@ func (pvu *ProjectVersionUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pvu.mutation.RunsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.RunsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -403,8 +403,8 @@ func (pvu *ProjectVersionUpdate) sqlSave(ctx context.Context) (n int, err error) } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(pvu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, pvu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{projectversion.Label} } else if sqlgraph.IsConstraintError(err) { @@ -412,8 +412,8 @@ func (pvu *ProjectVersionUpdate) sqlSave(ctx context.Context) (n int, err error) } return 0, err } - pvu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // ProjectVersionUpdateOne is the builder for updating a single ProjectVersion entity. @@ -426,229 +426,229 @@ type ProjectVersionUpdateOne struct { } // SetVersion sets the "version" field. -func (pvuo *ProjectVersionUpdateOne) SetVersion(s string) *ProjectVersionUpdateOne { - pvuo.mutation.SetVersion(s) - return pvuo +func (_u *ProjectVersionUpdateOne) SetVersion(v string) *ProjectVersionUpdateOne { + _u.mutation.SetVersion(v) + return _u } // SetNillableVersion sets the "version" field if the given value is not nil. -func (pvuo *ProjectVersionUpdateOne) SetNillableVersion(s *string) *ProjectVersionUpdateOne { - if s != nil { - pvuo.SetVersion(*s) +func (_u *ProjectVersionUpdateOne) SetNillableVersion(v *string) *ProjectVersionUpdateOne { + if v != nil { + _u.SetVersion(*v) } - return pvuo + return _u } // SetUpdatedAt sets the "updated_at" field. -func (pvuo *ProjectVersionUpdateOne) SetUpdatedAt(t time.Time) *ProjectVersionUpdateOne { - pvuo.mutation.SetUpdatedAt(t) - return pvuo +func (_u *ProjectVersionUpdateOne) SetUpdatedAt(v time.Time) *ProjectVersionUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (pvuo *ProjectVersionUpdateOne) SetNillableUpdatedAt(t *time.Time) *ProjectVersionUpdateOne { - if t != nil { - pvuo.SetUpdatedAt(*t) +func (_u *ProjectVersionUpdateOne) SetNillableUpdatedAt(v *time.Time) *ProjectVersionUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return pvuo + return _u } // SetDeletedAt sets the "deleted_at" field. -func (pvuo *ProjectVersionUpdateOne) SetDeletedAt(t time.Time) *ProjectVersionUpdateOne { - pvuo.mutation.SetDeletedAt(t) - return pvuo +func (_u *ProjectVersionUpdateOne) SetDeletedAt(v time.Time) *ProjectVersionUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. 
-func (pvuo *ProjectVersionUpdateOne) SetNillableDeletedAt(t *time.Time) *ProjectVersionUpdateOne { - if t != nil { - pvuo.SetDeletedAt(*t) +func (_u *ProjectVersionUpdateOne) SetNillableDeletedAt(v *time.Time) *ProjectVersionUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return pvuo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (pvuo *ProjectVersionUpdateOne) ClearDeletedAt() *ProjectVersionUpdateOne { - pvuo.mutation.ClearDeletedAt() - return pvuo +func (_u *ProjectVersionUpdateOne) ClearDeletedAt() *ProjectVersionUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetProjectID sets the "project_id" field. -func (pvuo *ProjectVersionUpdateOne) SetProjectID(u uuid.UUID) *ProjectVersionUpdateOne { - pvuo.mutation.SetProjectID(u) - return pvuo +func (_u *ProjectVersionUpdateOne) SetProjectID(v uuid.UUID) *ProjectVersionUpdateOne { + _u.mutation.SetProjectID(v) + return _u } // SetNillableProjectID sets the "project_id" field if the given value is not nil. -func (pvuo *ProjectVersionUpdateOne) SetNillableProjectID(u *uuid.UUID) *ProjectVersionUpdateOne { - if u != nil { - pvuo.SetProjectID(*u) +func (_u *ProjectVersionUpdateOne) SetNillableProjectID(v *uuid.UUID) *ProjectVersionUpdateOne { + if v != nil { + _u.SetProjectID(*v) } - return pvuo + return _u } // SetPrerelease sets the "prerelease" field. -func (pvuo *ProjectVersionUpdateOne) SetPrerelease(b bool) *ProjectVersionUpdateOne { - pvuo.mutation.SetPrerelease(b) - return pvuo +func (_u *ProjectVersionUpdateOne) SetPrerelease(v bool) *ProjectVersionUpdateOne { + _u.mutation.SetPrerelease(v) + return _u } // SetNillablePrerelease sets the "prerelease" field if the given value is not nil. -func (pvuo *ProjectVersionUpdateOne) SetNillablePrerelease(b *bool) *ProjectVersionUpdateOne { - if b != nil { - pvuo.SetPrerelease(*b) +func (_u *ProjectVersionUpdateOne) SetNillablePrerelease(v *bool) *ProjectVersionUpdateOne { + if v != nil { + _u.SetPrerelease(*v) } - return pvuo + return _u } // SetWorkflowRunCount sets the "workflow_run_count" field. -func (pvuo *ProjectVersionUpdateOne) SetWorkflowRunCount(i int) *ProjectVersionUpdateOne { - pvuo.mutation.ResetWorkflowRunCount() - pvuo.mutation.SetWorkflowRunCount(i) - return pvuo +func (_u *ProjectVersionUpdateOne) SetWorkflowRunCount(v int) *ProjectVersionUpdateOne { + _u.mutation.ResetWorkflowRunCount() + _u.mutation.SetWorkflowRunCount(v) + return _u } // SetNillableWorkflowRunCount sets the "workflow_run_count" field if the given value is not nil. -func (pvuo *ProjectVersionUpdateOne) SetNillableWorkflowRunCount(i *int) *ProjectVersionUpdateOne { - if i != nil { - pvuo.SetWorkflowRunCount(*i) +func (_u *ProjectVersionUpdateOne) SetNillableWorkflowRunCount(v *int) *ProjectVersionUpdateOne { + if v != nil { + _u.SetWorkflowRunCount(*v) } - return pvuo + return _u } -// AddWorkflowRunCount adds i to the "workflow_run_count" field. -func (pvuo *ProjectVersionUpdateOne) AddWorkflowRunCount(i int) *ProjectVersionUpdateOne { - pvuo.mutation.AddWorkflowRunCount(i) - return pvuo +// AddWorkflowRunCount adds value to the "workflow_run_count" field. +func (_u *ProjectVersionUpdateOne) AddWorkflowRunCount(v int) *ProjectVersionUpdateOne { + _u.mutation.AddWorkflowRunCount(v) + return _u } // SetReleasedAt sets the "released_at" field. 
-func (pvuo *ProjectVersionUpdateOne) SetReleasedAt(t time.Time) *ProjectVersionUpdateOne { - pvuo.mutation.SetReleasedAt(t) - return pvuo +func (_u *ProjectVersionUpdateOne) SetReleasedAt(v time.Time) *ProjectVersionUpdateOne { + _u.mutation.SetReleasedAt(v) + return _u } // SetNillableReleasedAt sets the "released_at" field if the given value is not nil. -func (pvuo *ProjectVersionUpdateOne) SetNillableReleasedAt(t *time.Time) *ProjectVersionUpdateOne { - if t != nil { - pvuo.SetReleasedAt(*t) +func (_u *ProjectVersionUpdateOne) SetNillableReleasedAt(v *time.Time) *ProjectVersionUpdateOne { + if v != nil { + _u.SetReleasedAt(*v) } - return pvuo + return _u } // ClearReleasedAt clears the value of the "released_at" field. -func (pvuo *ProjectVersionUpdateOne) ClearReleasedAt() *ProjectVersionUpdateOne { - pvuo.mutation.ClearReleasedAt() - return pvuo +func (_u *ProjectVersionUpdateOne) ClearReleasedAt() *ProjectVersionUpdateOne { + _u.mutation.ClearReleasedAt() + return _u } // SetLastRunAt sets the "last_run_at" field. -func (pvuo *ProjectVersionUpdateOne) SetLastRunAt(t time.Time) *ProjectVersionUpdateOne { - pvuo.mutation.SetLastRunAt(t) - return pvuo +func (_u *ProjectVersionUpdateOne) SetLastRunAt(v time.Time) *ProjectVersionUpdateOne { + _u.mutation.SetLastRunAt(v) + return _u } // SetNillableLastRunAt sets the "last_run_at" field if the given value is not nil. -func (pvuo *ProjectVersionUpdateOne) SetNillableLastRunAt(t *time.Time) *ProjectVersionUpdateOne { - if t != nil { - pvuo.SetLastRunAt(*t) +func (_u *ProjectVersionUpdateOne) SetNillableLastRunAt(v *time.Time) *ProjectVersionUpdateOne { + if v != nil { + _u.SetLastRunAt(*v) } - return pvuo + return _u } // ClearLastRunAt clears the value of the "last_run_at" field. -func (pvuo *ProjectVersionUpdateOne) ClearLastRunAt() *ProjectVersionUpdateOne { - pvuo.mutation.ClearLastRunAt() - return pvuo +func (_u *ProjectVersionUpdateOne) ClearLastRunAt() *ProjectVersionUpdateOne { + _u.mutation.ClearLastRunAt() + return _u } // SetLatest sets the "latest" field. -func (pvuo *ProjectVersionUpdateOne) SetLatest(b bool) *ProjectVersionUpdateOne { - pvuo.mutation.SetLatest(b) - return pvuo +func (_u *ProjectVersionUpdateOne) SetLatest(v bool) *ProjectVersionUpdateOne { + _u.mutation.SetLatest(v) + return _u } // SetNillableLatest sets the "latest" field if the given value is not nil. -func (pvuo *ProjectVersionUpdateOne) SetNillableLatest(b *bool) *ProjectVersionUpdateOne { - if b != nil { - pvuo.SetLatest(*b) +func (_u *ProjectVersionUpdateOne) SetNillableLatest(v *bool) *ProjectVersionUpdateOne { + if v != nil { + _u.SetLatest(*v) } - return pvuo + return _u } // SetProject sets the "project" edge to the Project entity. -func (pvuo *ProjectVersionUpdateOne) SetProject(p *Project) *ProjectVersionUpdateOne { - return pvuo.SetProjectID(p.ID) +func (_u *ProjectVersionUpdateOne) SetProject(v *Project) *ProjectVersionUpdateOne { + return _u.SetProjectID(v.ID) } // AddRunIDs adds the "runs" edge to the WorkflowRun entity by IDs. -func (pvuo *ProjectVersionUpdateOne) AddRunIDs(ids ...uuid.UUID) *ProjectVersionUpdateOne { - pvuo.mutation.AddRunIDs(ids...) - return pvuo +func (_u *ProjectVersionUpdateOne) AddRunIDs(ids ...uuid.UUID) *ProjectVersionUpdateOne { + _u.mutation.AddRunIDs(ids...) + return _u } // AddRuns adds the "runs" edges to the WorkflowRun entity. 
-func (pvuo *ProjectVersionUpdateOne) AddRuns(w ...*WorkflowRun) *ProjectVersionUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ProjectVersionUpdateOne) AddRuns(v ...*WorkflowRun) *ProjectVersionUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pvuo.AddRunIDs(ids...) + return _u.AddRunIDs(ids...) } // Mutation returns the ProjectVersionMutation object of the builder. -func (pvuo *ProjectVersionUpdateOne) Mutation() *ProjectVersionMutation { - return pvuo.mutation +func (_u *ProjectVersionUpdateOne) Mutation() *ProjectVersionMutation { + return _u.mutation } // ClearProject clears the "project" edge to the Project entity. -func (pvuo *ProjectVersionUpdateOne) ClearProject() *ProjectVersionUpdateOne { - pvuo.mutation.ClearProject() - return pvuo +func (_u *ProjectVersionUpdateOne) ClearProject() *ProjectVersionUpdateOne { + _u.mutation.ClearProject() + return _u } // ClearRuns clears all "runs" edges to the WorkflowRun entity. -func (pvuo *ProjectVersionUpdateOne) ClearRuns() *ProjectVersionUpdateOne { - pvuo.mutation.ClearRuns() - return pvuo +func (_u *ProjectVersionUpdateOne) ClearRuns() *ProjectVersionUpdateOne { + _u.mutation.ClearRuns() + return _u } // RemoveRunIDs removes the "runs" edge to WorkflowRun entities by IDs. -func (pvuo *ProjectVersionUpdateOne) RemoveRunIDs(ids ...uuid.UUID) *ProjectVersionUpdateOne { - pvuo.mutation.RemoveRunIDs(ids...) - return pvuo +func (_u *ProjectVersionUpdateOne) RemoveRunIDs(ids ...uuid.UUID) *ProjectVersionUpdateOne { + _u.mutation.RemoveRunIDs(ids...) + return _u } // RemoveRuns removes "runs" edges to WorkflowRun entities. -func (pvuo *ProjectVersionUpdateOne) RemoveRuns(w ...*WorkflowRun) *ProjectVersionUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ProjectVersionUpdateOne) RemoveRuns(v ...*WorkflowRun) *ProjectVersionUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return pvuo.RemoveRunIDs(ids...) + return _u.RemoveRunIDs(ids...) } // Where appends a list predicates to the ProjectVersionUpdate builder. -func (pvuo *ProjectVersionUpdateOne) Where(ps ...predicate.ProjectVersion) *ProjectVersionUpdateOne { - pvuo.mutation.Where(ps...) - return pvuo +func (_u *ProjectVersionUpdateOne) Where(ps ...predicate.ProjectVersion) *ProjectVersionUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (pvuo *ProjectVersionUpdateOne) Select(field string, fields ...string) *ProjectVersionUpdateOne { - pvuo.fields = append([]string{field}, fields...) - return pvuo +func (_u *ProjectVersionUpdateOne) Select(field string, fields ...string) *ProjectVersionUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated ProjectVersion entity. -func (pvuo *ProjectVersionUpdateOne) Save(ctx context.Context) (*ProjectVersion, error) { - return withHooks(ctx, pvuo.sqlSave, pvuo.mutation, pvuo.hooks) +func (_u *ProjectVersionUpdateOne) Save(ctx context.Context) (*ProjectVersion, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. 
-func (pvuo *ProjectVersionUpdateOne) SaveX(ctx context.Context) *ProjectVersion { - node, err := pvuo.Save(ctx) +func (_u *ProjectVersionUpdateOne) SaveX(ctx context.Context) *ProjectVersion { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -656,48 +656,48 @@ func (pvuo *ProjectVersionUpdateOne) SaveX(ctx context.Context) *ProjectVersion } // Exec executes the query on the entity. -func (pvuo *ProjectVersionUpdateOne) Exec(ctx context.Context) error { - _, err := pvuo.Save(ctx) +func (_u *ProjectVersionUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (pvuo *ProjectVersionUpdateOne) ExecX(ctx context.Context) { - if err := pvuo.Exec(ctx); err != nil { +func (_u *ProjectVersionUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (pvuo *ProjectVersionUpdateOne) check() error { - if v, ok := pvuo.mutation.Version(); ok { +func (_u *ProjectVersionUpdateOne) check() error { + if v, ok := _u.mutation.Version(); ok { if err := projectversion.VersionValidator(v); err != nil { return &ValidationError{Name: "version", err: fmt.Errorf(`ent: validator failed for field "ProjectVersion.version": %w`, err)} } } - if pvuo.mutation.ProjectCleared() && len(pvuo.mutation.ProjectIDs()) > 0 { + if _u.mutation.ProjectCleared() && len(_u.mutation.ProjectIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "ProjectVersion.project"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (pvuo *ProjectVersionUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ProjectVersionUpdateOne { - pvuo.modifiers = append(pvuo.modifiers, modifiers...) - return pvuo +func (_u *ProjectVersionUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ProjectVersionUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (pvuo *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *ProjectVersion, err error) { - if err := pvuo.check(); err != nil { +func (_u *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *ProjectVersion, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(projectversion.Table, projectversion.Columns, sqlgraph.NewFieldSpec(projectversion.FieldID, field.TypeUUID)) - id, ok := pvuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ProjectVersion.id" for update`)} } _spec.Node.ID.Value = id - if fields := pvuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, projectversion.FieldID) for _, f := range fields { @@ -709,50 +709,50 @@ func (pvuo *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *Projec } } } - if ps := pvuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := pvuo.mutation.Version(); ok { + if value, ok := _u.mutation.Version(); ok { _spec.SetField(projectversion.FieldVersion, field.TypeString, value) } - if value, ok := pvuo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(projectversion.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := pvuo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(projectversion.FieldDeletedAt, field.TypeTime, value) } - if pvuo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(projectversion.FieldDeletedAt, field.TypeTime) } - if value, ok := pvuo.mutation.Prerelease(); ok { + if value, ok := _u.mutation.Prerelease(); ok { _spec.SetField(projectversion.FieldPrerelease, field.TypeBool, value) } - if value, ok := pvuo.mutation.WorkflowRunCount(); ok { + if value, ok := _u.mutation.WorkflowRunCount(); ok { _spec.SetField(projectversion.FieldWorkflowRunCount, field.TypeInt, value) } - if value, ok := pvuo.mutation.AddedWorkflowRunCount(); ok { + if value, ok := _u.mutation.AddedWorkflowRunCount(); ok { _spec.AddField(projectversion.FieldWorkflowRunCount, field.TypeInt, value) } - if value, ok := pvuo.mutation.ReleasedAt(); ok { + if value, ok := _u.mutation.ReleasedAt(); ok { _spec.SetField(projectversion.FieldReleasedAt, field.TypeTime, value) } - if pvuo.mutation.ReleasedAtCleared() { + if _u.mutation.ReleasedAtCleared() { _spec.ClearField(projectversion.FieldReleasedAt, field.TypeTime) } - if value, ok := pvuo.mutation.LastRunAt(); ok { + if value, ok := _u.mutation.LastRunAt(); ok { _spec.SetField(projectversion.FieldLastRunAt, field.TypeTime, value) } - if pvuo.mutation.LastRunAtCleared() { + if _u.mutation.LastRunAtCleared() { _spec.ClearField(projectversion.FieldLastRunAt, field.TypeTime) } - if value, ok := pvuo.mutation.Latest(); ok { + if value, ok := _u.mutation.Latest(); ok { _spec.SetField(projectversion.FieldLatest, field.TypeBool, value) } - if pvuo.mutation.ProjectCleared() { + if _u.mutation.ProjectCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -765,7 +765,7 @@ func (pvuo *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *Projec } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pvuo.mutation.ProjectIDs(); len(nodes) > 0 { + if 
nodes := _u.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -781,7 +781,7 @@ func (pvuo *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *Projec } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if pvuo.mutation.RunsCleared() { + if _u.mutation.RunsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -794,7 +794,7 @@ func (pvuo *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *Projec } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pvuo.mutation.RemovedRunsIDs(); len(nodes) > 0 && !pvuo.mutation.RunsCleared() { + if nodes := _u.mutation.RemovedRunsIDs(); len(nodes) > 0 && !_u.mutation.RunsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -810,7 +810,7 @@ func (pvuo *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *Projec } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pvuo.mutation.RunsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.RunsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -826,11 +826,11 @@ func (pvuo *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *Projec } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(pvuo.modifiers...) - _node = &ProjectVersion{config: pvuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &ProjectVersion{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, pvuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{projectversion.Label} } else if sqlgraph.IsConstraintError(err) { @@ -838,6 +838,6 @@ func (pvuo *ProjectVersionUpdateOne) sqlSave(ctx context.Context) (_node *Projec } return nil, err } - pvuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/referrer.go b/app/controlplane/pkg/data/ent/referrer.go index 50a713312..c00a72aea 100644 --- a/app/controlplane/pkg/data/ent/referrer.go +++ b/app/controlplane/pkg/data/ent/referrer.go @@ -101,7 +101,7 @@ func (*Referrer) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Referrer fields. 
-func (r *Referrer) assignValues(columns []string, values []any) error { +func (_m *Referrer) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -111,37 +111,37 @@ func (r *Referrer) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - r.ID = *value + _m.ID = *value } case referrer.FieldDigest: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field digest", values[i]) } else if value.Valid { - r.Digest = value.String + _m.Digest = value.String } case referrer.FieldKind: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field kind", values[i]) } else if value.Valid { - r.Kind = value.String + _m.Kind = value.String } case referrer.FieldDownloadable: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field downloadable", values[i]) } else if value.Valid { - r.Downloadable = value.Bool + _m.Downloadable = value.Bool } case referrer.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - r.CreatedAt = value.Time + _m.CreatedAt = value.Time } case referrer.FieldMetadata: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field metadata", values[i]) } else if value != nil && len(*value) > 0 { - if err := json.Unmarshal(*value, &r.Metadata); err != nil { + if err := json.Unmarshal(*value, &_m.Metadata); err != nil { return fmt.Errorf("unmarshal field metadata: %w", err) } } @@ -149,12 +149,12 @@ func (r *Referrer) assignValues(columns []string, values []any) error { if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field annotations", values[i]) } else if value != nil && len(*value) > 0 { - if err := json.Unmarshal(*value, &r.Annotations); err != nil { + if err := json.Unmarshal(*value, &_m.Annotations); err != nil { return fmt.Errorf("unmarshal field annotations: %w", err) } } default: - r.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -162,65 +162,65 @@ func (r *Referrer) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the Referrer. // This includes values selected through modifiers, order, etc. -func (r *Referrer) Value(name string) (ent.Value, error) { - return r.selectValues.Get(name) +func (_m *Referrer) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryReferredBy queries the "referred_by" edge of the Referrer entity. -func (r *Referrer) QueryReferredBy() *ReferrerQuery { - return NewReferrerClient(r.config).QueryReferredBy(r) +func (_m *Referrer) QueryReferredBy() *ReferrerQuery { + return NewReferrerClient(_m.config).QueryReferredBy(_m) } // QueryReferences queries the "references" edge of the Referrer entity. -func (r *Referrer) QueryReferences() *ReferrerQuery { - return NewReferrerClient(r.config).QueryReferences(r) +func (_m *Referrer) QueryReferences() *ReferrerQuery { + return NewReferrerClient(_m.config).QueryReferences(_m) } // QueryWorkflows queries the "workflows" edge of the Referrer entity. 
-func (r *Referrer) QueryWorkflows() *WorkflowQuery { - return NewReferrerClient(r.config).QueryWorkflows(r) +func (_m *Referrer) QueryWorkflows() *WorkflowQuery { + return NewReferrerClient(_m.config).QueryWorkflows(_m) } // Update returns a builder for updating this Referrer. // Note that you need to call Referrer.Unwrap() before calling this method if this Referrer // was returned from a transaction, and the transaction was committed or rolled back. -func (r *Referrer) Update() *ReferrerUpdateOne { - return NewReferrerClient(r.config).UpdateOne(r) +func (_m *Referrer) Update() *ReferrerUpdateOne { + return NewReferrerClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the Referrer entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (r *Referrer) Unwrap() *Referrer { - _tx, ok := r.config.driver.(*txDriver) +func (_m *Referrer) Unwrap() *Referrer { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: Referrer is not a transactional entity") } - r.config.driver = _tx.drv - return r + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (r *Referrer) String() string { +func (_m *Referrer) String() string { var builder strings.Builder builder.WriteString("Referrer(") - builder.WriteString(fmt.Sprintf("id=%v, ", r.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("digest=") - builder.WriteString(r.Digest) + builder.WriteString(_m.Digest) builder.WriteString(", ") builder.WriteString("kind=") - builder.WriteString(r.Kind) + builder.WriteString(_m.Kind) builder.WriteString(", ") builder.WriteString("downloadable=") - builder.WriteString(fmt.Sprintf("%v", r.Downloadable)) + builder.WriteString(fmt.Sprintf("%v", _m.Downloadable)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(r.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("metadata=") - builder.WriteString(fmt.Sprintf("%v", r.Metadata)) + builder.WriteString(fmt.Sprintf("%v", _m.Metadata)) builder.WriteString(", ") builder.WriteString("annotations=") - builder.WriteString(fmt.Sprintf("%v", r.Annotations)) + builder.WriteString(fmt.Sprintf("%v", _m.Annotations)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/referrer_create.go b/app/controlplane/pkg/data/ent/referrer_create.go index 02d006551..876037b65 100644 --- a/app/controlplane/pkg/data/ent/referrer_create.go +++ b/app/controlplane/pkg/data/ent/referrer_create.go @@ -26,122 +26,122 @@ type ReferrerCreate struct { } // SetDigest sets the "digest" field. -func (rc *ReferrerCreate) SetDigest(s string) *ReferrerCreate { - rc.mutation.SetDigest(s) - return rc +func (_c *ReferrerCreate) SetDigest(v string) *ReferrerCreate { + _c.mutation.SetDigest(v) + return _c } // SetKind sets the "kind" field. -func (rc *ReferrerCreate) SetKind(s string) *ReferrerCreate { - rc.mutation.SetKind(s) - return rc +func (_c *ReferrerCreate) SetKind(v string) *ReferrerCreate { + _c.mutation.SetKind(v) + return _c } // SetDownloadable sets the "downloadable" field. -func (rc *ReferrerCreate) SetDownloadable(b bool) *ReferrerCreate { - rc.mutation.SetDownloadable(b) - return rc +func (_c *ReferrerCreate) SetDownloadable(v bool) *ReferrerCreate { + _c.mutation.SetDownloadable(v) + return _c } // SetCreatedAt sets the "created_at" field. 
-func (rc *ReferrerCreate) SetCreatedAt(t time.Time) *ReferrerCreate { - rc.mutation.SetCreatedAt(t) - return rc +func (_c *ReferrerCreate) SetCreatedAt(v time.Time) *ReferrerCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (rc *ReferrerCreate) SetNillableCreatedAt(t *time.Time) *ReferrerCreate { - if t != nil { - rc.SetCreatedAt(*t) +func (_c *ReferrerCreate) SetNillableCreatedAt(v *time.Time) *ReferrerCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return rc + return _c } // SetMetadata sets the "metadata" field. -func (rc *ReferrerCreate) SetMetadata(m map[string]string) *ReferrerCreate { - rc.mutation.SetMetadata(m) - return rc +func (_c *ReferrerCreate) SetMetadata(v map[string]string) *ReferrerCreate { + _c.mutation.SetMetadata(v) + return _c } // SetAnnotations sets the "annotations" field. -func (rc *ReferrerCreate) SetAnnotations(m map[string]string) *ReferrerCreate { - rc.mutation.SetAnnotations(m) - return rc +func (_c *ReferrerCreate) SetAnnotations(v map[string]string) *ReferrerCreate { + _c.mutation.SetAnnotations(v) + return _c } // SetID sets the "id" field. -func (rc *ReferrerCreate) SetID(u uuid.UUID) *ReferrerCreate { - rc.mutation.SetID(u) - return rc +func (_c *ReferrerCreate) SetID(v uuid.UUID) *ReferrerCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (rc *ReferrerCreate) SetNillableID(u *uuid.UUID) *ReferrerCreate { - if u != nil { - rc.SetID(*u) +func (_c *ReferrerCreate) SetNillableID(v *uuid.UUID) *ReferrerCreate { + if v != nil { + _c.SetID(*v) } - return rc + return _c } // AddReferredByIDs adds the "referred_by" edge to the Referrer entity by IDs. -func (rc *ReferrerCreate) AddReferredByIDs(ids ...uuid.UUID) *ReferrerCreate { - rc.mutation.AddReferredByIDs(ids...) - return rc +func (_c *ReferrerCreate) AddReferredByIDs(ids ...uuid.UUID) *ReferrerCreate { + _c.mutation.AddReferredByIDs(ids...) + return _c } // AddReferredBy adds the "referred_by" edges to the Referrer entity. -func (rc *ReferrerCreate) AddReferredBy(r ...*Referrer) *ReferrerCreate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_c *ReferrerCreate) AddReferredBy(v ...*Referrer) *ReferrerCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return rc.AddReferredByIDs(ids...) + return _c.AddReferredByIDs(ids...) } // AddReferenceIDs adds the "references" edge to the Referrer entity by IDs. -func (rc *ReferrerCreate) AddReferenceIDs(ids ...uuid.UUID) *ReferrerCreate { - rc.mutation.AddReferenceIDs(ids...) - return rc +func (_c *ReferrerCreate) AddReferenceIDs(ids ...uuid.UUID) *ReferrerCreate { + _c.mutation.AddReferenceIDs(ids...) + return _c } // AddReferences adds the "references" edges to the Referrer entity. -func (rc *ReferrerCreate) AddReferences(r ...*Referrer) *ReferrerCreate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_c *ReferrerCreate) AddReferences(v ...*Referrer) *ReferrerCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return rc.AddReferenceIDs(ids...) + return _c.AddReferenceIDs(ids...) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (rc *ReferrerCreate) AddWorkflowIDs(ids ...uuid.UUID) *ReferrerCreate { - rc.mutation.AddWorkflowIDs(ids...) 
- return rc +func (_c *ReferrerCreate) AddWorkflowIDs(ids ...uuid.UUID) *ReferrerCreate { + _c.mutation.AddWorkflowIDs(ids...) + return _c } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (rc *ReferrerCreate) AddWorkflows(w ...*Workflow) *ReferrerCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *ReferrerCreate) AddWorkflows(v ...*Workflow) *ReferrerCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return rc.AddWorkflowIDs(ids...) + return _c.AddWorkflowIDs(ids...) } // Mutation returns the ReferrerMutation object of the builder. -func (rc *ReferrerCreate) Mutation() *ReferrerMutation { - return rc.mutation +func (_c *ReferrerCreate) Mutation() *ReferrerMutation { + return _c.mutation } // Save creates the Referrer in the database. -func (rc *ReferrerCreate) Save(ctx context.Context) (*Referrer, error) { - rc.defaults() - return withHooks(ctx, rc.sqlSave, rc.mutation, rc.hooks) +func (_c *ReferrerCreate) Save(ctx context.Context) (*Referrer, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (rc *ReferrerCreate) SaveX(ctx context.Context) *Referrer { - v, err := rc.Save(ctx) +func (_c *ReferrerCreate) SaveX(ctx context.Context) *Referrer { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -149,53 +149,53 @@ func (rc *ReferrerCreate) SaveX(ctx context.Context) *Referrer { } // Exec executes the query. -func (rc *ReferrerCreate) Exec(ctx context.Context) error { - _, err := rc.Save(ctx) +func (_c *ReferrerCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (rc *ReferrerCreate) ExecX(ctx context.Context) { - if err := rc.Exec(ctx); err != nil { +func (_c *ReferrerCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (rc *ReferrerCreate) defaults() { - if _, ok := rc.mutation.CreatedAt(); !ok { +func (_c *ReferrerCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := referrer.DefaultCreatedAt() - rc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := rc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := referrer.DefaultID() - rc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (rc *ReferrerCreate) check() error { - if _, ok := rc.mutation.Digest(); !ok { +func (_c *ReferrerCreate) check() error { + if _, ok := _c.mutation.Digest(); !ok { return &ValidationError{Name: "digest", err: errors.New(`ent: missing required field "Referrer.digest"`)} } - if _, ok := rc.mutation.Kind(); !ok { + if _, ok := _c.mutation.Kind(); !ok { return &ValidationError{Name: "kind", err: errors.New(`ent: missing required field "Referrer.kind"`)} } - if _, ok := rc.mutation.Downloadable(); !ok { + if _, ok := _c.mutation.Downloadable(); !ok { return &ValidationError{Name: "downloadable", err: errors.New(`ent: missing required field "Referrer.downloadable"`)} } - if _, ok := rc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Referrer.created_at"`)} } return nil } -func (rc *ReferrerCreate) sqlSave(ctx context.Context) (*Referrer, error) { - if err := rc.check(); err != nil { +func (_c *ReferrerCreate) sqlSave(ctx context.Context) (*Referrer, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := rc.createSpec() - if err := sqlgraph.CreateNode(ctx, rc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -208,46 +208,46 @@ func (rc *ReferrerCreate) sqlSave(ctx context.Context) (*Referrer, error) { return nil, err } } - rc.mutation.id = &_node.ID - rc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (rc *ReferrerCreate) createSpec() (*Referrer, *sqlgraph.CreateSpec) { +func (_c *ReferrerCreate) createSpec() (*Referrer, *sqlgraph.CreateSpec) { var ( - _node = &Referrer{config: rc.config} + _node = &Referrer{config: _c.config} _spec = sqlgraph.NewCreateSpec(referrer.Table, sqlgraph.NewFieldSpec(referrer.FieldID, field.TypeUUID)) ) - _spec.OnConflict = rc.conflict - if id, ok := rc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := rc.mutation.Digest(); ok { + if value, ok := _c.mutation.Digest(); ok { _spec.SetField(referrer.FieldDigest, field.TypeString, value) _node.Digest = value } - if value, ok := rc.mutation.Kind(); ok { + if value, ok := _c.mutation.Kind(); ok { _spec.SetField(referrer.FieldKind, field.TypeString, value) _node.Kind = value } - if value, ok := rc.mutation.Downloadable(); ok { + if value, ok := _c.mutation.Downloadable(); ok { _spec.SetField(referrer.FieldDownloadable, field.TypeBool, value) _node.Downloadable = value } - if value, ok := rc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(referrer.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := rc.mutation.Metadata(); ok { + if value, ok := _c.mutation.Metadata(); ok { _spec.SetField(referrer.FieldMetadata, field.TypeJSON, value) _node.Metadata = value } - if value, ok := rc.mutation.Annotations(); ok { + if value, ok := _c.mutation.Annotations(); ok { _spec.SetField(referrer.FieldAnnotations, field.TypeJSON, value) _node.Annotations = value } - if nodes := rc.mutation.ReferredByIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ReferredByIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -263,7 +263,7 @@ func (rc *ReferrerCreate) createSpec() 
(*Referrer, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := rc.mutation.ReferencesIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ReferencesIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -279,7 +279,7 @@ func (rc *ReferrerCreate) createSpec() (*Referrer, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := rc.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -314,10 +314,10 @@ func (rc *ReferrerCreate) createSpec() (*Referrer, *sqlgraph.CreateSpec) { // SetDigest(v+v). // }). // Exec(ctx) -func (rc *ReferrerCreate) OnConflict(opts ...sql.ConflictOption) *ReferrerUpsertOne { - rc.conflict = opts +func (_c *ReferrerCreate) OnConflict(opts ...sql.ConflictOption) *ReferrerUpsertOne { + _c.conflict = opts return &ReferrerUpsertOne{ - create: rc, + create: _c, } } @@ -327,10 +327,10 @@ func (rc *ReferrerCreate) OnConflict(opts ...sql.ConflictOption) *ReferrerUpsert // client.Referrer.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (rc *ReferrerCreate) OnConflictColumns(columns ...string) *ReferrerUpsertOne { - rc.conflict = append(rc.conflict, sql.ConflictColumns(columns...)) +func (_c *ReferrerCreate) OnConflictColumns(columns ...string) *ReferrerUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &ReferrerUpsertOne{ - create: rc, + create: _c, } } @@ -460,16 +460,16 @@ type ReferrerCreateBulk struct { } // Save creates the Referrer entities in the database. -func (rcb *ReferrerCreateBulk) Save(ctx context.Context) ([]*Referrer, error) { - if rcb.err != nil { - return nil, rcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(rcb.builders)) - nodes := make([]*Referrer, len(rcb.builders)) - mutators := make([]Mutator, len(rcb.builders)) - for i := range rcb.builders { +func (_c *ReferrerCreateBulk) Save(ctx context.Context) ([]*Referrer, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Referrer, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := rcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*ReferrerMutation) @@ -483,12 +483,12 @@ func (rcb *ReferrerCreateBulk) Save(ctx context.Context) ([]*Referrer, error) { var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, rcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = rcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, rcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -508,7 +508,7 @@ func (rcb *ReferrerCreateBulk) Save(ctx context.Context) ([]*Referrer, error) { }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, rcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -516,8 +516,8 @@ func (rcb *ReferrerCreateBulk) Save(ctx context.Context) ([]*Referrer, error) { } // SaveX is like Save, but panics if an error occurs. -func (rcb *ReferrerCreateBulk) SaveX(ctx context.Context) []*Referrer { - v, err := rcb.Save(ctx) +func (_c *ReferrerCreateBulk) SaveX(ctx context.Context) []*Referrer { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -525,14 +525,14 @@ func (rcb *ReferrerCreateBulk) SaveX(ctx context.Context) []*Referrer { } // Exec executes the query. -func (rcb *ReferrerCreateBulk) Exec(ctx context.Context) error { - _, err := rcb.Save(ctx) +func (_c *ReferrerCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (rcb *ReferrerCreateBulk) ExecX(ctx context.Context) { - if err := rcb.Exec(ctx); err != nil { +func (_c *ReferrerCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -552,10 +552,10 @@ func (rcb *ReferrerCreateBulk) ExecX(ctx context.Context) { // SetDigest(v+v). // }). // Exec(ctx) -func (rcb *ReferrerCreateBulk) OnConflict(opts ...sql.ConflictOption) *ReferrerUpsertBulk { - rcb.conflict = opts +func (_c *ReferrerCreateBulk) OnConflict(opts ...sql.ConflictOption) *ReferrerUpsertBulk { + _c.conflict = opts return &ReferrerUpsertBulk{ - create: rcb, + create: _c, } } @@ -565,10 +565,10 @@ func (rcb *ReferrerCreateBulk) OnConflict(opts ...sql.ConflictOption) *ReferrerU // client.Referrer.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (rcb *ReferrerCreateBulk) OnConflictColumns(columns ...string) *ReferrerUpsertBulk { - rcb.conflict = append(rcb.conflict, sql.ConflictColumns(columns...)) +func (_c *ReferrerCreateBulk) OnConflictColumns(columns ...string) *ReferrerUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &ReferrerUpsertBulk{ - create: rcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/referrer_delete.go b/app/controlplane/pkg/data/ent/referrer_delete.go index 560054075..3733667be 100644 --- a/app/controlplane/pkg/data/ent/referrer_delete.go +++ b/app/controlplane/pkg/data/ent/referrer_delete.go @@ -20,56 +20,56 @@ type ReferrerDelete struct { } // Where appends a list predicates to the ReferrerDelete builder. -func (rd *ReferrerDelete) Where(ps ...predicate.Referrer) *ReferrerDelete { - rd.mutation.Where(ps...) - return rd +func (_d *ReferrerDelete) Where(ps ...predicate.Referrer) *ReferrerDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (rd *ReferrerDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, rd.sqlExec, rd.mutation, rd.hooks) +func (_d *ReferrerDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. 
-func (rd *ReferrerDelete) ExecX(ctx context.Context) int { - n, err := rd.Exec(ctx) +func (_d *ReferrerDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (rd *ReferrerDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *ReferrerDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(referrer.Table, sqlgraph.NewFieldSpec(referrer.FieldID, field.TypeUUID)) - if ps := rd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, rd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - rd.mutation.done = true + _d.mutation.done = true return affected, err } // ReferrerDeleteOne is the builder for deleting a single Referrer entity. type ReferrerDeleteOne struct { - rd *ReferrerDelete + _d *ReferrerDelete } // Where appends a list predicates to the ReferrerDelete builder. -func (rdo *ReferrerDeleteOne) Where(ps ...predicate.Referrer) *ReferrerDeleteOne { - rdo.rd.mutation.Where(ps...) - return rdo +func (_d *ReferrerDeleteOne) Where(ps ...predicate.Referrer) *ReferrerDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (rdo *ReferrerDeleteOne) Exec(ctx context.Context) error { - n, err := rdo.rd.Exec(ctx) +func (_d *ReferrerDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (rdo *ReferrerDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (rdo *ReferrerDeleteOne) ExecX(ctx context.Context) { - if err := rdo.Exec(ctx); err != nil { +func (_d *ReferrerDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/referrer_query.go b/app/controlplane/pkg/data/ent/referrer_query.go index 305c165ca..e456bfb1e 100644 --- a/app/controlplane/pkg/data/ent/referrer_query.go +++ b/app/controlplane/pkg/data/ent/referrer_query.go @@ -36,44 +36,44 @@ type ReferrerQuery struct { } // Where adds a new predicate for the ReferrerQuery builder. -func (rq *ReferrerQuery) Where(ps ...predicate.Referrer) *ReferrerQuery { - rq.predicates = append(rq.predicates, ps...) - return rq +func (_q *ReferrerQuery) Where(ps ...predicate.Referrer) *ReferrerQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (rq *ReferrerQuery) Limit(limit int) *ReferrerQuery { - rq.ctx.Limit = &limit - return rq +func (_q *ReferrerQuery) Limit(limit int) *ReferrerQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (rq *ReferrerQuery) Offset(offset int) *ReferrerQuery { - rq.ctx.Offset = &offset - return rq +func (_q *ReferrerQuery) Offset(offset int) *ReferrerQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
-func (rq *ReferrerQuery) Unique(unique bool) *ReferrerQuery { - rq.ctx.Unique = &unique - return rq +func (_q *ReferrerQuery) Unique(unique bool) *ReferrerQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (rq *ReferrerQuery) Order(o ...referrer.OrderOption) *ReferrerQuery { - rq.order = append(rq.order, o...) - return rq +func (_q *ReferrerQuery) Order(o ...referrer.OrderOption) *ReferrerQuery { + _q.order = append(_q.order, o...) + return _q } // QueryReferredBy chains the current query on the "referred_by" edge. -func (rq *ReferrerQuery) QueryReferredBy() *ReferrerQuery { - query := (&ReferrerClient{config: rq.config}).Query() +func (_q *ReferrerQuery) QueryReferredBy() *ReferrerQuery { + query := (&ReferrerClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := rq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := rq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -82,20 +82,20 @@ func (rq *ReferrerQuery) QueryReferredBy() *ReferrerQuery { sqlgraph.To(referrer.Table, referrer.FieldID), sqlgraph.Edge(sqlgraph.M2M, true, referrer.ReferredByTable, referrer.ReferredByPrimaryKey...), ) - fromU = sqlgraph.SetNeighbors(rq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryReferences chains the current query on the "references" edge. -func (rq *ReferrerQuery) QueryReferences() *ReferrerQuery { - query := (&ReferrerClient{config: rq.config}).Query() +func (_q *ReferrerQuery) QueryReferences() *ReferrerQuery { + query := (&ReferrerClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := rq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := rq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -104,20 +104,20 @@ func (rq *ReferrerQuery) QueryReferences() *ReferrerQuery { sqlgraph.To(referrer.Table, referrer.FieldID), sqlgraph.Edge(sqlgraph.M2M, false, referrer.ReferencesTable, referrer.ReferencesPrimaryKey...), ) - fromU = sqlgraph.SetNeighbors(rq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryWorkflows chains the current query on the "workflows" edge. 
-func (rq *ReferrerQuery) QueryWorkflows() *WorkflowQuery { - query := (&WorkflowClient{config: rq.config}).Query() +func (_q *ReferrerQuery) QueryWorkflows() *WorkflowQuery { + query := (&WorkflowClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := rq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := rq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -126,7 +126,7 @@ func (rq *ReferrerQuery) QueryWorkflows() *WorkflowQuery { sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.M2M, false, referrer.WorkflowsTable, referrer.WorkflowsPrimaryKey...), ) - fromU = sqlgraph.SetNeighbors(rq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -134,8 +134,8 @@ func (rq *ReferrerQuery) QueryWorkflows() *WorkflowQuery { // First returns the first Referrer entity from the query. // Returns a *NotFoundError when no Referrer was found. -func (rq *ReferrerQuery) First(ctx context.Context) (*Referrer, error) { - nodes, err := rq.Limit(1).All(setContextOp(ctx, rq.ctx, ent.OpQueryFirst)) +func (_q *ReferrerQuery) First(ctx context.Context) (*Referrer, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -146,8 +146,8 @@ func (rq *ReferrerQuery) First(ctx context.Context) (*Referrer, error) { } // FirstX is like First, but panics if an error occurs. -func (rq *ReferrerQuery) FirstX(ctx context.Context) *Referrer { - node, err := rq.First(ctx) +func (_q *ReferrerQuery) FirstX(ctx context.Context) *Referrer { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -156,9 +156,9 @@ func (rq *ReferrerQuery) FirstX(ctx context.Context) *Referrer { // FirstID returns the first Referrer ID from the query. // Returns a *NotFoundError when no Referrer ID was found. -func (rq *ReferrerQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *ReferrerQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = rq.Limit(1).IDs(setContextOp(ctx, rq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -169,8 +169,8 @@ func (rq *ReferrerQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) } // FirstIDX is like FirstID, but panics if an error occurs. -func (rq *ReferrerQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := rq.FirstID(ctx) +func (_q *ReferrerQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -180,8 +180,8 @@ func (rq *ReferrerQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single Referrer entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one Referrer entity is found. // Returns a *NotFoundError when no Referrer entities are found. 
-func (rq *ReferrerQuery) Only(ctx context.Context) (*Referrer, error) { - nodes, err := rq.Limit(2).All(setContextOp(ctx, rq.ctx, ent.OpQueryOnly)) +func (_q *ReferrerQuery) Only(ctx context.Context) (*Referrer, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -196,8 +196,8 @@ func (rq *ReferrerQuery) Only(ctx context.Context) (*Referrer, error) { } // OnlyX is like Only, but panics if an error occurs. -func (rq *ReferrerQuery) OnlyX(ctx context.Context) *Referrer { - node, err := rq.Only(ctx) +func (_q *ReferrerQuery) OnlyX(ctx context.Context) *Referrer { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -207,9 +207,9 @@ func (rq *ReferrerQuery) OnlyX(ctx context.Context) *Referrer { // OnlyID is like Only, but returns the only Referrer ID in the query. // Returns a *NotSingularError when more than one Referrer ID is found. // Returns a *NotFoundError when no entities are found. -func (rq *ReferrerQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *ReferrerQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = rq.Limit(2).IDs(setContextOp(ctx, rq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -224,8 +224,8 @@ func (rq *ReferrerQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (rq *ReferrerQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := rq.OnlyID(ctx) +func (_q *ReferrerQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -233,18 +233,18 @@ func (rq *ReferrerQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Referrers. -func (rq *ReferrerQuery) All(ctx context.Context) ([]*Referrer, error) { - ctx = setContextOp(ctx, rq.ctx, ent.OpQueryAll) - if err := rq.prepareQuery(ctx); err != nil { +func (_q *ReferrerQuery) All(ctx context.Context) ([]*Referrer, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*Referrer, *ReferrerQuery]() - return withInterceptors[[]*Referrer](ctx, rq, qr, rq.inters) + return withInterceptors[[]*Referrer](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (rq *ReferrerQuery) AllX(ctx context.Context) []*Referrer { - nodes, err := rq.All(ctx) +func (_q *ReferrerQuery) AllX(ctx context.Context) []*Referrer { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -252,20 +252,20 @@ func (rq *ReferrerQuery) AllX(ctx context.Context) []*Referrer { } // IDs executes the query and returns a list of Referrer IDs. -func (rq *ReferrerQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if rq.ctx.Unique == nil && rq.path != nil { - rq.Unique(true) +func (_q *ReferrerQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, rq.ctx, ent.OpQueryIDs) - if err = rq.Select(referrer.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(referrer.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. 
-func (rq *ReferrerQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := rq.IDs(ctx) +func (_q *ReferrerQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -273,17 +273,17 @@ func (rq *ReferrerQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (rq *ReferrerQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, rq.ctx, ent.OpQueryCount) - if err := rq.prepareQuery(ctx); err != nil { +func (_q *ReferrerQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, rq, querierCount[*ReferrerQuery](), rq.inters) + return withInterceptors[int](ctx, _q, querierCount[*ReferrerQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (rq *ReferrerQuery) CountX(ctx context.Context) int { - count, err := rq.Count(ctx) +func (_q *ReferrerQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -291,9 +291,9 @@ func (rq *ReferrerQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (rq *ReferrerQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, rq.ctx, ent.OpQueryExist) - switch _, err := rq.FirstID(ctx); { +func (_q *ReferrerQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -304,8 +304,8 @@ func (rq *ReferrerQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (rq *ReferrerQuery) ExistX(ctx context.Context) bool { - exist, err := rq.Exist(ctx) +func (_q *ReferrerQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -314,57 +314,57 @@ func (rq *ReferrerQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the ReferrerQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (rq *ReferrerQuery) Clone() *ReferrerQuery { - if rq == nil { +func (_q *ReferrerQuery) Clone() *ReferrerQuery { + if _q == nil { return nil } return &ReferrerQuery{ - config: rq.config, - ctx: rq.ctx.Clone(), - order: append([]referrer.OrderOption{}, rq.order...), - inters: append([]Interceptor{}, rq.inters...), - predicates: append([]predicate.Referrer{}, rq.predicates...), - withReferredBy: rq.withReferredBy.Clone(), - withReferences: rq.withReferences.Clone(), - withWorkflows: rq.withWorkflows.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]referrer.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Referrer{}, _q.predicates...), + withReferredBy: _q.withReferredBy.Clone(), + withReferences: _q.withReferences.Clone(), + withWorkflows: _q.withWorkflows.Clone(), // clone intermediate query. - sql: rq.sql.Clone(), - path: rq.path, - modifiers: append([]func(*sql.Selector){}, rq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithReferredBy tells the query-builder to eager-load the nodes that are connected to // the "referred_by" edge. 
The optional arguments are used to configure the query builder of the edge. -func (rq *ReferrerQuery) WithReferredBy(opts ...func(*ReferrerQuery)) *ReferrerQuery { - query := (&ReferrerClient{config: rq.config}).Query() +func (_q *ReferrerQuery) WithReferredBy(opts ...func(*ReferrerQuery)) *ReferrerQuery { + query := (&ReferrerClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - rq.withReferredBy = query - return rq + _q.withReferredBy = query + return _q } // WithReferences tells the query-builder to eager-load the nodes that are connected to // the "references" edge. The optional arguments are used to configure the query builder of the edge. -func (rq *ReferrerQuery) WithReferences(opts ...func(*ReferrerQuery)) *ReferrerQuery { - query := (&ReferrerClient{config: rq.config}).Query() +func (_q *ReferrerQuery) WithReferences(opts ...func(*ReferrerQuery)) *ReferrerQuery { + query := (&ReferrerClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - rq.withReferences = query - return rq + _q.withReferences = query + return _q } // WithWorkflows tells the query-builder to eager-load the nodes that are connected to // the "workflows" edge. The optional arguments are used to configure the query builder of the edge. -func (rq *ReferrerQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *ReferrerQuery { - query := (&WorkflowClient{config: rq.config}).Query() +func (_q *ReferrerQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *ReferrerQuery { + query := (&WorkflowClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - rq.withWorkflows = query - return rq + _q.withWorkflows = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -381,10 +381,10 @@ func (rq *ReferrerQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *ReferrerQu // GroupBy(referrer.FieldDigest). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (rq *ReferrerQuery) GroupBy(field string, fields ...string) *ReferrerGroupBy { - rq.ctx.Fields = append([]string{field}, fields...) - grbuild := &ReferrerGroupBy{build: rq} - grbuild.flds = &rq.ctx.Fields +func (_q *ReferrerQuery) GroupBy(field string, fields ...string) *ReferrerGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ReferrerGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = referrer.Label grbuild.scan = grbuild.Scan return grbuild @@ -402,92 +402,92 @@ func (rq *ReferrerQuery) GroupBy(field string, fields ...string) *ReferrerGroupB // client.Referrer.Query(). // Select(referrer.FieldDigest). // Scan(ctx, &v) -func (rq *ReferrerQuery) Select(fields ...string) *ReferrerSelect { - rq.ctx.Fields = append(rq.ctx.Fields, fields...) - sbuild := &ReferrerSelect{ReferrerQuery: rq} +func (_q *ReferrerQuery) Select(fields ...string) *ReferrerSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ReferrerSelect{ReferrerQuery: _q} sbuild.label = referrer.Label - sbuild.flds, sbuild.scan = &rq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a ReferrerSelect configured with the given aggregations. -func (rq *ReferrerQuery) Aggregate(fns ...AggregateFunc) *ReferrerSelect { - return rq.Select().Aggregate(fns...) +func (_q *ReferrerQuery) Aggregate(fns ...AggregateFunc) *ReferrerSelect { + return _q.Select().Aggregate(fns...) 
} -func (rq *ReferrerQuery) prepareQuery(ctx context.Context) error { - for _, inter := range rq.inters { +func (_q *ReferrerQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, rq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range rq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !referrer.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if rq.path != nil { - prev, err := rq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - rq.sql = prev + _q.sql = prev } return nil } -func (rq *ReferrerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Referrer, error) { +func (_q *ReferrerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Referrer, error) { var ( nodes = []*Referrer{} - _spec = rq.querySpec() + _spec = _q.querySpec() loadedTypes = [3]bool{ - rq.withReferredBy != nil, - rq.withReferences != nil, - rq.withWorkflows != nil, + _q.withReferredBy != nil, + _q.withReferences != nil, + _q.withWorkflows != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*Referrer).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &Referrer{config: rq.config} + node := &Referrer{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(rq.modifiers) > 0 { - _spec.Modifiers = rq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, rq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := rq.withReferredBy; query != nil { - if err := rq.loadReferredBy(ctx, query, nodes, + if query := _q.withReferredBy; query != nil { + if err := _q.loadReferredBy(ctx, query, nodes, func(n *Referrer) { n.Edges.ReferredBy = []*Referrer{} }, func(n *Referrer, e *Referrer) { n.Edges.ReferredBy = append(n.Edges.ReferredBy, e) }); err != nil { return nil, err } } - if query := rq.withReferences; query != nil { - if err := rq.loadReferences(ctx, query, nodes, + if query := _q.withReferences; query != nil { + if err := _q.loadReferences(ctx, query, nodes, func(n *Referrer) { n.Edges.References = []*Referrer{} }, func(n *Referrer, e *Referrer) { n.Edges.References = append(n.Edges.References, e) }); err != nil { return nil, err } } - if query := rq.withWorkflows; query != nil { - if err := rq.loadWorkflows(ctx, query, nodes, + if query := _q.withWorkflows; query != nil { + if err := _q.loadWorkflows(ctx, query, nodes, func(n *Referrer) { n.Edges.Workflows = []*Workflow{} }, func(n *Referrer, e *Workflow) { n.Edges.Workflows = append(n.Edges.Workflows, e) }); err != nil { return nil, err @@ -496,7 +496,7 @@ func (rq *ReferrerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Ref return nodes, nil } -func (rq *ReferrerQuery) loadReferredBy(ctx context.Context, query *ReferrerQuery, nodes []*Referrer, init func(*Referrer), assign func(*Referrer, *Referrer)) error { +func (_q *ReferrerQuery) loadReferredBy(ctx context.Context, query *ReferrerQuery, nodes []*Referrer, init func(*Referrer), assign 
func(*Referrer, *Referrer)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[uuid.UUID]*Referrer) nids := make(map[uuid.UUID]map[*Referrer]struct{}) @@ -557,7 +557,7 @@ func (rq *ReferrerQuery) loadReferredBy(ctx context.Context, query *ReferrerQuer } return nil } -func (rq *ReferrerQuery) loadReferences(ctx context.Context, query *ReferrerQuery, nodes []*Referrer, init func(*Referrer), assign func(*Referrer, *Referrer)) error { +func (_q *ReferrerQuery) loadReferences(ctx context.Context, query *ReferrerQuery, nodes []*Referrer, init func(*Referrer), assign func(*Referrer, *Referrer)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[uuid.UUID]*Referrer) nids := make(map[uuid.UUID]map[*Referrer]struct{}) @@ -618,7 +618,7 @@ func (rq *ReferrerQuery) loadReferences(ctx context.Context, query *ReferrerQuer } return nil } -func (rq *ReferrerQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, nodes []*Referrer, init func(*Referrer), assign func(*Referrer, *Workflow)) error { +func (_q *ReferrerQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, nodes []*Referrer, init func(*Referrer), assign func(*Referrer, *Workflow)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[uuid.UUID]*Referrer) nids := make(map[uuid.UUID]map[*Referrer]struct{}) @@ -680,27 +680,27 @@ func (rq *ReferrerQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery return nil } -func (rq *ReferrerQuery) sqlCount(ctx context.Context) (int, error) { - _spec := rq.querySpec() - if len(rq.modifiers) > 0 { - _spec.Modifiers = rq.modifiers +func (_q *ReferrerQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = rq.ctx.Fields - if len(rq.ctx.Fields) > 0 { - _spec.Unique = rq.ctx.Unique != nil && *rq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, rq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (rq *ReferrerQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *ReferrerQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(referrer.Table, referrer.Columns, sqlgraph.NewFieldSpec(referrer.FieldID, field.TypeUUID)) - _spec.From = rq.sql - if unique := rq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if rq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := rq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, referrer.FieldID) for i := range fields { @@ -709,20 +709,20 @@ func (rq *ReferrerQuery) querySpec() *sqlgraph.QuerySpec { } } } - if ps := rq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := rq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := rq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := rq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -732,36 +732,36 @@ func (rq *ReferrerQuery) querySpec() 
*sqlgraph.QuerySpec { return _spec } -func (rq *ReferrerQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(rq.driver.Dialect()) +func (_q *ReferrerQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(referrer.Table) - columns := rq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = referrer.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if rq.sql != nil { - selector = rq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if rq.ctx.Unique != nil && *rq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range rq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range rq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range rq.order { + for _, p := range _q.order { p(selector) } - if offset := rq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := rq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -770,33 +770,33 @@ func (rq *ReferrerQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (rq *ReferrerQuery) ForUpdate(opts ...sql.LockOption) *ReferrerQuery { - if rq.driver.Dialect() == dialect.Postgres { - rq.Unique(false) +func (_q *ReferrerQuery) ForUpdate(opts ...sql.LockOption) *ReferrerQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - rq.modifiers = append(rq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return rq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (rq *ReferrerQuery) ForShare(opts ...sql.LockOption) *ReferrerQuery { - if rq.driver.Dialect() == dialect.Postgres { - rq.Unique(false) +func (_q *ReferrerQuery) ForShare(opts ...sql.LockOption) *ReferrerQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - rq.modifiers = append(rq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return rq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (rq *ReferrerQuery) Modify(modifiers ...func(s *sql.Selector)) *ReferrerSelect { - rq.modifiers = append(rq.modifiers, modifiers...) - return rq.Select() +func (_q *ReferrerQuery) Modify(modifiers ...func(s *sql.Selector)) *ReferrerSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // ReferrerGroupBy is the group-by builder for Referrer entities. @@ -806,41 +806,41 @@ type ReferrerGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (rgb *ReferrerGroupBy) Aggregate(fns ...AggregateFunc) *ReferrerGroupBy { - rgb.fns = append(rgb.fns, fns...) 
- return rgb +func (_g *ReferrerGroupBy) Aggregate(fns ...AggregateFunc) *ReferrerGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (rgb *ReferrerGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, rgb.build.ctx, ent.OpQueryGroupBy) - if err := rgb.build.prepareQuery(ctx); err != nil { +func (_g *ReferrerGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*ReferrerQuery, *ReferrerGroupBy](ctx, rgb.build, rgb, rgb.build.inters, v) + return scanWithInterceptors[*ReferrerQuery, *ReferrerGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (rgb *ReferrerGroupBy) sqlScan(ctx context.Context, root *ReferrerQuery, v any) error { +func (_g *ReferrerGroupBy) sqlScan(ctx context.Context, root *ReferrerQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(rgb.fns)) - for _, fn := range rgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*rgb.flds)+len(rgb.fns)) - for _, f := range *rgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*rgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := rgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -854,27 +854,27 @@ type ReferrerSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (rs *ReferrerSelect) Aggregate(fns ...AggregateFunc) *ReferrerSelect { - rs.fns = append(rs.fns, fns...) - return rs +func (_s *ReferrerSelect) Aggregate(fns ...AggregateFunc) *ReferrerSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (rs *ReferrerSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, rs.ctx, ent.OpQuerySelect) - if err := rs.prepareQuery(ctx); err != nil { +func (_s *ReferrerSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*ReferrerQuery, *ReferrerSelect](ctx, rs.ReferrerQuery, rs, rs.inters, v) + return scanWithInterceptors[*ReferrerQuery, *ReferrerSelect](ctx, _s.ReferrerQuery, _s, _s.inters, v) } -func (rs *ReferrerSelect) sqlScan(ctx context.Context, root *ReferrerQuery, v any) error { +func (_s *ReferrerSelect) sqlScan(ctx context.Context, root *ReferrerQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(rs.fns)) - for _, fn := range rs.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*rs.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -882,7 +882,7 @@ func (rs *ReferrerSelect) sqlScan(ctx context.Context, root *ReferrerQuery, v an } rows := &sql.Rows{} query, args := selector.Query() - if err := rs.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -890,7 +890,7 @@ func (rs *ReferrerSelect) sqlScan(ctx context.Context, root *ReferrerQuery, v an } // Modify adds a query modifier for attaching custom logic to queries. -func (rs *ReferrerSelect) Modify(modifiers ...func(s *sql.Selector)) *ReferrerSelect { - rs.modifiers = append(rs.modifiers, modifiers...) - return rs +func (_s *ReferrerSelect) Modify(modifiers ...func(s *sql.Selector)) *ReferrerSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/referrer_update.go b/app/controlplane/pkg/data/ent/referrer_update.go index b0cf60710..64dc92c0d 100644 --- a/app/controlplane/pkg/data/ent/referrer_update.go +++ b/app/controlplane/pkg/data/ent/referrer_update.go @@ -25,96 +25,96 @@ type ReferrerUpdate struct { } // Where appends a list predicates to the ReferrerUpdate builder. -func (ru *ReferrerUpdate) Where(ps ...predicate.Referrer) *ReferrerUpdate { - ru.mutation.Where(ps...) - return ru +func (_u *ReferrerUpdate) Where(ps ...predicate.Referrer) *ReferrerUpdate { + _u.mutation.Where(ps...) + return _u } // AddReferenceIDs adds the "references" edge to the Referrer entity by IDs. -func (ru *ReferrerUpdate) AddReferenceIDs(ids ...uuid.UUID) *ReferrerUpdate { - ru.mutation.AddReferenceIDs(ids...) - return ru +func (_u *ReferrerUpdate) AddReferenceIDs(ids ...uuid.UUID) *ReferrerUpdate { + _u.mutation.AddReferenceIDs(ids...) + return _u } // AddReferences adds the "references" edges to the Referrer entity. -func (ru *ReferrerUpdate) AddReferences(r ...*Referrer) *ReferrerUpdate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *ReferrerUpdate) AddReferences(v ...*Referrer) *ReferrerUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ru.AddReferenceIDs(ids...) + return _u.AddReferenceIDs(ids...) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (ru *ReferrerUpdate) AddWorkflowIDs(ids ...uuid.UUID) *ReferrerUpdate { - ru.mutation.AddWorkflowIDs(ids...) - return ru +func (_u *ReferrerUpdate) AddWorkflowIDs(ids ...uuid.UUID) *ReferrerUpdate { + _u.mutation.AddWorkflowIDs(ids...) + return _u } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (ru *ReferrerUpdate) AddWorkflows(w ...*Workflow) *ReferrerUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ReferrerUpdate) AddWorkflows(v ...*Workflow) *ReferrerUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ru.AddWorkflowIDs(ids...) + return _u.AddWorkflowIDs(ids...) } // Mutation returns the ReferrerMutation object of the builder. -func (ru *ReferrerUpdate) Mutation() *ReferrerMutation { - return ru.mutation +func (_u *ReferrerUpdate) Mutation() *ReferrerMutation { + return _u.mutation } // ClearReferences clears all "references" edges to the Referrer entity. 
-func (ru *ReferrerUpdate) ClearReferences() *ReferrerUpdate { - ru.mutation.ClearReferences() - return ru +func (_u *ReferrerUpdate) ClearReferences() *ReferrerUpdate { + _u.mutation.ClearReferences() + return _u } // RemoveReferenceIDs removes the "references" edge to Referrer entities by IDs. -func (ru *ReferrerUpdate) RemoveReferenceIDs(ids ...uuid.UUID) *ReferrerUpdate { - ru.mutation.RemoveReferenceIDs(ids...) - return ru +func (_u *ReferrerUpdate) RemoveReferenceIDs(ids ...uuid.UUID) *ReferrerUpdate { + _u.mutation.RemoveReferenceIDs(ids...) + return _u } // RemoveReferences removes "references" edges to Referrer entities. -func (ru *ReferrerUpdate) RemoveReferences(r ...*Referrer) *ReferrerUpdate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *ReferrerUpdate) RemoveReferences(v ...*Referrer) *ReferrerUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ru.RemoveReferenceIDs(ids...) + return _u.RemoveReferenceIDs(ids...) } // ClearWorkflows clears all "workflows" edges to the Workflow entity. -func (ru *ReferrerUpdate) ClearWorkflows() *ReferrerUpdate { - ru.mutation.ClearWorkflows() - return ru +func (_u *ReferrerUpdate) ClearWorkflows() *ReferrerUpdate { + _u.mutation.ClearWorkflows() + return _u } // RemoveWorkflowIDs removes the "workflows" edge to Workflow entities by IDs. -func (ru *ReferrerUpdate) RemoveWorkflowIDs(ids ...uuid.UUID) *ReferrerUpdate { - ru.mutation.RemoveWorkflowIDs(ids...) - return ru +func (_u *ReferrerUpdate) RemoveWorkflowIDs(ids ...uuid.UUID) *ReferrerUpdate { + _u.mutation.RemoveWorkflowIDs(ids...) + return _u } // RemoveWorkflows removes "workflows" edges to Workflow entities. -func (ru *ReferrerUpdate) RemoveWorkflows(w ...*Workflow) *ReferrerUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ReferrerUpdate) RemoveWorkflows(v ...*Workflow) *ReferrerUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ru.RemoveWorkflowIDs(ids...) + return _u.RemoveWorkflowIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (ru *ReferrerUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, ru.sqlSave, ru.mutation, ru.hooks) +func (_u *ReferrerUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (ru *ReferrerUpdate) SaveX(ctx context.Context) int { - affected, err := ru.Save(ctx) +func (_u *ReferrerUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -122,40 +122,40 @@ func (ru *ReferrerUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (ru *ReferrerUpdate) Exec(ctx context.Context) error { - _, err := ru.Save(ctx) +func (_u *ReferrerUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (ru *ReferrerUpdate) ExecX(ctx context.Context) { - if err := ru.Exec(ctx); err != nil { +func (_u *ReferrerUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (ru *ReferrerUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ReferrerUpdate { - ru.modifiers = append(ru.modifiers, modifiers...) 
- return ru +func (_u *ReferrerUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ReferrerUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (ru *ReferrerUpdate) sqlSave(ctx context.Context) (n int, err error) { +func (_u *ReferrerUpdate) sqlSave(ctx context.Context) (_node int, err error) { _spec := sqlgraph.NewUpdateSpec(referrer.Table, referrer.Columns, sqlgraph.NewFieldSpec(referrer.FieldID, field.TypeUUID)) - if ps := ru.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if ru.mutation.MetadataCleared() { + if _u.mutation.MetadataCleared() { _spec.ClearField(referrer.FieldMetadata, field.TypeJSON) } - if ru.mutation.AnnotationsCleared() { + if _u.mutation.AnnotationsCleared() { _spec.ClearField(referrer.FieldAnnotations, field.TypeJSON) } - if ru.mutation.ReferencesCleared() { + if _u.mutation.ReferencesCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -168,7 +168,7 @@ func (ru *ReferrerUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ru.mutation.RemovedReferencesIDs(); len(nodes) > 0 && !ru.mutation.ReferencesCleared() { + if nodes := _u.mutation.RemovedReferencesIDs(); len(nodes) > 0 && !_u.mutation.ReferencesCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -184,7 +184,7 @@ func (ru *ReferrerUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ru.mutation.ReferencesIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ReferencesIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -200,7 +200,7 @@ func (ru *ReferrerUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ru.mutation.WorkflowsCleared() { + if _u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -213,7 +213,7 @@ func (ru *ReferrerUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ru.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !ru.mutation.WorkflowsCleared() { + if nodes := _u.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -229,7 +229,7 @@ func (ru *ReferrerUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ru.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -245,8 +245,8 @@ func (ru *ReferrerUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(ru.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, ru.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) 
+ if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{referrer.Label} } else if sqlgraph.IsConstraintError(err) { @@ -254,8 +254,8 @@ func (ru *ReferrerUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - ru.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // ReferrerUpdateOne is the builder for updating a single Referrer entity. @@ -268,103 +268,103 @@ type ReferrerUpdateOne struct { } // AddReferenceIDs adds the "references" edge to the Referrer entity by IDs. -func (ruo *ReferrerUpdateOne) AddReferenceIDs(ids ...uuid.UUID) *ReferrerUpdateOne { - ruo.mutation.AddReferenceIDs(ids...) - return ruo +func (_u *ReferrerUpdateOne) AddReferenceIDs(ids ...uuid.UUID) *ReferrerUpdateOne { + _u.mutation.AddReferenceIDs(ids...) + return _u } // AddReferences adds the "references" edges to the Referrer entity. -func (ruo *ReferrerUpdateOne) AddReferences(r ...*Referrer) *ReferrerUpdateOne { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *ReferrerUpdateOne) AddReferences(v ...*Referrer) *ReferrerUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ruo.AddReferenceIDs(ids...) + return _u.AddReferenceIDs(ids...) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (ruo *ReferrerUpdateOne) AddWorkflowIDs(ids ...uuid.UUID) *ReferrerUpdateOne { - ruo.mutation.AddWorkflowIDs(ids...) - return ruo +func (_u *ReferrerUpdateOne) AddWorkflowIDs(ids ...uuid.UUID) *ReferrerUpdateOne { + _u.mutation.AddWorkflowIDs(ids...) + return _u } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (ruo *ReferrerUpdateOne) AddWorkflows(w ...*Workflow) *ReferrerUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ReferrerUpdateOne) AddWorkflows(v ...*Workflow) *ReferrerUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ruo.AddWorkflowIDs(ids...) + return _u.AddWorkflowIDs(ids...) } // Mutation returns the ReferrerMutation object of the builder. -func (ruo *ReferrerUpdateOne) Mutation() *ReferrerMutation { - return ruo.mutation +func (_u *ReferrerUpdateOne) Mutation() *ReferrerMutation { + return _u.mutation } // ClearReferences clears all "references" edges to the Referrer entity. -func (ruo *ReferrerUpdateOne) ClearReferences() *ReferrerUpdateOne { - ruo.mutation.ClearReferences() - return ruo +func (_u *ReferrerUpdateOne) ClearReferences() *ReferrerUpdateOne { + _u.mutation.ClearReferences() + return _u } // RemoveReferenceIDs removes the "references" edge to Referrer entities by IDs. -func (ruo *ReferrerUpdateOne) RemoveReferenceIDs(ids ...uuid.UUID) *ReferrerUpdateOne { - ruo.mutation.RemoveReferenceIDs(ids...) - return ruo +func (_u *ReferrerUpdateOne) RemoveReferenceIDs(ids ...uuid.UUID) *ReferrerUpdateOne { + _u.mutation.RemoveReferenceIDs(ids...) + return _u } // RemoveReferences removes "references" edges to Referrer entities. -func (ruo *ReferrerUpdateOne) RemoveReferences(r ...*Referrer) *ReferrerUpdateOne { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *ReferrerUpdateOne) RemoveReferences(v ...*Referrer) *ReferrerUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ruo.RemoveReferenceIDs(ids...) + return _u.RemoveReferenceIDs(ids...) 
} // ClearWorkflows clears all "workflows" edges to the Workflow entity. -func (ruo *ReferrerUpdateOne) ClearWorkflows() *ReferrerUpdateOne { - ruo.mutation.ClearWorkflows() - return ruo +func (_u *ReferrerUpdateOne) ClearWorkflows() *ReferrerUpdateOne { + _u.mutation.ClearWorkflows() + return _u } // RemoveWorkflowIDs removes the "workflows" edge to Workflow entities by IDs. -func (ruo *ReferrerUpdateOne) RemoveWorkflowIDs(ids ...uuid.UUID) *ReferrerUpdateOne { - ruo.mutation.RemoveWorkflowIDs(ids...) - return ruo +func (_u *ReferrerUpdateOne) RemoveWorkflowIDs(ids ...uuid.UUID) *ReferrerUpdateOne { + _u.mutation.RemoveWorkflowIDs(ids...) + return _u } // RemoveWorkflows removes "workflows" edges to Workflow entities. -func (ruo *ReferrerUpdateOne) RemoveWorkflows(w ...*Workflow) *ReferrerUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *ReferrerUpdateOne) RemoveWorkflows(v ...*Workflow) *ReferrerUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return ruo.RemoveWorkflowIDs(ids...) + return _u.RemoveWorkflowIDs(ids...) } // Where appends a list predicates to the ReferrerUpdate builder. -func (ruo *ReferrerUpdateOne) Where(ps ...predicate.Referrer) *ReferrerUpdateOne { - ruo.mutation.Where(ps...) - return ruo +func (_u *ReferrerUpdateOne) Where(ps ...predicate.Referrer) *ReferrerUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (ruo *ReferrerUpdateOne) Select(field string, fields ...string) *ReferrerUpdateOne { - ruo.fields = append([]string{field}, fields...) - return ruo +func (_u *ReferrerUpdateOne) Select(field string, fields ...string) *ReferrerUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated Referrer entity. -func (ruo *ReferrerUpdateOne) Save(ctx context.Context) (*Referrer, error) { - return withHooks(ctx, ruo.sqlSave, ruo.mutation, ruo.hooks) +func (_u *ReferrerUpdateOne) Save(ctx context.Context) (*Referrer, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (ruo *ReferrerUpdateOne) SaveX(ctx context.Context) *Referrer { - node, err := ruo.Save(ctx) +func (_u *ReferrerUpdateOne) SaveX(ctx context.Context) *Referrer { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -372,32 +372,32 @@ func (ruo *ReferrerUpdateOne) SaveX(ctx context.Context) *Referrer { } // Exec executes the query on the entity. -func (ruo *ReferrerUpdateOne) Exec(ctx context.Context) error { - _, err := ruo.Save(ctx) +func (_u *ReferrerUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (ruo *ReferrerUpdateOne) ExecX(ctx context.Context) { - if err := ruo.Exec(ctx); err != nil { +func (_u *ReferrerUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (ruo *ReferrerUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ReferrerUpdateOne { - ruo.modifiers = append(ruo.modifiers, modifiers...) - return ruo +func (_u *ReferrerUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ReferrerUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err error) { +func (_u *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err error) { _spec := sqlgraph.NewUpdateSpec(referrer.Table, referrer.Columns, sqlgraph.NewFieldSpec(referrer.FieldID, field.TypeUUID)) - id, ok := ruo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Referrer.id" for update`)} } _spec.Node.ID.Value = id - if fields := ruo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, referrer.FieldID) for _, f := range fields { @@ -409,20 +409,20 @@ func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err } } } - if ps := ruo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if ruo.mutation.MetadataCleared() { + if _u.mutation.MetadataCleared() { _spec.ClearField(referrer.FieldMetadata, field.TypeJSON) } - if ruo.mutation.AnnotationsCleared() { + if _u.mutation.AnnotationsCleared() { _spec.ClearField(referrer.FieldAnnotations, field.TypeJSON) } - if ruo.mutation.ReferencesCleared() { + if _u.mutation.ReferencesCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -435,7 +435,7 @@ func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ruo.mutation.RemovedReferencesIDs(); len(nodes) > 0 && !ruo.mutation.ReferencesCleared() { + if nodes := _u.mutation.RemovedReferencesIDs(); len(nodes) > 0 && !_u.mutation.ReferencesCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -451,7 +451,7 @@ func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ruo.mutation.ReferencesIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ReferencesIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -467,7 +467,7 @@ func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if ruo.mutation.WorkflowsCleared() { + if _u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -480,7 +480,7 @@ func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ruo.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !ruo.mutation.WorkflowsCleared() { + if nodes := _u.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -496,7 +496,7 @@ func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := ruo.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -512,11 +512,11 @@ func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(ruo.modifiers...) 
- _node = &Referrer{config: ruo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &Referrer{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, ruo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{referrer.Label} } else if sqlgraph.IsConstraintError(err) { @@ -524,6 +524,6 @@ func (ruo *ReferrerUpdateOne) sqlSave(ctx context.Context) (_node *Referrer, err } return nil, err } - ruo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/robotaccount.go b/app/controlplane/pkg/data/ent/robotaccount.go index af8ccb953..e2483dedb 100644 --- a/app/controlplane/pkg/data/ent/robotaccount.go +++ b/app/controlplane/pkg/data/ent/robotaccount.go @@ -74,7 +74,7 @@ func (*RobotAccount) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the RobotAccount fields. -func (ra *RobotAccount) assignValues(columns []string, values []any) error { +func (_m *RobotAccount) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -84,35 +84,35 @@ func (ra *RobotAccount) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - ra.ID = *value + _m.ID = *value } case robotaccount.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - ra.Name = value.String + _m.Name = value.String } case robotaccount.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - ra.CreatedAt = value.Time + _m.CreatedAt = value.Time } case robotaccount.FieldRevokedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field revoked_at", values[i]) } else if value.Valid { - ra.RevokedAt = value.Time + _m.RevokedAt = value.Time } case robotaccount.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field workflow_robotaccounts", values[i]) } else if value.Valid { - ra.workflow_robotaccounts = new(uuid.UUID) - *ra.workflow_robotaccounts = *value.S.(*uuid.UUID) + _m.workflow_robotaccounts = new(uuid.UUID) + *_m.workflow_robotaccounts = *value.S.(*uuid.UUID) } default: - ra.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -120,46 +120,46 @@ func (ra *RobotAccount) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the RobotAccount. // This includes values selected through modifiers, order, etc. -func (ra *RobotAccount) Value(name string) (ent.Value, error) { - return ra.selectValues.Get(name) +func (_m *RobotAccount) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryWorkflow queries the "workflow" edge of the RobotAccount entity. 
-func (ra *RobotAccount) QueryWorkflow() *WorkflowQuery { - return NewRobotAccountClient(ra.config).QueryWorkflow(ra) +func (_m *RobotAccount) QueryWorkflow() *WorkflowQuery { + return NewRobotAccountClient(_m.config).QueryWorkflow(_m) } // Update returns a builder for updating this RobotAccount. // Note that you need to call RobotAccount.Unwrap() before calling this method if this RobotAccount // was returned from a transaction, and the transaction was committed or rolled back. -func (ra *RobotAccount) Update() *RobotAccountUpdateOne { - return NewRobotAccountClient(ra.config).UpdateOne(ra) +func (_m *RobotAccount) Update() *RobotAccountUpdateOne { + return NewRobotAccountClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the RobotAccount entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (ra *RobotAccount) Unwrap() *RobotAccount { - _tx, ok := ra.config.driver.(*txDriver) +func (_m *RobotAccount) Unwrap() *RobotAccount { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: RobotAccount is not a transactional entity") } - ra.config.driver = _tx.drv - return ra + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (ra *RobotAccount) String() string { +func (_m *RobotAccount) String() string { var builder strings.Builder builder.WriteString("RobotAccount(") - builder.WriteString(fmt.Sprintf("id=%v, ", ra.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("name=") - builder.WriteString(ra.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(ra.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("revoked_at=") - builder.WriteString(ra.RevokedAt.Format(time.ANSIC)) + builder.WriteString(_m.RevokedAt.Format(time.ANSIC)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/robotaccount_create.go b/app/controlplane/pkg/data/ent/robotaccount_create.go index 38210f028..e714db92e 100644 --- a/app/controlplane/pkg/data/ent/robotaccount_create.go +++ b/app/controlplane/pkg/data/ent/robotaccount_create.go @@ -26,86 +26,86 @@ type RobotAccountCreate struct { } // SetName sets the "name" field. -func (rac *RobotAccountCreate) SetName(s string) *RobotAccountCreate { - rac.mutation.SetName(s) - return rac +func (_c *RobotAccountCreate) SetName(v string) *RobotAccountCreate { + _c.mutation.SetName(v) + return _c } // SetCreatedAt sets the "created_at" field. -func (rac *RobotAccountCreate) SetCreatedAt(t time.Time) *RobotAccountCreate { - rac.mutation.SetCreatedAt(t) - return rac +func (_c *RobotAccountCreate) SetCreatedAt(v time.Time) *RobotAccountCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (rac *RobotAccountCreate) SetNillableCreatedAt(t *time.Time) *RobotAccountCreate { - if t != nil { - rac.SetCreatedAt(*t) +func (_c *RobotAccountCreate) SetNillableCreatedAt(v *time.Time) *RobotAccountCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return rac + return _c } // SetRevokedAt sets the "revoked_at" field. 
-func (rac *RobotAccountCreate) SetRevokedAt(t time.Time) *RobotAccountCreate { - rac.mutation.SetRevokedAt(t) - return rac +func (_c *RobotAccountCreate) SetRevokedAt(v time.Time) *RobotAccountCreate { + _c.mutation.SetRevokedAt(v) + return _c } // SetNillableRevokedAt sets the "revoked_at" field if the given value is not nil. -func (rac *RobotAccountCreate) SetNillableRevokedAt(t *time.Time) *RobotAccountCreate { - if t != nil { - rac.SetRevokedAt(*t) +func (_c *RobotAccountCreate) SetNillableRevokedAt(v *time.Time) *RobotAccountCreate { + if v != nil { + _c.SetRevokedAt(*v) } - return rac + return _c } // SetID sets the "id" field. -func (rac *RobotAccountCreate) SetID(u uuid.UUID) *RobotAccountCreate { - rac.mutation.SetID(u) - return rac +func (_c *RobotAccountCreate) SetID(v uuid.UUID) *RobotAccountCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (rac *RobotAccountCreate) SetNillableID(u *uuid.UUID) *RobotAccountCreate { - if u != nil { - rac.SetID(*u) +func (_c *RobotAccountCreate) SetNillableID(v *uuid.UUID) *RobotAccountCreate { + if v != nil { + _c.SetID(*v) } - return rac + return _c } // SetWorkflowID sets the "workflow" edge to the Workflow entity by ID. -func (rac *RobotAccountCreate) SetWorkflowID(id uuid.UUID) *RobotAccountCreate { - rac.mutation.SetWorkflowID(id) - return rac +func (_c *RobotAccountCreate) SetWorkflowID(id uuid.UUID) *RobotAccountCreate { + _c.mutation.SetWorkflowID(id) + return _c } // SetNillableWorkflowID sets the "workflow" edge to the Workflow entity by ID if the given value is not nil. -func (rac *RobotAccountCreate) SetNillableWorkflowID(id *uuid.UUID) *RobotAccountCreate { +func (_c *RobotAccountCreate) SetNillableWorkflowID(id *uuid.UUID) *RobotAccountCreate { if id != nil { - rac = rac.SetWorkflowID(*id) + _c = _c.SetWorkflowID(*id) } - return rac + return _c } // SetWorkflow sets the "workflow" edge to the Workflow entity. -func (rac *RobotAccountCreate) SetWorkflow(w *Workflow) *RobotAccountCreate { - return rac.SetWorkflowID(w.ID) +func (_c *RobotAccountCreate) SetWorkflow(v *Workflow) *RobotAccountCreate { + return _c.SetWorkflowID(v.ID) } // Mutation returns the RobotAccountMutation object of the builder. -func (rac *RobotAccountCreate) Mutation() *RobotAccountMutation { - return rac.mutation +func (_c *RobotAccountCreate) Mutation() *RobotAccountMutation { + return _c.mutation } // Save creates the RobotAccount in the database. -func (rac *RobotAccountCreate) Save(ctx context.Context) (*RobotAccount, error) { - rac.defaults() - return withHooks(ctx, rac.sqlSave, rac.mutation, rac.hooks) +func (_c *RobotAccountCreate) Save(ctx context.Context) (*RobotAccount, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (rac *RobotAccountCreate) SaveX(ctx context.Context) *RobotAccount { - v, err := rac.Save(ctx) +func (_c *RobotAccountCreate) SaveX(ctx context.Context) *RobotAccount { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -113,47 +113,47 @@ func (rac *RobotAccountCreate) SaveX(ctx context.Context) *RobotAccount { } // Exec executes the query. -func (rac *RobotAccountCreate) Exec(ctx context.Context) error { - _, err := rac.Save(ctx) +func (_c *RobotAccountCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. 
-func (rac *RobotAccountCreate) ExecX(ctx context.Context) { - if err := rac.Exec(ctx); err != nil { +func (_c *RobotAccountCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (rac *RobotAccountCreate) defaults() { - if _, ok := rac.mutation.CreatedAt(); !ok { +func (_c *RobotAccountCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := robotaccount.DefaultCreatedAt() - rac.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := rac.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := robotaccount.DefaultID() - rac.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. -func (rac *RobotAccountCreate) check() error { - if _, ok := rac.mutation.Name(); !ok { +func (_c *RobotAccountCreate) check() error { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "RobotAccount.name"`)} } - if _, ok := rac.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "RobotAccount.created_at"`)} } return nil } -func (rac *RobotAccountCreate) sqlSave(ctx context.Context) (*RobotAccount, error) { - if err := rac.check(); err != nil { +func (_c *RobotAccountCreate) sqlSave(ctx context.Context) (*RobotAccount, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := rac.createSpec() - if err := sqlgraph.CreateNode(ctx, rac.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -166,34 +166,34 @@ func (rac *RobotAccountCreate) sqlSave(ctx context.Context) (*RobotAccount, erro return nil, err } } - rac.mutation.id = &_node.ID - rac.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (rac *RobotAccountCreate) createSpec() (*RobotAccount, *sqlgraph.CreateSpec) { +func (_c *RobotAccountCreate) createSpec() (*RobotAccount, *sqlgraph.CreateSpec) { var ( - _node = &RobotAccount{config: rac.config} + _node = &RobotAccount{config: _c.config} _spec = sqlgraph.NewCreateSpec(robotaccount.Table, sqlgraph.NewFieldSpec(robotaccount.FieldID, field.TypeUUID)) ) - _spec.OnConflict = rac.conflict - if id, ok := rac.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := rac.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { _spec.SetField(robotaccount.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := rac.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(robotaccount.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := rac.mutation.RevokedAt(); ok { + if value, ok := _c.mutation.RevokedAt(); ok { _spec.SetField(robotaccount.FieldRevokedAt, field.TypeTime, value) _node.RevokedAt = value } - if nodes := rac.mutation.WorkflowIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -229,10 +229,10 @@ func (rac *RobotAccountCreate) createSpec() (*RobotAccount, *sqlgraph.CreateSpec // SetName(v+v). // }). 
// Exec(ctx) -func (rac *RobotAccountCreate) OnConflict(opts ...sql.ConflictOption) *RobotAccountUpsertOne { - rac.conflict = opts +func (_c *RobotAccountCreate) OnConflict(opts ...sql.ConflictOption) *RobotAccountUpsertOne { + _c.conflict = opts return &RobotAccountUpsertOne{ - create: rac, + create: _c, } } @@ -242,10 +242,10 @@ func (rac *RobotAccountCreate) OnConflict(opts ...sql.ConflictOption) *RobotAcco // client.RobotAccount.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (rac *RobotAccountCreate) OnConflictColumns(columns ...string) *RobotAccountUpsertOne { - rac.conflict = append(rac.conflict, sql.ConflictColumns(columns...)) +func (_c *RobotAccountCreate) OnConflictColumns(columns ...string) *RobotAccountUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &RobotAccountUpsertOne{ - create: rac, + create: _c, } } @@ -425,16 +425,16 @@ type RobotAccountCreateBulk struct { } // Save creates the RobotAccount entities in the database. -func (racb *RobotAccountCreateBulk) Save(ctx context.Context) ([]*RobotAccount, error) { - if racb.err != nil { - return nil, racb.err - } - specs := make([]*sqlgraph.CreateSpec, len(racb.builders)) - nodes := make([]*RobotAccount, len(racb.builders)) - mutators := make([]Mutator, len(racb.builders)) - for i := range racb.builders { +func (_c *RobotAccountCreateBulk) Save(ctx context.Context) ([]*RobotAccount, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*RobotAccount, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := racb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*RobotAccountMutation) @@ -448,12 +448,12 @@ func (racb *RobotAccountCreateBulk) Save(ctx context.Context) ([]*RobotAccount, var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, racb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = racb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, racb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -473,7 +473,7 @@ func (racb *RobotAccountCreateBulk) Save(ctx context.Context) ([]*RobotAccount, }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, racb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -481,8 +481,8 @@ func (racb *RobotAccountCreateBulk) Save(ctx context.Context) ([]*RobotAccount, } // SaveX is like Save, but panics if an error occurs. -func (racb *RobotAccountCreateBulk) SaveX(ctx context.Context) []*RobotAccount { - v, err := racb.Save(ctx) +func (_c *RobotAccountCreateBulk) SaveX(ctx context.Context) []*RobotAccount { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -490,14 +490,14 @@ func (racb *RobotAccountCreateBulk) SaveX(ctx context.Context) []*RobotAccount { } // Exec executes the query. 
-func (racb *RobotAccountCreateBulk) Exec(ctx context.Context) error { - _, err := racb.Save(ctx) +func (_c *RobotAccountCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (racb *RobotAccountCreateBulk) ExecX(ctx context.Context) { - if err := racb.Exec(ctx); err != nil { +func (_c *RobotAccountCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -517,10 +517,10 @@ func (racb *RobotAccountCreateBulk) ExecX(ctx context.Context) { // SetName(v+v). // }). // Exec(ctx) -func (racb *RobotAccountCreateBulk) OnConflict(opts ...sql.ConflictOption) *RobotAccountUpsertBulk { - racb.conflict = opts +func (_c *RobotAccountCreateBulk) OnConflict(opts ...sql.ConflictOption) *RobotAccountUpsertBulk { + _c.conflict = opts return &RobotAccountUpsertBulk{ - create: racb, + create: _c, } } @@ -530,10 +530,10 @@ func (racb *RobotAccountCreateBulk) OnConflict(opts ...sql.ConflictOption) *Robo // client.RobotAccount.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (racb *RobotAccountCreateBulk) OnConflictColumns(columns ...string) *RobotAccountUpsertBulk { - racb.conflict = append(racb.conflict, sql.ConflictColumns(columns...)) +func (_c *RobotAccountCreateBulk) OnConflictColumns(columns ...string) *RobotAccountUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &RobotAccountUpsertBulk{ - create: racb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/robotaccount_delete.go b/app/controlplane/pkg/data/ent/robotaccount_delete.go index 5b26cebca..37da2f6c3 100644 --- a/app/controlplane/pkg/data/ent/robotaccount_delete.go +++ b/app/controlplane/pkg/data/ent/robotaccount_delete.go @@ -20,56 +20,56 @@ type RobotAccountDelete struct { } // Where appends a list predicates to the RobotAccountDelete builder. -func (rad *RobotAccountDelete) Where(ps ...predicate.RobotAccount) *RobotAccountDelete { - rad.mutation.Where(ps...) - return rad +func (_d *RobotAccountDelete) Where(ps ...predicate.RobotAccount) *RobotAccountDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (rad *RobotAccountDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, rad.sqlExec, rad.mutation, rad.hooks) +func (_d *RobotAccountDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. 
-func (rad *RobotAccountDelete) ExecX(ctx context.Context) int { - n, err := rad.Exec(ctx) +func (_d *RobotAccountDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (rad *RobotAccountDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *RobotAccountDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(robotaccount.Table, sqlgraph.NewFieldSpec(robotaccount.FieldID, field.TypeUUID)) - if ps := rad.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, rad.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - rad.mutation.done = true + _d.mutation.done = true return affected, err } // RobotAccountDeleteOne is the builder for deleting a single RobotAccount entity. type RobotAccountDeleteOne struct { - rad *RobotAccountDelete + _d *RobotAccountDelete } // Where appends a list predicates to the RobotAccountDelete builder. -func (rado *RobotAccountDeleteOne) Where(ps ...predicate.RobotAccount) *RobotAccountDeleteOne { - rado.rad.mutation.Where(ps...) - return rado +func (_d *RobotAccountDeleteOne) Where(ps ...predicate.RobotAccount) *RobotAccountDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (rado *RobotAccountDeleteOne) Exec(ctx context.Context) error { - n, err := rado.rad.Exec(ctx) +func (_d *RobotAccountDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (rado *RobotAccountDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (rado *RobotAccountDeleteOne) ExecX(ctx context.Context) { - if err := rado.Exec(ctx); err != nil { +func (_d *RobotAccountDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/robotaccount_query.go b/app/controlplane/pkg/data/ent/robotaccount_query.go index 765394074..421d01332 100644 --- a/app/controlplane/pkg/data/ent/robotaccount_query.go +++ b/app/controlplane/pkg/data/ent/robotaccount_query.go @@ -34,44 +34,44 @@ type RobotAccountQuery struct { } // Where adds a new predicate for the RobotAccountQuery builder. -func (raq *RobotAccountQuery) Where(ps ...predicate.RobotAccount) *RobotAccountQuery { - raq.predicates = append(raq.predicates, ps...) - return raq +func (_q *RobotAccountQuery) Where(ps ...predicate.RobotAccount) *RobotAccountQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (raq *RobotAccountQuery) Limit(limit int) *RobotAccountQuery { - raq.ctx.Limit = &limit - return raq +func (_q *RobotAccountQuery) Limit(limit int) *RobotAccountQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (raq *RobotAccountQuery) Offset(offset int) *RobotAccountQuery { - raq.ctx.Offset = &offset - return raq +func (_q *RobotAccountQuery) Offset(offset int) *RobotAccountQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
-func (raq *RobotAccountQuery) Unique(unique bool) *RobotAccountQuery { - raq.ctx.Unique = &unique - return raq +func (_q *RobotAccountQuery) Unique(unique bool) *RobotAccountQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (raq *RobotAccountQuery) Order(o ...robotaccount.OrderOption) *RobotAccountQuery { - raq.order = append(raq.order, o...) - return raq +func (_q *RobotAccountQuery) Order(o ...robotaccount.OrderOption) *RobotAccountQuery { + _q.order = append(_q.order, o...) + return _q } // QueryWorkflow chains the current query on the "workflow" edge. -func (raq *RobotAccountQuery) QueryWorkflow() *WorkflowQuery { - query := (&WorkflowClient{config: raq.config}).Query() +func (_q *RobotAccountQuery) QueryWorkflow() *WorkflowQuery { + query := (&WorkflowClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := raq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := raq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -80,7 +80,7 @@ func (raq *RobotAccountQuery) QueryWorkflow() *WorkflowQuery { sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, robotaccount.WorkflowTable, robotaccount.WorkflowColumn), ) - fromU = sqlgraph.SetNeighbors(raq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -88,8 +88,8 @@ func (raq *RobotAccountQuery) QueryWorkflow() *WorkflowQuery { // First returns the first RobotAccount entity from the query. // Returns a *NotFoundError when no RobotAccount was found. -func (raq *RobotAccountQuery) First(ctx context.Context) (*RobotAccount, error) { - nodes, err := raq.Limit(1).All(setContextOp(ctx, raq.ctx, ent.OpQueryFirst)) +func (_q *RobotAccountQuery) First(ctx context.Context) (*RobotAccount, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -100,8 +100,8 @@ func (raq *RobotAccountQuery) First(ctx context.Context) (*RobotAccount, error) } // FirstX is like First, but panics if an error occurs. -func (raq *RobotAccountQuery) FirstX(ctx context.Context) *RobotAccount { - node, err := raq.First(ctx) +func (_q *RobotAccountQuery) FirstX(ctx context.Context) *RobotAccount { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -110,9 +110,9 @@ func (raq *RobotAccountQuery) FirstX(ctx context.Context) *RobotAccount { // FirstID returns the first RobotAccount ID from the query. // Returns a *NotFoundError when no RobotAccount ID was found. -func (raq *RobotAccountQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *RobotAccountQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = raq.Limit(1).IDs(setContextOp(ctx, raq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -123,8 +123,8 @@ func (raq *RobotAccountQuery) FirstID(ctx context.Context) (id uuid.UUID, err er } // FirstIDX is like FirstID, but panics if an error occurs. 
-func (raq *RobotAccountQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := raq.FirstID(ctx) +func (_q *RobotAccountQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -134,8 +134,8 @@ func (raq *RobotAccountQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single RobotAccount entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one RobotAccount entity is found. // Returns a *NotFoundError when no RobotAccount entities are found. -func (raq *RobotAccountQuery) Only(ctx context.Context) (*RobotAccount, error) { - nodes, err := raq.Limit(2).All(setContextOp(ctx, raq.ctx, ent.OpQueryOnly)) +func (_q *RobotAccountQuery) Only(ctx context.Context) (*RobotAccount, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -150,8 +150,8 @@ func (raq *RobotAccountQuery) Only(ctx context.Context) (*RobotAccount, error) { } // OnlyX is like Only, but panics if an error occurs. -func (raq *RobotAccountQuery) OnlyX(ctx context.Context) *RobotAccount { - node, err := raq.Only(ctx) +func (_q *RobotAccountQuery) OnlyX(ctx context.Context) *RobotAccount { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -161,9 +161,9 @@ func (raq *RobotAccountQuery) OnlyX(ctx context.Context) *RobotAccount { // OnlyID is like Only, but returns the only RobotAccount ID in the query. // Returns a *NotSingularError when more than one RobotAccount ID is found. // Returns a *NotFoundError when no entities are found. -func (raq *RobotAccountQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *RobotAccountQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = raq.Limit(2).IDs(setContextOp(ctx, raq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -178,8 +178,8 @@ func (raq *RobotAccountQuery) OnlyID(ctx context.Context) (id uuid.UUID, err err } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (raq *RobotAccountQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := raq.OnlyID(ctx) +func (_q *RobotAccountQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -187,18 +187,18 @@ func (raq *RobotAccountQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of RobotAccounts. -func (raq *RobotAccountQuery) All(ctx context.Context) ([]*RobotAccount, error) { - ctx = setContextOp(ctx, raq.ctx, ent.OpQueryAll) - if err := raq.prepareQuery(ctx); err != nil { +func (_q *RobotAccountQuery) All(ctx context.Context) ([]*RobotAccount, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*RobotAccount, *RobotAccountQuery]() - return withInterceptors[[]*RobotAccount](ctx, raq, qr, raq.inters) + return withInterceptors[[]*RobotAccount](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. 
-func (raq *RobotAccountQuery) AllX(ctx context.Context) []*RobotAccount { - nodes, err := raq.All(ctx) +func (_q *RobotAccountQuery) AllX(ctx context.Context) []*RobotAccount { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -206,20 +206,20 @@ func (raq *RobotAccountQuery) AllX(ctx context.Context) []*RobotAccount { } // IDs executes the query and returns a list of RobotAccount IDs. -func (raq *RobotAccountQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if raq.ctx.Unique == nil && raq.path != nil { - raq.Unique(true) +func (_q *RobotAccountQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, raq.ctx, ent.OpQueryIDs) - if err = raq.Select(robotaccount.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(robotaccount.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (raq *RobotAccountQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := raq.IDs(ctx) +func (_q *RobotAccountQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -227,17 +227,17 @@ func (raq *RobotAccountQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (raq *RobotAccountQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, raq.ctx, ent.OpQueryCount) - if err := raq.prepareQuery(ctx); err != nil { +func (_q *RobotAccountQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, raq, querierCount[*RobotAccountQuery](), raq.inters) + return withInterceptors[int](ctx, _q, querierCount[*RobotAccountQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (raq *RobotAccountQuery) CountX(ctx context.Context) int { - count, err := raq.Count(ctx) +func (_q *RobotAccountQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -245,9 +245,9 @@ func (raq *RobotAccountQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (raq *RobotAccountQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, raq.ctx, ent.OpQueryExist) - switch _, err := raq.FirstID(ctx); { +func (_q *RobotAccountQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -258,8 +258,8 @@ func (raq *RobotAccountQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (raq *RobotAccountQuery) ExistX(ctx context.Context) bool { - exist, err := raq.Exist(ctx) +func (_q *RobotAccountQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -268,33 +268,33 @@ func (raq *RobotAccountQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the RobotAccountQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
-func (raq *RobotAccountQuery) Clone() *RobotAccountQuery { - if raq == nil { +func (_q *RobotAccountQuery) Clone() *RobotAccountQuery { + if _q == nil { return nil } return &RobotAccountQuery{ - config: raq.config, - ctx: raq.ctx.Clone(), - order: append([]robotaccount.OrderOption{}, raq.order...), - inters: append([]Interceptor{}, raq.inters...), - predicates: append([]predicate.RobotAccount{}, raq.predicates...), - withWorkflow: raq.withWorkflow.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]robotaccount.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.RobotAccount{}, _q.predicates...), + withWorkflow: _q.withWorkflow.Clone(), // clone intermediate query. - sql: raq.sql.Clone(), - path: raq.path, - modifiers: append([]func(*sql.Selector){}, raq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithWorkflow tells the query-builder to eager-load the nodes that are connected to // the "workflow" edge. The optional arguments are used to configure the query builder of the edge. -func (raq *RobotAccountQuery) WithWorkflow(opts ...func(*WorkflowQuery)) *RobotAccountQuery { - query := (&WorkflowClient{config: raq.config}).Query() +func (_q *RobotAccountQuery) WithWorkflow(opts ...func(*WorkflowQuery)) *RobotAccountQuery { + query := (&WorkflowClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - raq.withWorkflow = query - return raq + _q.withWorkflow = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -311,10 +311,10 @@ func (raq *RobotAccountQuery) WithWorkflow(opts ...func(*WorkflowQuery)) *RobotA // GroupBy(robotaccount.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (raq *RobotAccountQuery) GroupBy(field string, fields ...string) *RobotAccountGroupBy { - raq.ctx.Fields = append([]string{field}, fields...) - grbuild := &RobotAccountGroupBy{build: raq} - grbuild.flds = &raq.ctx.Fields +func (_q *RobotAccountQuery) GroupBy(field string, fields ...string) *RobotAccountGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &RobotAccountGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = robotaccount.Label grbuild.scan = grbuild.Scan return grbuild @@ -332,55 +332,55 @@ func (raq *RobotAccountQuery) GroupBy(field string, fields ...string) *RobotAcco // client.RobotAccount.Query(). // Select(robotaccount.FieldName). // Scan(ctx, &v) -func (raq *RobotAccountQuery) Select(fields ...string) *RobotAccountSelect { - raq.ctx.Fields = append(raq.ctx.Fields, fields...) - sbuild := &RobotAccountSelect{RobotAccountQuery: raq} +func (_q *RobotAccountQuery) Select(fields ...string) *RobotAccountSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &RobotAccountSelect{RobotAccountQuery: _q} sbuild.label = robotaccount.Label - sbuild.flds, sbuild.scan = &raq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a RobotAccountSelect configured with the given aggregations. -func (raq *RobotAccountQuery) Aggregate(fns ...AggregateFunc) *RobotAccountSelect { - return raq.Select().Aggregate(fns...) +func (_q *RobotAccountQuery) Aggregate(fns ...AggregateFunc) *RobotAccountSelect { + return _q.Select().Aggregate(fns...) 
} -func (raq *RobotAccountQuery) prepareQuery(ctx context.Context) error { - for _, inter := range raq.inters { +func (_q *RobotAccountQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, raq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range raq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !robotaccount.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if raq.path != nil { - prev, err := raq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - raq.sql = prev + _q.sql = prev } return nil } -func (raq *RobotAccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*RobotAccount, error) { +func (_q *RobotAccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*RobotAccount, error) { var ( nodes = []*RobotAccount{} - withFKs = raq.withFKs - _spec = raq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [1]bool{ - raq.withWorkflow != nil, + _q.withWorkflow != nil, } ) - if raq.withWorkflow != nil { + if _q.withWorkflow != nil { withFKs = true } if withFKs { @@ -390,25 +390,25 @@ func (raq *RobotAccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([ return (*RobotAccount).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &RobotAccount{config: raq.config} + node := &RobotAccount{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(raq.modifiers) > 0 { - _spec.Modifiers = raq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, raq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := raq.withWorkflow; query != nil { - if err := raq.loadWorkflow(ctx, query, nodes, nil, + if query := _q.withWorkflow; query != nil { + if err := _q.loadWorkflow(ctx, query, nodes, nil, func(n *RobotAccount, e *Workflow) { n.Edges.Workflow = e }); err != nil { return nil, err } @@ -416,7 +416,7 @@ func (raq *RobotAccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([ return nodes, nil } -func (raq *RobotAccountQuery) loadWorkflow(ctx context.Context, query *WorkflowQuery, nodes []*RobotAccount, init func(*RobotAccount), assign func(*RobotAccount, *Workflow)) error { +func (_q *RobotAccountQuery) loadWorkflow(ctx context.Context, query *WorkflowQuery, nodes []*RobotAccount, init func(*RobotAccount), assign func(*RobotAccount, *Workflow)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*RobotAccount) for i := range nodes { @@ -449,27 +449,27 @@ func (raq *RobotAccountQuery) loadWorkflow(ctx context.Context, query *WorkflowQ return nil } -func (raq *RobotAccountQuery) sqlCount(ctx context.Context) (int, error) { - _spec := raq.querySpec() - if len(raq.modifiers) > 0 { - _spec.Modifiers = raq.modifiers +func (_q *RobotAccountQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = raq.ctx.Fields - if len(raq.ctx.Fields) > 0 { - 
_spec.Unique = raq.ctx.Unique != nil && *raq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, raq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (raq *RobotAccountQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *RobotAccountQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(robotaccount.Table, robotaccount.Columns, sqlgraph.NewFieldSpec(robotaccount.FieldID, field.TypeUUID)) - _spec.From = raq.sql - if unique := raq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if raq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := raq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, robotaccount.FieldID) for i := range fields { @@ -478,20 +478,20 @@ func (raq *RobotAccountQuery) querySpec() *sqlgraph.QuerySpec { } } } - if ps := raq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := raq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := raq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := raq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -501,36 +501,36 @@ func (raq *RobotAccountQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (raq *RobotAccountQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(raq.driver.Dialect()) +func (_q *RobotAccountQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(robotaccount.Table) - columns := raq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = robotaccount.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if raq.sql != nil { - selector = raq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if raq.ctx.Unique != nil && *raq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range raq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range raq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range raq.order { + for _, p := range _q.order { p(selector) } - if offset := raq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := raq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -539,33 +539,33 @@ func (raq *RobotAccountQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. 
-func (raq *RobotAccountQuery) ForUpdate(opts ...sql.LockOption) *RobotAccountQuery { - if raq.driver.Dialect() == dialect.Postgres { - raq.Unique(false) +func (_q *RobotAccountQuery) ForUpdate(opts ...sql.LockOption) *RobotAccountQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - raq.modifiers = append(raq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return raq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (raq *RobotAccountQuery) ForShare(opts ...sql.LockOption) *RobotAccountQuery { - if raq.driver.Dialect() == dialect.Postgres { - raq.Unique(false) +func (_q *RobotAccountQuery) ForShare(opts ...sql.LockOption) *RobotAccountQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - raq.modifiers = append(raq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return raq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (raq *RobotAccountQuery) Modify(modifiers ...func(s *sql.Selector)) *RobotAccountSelect { - raq.modifiers = append(raq.modifiers, modifiers...) - return raq.Select() +func (_q *RobotAccountQuery) Modify(modifiers ...func(s *sql.Selector)) *RobotAccountSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // RobotAccountGroupBy is the group-by builder for RobotAccount entities. @@ -575,41 +575,41 @@ type RobotAccountGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (ragb *RobotAccountGroupBy) Aggregate(fns ...AggregateFunc) *RobotAccountGroupBy { - ragb.fns = append(ragb.fns, fns...) - return ragb +func (_g *RobotAccountGroupBy) Aggregate(fns ...AggregateFunc) *RobotAccountGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (ragb *RobotAccountGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ragb.build.ctx, ent.OpQueryGroupBy) - if err := ragb.build.prepareQuery(ctx); err != nil { +func (_g *RobotAccountGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*RobotAccountQuery, *RobotAccountGroupBy](ctx, ragb.build, ragb, ragb.build.inters, v) + return scanWithInterceptors[*RobotAccountQuery, *RobotAccountGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (ragb *RobotAccountGroupBy) sqlScan(ctx context.Context, root *RobotAccountQuery, v any) error { +func (_g *RobotAccountGroupBy) sqlScan(ctx context.Context, root *RobotAccountQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(ragb.fns)) - for _, fn := range ragb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*ragb.flds)+len(ragb.fns)) - for _, f := range *ragb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) 
selector.Select(columns...) } - selector.GroupBy(selector.Columns(*ragb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := ragb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -623,27 +623,27 @@ type RobotAccountSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (ras *RobotAccountSelect) Aggregate(fns ...AggregateFunc) *RobotAccountSelect { - ras.fns = append(ras.fns, fns...) - return ras +func (_s *RobotAccountSelect) Aggregate(fns ...AggregateFunc) *RobotAccountSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (ras *RobotAccountSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ras.ctx, ent.OpQuerySelect) - if err := ras.prepareQuery(ctx); err != nil { +func (_s *RobotAccountSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*RobotAccountQuery, *RobotAccountSelect](ctx, ras.RobotAccountQuery, ras, ras.inters, v) + return scanWithInterceptors[*RobotAccountQuery, *RobotAccountSelect](ctx, _s.RobotAccountQuery, _s, _s.inters, v) } -func (ras *RobotAccountSelect) sqlScan(ctx context.Context, root *RobotAccountQuery, v any) error { +func (_s *RobotAccountSelect) sqlScan(ctx context.Context, root *RobotAccountQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ras.fns)) - for _, fn := range ras.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*ras.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -651,7 +651,7 @@ func (ras *RobotAccountSelect) sqlScan(ctx context.Context, root *RobotAccountQu } rows := &sql.Rows{} query, args := selector.Query() - if err := ras.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -659,7 +659,7 @@ func (ras *RobotAccountSelect) sqlScan(ctx context.Context, root *RobotAccountQu } // Modify adds a query modifier for attaching custom logic to queries. -func (ras *RobotAccountSelect) Modify(modifiers ...func(s *sql.Selector)) *RobotAccountSelect { - ras.modifiers = append(ras.modifiers, modifiers...) - return ras +func (_s *RobotAccountSelect) Modify(modifiers ...func(s *sql.Selector)) *RobotAccountSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/robotaccount_update.go b/app/controlplane/pkg/data/ent/robotaccount_update.go index 2da40015b..d5317ae7b 100644 --- a/app/controlplane/pkg/data/ent/robotaccount_update.go +++ b/app/controlplane/pkg/data/ent/robotaccount_update.go @@ -26,83 +26,83 @@ type RobotAccountUpdate struct { } // Where appends a list predicates to the RobotAccountUpdate builder. -func (rau *RobotAccountUpdate) Where(ps ...predicate.RobotAccount) *RobotAccountUpdate { - rau.mutation.Where(ps...) 
- return rau +func (_u *RobotAccountUpdate) Where(ps ...predicate.RobotAccount) *RobotAccountUpdate { + _u.mutation.Where(ps...) + return _u } // SetName sets the "name" field. -func (rau *RobotAccountUpdate) SetName(s string) *RobotAccountUpdate { - rau.mutation.SetName(s) - return rau +func (_u *RobotAccountUpdate) SetName(v string) *RobotAccountUpdate { + _u.mutation.SetName(v) + return _u } // SetNillableName sets the "name" field if the given value is not nil. -func (rau *RobotAccountUpdate) SetNillableName(s *string) *RobotAccountUpdate { - if s != nil { - rau.SetName(*s) +func (_u *RobotAccountUpdate) SetNillableName(v *string) *RobotAccountUpdate { + if v != nil { + _u.SetName(*v) } - return rau + return _u } // SetRevokedAt sets the "revoked_at" field. -func (rau *RobotAccountUpdate) SetRevokedAt(t time.Time) *RobotAccountUpdate { - rau.mutation.SetRevokedAt(t) - return rau +func (_u *RobotAccountUpdate) SetRevokedAt(v time.Time) *RobotAccountUpdate { + _u.mutation.SetRevokedAt(v) + return _u } // SetNillableRevokedAt sets the "revoked_at" field if the given value is not nil. -func (rau *RobotAccountUpdate) SetNillableRevokedAt(t *time.Time) *RobotAccountUpdate { - if t != nil { - rau.SetRevokedAt(*t) +func (_u *RobotAccountUpdate) SetNillableRevokedAt(v *time.Time) *RobotAccountUpdate { + if v != nil { + _u.SetRevokedAt(*v) } - return rau + return _u } // ClearRevokedAt clears the value of the "revoked_at" field. -func (rau *RobotAccountUpdate) ClearRevokedAt() *RobotAccountUpdate { - rau.mutation.ClearRevokedAt() - return rau +func (_u *RobotAccountUpdate) ClearRevokedAt() *RobotAccountUpdate { + _u.mutation.ClearRevokedAt() + return _u } // SetWorkflowID sets the "workflow" edge to the Workflow entity by ID. -func (rau *RobotAccountUpdate) SetWorkflowID(id uuid.UUID) *RobotAccountUpdate { - rau.mutation.SetWorkflowID(id) - return rau +func (_u *RobotAccountUpdate) SetWorkflowID(id uuid.UUID) *RobotAccountUpdate { + _u.mutation.SetWorkflowID(id) + return _u } // SetNillableWorkflowID sets the "workflow" edge to the Workflow entity by ID if the given value is not nil. -func (rau *RobotAccountUpdate) SetNillableWorkflowID(id *uuid.UUID) *RobotAccountUpdate { +func (_u *RobotAccountUpdate) SetNillableWorkflowID(id *uuid.UUID) *RobotAccountUpdate { if id != nil { - rau = rau.SetWorkflowID(*id) + _u = _u.SetWorkflowID(*id) } - return rau + return _u } // SetWorkflow sets the "workflow" edge to the Workflow entity. -func (rau *RobotAccountUpdate) SetWorkflow(w *Workflow) *RobotAccountUpdate { - return rau.SetWorkflowID(w.ID) +func (_u *RobotAccountUpdate) SetWorkflow(v *Workflow) *RobotAccountUpdate { + return _u.SetWorkflowID(v.ID) } // Mutation returns the RobotAccountMutation object of the builder. -func (rau *RobotAccountUpdate) Mutation() *RobotAccountMutation { - return rau.mutation +func (_u *RobotAccountUpdate) Mutation() *RobotAccountMutation { + return _u.mutation } // ClearWorkflow clears the "workflow" edge to the Workflow entity. -func (rau *RobotAccountUpdate) ClearWorkflow() *RobotAccountUpdate { - rau.mutation.ClearWorkflow() - return rau +func (_u *RobotAccountUpdate) ClearWorkflow() *RobotAccountUpdate { + _u.mutation.ClearWorkflow() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. 
-func (rau *RobotAccountUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, rau.sqlSave, rau.mutation, rau.hooks) +func (_u *RobotAccountUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (rau *RobotAccountUpdate) SaveX(ctx context.Context) int { - affected, err := rau.Save(ctx) +func (_u *RobotAccountUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -110,43 +110,43 @@ func (rau *RobotAccountUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (rau *RobotAccountUpdate) Exec(ctx context.Context) error { - _, err := rau.Save(ctx) +func (_u *RobotAccountUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (rau *RobotAccountUpdate) ExecX(ctx context.Context) { - if err := rau.Exec(ctx); err != nil { +func (_u *RobotAccountUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (rau *RobotAccountUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *RobotAccountUpdate { - rau.modifiers = append(rau.modifiers, modifiers...) - return rau +func (_u *RobotAccountUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *RobotAccountUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (rau *RobotAccountUpdate) sqlSave(ctx context.Context) (n int, err error) { +func (_u *RobotAccountUpdate) sqlSave(ctx context.Context) (_node int, err error) { _spec := sqlgraph.NewUpdateSpec(robotaccount.Table, robotaccount.Columns, sqlgraph.NewFieldSpec(robotaccount.FieldID, field.TypeUUID)) - if ps := rau.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := rau.mutation.Name(); ok { + if value, ok := _u.mutation.Name(); ok { _spec.SetField(robotaccount.FieldName, field.TypeString, value) } - if value, ok := rau.mutation.RevokedAt(); ok { + if value, ok := _u.mutation.RevokedAt(); ok { _spec.SetField(robotaccount.FieldRevokedAt, field.TypeTime, value) } - if rau.mutation.RevokedAtCleared() { + if _u.mutation.RevokedAtCleared() { _spec.ClearField(robotaccount.FieldRevokedAt, field.TypeTime) } - if rau.mutation.WorkflowCleared() { + if _u.mutation.WorkflowCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -159,7 +159,7 @@ func (rau *RobotAccountUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := rau.mutation.WorkflowIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -175,8 +175,8 @@ func (rau *RobotAccountUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(rau.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, rau.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) 
+ if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{robotaccount.Label} } else if sqlgraph.IsConstraintError(err) { @@ -184,8 +184,8 @@ func (rau *RobotAccountUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - rau.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // RobotAccountUpdateOne is the builder for updating a single RobotAccount entity. @@ -198,90 +198,90 @@ type RobotAccountUpdateOne struct { } // SetName sets the "name" field. -func (rauo *RobotAccountUpdateOne) SetName(s string) *RobotAccountUpdateOne { - rauo.mutation.SetName(s) - return rauo +func (_u *RobotAccountUpdateOne) SetName(v string) *RobotAccountUpdateOne { + _u.mutation.SetName(v) + return _u } // SetNillableName sets the "name" field if the given value is not nil. -func (rauo *RobotAccountUpdateOne) SetNillableName(s *string) *RobotAccountUpdateOne { - if s != nil { - rauo.SetName(*s) +func (_u *RobotAccountUpdateOne) SetNillableName(v *string) *RobotAccountUpdateOne { + if v != nil { + _u.SetName(*v) } - return rauo + return _u } // SetRevokedAt sets the "revoked_at" field. -func (rauo *RobotAccountUpdateOne) SetRevokedAt(t time.Time) *RobotAccountUpdateOne { - rauo.mutation.SetRevokedAt(t) - return rauo +func (_u *RobotAccountUpdateOne) SetRevokedAt(v time.Time) *RobotAccountUpdateOne { + _u.mutation.SetRevokedAt(v) + return _u } // SetNillableRevokedAt sets the "revoked_at" field if the given value is not nil. -func (rauo *RobotAccountUpdateOne) SetNillableRevokedAt(t *time.Time) *RobotAccountUpdateOne { - if t != nil { - rauo.SetRevokedAt(*t) +func (_u *RobotAccountUpdateOne) SetNillableRevokedAt(v *time.Time) *RobotAccountUpdateOne { + if v != nil { + _u.SetRevokedAt(*v) } - return rauo + return _u } // ClearRevokedAt clears the value of the "revoked_at" field. -func (rauo *RobotAccountUpdateOne) ClearRevokedAt() *RobotAccountUpdateOne { - rauo.mutation.ClearRevokedAt() - return rauo +func (_u *RobotAccountUpdateOne) ClearRevokedAt() *RobotAccountUpdateOne { + _u.mutation.ClearRevokedAt() + return _u } // SetWorkflowID sets the "workflow" edge to the Workflow entity by ID. -func (rauo *RobotAccountUpdateOne) SetWorkflowID(id uuid.UUID) *RobotAccountUpdateOne { - rauo.mutation.SetWorkflowID(id) - return rauo +func (_u *RobotAccountUpdateOne) SetWorkflowID(id uuid.UUID) *RobotAccountUpdateOne { + _u.mutation.SetWorkflowID(id) + return _u } // SetNillableWorkflowID sets the "workflow" edge to the Workflow entity by ID if the given value is not nil. -func (rauo *RobotAccountUpdateOne) SetNillableWorkflowID(id *uuid.UUID) *RobotAccountUpdateOne { +func (_u *RobotAccountUpdateOne) SetNillableWorkflowID(id *uuid.UUID) *RobotAccountUpdateOne { if id != nil { - rauo = rauo.SetWorkflowID(*id) + _u = _u.SetWorkflowID(*id) } - return rauo + return _u } // SetWorkflow sets the "workflow" edge to the Workflow entity. -func (rauo *RobotAccountUpdateOne) SetWorkflow(w *Workflow) *RobotAccountUpdateOne { - return rauo.SetWorkflowID(w.ID) +func (_u *RobotAccountUpdateOne) SetWorkflow(v *Workflow) *RobotAccountUpdateOne { + return _u.SetWorkflowID(v.ID) } // Mutation returns the RobotAccountMutation object of the builder. -func (rauo *RobotAccountUpdateOne) Mutation() *RobotAccountMutation { - return rauo.mutation +func (_u *RobotAccountUpdateOne) Mutation() *RobotAccountMutation { + return _u.mutation } // ClearWorkflow clears the "workflow" edge to the Workflow entity. 
-func (rauo *RobotAccountUpdateOne) ClearWorkflow() *RobotAccountUpdateOne { - rauo.mutation.ClearWorkflow() - return rauo +func (_u *RobotAccountUpdateOne) ClearWorkflow() *RobotAccountUpdateOne { + _u.mutation.ClearWorkflow() + return _u } // Where appends a list predicates to the RobotAccountUpdate builder. -func (rauo *RobotAccountUpdateOne) Where(ps ...predicate.RobotAccount) *RobotAccountUpdateOne { - rauo.mutation.Where(ps...) - return rauo +func (_u *RobotAccountUpdateOne) Where(ps ...predicate.RobotAccount) *RobotAccountUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (rauo *RobotAccountUpdateOne) Select(field string, fields ...string) *RobotAccountUpdateOne { - rauo.fields = append([]string{field}, fields...) - return rauo +func (_u *RobotAccountUpdateOne) Select(field string, fields ...string) *RobotAccountUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated RobotAccount entity. -func (rauo *RobotAccountUpdateOne) Save(ctx context.Context) (*RobotAccount, error) { - return withHooks(ctx, rauo.sqlSave, rauo.mutation, rauo.hooks) +func (_u *RobotAccountUpdateOne) Save(ctx context.Context) (*RobotAccount, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (rauo *RobotAccountUpdateOne) SaveX(ctx context.Context) *RobotAccount { - node, err := rauo.Save(ctx) +func (_u *RobotAccountUpdateOne) SaveX(ctx context.Context) *RobotAccount { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -289,32 +289,32 @@ func (rauo *RobotAccountUpdateOne) SaveX(ctx context.Context) *RobotAccount { } // Exec executes the query on the entity. -func (rauo *RobotAccountUpdateOne) Exec(ctx context.Context) error { - _, err := rauo.Save(ctx) +func (_u *RobotAccountUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (rauo *RobotAccountUpdateOne) ExecX(ctx context.Context) { - if err := rauo.Exec(ctx); err != nil { +func (_u *RobotAccountUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (rauo *RobotAccountUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *RobotAccountUpdateOne { - rauo.modifiers = append(rauo.modifiers, modifiers...) - return rauo +func (_u *RobotAccountUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *RobotAccountUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (rauo *RobotAccountUpdateOne) sqlSave(ctx context.Context) (_node *RobotAccount, err error) { +func (_u *RobotAccountUpdateOne) sqlSave(ctx context.Context) (_node *RobotAccount, err error) { _spec := sqlgraph.NewUpdateSpec(robotaccount.Table, robotaccount.Columns, sqlgraph.NewFieldSpec(robotaccount.FieldID, field.TypeUUID)) - id, ok := rauo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "RobotAccount.id" for update`)} } _spec.Node.ID.Value = id - if fields := rauo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, robotaccount.FieldID) for _, f := range fields { @@ -326,23 +326,23 @@ func (rauo *RobotAccountUpdateOne) sqlSave(ctx context.Context) (_node *RobotAcc } } } - if ps := rauo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := rauo.mutation.Name(); ok { + if value, ok := _u.mutation.Name(); ok { _spec.SetField(robotaccount.FieldName, field.TypeString, value) } - if value, ok := rauo.mutation.RevokedAt(); ok { + if value, ok := _u.mutation.RevokedAt(); ok { _spec.SetField(robotaccount.FieldRevokedAt, field.TypeTime, value) } - if rauo.mutation.RevokedAtCleared() { + if _u.mutation.RevokedAtCleared() { _spec.ClearField(robotaccount.FieldRevokedAt, field.TypeTime) } - if rauo.mutation.WorkflowCleared() { + if _u.mutation.WorkflowCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -355,7 +355,7 @@ func (rauo *RobotAccountUpdateOne) sqlSave(ctx context.Context) (_node *RobotAcc } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := rauo.mutation.WorkflowIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -371,11 +371,11 @@ func (rauo *RobotAccountUpdateOne) sqlSave(ctx context.Context) (_node *RobotAcc } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(rauo.modifiers...) - _node = &RobotAccount{config: rauo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &RobotAccount{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, rauo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{robotaccount.Label} } else if sqlgraph.IsConstraintError(err) { @@ -383,6 +383,6 @@ func (rauo *RobotAccountUpdateOne) sqlSave(ctx context.Context) (_node *RobotAcc } return nil, err } - rauo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/runtime/runtime.go b/app/controlplane/pkg/data/ent/runtime/runtime.go index 7359b505e..17a3a0a5d 100644 --- a/app/controlplane/pkg/data/ent/runtime/runtime.go +++ b/app/controlplane/pkg/data/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/chainloop-dev/chainloop/app/controlplane/pkg/data/ent/runtime.go const ( - Version = "v0.14.4" // Version of ent codegen. - Sum = "h1:/DhDraSLXIkBhyiVoJeSshr4ZYi7femzhj6/TckzZuI=" // Sum of ent codegen. + Version = "v0.14.6-0.20251003170342-01063ef6395c" // Version of ent codegen. 
+ Sum = "h1:74zQRklceH5iran5LRWX4KyIHHlwRadGVpEn3JieC4A=" // Sum of ent codegen. ) diff --git a/app/controlplane/pkg/data/ent/user.go b/app/controlplane/pkg/data/ent/user.go index 43f1d5332..3930527df 100644 --- a/app/controlplane/pkg/data/ent/user.go +++ b/app/controlplane/pkg/data/ent/user.go @@ -87,7 +87,7 @@ func (*User) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the User fields. -func (u *User) assignValues(columns []string, values []any) error { +func (_m *User) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -97,47 +97,47 @@ func (u *User) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - u.ID = *value + _m.ID = *value } case user.FieldEmail: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field email", values[i]) } else if value.Valid { - u.Email = value.String + _m.Email = value.String } case user.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - u.CreatedAt = value.Time + _m.CreatedAt = value.Time } case user.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - u.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case user.FieldHasRestrictedAccess: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field has_restricted_access", values[i]) } else if value.Valid { - u.HasRestrictedAccess = new(bool) - *u.HasRestrictedAccess = value.Bool + _m.HasRestrictedAccess = new(bool) + *_m.HasRestrictedAccess = value.Bool } case user.FieldFirstName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field first_name", values[i]) } else if value.Valid { - u.FirstName = value.String + _m.FirstName = value.String } case user.FieldLastName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field last_name", values[i]) } else if value.Valid { - u.LastName = value.String + _m.LastName = value.String } default: - u.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -145,62 +145,62 @@ func (u *User) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the User. // This includes values selected through modifiers, order, etc. -func (u *User) Value(name string) (ent.Value, error) { - return u.selectValues.Get(name) +func (_m *User) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryMemberships queries the "memberships" edge of the User entity. -func (u *User) QueryMemberships() *MembershipQuery { - return NewUserClient(u.config).QueryMemberships(u) +func (_m *User) QueryMemberships() *MembershipQuery { + return NewUserClient(_m.config).QueryMemberships(_m) } // QueryGroupMemberships queries the "group_memberships" edge of the User entity. 
-func (u *User) QueryGroupMemberships() *GroupMembershipQuery { - return NewUserClient(u.config).QueryGroupMemberships(u) +func (_m *User) QueryGroupMemberships() *GroupMembershipQuery { + return NewUserClient(_m.config).QueryGroupMemberships(_m) } // Update returns a builder for updating this User. // Note that you need to call User.Unwrap() before calling this method if this User // was returned from a transaction, and the transaction was committed or rolled back. -func (u *User) Update() *UserUpdateOne { - return NewUserClient(u.config).UpdateOne(u) +func (_m *User) Update() *UserUpdateOne { + return NewUserClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the User entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (u *User) Unwrap() *User { - _tx, ok := u.config.driver.(*txDriver) +func (_m *User) Unwrap() *User { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: User is not a transactional entity") } - u.config.driver = _tx.drv - return u + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (u *User) String() string { +func (_m *User) String() string { var builder strings.Builder builder.WriteString("User(") - builder.WriteString(fmt.Sprintf("id=%v, ", u.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("email=") - builder.WriteString(u.Email) + builder.WriteString(_m.Email) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(u.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(u.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := u.HasRestrictedAccess; v != nil { + if v := _m.HasRestrictedAccess; v != nil { builder.WriteString("has_restricted_access=") builder.WriteString(fmt.Sprintf("%v", *v)) } builder.WriteString(", ") builder.WriteString("first_name=") - builder.WriteString(u.FirstName) + builder.WriteString(_m.FirstName) builder.WriteString(", ") builder.WriteString("last_name=") - builder.WriteString(u.LastName) + builder.WriteString(_m.LastName) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/user_create.go b/app/controlplane/pkg/data/ent/user_create.go index 256cab6b2..5579461ee 100644 --- a/app/controlplane/pkg/data/ent/user_create.go +++ b/app/controlplane/pkg/data/ent/user_create.go @@ -27,139 +27,139 @@ type UserCreate struct { } // SetEmail sets the "email" field. -func (uc *UserCreate) SetEmail(s string) *UserCreate { - uc.mutation.SetEmail(s) - return uc +func (_c *UserCreate) SetEmail(v string) *UserCreate { + _c.mutation.SetEmail(v) + return _c } // SetCreatedAt sets the "created_at" field. -func (uc *UserCreate) SetCreatedAt(t time.Time) *UserCreate { - uc.mutation.SetCreatedAt(t) - return uc +func (_c *UserCreate) SetCreatedAt(v time.Time) *UserCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (uc *UserCreate) SetNillableCreatedAt(t *time.Time) *UserCreate { - if t != nil { - uc.SetCreatedAt(*t) +func (_c *UserCreate) SetNillableCreatedAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return uc + return _c } // SetUpdatedAt sets the "updated_at" field. 
-func (uc *UserCreate) SetUpdatedAt(t time.Time) *UserCreate { - uc.mutation.SetUpdatedAt(t) - return uc +func (_c *UserCreate) SetUpdatedAt(v time.Time) *UserCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (uc *UserCreate) SetNillableUpdatedAt(t *time.Time) *UserCreate { - if t != nil { - uc.SetUpdatedAt(*t) +func (_c *UserCreate) SetNillableUpdatedAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return uc + return _c } // SetHasRestrictedAccess sets the "has_restricted_access" field. -func (uc *UserCreate) SetHasRestrictedAccess(b bool) *UserCreate { - uc.mutation.SetHasRestrictedAccess(b) - return uc +func (_c *UserCreate) SetHasRestrictedAccess(v bool) *UserCreate { + _c.mutation.SetHasRestrictedAccess(v) + return _c } // SetNillableHasRestrictedAccess sets the "has_restricted_access" field if the given value is not nil. -func (uc *UserCreate) SetNillableHasRestrictedAccess(b *bool) *UserCreate { - if b != nil { - uc.SetHasRestrictedAccess(*b) +func (_c *UserCreate) SetNillableHasRestrictedAccess(v *bool) *UserCreate { + if v != nil { + _c.SetHasRestrictedAccess(*v) } - return uc + return _c } // SetFirstName sets the "first_name" field. -func (uc *UserCreate) SetFirstName(s string) *UserCreate { - uc.mutation.SetFirstName(s) - return uc +func (_c *UserCreate) SetFirstName(v string) *UserCreate { + _c.mutation.SetFirstName(v) + return _c } // SetNillableFirstName sets the "first_name" field if the given value is not nil. -func (uc *UserCreate) SetNillableFirstName(s *string) *UserCreate { - if s != nil { - uc.SetFirstName(*s) +func (_c *UserCreate) SetNillableFirstName(v *string) *UserCreate { + if v != nil { + _c.SetFirstName(*v) } - return uc + return _c } // SetLastName sets the "last_name" field. -func (uc *UserCreate) SetLastName(s string) *UserCreate { - uc.mutation.SetLastName(s) - return uc +func (_c *UserCreate) SetLastName(v string) *UserCreate { + _c.mutation.SetLastName(v) + return _c } // SetNillableLastName sets the "last_name" field if the given value is not nil. -func (uc *UserCreate) SetNillableLastName(s *string) *UserCreate { - if s != nil { - uc.SetLastName(*s) +func (_c *UserCreate) SetNillableLastName(v *string) *UserCreate { + if v != nil { + _c.SetLastName(*v) } - return uc + return _c } // SetID sets the "id" field. -func (uc *UserCreate) SetID(u uuid.UUID) *UserCreate { - uc.mutation.SetID(u) - return uc +func (_c *UserCreate) SetID(v uuid.UUID) *UserCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (uc *UserCreate) SetNillableID(u *uuid.UUID) *UserCreate { - if u != nil { - uc.SetID(*u) +func (_c *UserCreate) SetNillableID(v *uuid.UUID) *UserCreate { + if v != nil { + _c.SetID(*v) } - return uc + return _c } // AddMembershipIDs adds the "memberships" edge to the Membership entity by IDs. -func (uc *UserCreate) AddMembershipIDs(ids ...uuid.UUID) *UserCreate { - uc.mutation.AddMembershipIDs(ids...) - return uc +func (_c *UserCreate) AddMembershipIDs(ids ...uuid.UUID) *UserCreate { + _c.mutation.AddMembershipIDs(ids...) + return _c } // AddMemberships adds the "memberships" edges to the Membership entity. 
-func (uc *UserCreate) AddMemberships(m ...*Membership) *UserCreate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_c *UserCreate) AddMemberships(v ...*Membership) *UserCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uc.AddMembershipIDs(ids...) + return _c.AddMembershipIDs(ids...) } // AddGroupMembershipIDs adds the "group_memberships" edge to the GroupMembership entity by IDs. -func (uc *UserCreate) AddGroupMembershipIDs(ids ...uuid.UUID) *UserCreate { - uc.mutation.AddGroupMembershipIDs(ids...) - return uc +func (_c *UserCreate) AddGroupMembershipIDs(ids ...uuid.UUID) *UserCreate { + _c.mutation.AddGroupMembershipIDs(ids...) + return _c } // AddGroupMemberships adds the "group_memberships" edges to the GroupMembership entity. -func (uc *UserCreate) AddGroupMemberships(g ...*GroupMembership) *UserCreate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_c *UserCreate) AddGroupMemberships(v ...*GroupMembership) *UserCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uc.AddGroupMembershipIDs(ids...) + return _c.AddGroupMembershipIDs(ids...) } // Mutation returns the UserMutation object of the builder. -func (uc *UserCreate) Mutation() *UserMutation { - return uc.mutation +func (_c *UserCreate) Mutation() *UserMutation { + return _c.mutation } // Save creates the User in the database. -func (uc *UserCreate) Save(ctx context.Context) (*User, error) { - uc.defaults() - return withHooks(ctx, uc.sqlSave, uc.mutation, uc.hooks) +func (_c *UserCreate) Save(ctx context.Context) (*User, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (uc *UserCreate) SaveX(ctx context.Context) *User { - v, err := uc.Save(ctx) +func (_c *UserCreate) SaveX(ctx context.Context) *User { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -167,59 +167,59 @@ func (uc *UserCreate) SaveX(ctx context.Context) *User { } // Exec executes the query. -func (uc *UserCreate) Exec(ctx context.Context) error { - _, err := uc.Save(ctx) +func (_c *UserCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (uc *UserCreate) ExecX(ctx context.Context) { - if err := uc.Exec(ctx); err != nil { +func (_c *UserCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (uc *UserCreate) defaults() { - if _, ok := uc.mutation.CreatedAt(); !ok { +func (_c *UserCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := user.DefaultCreatedAt() - uc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := uc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := user.DefaultUpdatedAt() - uc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := uc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := user.DefaultID() - uc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (uc *UserCreate) check() error { - if _, ok := uc.mutation.Email(); !ok { +func (_c *UserCreate) check() error { + if _, ok := _c.mutation.Email(); !ok { return &ValidationError{Name: "email", err: errors.New(`ent: missing required field "User.email"`)} } - if v, ok := uc.mutation.Email(); ok { + if v, ok := _c.mutation.Email(); ok { if err := user.EmailValidator(v); err != nil { return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} } } - if _, ok := uc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "User.created_at"`)} } - if _, ok := uc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "User.updated_at"`)} } return nil } -func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) { - if err := uc.check(); err != nil { +func (_c *UserCreate) sqlSave(ctx context.Context) (*User, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := uc.createSpec() - if err := sqlgraph.CreateNode(ctx, uc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -232,46 +232,46 @@ func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) { return nil, err } } - uc.mutation.id = &_node.ID - uc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { +func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { var ( - _node = &User{config: uc.config} + _node = &User{config: _c.config} _spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) ) - _spec.OnConflict = uc.conflict - if id, ok := uc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := uc.mutation.Email(); ok { + if value, ok := _c.mutation.Email(); ok { _spec.SetField(user.FieldEmail, field.TypeString, value) _node.Email = value } - if value, ok := uc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := uc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := uc.mutation.HasRestrictedAccess(); ok { + if value, ok := _c.mutation.HasRestrictedAccess(); ok { _spec.SetField(user.FieldHasRestrictedAccess, field.TypeBool, value) _node.HasRestrictedAccess = &value } - if value, ok := uc.mutation.FirstName(); ok { + if value, ok := _c.mutation.FirstName(); ok { _spec.SetField(user.FieldFirstName, field.TypeString, value) _node.FirstName = value } - if value, ok := uc.mutation.LastName(); ok { + if value, ok := _c.mutation.LastName(); ok { _spec.SetField(user.FieldLastName, field.TypeString, value) _node.LastName = value } - if nodes := uc.mutation.MembershipsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.MembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -287,7 +287,7 @@ func (uc *UserCreate) createSpec() (*User, 
*sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := uc.mutation.GroupMembershipsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.GroupMembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -322,10 +322,10 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { // SetEmail(v+v). // }). // Exec(ctx) -func (uc *UserCreate) OnConflict(opts ...sql.ConflictOption) *UserUpsertOne { - uc.conflict = opts +func (_c *UserCreate) OnConflict(opts ...sql.ConflictOption) *UserUpsertOne { + _c.conflict = opts return &UserUpsertOne{ - create: uc, + create: _c, } } @@ -335,10 +335,10 @@ func (uc *UserCreate) OnConflict(opts ...sql.ConflictOption) *UserUpsertOne { // client.User.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (uc *UserCreate) OnConflictColumns(columns ...string) *UserUpsertOne { - uc.conflict = append(uc.conflict, sql.ConflictColumns(columns...)) +func (_c *UserCreate) OnConflictColumns(columns ...string) *UserUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &UserUpsertOne{ - create: uc, + create: _c, } } @@ -622,16 +622,16 @@ type UserCreateBulk struct { } // Save creates the User entities in the database. -func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { - if ucb.err != nil { - return nil, ucb.err - } - specs := make([]*sqlgraph.CreateSpec, len(ucb.builders)) - nodes := make([]*User, len(ucb.builders)) - mutators := make([]Mutator, len(ucb.builders)) - for i := range ucb.builders { +func (_c *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*User, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := ucb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*UserMutation) @@ -645,12 +645,12 @@ func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = ucb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, ucb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -670,7 +670,7 @@ func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, ucb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -678,8 +678,8 @@ func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { } // SaveX is like Save, but panics if an error occurs. 
-func (ucb *UserCreateBulk) SaveX(ctx context.Context) []*User { - v, err := ucb.Save(ctx) +func (_c *UserCreateBulk) SaveX(ctx context.Context) []*User { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -687,14 +687,14 @@ func (ucb *UserCreateBulk) SaveX(ctx context.Context) []*User { } // Exec executes the query. -func (ucb *UserCreateBulk) Exec(ctx context.Context) error { - _, err := ucb.Save(ctx) +func (_c *UserCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (ucb *UserCreateBulk) ExecX(ctx context.Context) { - if err := ucb.Exec(ctx); err != nil { +func (_c *UserCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -714,10 +714,10 @@ func (ucb *UserCreateBulk) ExecX(ctx context.Context) { // SetEmail(v+v). // }). // Exec(ctx) -func (ucb *UserCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserUpsertBulk { - ucb.conflict = opts +func (_c *UserCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserUpsertBulk { + _c.conflict = opts return &UserUpsertBulk{ - create: ucb, + create: _c, } } @@ -727,10 +727,10 @@ func (ucb *UserCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserUpsertBul // client.User.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (ucb *UserCreateBulk) OnConflictColumns(columns ...string) *UserUpsertBulk { - ucb.conflict = append(ucb.conflict, sql.ConflictColumns(columns...)) +func (_c *UserCreateBulk) OnConflictColumns(columns ...string) *UserUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &UserUpsertBulk{ - create: ucb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/user_delete.go b/app/controlplane/pkg/data/ent/user_delete.go index 306779dd4..ac7c34e1b 100644 --- a/app/controlplane/pkg/data/ent/user_delete.go +++ b/app/controlplane/pkg/data/ent/user_delete.go @@ -20,56 +20,56 @@ type UserDelete struct { } // Where appends a list predicates to the UserDelete builder. -func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete { - ud.mutation.Where(ps...) - return ud +func (_d *UserDelete) Where(ps ...predicate.User) *UserDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (ud *UserDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, ud.sqlExec, ud.mutation, ud.hooks) +func (_d *UserDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. 
-func (ud *UserDelete) ExecX(ctx context.Context) int { - n, err := ud.Exec(ctx) +func (_d *UserDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *UserDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) - if ps := ud.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, ud.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - ud.mutation.done = true + _d.mutation.done = true return affected, err } // UserDeleteOne is the builder for deleting a single User entity. type UserDeleteOne struct { - ud *UserDelete + _d *UserDelete } // Where appends a list predicates to the UserDelete builder. -func (udo *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne { - udo.ud.mutation.Where(ps...) - return udo +func (_d *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (udo *UserDeleteOne) Exec(ctx context.Context) error { - n, err := udo.ud.Exec(ctx) +func (_d *UserDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (udo *UserDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (udo *UserDeleteOne) ExecX(ctx context.Context) { - if err := udo.Exec(ctx); err != nil { +func (_d *UserDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/user_query.go b/app/controlplane/pkg/data/ent/user_query.go index 7cc41dfdf..7139705ef 100644 --- a/app/controlplane/pkg/data/ent/user_query.go +++ b/app/controlplane/pkg/data/ent/user_query.go @@ -36,44 +36,44 @@ type UserQuery struct { } // Where adds a new predicate for the UserQuery builder. -func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery { - uq.predicates = append(uq.predicates, ps...) - return uq +func (_q *UserQuery) Where(ps ...predicate.User) *UserQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (uq *UserQuery) Limit(limit int) *UserQuery { - uq.ctx.Limit = &limit - return uq +func (_q *UserQuery) Limit(limit int) *UserQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (uq *UserQuery) Offset(offset int) *UserQuery { - uq.ctx.Offset = &offset - return uq +func (_q *UserQuery) Offset(offset int) *UserQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (uq *UserQuery) Unique(unique bool) *UserQuery { - uq.ctx.Unique = &unique - return uq +func (_q *UserQuery) Unique(unique bool) *UserQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (uq *UserQuery) Order(o ...user.OrderOption) *UserQuery { - uq.order = append(uq.order, o...) 
- return uq +func (_q *UserQuery) Order(o ...user.OrderOption) *UserQuery { + _q.order = append(_q.order, o...) + return _q } // QueryMemberships chains the current query on the "memberships" edge. -func (uq *UserQuery) QueryMemberships() *MembershipQuery { - query := (&MembershipClient{config: uq.config}).Query() +func (_q *UserQuery) QueryMemberships() *MembershipQuery { + query := (&MembershipClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := uq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := uq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -82,20 +82,20 @@ func (uq *UserQuery) QueryMemberships() *MembershipQuery { sqlgraph.To(membership.Table, membership.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, user.MembershipsTable, user.MembershipsColumn), ) - fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryGroupMemberships chains the current query on the "group_memberships" edge. -func (uq *UserQuery) QueryGroupMemberships() *GroupMembershipQuery { - query := (&GroupMembershipClient{config: uq.config}).Query() +func (_q *UserQuery) QueryGroupMemberships() *GroupMembershipQuery { + query := (&GroupMembershipClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := uq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := uq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -104,7 +104,7 @@ func (uq *UserQuery) QueryGroupMemberships() *GroupMembershipQuery { sqlgraph.To(groupmembership.Table, groupmembership.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, user.GroupMembershipsTable, user.GroupMembershipsColumn), ) - fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -112,8 +112,8 @@ func (uq *UserQuery) QueryGroupMemberships() *GroupMembershipQuery { // First returns the first User entity from the query. // Returns a *NotFoundError when no User was found. -func (uq *UserQuery) First(ctx context.Context) (*User, error) { - nodes, err := uq.Limit(1).All(setContextOp(ctx, uq.ctx, ent.OpQueryFirst)) +func (_q *UserQuery) First(ctx context.Context) (*User, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -124,8 +124,8 @@ func (uq *UserQuery) First(ctx context.Context) (*User, error) { } // FirstX is like First, but panics if an error occurs. -func (uq *UserQuery) FirstX(ctx context.Context) *User { - node, err := uq.First(ctx) +func (_q *UserQuery) FirstX(ctx context.Context) *User { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -134,9 +134,9 @@ func (uq *UserQuery) FirstX(ctx context.Context) *User { // FirstID returns the first User ID from the query. // Returns a *NotFoundError when no User ID was found. 
-func (uq *UserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *UserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = uq.Limit(1).IDs(setContextOp(ctx, uq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -147,8 +147,8 @@ func (uq *UserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { } // FirstIDX is like FirstID, but panics if an error occurs. -func (uq *UserQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := uq.FirstID(ctx) +func (_q *UserQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -158,8 +158,8 @@ func (uq *UserQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single User entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one User entity is found. // Returns a *NotFoundError when no User entities are found. -func (uq *UserQuery) Only(ctx context.Context) (*User, error) { - nodes, err := uq.Limit(2).All(setContextOp(ctx, uq.ctx, ent.OpQueryOnly)) +func (_q *UserQuery) Only(ctx context.Context) (*User, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -174,8 +174,8 @@ func (uq *UserQuery) Only(ctx context.Context) (*User, error) { } // OnlyX is like Only, but panics if an error occurs. -func (uq *UserQuery) OnlyX(ctx context.Context) *User { - node, err := uq.Only(ctx) +func (_q *UserQuery) OnlyX(ctx context.Context) *User { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -185,9 +185,9 @@ func (uq *UserQuery) OnlyX(ctx context.Context) *User { // OnlyID is like Only, but returns the only User ID in the query. // Returns a *NotSingularError when more than one User ID is found. // Returns a *NotFoundError when no entities are found. -func (uq *UserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *UserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = uq.Limit(2).IDs(setContextOp(ctx, uq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -202,8 +202,8 @@ func (uq *UserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (uq *UserQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := uq.OnlyID(ctx) +func (_q *UserQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -211,18 +211,18 @@ func (uq *UserQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Users. -func (uq *UserQuery) All(ctx context.Context) ([]*User, error) { - ctx = setContextOp(ctx, uq.ctx, ent.OpQueryAll) - if err := uq.prepareQuery(ctx); err != nil { +func (_q *UserQuery) All(ctx context.Context) ([]*User, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*User, *UserQuery]() - return withInterceptors[[]*User](ctx, uq, qr, uq.inters) + return withInterceptors[[]*User](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. 
-func (uq *UserQuery) AllX(ctx context.Context) []*User { - nodes, err := uq.All(ctx) +func (_q *UserQuery) AllX(ctx context.Context) []*User { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -230,20 +230,20 @@ func (uq *UserQuery) AllX(ctx context.Context) []*User { } // IDs executes the query and returns a list of User IDs. -func (uq *UserQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if uq.ctx.Unique == nil && uq.path != nil { - uq.Unique(true) +func (_q *UserQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, uq.ctx, ent.OpQueryIDs) - if err = uq.Select(user.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(user.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (uq *UserQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := uq.IDs(ctx) +func (_q *UserQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -251,17 +251,17 @@ func (uq *UserQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (uq *UserQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, uq.ctx, ent.OpQueryCount) - if err := uq.prepareQuery(ctx); err != nil { +func (_q *UserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, uq, querierCount[*UserQuery](), uq.inters) + return withInterceptors[int](ctx, _q, querierCount[*UserQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (uq *UserQuery) CountX(ctx context.Context) int { - count, err := uq.Count(ctx) +func (_q *UserQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -269,9 +269,9 @@ func (uq *UserQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, uq.ctx, ent.OpQueryExist) - switch _, err := uq.FirstID(ctx); { +func (_q *UserQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -282,8 +282,8 @@ func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (uq *UserQuery) ExistX(ctx context.Context) bool { - exist, err := uq.Exist(ctx) +func (_q *UserQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -292,45 +292,45 @@ func (uq *UserQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the UserQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
-func (uq *UserQuery) Clone() *UserQuery { - if uq == nil { +func (_q *UserQuery) Clone() *UserQuery { + if _q == nil { return nil } return &UserQuery{ - config: uq.config, - ctx: uq.ctx.Clone(), - order: append([]user.OrderOption{}, uq.order...), - inters: append([]Interceptor{}, uq.inters...), - predicates: append([]predicate.User{}, uq.predicates...), - withMemberships: uq.withMemberships.Clone(), - withGroupMemberships: uq.withGroupMemberships.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]user.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.User{}, _q.predicates...), + withMemberships: _q.withMemberships.Clone(), + withGroupMemberships: _q.withGroupMemberships.Clone(), // clone intermediate query. - sql: uq.sql.Clone(), - path: uq.path, - modifiers: append([]func(*sql.Selector){}, uq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithMemberships tells the query-builder to eager-load the nodes that are connected to // the "memberships" edge. The optional arguments are used to configure the query builder of the edge. -func (uq *UserQuery) WithMemberships(opts ...func(*MembershipQuery)) *UserQuery { - query := (&MembershipClient{config: uq.config}).Query() +func (_q *UserQuery) WithMemberships(opts ...func(*MembershipQuery)) *UserQuery { + query := (&MembershipClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - uq.withMemberships = query - return uq + _q.withMemberships = query + return _q } // WithGroupMemberships tells the query-builder to eager-load the nodes that are connected to // the "group_memberships" edge. The optional arguments are used to configure the query builder of the edge. -func (uq *UserQuery) WithGroupMemberships(opts ...func(*GroupMembershipQuery)) *UserQuery { - query := (&GroupMembershipClient{config: uq.config}).Query() +func (_q *UserQuery) WithGroupMemberships(opts ...func(*GroupMembershipQuery)) *UserQuery { + query := (&GroupMembershipClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - uq.withGroupMemberships = query - return uq + _q.withGroupMemberships = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -347,10 +347,10 @@ func (uq *UserQuery) WithGroupMemberships(opts ...func(*GroupMembershipQuery)) * // GroupBy(user.FieldEmail). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { - uq.ctx.Fields = append([]string{field}, fields...) - grbuild := &UserGroupBy{build: uq} - grbuild.flds = &uq.ctx.Fields +func (_q *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = user.Label grbuild.scan = grbuild.Scan return grbuild @@ -368,84 +368,84 @@ func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { // client.User.Query(). // Select(user.FieldEmail). // Scan(ctx, &v) -func (uq *UserQuery) Select(fields ...string) *UserSelect { - uq.ctx.Fields = append(uq.ctx.Fields, fields...) - sbuild := &UserSelect{UserQuery: uq} +func (_q *UserQuery) Select(fields ...string) *UserSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+ sbuild := &UserSelect{UserQuery: _q} sbuild.label = user.Label - sbuild.flds, sbuild.scan = &uq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a UserSelect configured with the given aggregations. -func (uq *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { - return uq.Select().Aggregate(fns...) +func (_q *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { + return _q.Select().Aggregate(fns...) } -func (uq *UserQuery) prepareQuery(ctx context.Context) error { - for _, inter := range uq.inters { +func (_q *UserQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, uq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range uq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !user.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if uq.path != nil { - prev, err := uq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - uq.sql = prev + _q.sql = prev } return nil } -func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { +func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { var ( nodes = []*User{} - _spec = uq.querySpec() + _spec = _q.querySpec() loadedTypes = [2]bool{ - uq.withMemberships != nil, - uq.withGroupMemberships != nil, + _q.withMemberships != nil, + _q.withGroupMemberships != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { return (*User).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &User{config: uq.config} + node := &User{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(uq.modifiers) > 0 { - _spec.Modifiers = uq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, uq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := uq.withMemberships; query != nil { - if err := uq.loadMemberships(ctx, query, nodes, + if query := _q.withMemberships; query != nil { + if err := _q.loadMemberships(ctx, query, nodes, func(n *User) { n.Edges.Memberships = []*Membership{} }, func(n *User, e *Membership) { n.Edges.Memberships = append(n.Edges.Memberships, e) }); err != nil { return nil, err } } - if query := uq.withGroupMemberships; query != nil { - if err := uq.loadGroupMemberships(ctx, query, nodes, + if query := _q.withGroupMemberships; query != nil { + if err := _q.loadGroupMemberships(ctx, query, nodes, func(n *User) { n.Edges.GroupMemberships = []*GroupMembership{} }, func(n *User, e *GroupMembership) { n.Edges.GroupMemberships = append(n.Edges.GroupMemberships, e) }); err != nil { return nil, err @@ -454,7 +454,7 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e return nodes, nil } -func (uq *UserQuery) loadMemberships(ctx context.Context, query *MembershipQuery, nodes []*User, init func(*User), assign func(*User, *Membership)) error { +func (_q *UserQuery) loadMemberships(ctx context.Context, 
query *MembershipQuery, nodes []*User, init func(*User), assign func(*User, *Membership)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*User) for i := range nodes { @@ -485,7 +485,7 @@ func (uq *UserQuery) loadMemberships(ctx context.Context, query *MembershipQuery } return nil } -func (uq *UserQuery) loadGroupMemberships(ctx context.Context, query *GroupMembershipQuery, nodes []*User, init func(*User), assign func(*User, *GroupMembership)) error { +func (_q *UserQuery) loadGroupMemberships(ctx context.Context, query *GroupMembershipQuery, nodes []*User, init func(*User), assign func(*User, *GroupMembership)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*User) for i := range nodes { @@ -516,27 +516,27 @@ func (uq *UserQuery) loadGroupMemberships(ctx context.Context, query *GroupMembe return nil } -func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) { - _spec := uq.querySpec() - if len(uq.modifiers) > 0 { - _spec.Modifiers = uq.modifiers +func (_q *UserQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = uq.ctx.Fields - if len(uq.ctx.Fields) > 0 { - _spec.Unique = uq.ctx.Unique != nil && *uq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, uq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *UserQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) - _spec.From = uq.sql - if unique := uq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if uq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := uq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) for i := range fields { @@ -545,20 +545,20 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { } } } - if ps := uq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := uq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := uq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := uq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -568,36 +568,36 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(uq.driver.Dialect()) +func (_q *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(user.Table) - columns := uq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = user.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if uq.sql != nil { - selector = uq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) 
} - if uq.ctx.Unique != nil && *uq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range uq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range uq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range uq.order { + for _, p := range _q.order { p(selector) } - if offset := uq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := uq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -606,33 +606,33 @@ func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (uq *UserQuery) ForUpdate(opts ...sql.LockOption) *UserQuery { - if uq.driver.Dialect() == dialect.Postgres { - uq.Unique(false) +func (_q *UserQuery) ForUpdate(opts ...sql.LockOption) *UserQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - uq.modifiers = append(uq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return uq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (uq *UserQuery) ForShare(opts ...sql.LockOption) *UserQuery { - if uq.driver.Dialect() == dialect.Postgres { - uq.Unique(false) +func (_q *UserQuery) ForShare(opts ...sql.LockOption) *UserQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - uq.modifiers = append(uq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return uq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (uq *UserQuery) Modify(modifiers ...func(s *sql.Selector)) *UserSelect { - uq.modifiers = append(uq.modifiers, modifiers...) - return uq.Select() +func (_q *UserQuery) Modify(modifiers ...func(s *sql.Selector)) *UserSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // UserGroupBy is the group-by builder for User entities. @@ -642,41 +642,41 @@ type UserGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { - ugb.fns = append(ugb.fns, fns...) - return ugb +func (_g *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ugb.build.ctx, ent.OpQueryGroupBy) - if err := ugb.build.prepareQuery(ctx); err != nil { +func (_g *UserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, ugb.build, ugb, ugb.build.inters, v) + return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (ugb *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { +func (_g *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(ugb.fns)) - for _, fn := range ugb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*ugb.flds)+len(ugb.fns)) - for _, f := range *ugb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*ugb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := ugb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -690,27 +690,27 @@ type UserSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (us *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { - us.fns = append(us.fns, fns...) - return us +func (_s *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (us *UserSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, us.ctx, ent.OpQuerySelect) - if err := us.prepareQuery(ctx); err != nil { +func (_s *UserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*UserQuery, *UserSelect](ctx, us.UserQuery, us, us.inters, v) + return scanWithInterceptors[*UserQuery, *UserSelect](ctx, _s.UserQuery, _s, _s.inters, v) } -func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { +func (_s *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(us.fns)) - for _, fn := range us.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*us.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -718,7 +718,7 @@ func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error } rows := &sql.Rows{} query, args := selector.Query() - if err := us.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -726,7 +726,7 @@ func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error } // Modify adds a query modifier for attaching custom logic to queries. -func (us *UserSelect) Modify(modifiers ...func(s *sql.Selector)) *UserSelect { - us.modifiers = append(us.modifiers, modifiers...) - return us +func (_s *UserSelect) Modify(modifiers ...func(s *sql.Selector)) *UserSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/user_update.go b/app/controlplane/pkg/data/ent/user_update.go index fab1f5537..33c274bb2 100644 --- a/app/controlplane/pkg/data/ent/user_update.go +++ b/app/controlplane/pkg/data/ent/user_update.go @@ -27,184 +27,184 @@ type UserUpdate struct { } // Where appends a list predicates to the UserUpdate builder. -func (uu *UserUpdate) Where(ps ...predicate.User) *UserUpdate { - uu.mutation.Where(ps...) - return uu +func (_u *UserUpdate) Where(ps ...predicate.User) *UserUpdate { + _u.mutation.Where(ps...) + return _u } // SetEmail sets the "email" field. -func (uu *UserUpdate) SetEmail(s string) *UserUpdate { - uu.mutation.SetEmail(s) - return uu +func (_u *UserUpdate) SetEmail(v string) *UserUpdate { + _u.mutation.SetEmail(v) + return _u } // SetNillableEmail sets the "email" field if the given value is not nil. -func (uu *UserUpdate) SetNillableEmail(s *string) *UserUpdate { - if s != nil { - uu.SetEmail(*s) +func (_u *UserUpdate) SetNillableEmail(v *string) *UserUpdate { + if v != nil { + _u.SetEmail(*v) } - return uu + return _u } // SetUpdatedAt sets the "updated_at" field. -func (uu *UserUpdate) SetUpdatedAt(t time.Time) *UserUpdate { - uu.mutation.SetUpdatedAt(t) - return uu +func (_u *UserUpdate) SetUpdatedAt(v time.Time) *UserUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (uu *UserUpdate) SetNillableUpdatedAt(t *time.Time) *UserUpdate { - if t != nil { - uu.SetUpdatedAt(*t) +func (_u *UserUpdate) SetNillableUpdatedAt(v *time.Time) *UserUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return uu + return _u } // SetHasRestrictedAccess sets the "has_restricted_access" field. -func (uu *UserUpdate) SetHasRestrictedAccess(b bool) *UserUpdate { - uu.mutation.SetHasRestrictedAccess(b) - return uu +func (_u *UserUpdate) SetHasRestrictedAccess(v bool) *UserUpdate { + _u.mutation.SetHasRestrictedAccess(v) + return _u } // SetNillableHasRestrictedAccess sets the "has_restricted_access" field if the given value is not nil. -func (uu *UserUpdate) SetNillableHasRestrictedAccess(b *bool) *UserUpdate { - if b != nil { - uu.SetHasRestrictedAccess(*b) +func (_u *UserUpdate) SetNillableHasRestrictedAccess(v *bool) *UserUpdate { + if v != nil { + _u.SetHasRestrictedAccess(*v) } - return uu + return _u } // ClearHasRestrictedAccess clears the value of the "has_restricted_access" field. -func (uu *UserUpdate) ClearHasRestrictedAccess() *UserUpdate { - uu.mutation.ClearHasRestrictedAccess() - return uu +func (_u *UserUpdate) ClearHasRestrictedAccess() *UserUpdate { + _u.mutation.ClearHasRestrictedAccess() + return _u } // SetFirstName sets the "first_name" field. 
-func (uu *UserUpdate) SetFirstName(s string) *UserUpdate { - uu.mutation.SetFirstName(s) - return uu +func (_u *UserUpdate) SetFirstName(v string) *UserUpdate { + _u.mutation.SetFirstName(v) + return _u } // SetNillableFirstName sets the "first_name" field if the given value is not nil. -func (uu *UserUpdate) SetNillableFirstName(s *string) *UserUpdate { - if s != nil { - uu.SetFirstName(*s) +func (_u *UserUpdate) SetNillableFirstName(v *string) *UserUpdate { + if v != nil { + _u.SetFirstName(*v) } - return uu + return _u } // ClearFirstName clears the value of the "first_name" field. -func (uu *UserUpdate) ClearFirstName() *UserUpdate { - uu.mutation.ClearFirstName() - return uu +func (_u *UserUpdate) ClearFirstName() *UserUpdate { + _u.mutation.ClearFirstName() + return _u } // SetLastName sets the "last_name" field. -func (uu *UserUpdate) SetLastName(s string) *UserUpdate { - uu.mutation.SetLastName(s) - return uu +func (_u *UserUpdate) SetLastName(v string) *UserUpdate { + _u.mutation.SetLastName(v) + return _u } // SetNillableLastName sets the "last_name" field if the given value is not nil. -func (uu *UserUpdate) SetNillableLastName(s *string) *UserUpdate { - if s != nil { - uu.SetLastName(*s) +func (_u *UserUpdate) SetNillableLastName(v *string) *UserUpdate { + if v != nil { + _u.SetLastName(*v) } - return uu + return _u } // ClearLastName clears the value of the "last_name" field. -func (uu *UserUpdate) ClearLastName() *UserUpdate { - uu.mutation.ClearLastName() - return uu +func (_u *UserUpdate) ClearLastName() *UserUpdate { + _u.mutation.ClearLastName() + return _u } // AddMembershipIDs adds the "memberships" edge to the Membership entity by IDs. -func (uu *UserUpdate) AddMembershipIDs(ids ...uuid.UUID) *UserUpdate { - uu.mutation.AddMembershipIDs(ids...) - return uu +func (_u *UserUpdate) AddMembershipIDs(ids ...uuid.UUID) *UserUpdate { + _u.mutation.AddMembershipIDs(ids...) + return _u } // AddMemberships adds the "memberships" edges to the Membership entity. -func (uu *UserUpdate) AddMemberships(m ...*Membership) *UserUpdate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *UserUpdate) AddMemberships(v ...*Membership) *UserUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uu.AddMembershipIDs(ids...) + return _u.AddMembershipIDs(ids...) } // AddGroupMembershipIDs adds the "group_memberships" edge to the GroupMembership entity by IDs. -func (uu *UserUpdate) AddGroupMembershipIDs(ids ...uuid.UUID) *UserUpdate { - uu.mutation.AddGroupMembershipIDs(ids...) - return uu +func (_u *UserUpdate) AddGroupMembershipIDs(ids ...uuid.UUID) *UserUpdate { + _u.mutation.AddGroupMembershipIDs(ids...) + return _u } // AddGroupMemberships adds the "group_memberships" edges to the GroupMembership entity. -func (uu *UserUpdate) AddGroupMemberships(g ...*GroupMembership) *UserUpdate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *UserUpdate) AddGroupMemberships(v ...*GroupMembership) *UserUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uu.AddGroupMembershipIDs(ids...) + return _u.AddGroupMembershipIDs(ids...) } // Mutation returns the UserMutation object of the builder. -func (uu *UserUpdate) Mutation() *UserMutation { - return uu.mutation +func (_u *UserUpdate) Mutation() *UserMutation { + return _u.mutation } // ClearMemberships clears all "memberships" edges to the Membership entity. 
-func (uu *UserUpdate) ClearMemberships() *UserUpdate { - uu.mutation.ClearMemberships() - return uu +func (_u *UserUpdate) ClearMemberships() *UserUpdate { + _u.mutation.ClearMemberships() + return _u } // RemoveMembershipIDs removes the "memberships" edge to Membership entities by IDs. -func (uu *UserUpdate) RemoveMembershipIDs(ids ...uuid.UUID) *UserUpdate { - uu.mutation.RemoveMembershipIDs(ids...) - return uu +func (_u *UserUpdate) RemoveMembershipIDs(ids ...uuid.UUID) *UserUpdate { + _u.mutation.RemoveMembershipIDs(ids...) + return _u } // RemoveMemberships removes "memberships" edges to Membership entities. -func (uu *UserUpdate) RemoveMemberships(m ...*Membership) *UserUpdate { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *UserUpdate) RemoveMemberships(v ...*Membership) *UserUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uu.RemoveMembershipIDs(ids...) + return _u.RemoveMembershipIDs(ids...) } // ClearGroupMemberships clears all "group_memberships" edges to the GroupMembership entity. -func (uu *UserUpdate) ClearGroupMemberships() *UserUpdate { - uu.mutation.ClearGroupMemberships() - return uu +func (_u *UserUpdate) ClearGroupMemberships() *UserUpdate { + _u.mutation.ClearGroupMemberships() + return _u } // RemoveGroupMembershipIDs removes the "group_memberships" edge to GroupMembership entities by IDs. -func (uu *UserUpdate) RemoveGroupMembershipIDs(ids ...uuid.UUID) *UserUpdate { - uu.mutation.RemoveGroupMembershipIDs(ids...) - return uu +func (_u *UserUpdate) RemoveGroupMembershipIDs(ids ...uuid.UUID) *UserUpdate { + _u.mutation.RemoveGroupMembershipIDs(ids...) + return _u } // RemoveGroupMemberships removes "group_memberships" edges to GroupMembership entities. -func (uu *UserUpdate) RemoveGroupMemberships(g ...*GroupMembership) *UserUpdate { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *UserUpdate) RemoveGroupMemberships(v ...*GroupMembership) *UserUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uu.RemoveGroupMembershipIDs(ids...) + return _u.RemoveGroupMembershipIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (uu *UserUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, uu.sqlSave, uu.mutation, uu.hooks) +func (_u *UserUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (uu *UserUpdate) SaveX(ctx context.Context) int { - affected, err := uu.Save(ctx) +func (_u *UserUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -212,21 +212,21 @@ func (uu *UserUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (uu *UserUpdate) Exec(ctx context.Context) error { - _, err := uu.Save(ctx) +func (_u *UserUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (uu *UserUpdate) ExecX(ctx context.Context) { - if err := uu.Exec(ctx); err != nil { +func (_u *UserUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. 
-func (uu *UserUpdate) check() error { - if v, ok := uu.mutation.Email(); ok { +func (_u *UserUpdate) check() error { + if v, ok := _u.mutation.Email(); ok { if err := user.EmailValidator(v); err != nil { return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} } @@ -235,48 +235,48 @@ func (uu *UserUpdate) check() error { } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (uu *UserUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *UserUpdate { - uu.modifiers = append(uu.modifiers, modifiers...) - return uu +func (_u *UserUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *UserUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := uu.check(); err != nil { - return n, err +func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) - if ps := uu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := uu.mutation.Email(); ok { + if value, ok := _u.mutation.Email(); ok { _spec.SetField(user.FieldEmail, field.TypeString, value) } - if value, ok := uu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := uu.mutation.HasRestrictedAccess(); ok { + if value, ok := _u.mutation.HasRestrictedAccess(); ok { _spec.SetField(user.FieldHasRestrictedAccess, field.TypeBool, value) } - if uu.mutation.HasRestrictedAccessCleared() { + if _u.mutation.HasRestrictedAccessCleared() { _spec.ClearField(user.FieldHasRestrictedAccess, field.TypeBool) } - if value, ok := uu.mutation.FirstName(); ok { + if value, ok := _u.mutation.FirstName(); ok { _spec.SetField(user.FieldFirstName, field.TypeString, value) } - if uu.mutation.FirstNameCleared() { + if _u.mutation.FirstNameCleared() { _spec.ClearField(user.FieldFirstName, field.TypeString) } - if value, ok := uu.mutation.LastName(); ok { + if value, ok := _u.mutation.LastName(); ok { _spec.SetField(user.FieldLastName, field.TypeString, value) } - if uu.mutation.LastNameCleared() { + if _u.mutation.LastNameCleared() { _spec.ClearField(user.FieldLastName, field.TypeString) } - if uu.mutation.MembershipsCleared() { + if _u.mutation.MembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -289,7 +289,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := uu.mutation.RemovedMembershipsIDs(); len(nodes) > 0 && !uu.mutation.MembershipsCleared() { + if nodes := _u.mutation.RemovedMembershipsIDs(); len(nodes) > 0 && !_u.mutation.MembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -305,7 +305,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := uu.mutation.MembershipsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.MembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -321,7 +321,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { 
} _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if uu.mutation.GroupMembershipsCleared() { + if _u.mutation.GroupMembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -334,7 +334,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := uu.mutation.RemovedGroupMembershipsIDs(); len(nodes) > 0 && !uu.mutation.GroupMembershipsCleared() { + if nodes := _u.mutation.RemovedGroupMembershipsIDs(); len(nodes) > 0 && !_u.mutation.GroupMembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -350,7 +350,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := uu.mutation.GroupMembershipsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.GroupMembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -366,8 +366,8 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(uu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, uu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{user.Label} } else if sqlgraph.IsConstraintError(err) { @@ -375,8 +375,8 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - uu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // UserUpdateOne is the builder for updating a single User entity. @@ -389,191 +389,191 @@ type UserUpdateOne struct { } // SetEmail sets the "email" field. -func (uuo *UserUpdateOne) SetEmail(s string) *UserUpdateOne { - uuo.mutation.SetEmail(s) - return uuo +func (_u *UserUpdateOne) SetEmail(v string) *UserUpdateOne { + _u.mutation.SetEmail(v) + return _u } // SetNillableEmail sets the "email" field if the given value is not nil. -func (uuo *UserUpdateOne) SetNillableEmail(s *string) *UserUpdateOne { - if s != nil { - uuo.SetEmail(*s) +func (_u *UserUpdateOne) SetNillableEmail(v *string) *UserUpdateOne { + if v != nil { + _u.SetEmail(*v) } - return uuo + return _u } // SetUpdatedAt sets the "updated_at" field. -func (uuo *UserUpdateOne) SetUpdatedAt(t time.Time) *UserUpdateOne { - uuo.mutation.SetUpdatedAt(t) - return uuo +func (_u *UserUpdateOne) SetUpdatedAt(v time.Time) *UserUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (uuo *UserUpdateOne) SetNillableUpdatedAt(t *time.Time) *UserUpdateOne { - if t != nil { - uuo.SetUpdatedAt(*t) +func (_u *UserUpdateOne) SetNillableUpdatedAt(v *time.Time) *UserUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return uuo + return _u } // SetHasRestrictedAccess sets the "has_restricted_access" field. -func (uuo *UserUpdateOne) SetHasRestrictedAccess(b bool) *UserUpdateOne { - uuo.mutation.SetHasRestrictedAccess(b) - return uuo +func (_u *UserUpdateOne) SetHasRestrictedAccess(v bool) *UserUpdateOne { + _u.mutation.SetHasRestrictedAccess(v) + return _u } // SetNillableHasRestrictedAccess sets the "has_restricted_access" field if the given value is not nil. 
-func (uuo *UserUpdateOne) SetNillableHasRestrictedAccess(b *bool) *UserUpdateOne { - if b != nil { - uuo.SetHasRestrictedAccess(*b) +func (_u *UserUpdateOne) SetNillableHasRestrictedAccess(v *bool) *UserUpdateOne { + if v != nil { + _u.SetHasRestrictedAccess(*v) } - return uuo + return _u } // ClearHasRestrictedAccess clears the value of the "has_restricted_access" field. -func (uuo *UserUpdateOne) ClearHasRestrictedAccess() *UserUpdateOne { - uuo.mutation.ClearHasRestrictedAccess() - return uuo +func (_u *UserUpdateOne) ClearHasRestrictedAccess() *UserUpdateOne { + _u.mutation.ClearHasRestrictedAccess() + return _u } // SetFirstName sets the "first_name" field. -func (uuo *UserUpdateOne) SetFirstName(s string) *UserUpdateOne { - uuo.mutation.SetFirstName(s) - return uuo +func (_u *UserUpdateOne) SetFirstName(v string) *UserUpdateOne { + _u.mutation.SetFirstName(v) + return _u } // SetNillableFirstName sets the "first_name" field if the given value is not nil. -func (uuo *UserUpdateOne) SetNillableFirstName(s *string) *UserUpdateOne { - if s != nil { - uuo.SetFirstName(*s) +func (_u *UserUpdateOne) SetNillableFirstName(v *string) *UserUpdateOne { + if v != nil { + _u.SetFirstName(*v) } - return uuo + return _u } // ClearFirstName clears the value of the "first_name" field. -func (uuo *UserUpdateOne) ClearFirstName() *UserUpdateOne { - uuo.mutation.ClearFirstName() - return uuo +func (_u *UserUpdateOne) ClearFirstName() *UserUpdateOne { + _u.mutation.ClearFirstName() + return _u } // SetLastName sets the "last_name" field. -func (uuo *UserUpdateOne) SetLastName(s string) *UserUpdateOne { - uuo.mutation.SetLastName(s) - return uuo +func (_u *UserUpdateOne) SetLastName(v string) *UserUpdateOne { + _u.mutation.SetLastName(v) + return _u } // SetNillableLastName sets the "last_name" field if the given value is not nil. -func (uuo *UserUpdateOne) SetNillableLastName(s *string) *UserUpdateOne { - if s != nil { - uuo.SetLastName(*s) +func (_u *UserUpdateOne) SetNillableLastName(v *string) *UserUpdateOne { + if v != nil { + _u.SetLastName(*v) } - return uuo + return _u } // ClearLastName clears the value of the "last_name" field. -func (uuo *UserUpdateOne) ClearLastName() *UserUpdateOne { - uuo.mutation.ClearLastName() - return uuo +func (_u *UserUpdateOne) ClearLastName() *UserUpdateOne { + _u.mutation.ClearLastName() + return _u } // AddMembershipIDs adds the "memberships" edge to the Membership entity by IDs. -func (uuo *UserUpdateOne) AddMembershipIDs(ids ...uuid.UUID) *UserUpdateOne { - uuo.mutation.AddMembershipIDs(ids...) - return uuo +func (_u *UserUpdateOne) AddMembershipIDs(ids ...uuid.UUID) *UserUpdateOne { + _u.mutation.AddMembershipIDs(ids...) + return _u } // AddMemberships adds the "memberships" edges to the Membership entity. -func (uuo *UserUpdateOne) AddMemberships(m ...*Membership) *UserUpdateOne { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *UserUpdateOne) AddMemberships(v ...*Membership) *UserUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uuo.AddMembershipIDs(ids...) + return _u.AddMembershipIDs(ids...) } // AddGroupMembershipIDs adds the "group_memberships" edge to the GroupMembership entity by IDs. -func (uuo *UserUpdateOne) AddGroupMembershipIDs(ids ...uuid.UUID) *UserUpdateOne { - uuo.mutation.AddGroupMembershipIDs(ids...) - return uuo +func (_u *UserUpdateOne) AddGroupMembershipIDs(ids ...uuid.UUID) *UserUpdateOne { + _u.mutation.AddGroupMembershipIDs(ids...) 
+ return _u } // AddGroupMemberships adds the "group_memberships" edges to the GroupMembership entity. -func (uuo *UserUpdateOne) AddGroupMemberships(g ...*GroupMembership) *UserUpdateOne { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *UserUpdateOne) AddGroupMemberships(v ...*GroupMembership) *UserUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uuo.AddGroupMembershipIDs(ids...) + return _u.AddGroupMembershipIDs(ids...) } // Mutation returns the UserMutation object of the builder. -func (uuo *UserUpdateOne) Mutation() *UserMutation { - return uuo.mutation +func (_u *UserUpdateOne) Mutation() *UserMutation { + return _u.mutation } // ClearMemberships clears all "memberships" edges to the Membership entity. -func (uuo *UserUpdateOne) ClearMemberships() *UserUpdateOne { - uuo.mutation.ClearMemberships() - return uuo +func (_u *UserUpdateOne) ClearMemberships() *UserUpdateOne { + _u.mutation.ClearMemberships() + return _u } // RemoveMembershipIDs removes the "memberships" edge to Membership entities by IDs. -func (uuo *UserUpdateOne) RemoveMembershipIDs(ids ...uuid.UUID) *UserUpdateOne { - uuo.mutation.RemoveMembershipIDs(ids...) - return uuo +func (_u *UserUpdateOne) RemoveMembershipIDs(ids ...uuid.UUID) *UserUpdateOne { + _u.mutation.RemoveMembershipIDs(ids...) + return _u } // RemoveMemberships removes "memberships" edges to Membership entities. -func (uuo *UserUpdateOne) RemoveMemberships(m ...*Membership) *UserUpdateOne { - ids := make([]uuid.UUID, len(m)) - for i := range m { - ids[i] = m[i].ID +func (_u *UserUpdateOne) RemoveMemberships(v ...*Membership) *UserUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uuo.RemoveMembershipIDs(ids...) + return _u.RemoveMembershipIDs(ids...) } // ClearGroupMemberships clears all "group_memberships" edges to the GroupMembership entity. -func (uuo *UserUpdateOne) ClearGroupMemberships() *UserUpdateOne { - uuo.mutation.ClearGroupMemberships() - return uuo +func (_u *UserUpdateOne) ClearGroupMemberships() *UserUpdateOne { + _u.mutation.ClearGroupMemberships() + return _u } // RemoveGroupMembershipIDs removes the "group_memberships" edge to GroupMembership entities by IDs. -func (uuo *UserUpdateOne) RemoveGroupMembershipIDs(ids ...uuid.UUID) *UserUpdateOne { - uuo.mutation.RemoveGroupMembershipIDs(ids...) - return uuo +func (_u *UserUpdateOne) RemoveGroupMembershipIDs(ids ...uuid.UUID) *UserUpdateOne { + _u.mutation.RemoveGroupMembershipIDs(ids...) + return _u } // RemoveGroupMemberships removes "group_memberships" edges to GroupMembership entities. -func (uuo *UserUpdateOne) RemoveGroupMemberships(g ...*GroupMembership) *UserUpdateOne { - ids := make([]uuid.UUID, len(g)) - for i := range g { - ids[i] = g[i].ID +func (_u *UserUpdateOne) RemoveGroupMemberships(v ...*GroupMembership) *UserUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return uuo.RemoveGroupMembershipIDs(ids...) + return _u.RemoveGroupMembershipIDs(ids...) } // Where appends a list predicates to the UserUpdate builder. -func (uuo *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { - uuo.mutation.Where(ps...) - return uuo +func (_u *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
-func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne { - uuo.fields = append([]string{field}, fields...) - return uuo +func (_u *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated User entity. -func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) { - return withHooks(ctx, uuo.sqlSave, uuo.mutation, uuo.hooks) +func (_u *UserUpdateOne) Save(ctx context.Context) (*User, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (uuo *UserUpdateOne) SaveX(ctx context.Context) *User { - node, err := uuo.Save(ctx) +func (_u *UserUpdateOne) SaveX(ctx context.Context) *User { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -581,21 +581,21 @@ func (uuo *UserUpdateOne) SaveX(ctx context.Context) *User { } // Exec executes the query on the entity. -func (uuo *UserUpdateOne) Exec(ctx context.Context) error { - _, err := uuo.Save(ctx) +func (_u *UserUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (uuo *UserUpdateOne) ExecX(ctx context.Context) { - if err := uuo.Exec(ctx); err != nil { +func (_u *UserUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (uuo *UserUpdateOne) check() error { - if v, ok := uuo.mutation.Email(); ok { +func (_u *UserUpdateOne) check() error { + if v, ok := _u.mutation.Email(); ok { if err := user.EmailValidator(v); err != nil { return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} } @@ -604,22 +604,22 @@ func (uuo *UserUpdateOne) check() error { } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (uuo *UserUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *UserUpdateOne { - uuo.modifiers = append(uuo.modifiers, modifiers...) - return uuo +func (_u *UserUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *UserUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { - if err := uuo.check(); err != nil { +func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) - id, ok := uuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)} } _spec.Node.ID.Value = id - if fields := uuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) for _, f := range fields { @@ -631,38 +631,38 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } } } - if ps := uuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := uuo.mutation.Email(); ok { + if value, ok := _u.mutation.Email(); ok { _spec.SetField(user.FieldEmail, field.TypeString, value) } - if value, ok := uuo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := uuo.mutation.HasRestrictedAccess(); ok { + if value, ok := _u.mutation.HasRestrictedAccess(); ok { _spec.SetField(user.FieldHasRestrictedAccess, field.TypeBool, value) } - if uuo.mutation.HasRestrictedAccessCleared() { + if _u.mutation.HasRestrictedAccessCleared() { _spec.ClearField(user.FieldHasRestrictedAccess, field.TypeBool) } - if value, ok := uuo.mutation.FirstName(); ok { + if value, ok := _u.mutation.FirstName(); ok { _spec.SetField(user.FieldFirstName, field.TypeString, value) } - if uuo.mutation.FirstNameCleared() { + if _u.mutation.FirstNameCleared() { _spec.ClearField(user.FieldFirstName, field.TypeString) } - if value, ok := uuo.mutation.LastName(); ok { + if value, ok := _u.mutation.LastName(); ok { _spec.SetField(user.FieldLastName, field.TypeString, value) } - if uuo.mutation.LastNameCleared() { + if _u.mutation.LastNameCleared() { _spec.ClearField(user.FieldLastName, field.TypeString) } - if uuo.mutation.MembershipsCleared() { + if _u.mutation.MembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -675,7 +675,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := uuo.mutation.RemovedMembershipsIDs(); len(nodes) > 0 && !uuo.mutation.MembershipsCleared() { + if nodes := _u.mutation.RemovedMembershipsIDs(); len(nodes) > 0 && !_u.mutation.MembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -691,7 +691,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := uuo.mutation.MembershipsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.MembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -707,7 +707,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if uuo.mutation.GroupMembershipsCleared() { + if _u.mutation.GroupMembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, 
Inverse: true, @@ -720,7 +720,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := uuo.mutation.RemovedGroupMembershipsIDs(); len(nodes) > 0 && !uuo.mutation.GroupMembershipsCleared() { + if nodes := _u.mutation.RemovedGroupMembershipsIDs(); len(nodes) > 0 && !_u.mutation.GroupMembershipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -736,7 +736,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := uuo.mutation.GroupMembershipsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.GroupMembershipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -752,11 +752,11 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(uuo.modifiers...) - _node = &User{config: uuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &User{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, uuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{user.Label} } else if sqlgraph.IsConstraintError(err) { @@ -764,6 +764,6 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } return nil, err } - uuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/workflow.go b/app/controlplane/pkg/data/ent/workflow.go index 18ba4eb92..81c7b7517 100644 --- a/app/controlplane/pkg/data/ent/workflow.go +++ b/app/controlplane/pkg/data/ent/workflow.go @@ -189,7 +189,7 @@ func (*Workflow) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Workflow fields. 
-func (w *Workflow) assignValues(columns []string, values []any) error { +func (_m *Workflow) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -199,86 +199,86 @@ func (w *Workflow) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - w.ID = *value + _m.ID = *value } case workflow.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - w.Name = value.String + _m.Name = value.String } case workflow.FieldProjectOld: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field project_old", values[i]) } else if value.Valid { - w.ProjectOld = value.String + _m.ProjectOld = value.String } case workflow.FieldTeam: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field team", values[i]) } else if value.Valid { - w.Team = value.String + _m.Team = value.String } case workflow.FieldRunsCount: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field runs_count", values[i]) } else if value.Valid { - w.RunsCount = int(value.Int64) + _m.RunsCount = int(value.Int64) } case workflow.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - w.CreatedAt = value.Time + _m.CreatedAt = value.Time } case workflow.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - w.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case workflow.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - w.DeletedAt = value.Time + _m.DeletedAt = value.Time } case workflow.FieldPublic: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field public", values[i]) } else if value.Valid { - w.Public = value.Bool + _m.Public = value.Bool } case workflow.FieldOrganizationID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field organization_id", values[i]) } else if value != nil { - w.OrganizationID = *value + _m.OrganizationID = *value } case workflow.FieldProjectID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field project_id", values[i]) } else if value != nil { - w.ProjectID = *value + _m.ProjectID = *value } case workflow.FieldLatestRun: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field latest_run", values[i]) } else if value.Valid { - w.LatestRun = new(uuid.UUID) - *w.LatestRun = *value.S.(*uuid.UUID) + _m.LatestRun = new(uuid.UUID) + *_m.LatestRun = *value.S.(*uuid.UUID) } case workflow.FieldDescription: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field description", values[i]) } else if value.Valid { - w.Description = value.String + _m.Description = value.String } case workflow.FieldMetadata: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field metadata", values[i]) } else if value != 
nil && len(*value) > 0 { - if err := json.Unmarshal(*value, &w.Metadata); err != nil { + if err := json.Unmarshal(*value, &_m.Metadata); err != nil { return fmt.Errorf("unmarshal field metadata: %w", err) } } @@ -286,11 +286,11 @@ func (w *Workflow) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field workflow_contract", values[i]) } else if value.Valid { - w.workflow_contract = new(uuid.UUID) - *w.workflow_contract = *value.S.(*uuid.UUID) + _m.workflow_contract = new(uuid.UUID) + *_m.workflow_contract = *value.S.(*uuid.UUID) } default: - w.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -298,113 +298,113 @@ func (w *Workflow) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the Workflow. // This includes values selected through modifiers, order, etc. -func (w *Workflow) Value(name string) (ent.Value, error) { - return w.selectValues.Get(name) +func (_m *Workflow) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryRobotaccounts queries the "robotaccounts" edge of the Workflow entity. -func (w *Workflow) QueryRobotaccounts() *RobotAccountQuery { - return NewWorkflowClient(w.config).QueryRobotaccounts(w) +func (_m *Workflow) QueryRobotaccounts() *RobotAccountQuery { + return NewWorkflowClient(_m.config).QueryRobotaccounts(_m) } // QueryWorkflowruns queries the "workflowruns" edge of the Workflow entity. -func (w *Workflow) QueryWorkflowruns() *WorkflowRunQuery { - return NewWorkflowClient(w.config).QueryWorkflowruns(w) +func (_m *Workflow) QueryWorkflowruns() *WorkflowRunQuery { + return NewWorkflowClient(_m.config).QueryWorkflowruns(_m) } // QueryOrganization queries the "organization" edge of the Workflow entity. -func (w *Workflow) QueryOrganization() *OrganizationQuery { - return NewWorkflowClient(w.config).QueryOrganization(w) +func (_m *Workflow) QueryOrganization() *OrganizationQuery { + return NewWorkflowClient(_m.config).QueryOrganization(_m) } // QueryContract queries the "contract" edge of the Workflow entity. -func (w *Workflow) QueryContract() *WorkflowContractQuery { - return NewWorkflowClient(w.config).QueryContract(w) +func (_m *Workflow) QueryContract() *WorkflowContractQuery { + return NewWorkflowClient(_m.config).QueryContract(_m) } // QueryIntegrationAttachments queries the "integration_attachments" edge of the Workflow entity. -func (w *Workflow) QueryIntegrationAttachments() *IntegrationAttachmentQuery { - return NewWorkflowClient(w.config).QueryIntegrationAttachments(w) +func (_m *Workflow) QueryIntegrationAttachments() *IntegrationAttachmentQuery { + return NewWorkflowClient(_m.config).QueryIntegrationAttachments(_m) } // QueryProject queries the "project" edge of the Workflow entity. -func (w *Workflow) QueryProject() *ProjectQuery { - return NewWorkflowClient(w.config).QueryProject(w) +func (_m *Workflow) QueryProject() *ProjectQuery { + return NewWorkflowClient(_m.config).QueryProject(_m) } // QueryLatestWorkflowRun queries the "latest_workflow_run" edge of the Workflow entity. 
-func (w *Workflow) QueryLatestWorkflowRun() *WorkflowRunQuery { - return NewWorkflowClient(w.config).QueryLatestWorkflowRun(w) +func (_m *Workflow) QueryLatestWorkflowRun() *WorkflowRunQuery { + return NewWorkflowClient(_m.config).QueryLatestWorkflowRun(_m) } // QueryReferrers queries the "referrers" edge of the Workflow entity. -func (w *Workflow) QueryReferrers() *ReferrerQuery { - return NewWorkflowClient(w.config).QueryReferrers(w) +func (_m *Workflow) QueryReferrers() *ReferrerQuery { + return NewWorkflowClient(_m.config).QueryReferrers(_m) } // Update returns a builder for updating this Workflow. // Note that you need to call Workflow.Unwrap() before calling this method if this Workflow // was returned from a transaction, and the transaction was committed or rolled back. -func (w *Workflow) Update() *WorkflowUpdateOne { - return NewWorkflowClient(w.config).UpdateOne(w) +func (_m *Workflow) Update() *WorkflowUpdateOne { + return NewWorkflowClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the Workflow entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (w *Workflow) Unwrap() *Workflow { - _tx, ok := w.config.driver.(*txDriver) +func (_m *Workflow) Unwrap() *Workflow { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: Workflow is not a transactional entity") } - w.config.driver = _tx.drv - return w + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (w *Workflow) String() string { +func (_m *Workflow) String() string { var builder strings.Builder builder.WriteString("Workflow(") - builder.WriteString(fmt.Sprintf("id=%v, ", w.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("name=") - builder.WriteString(w.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("project_old=") - builder.WriteString(w.ProjectOld) + builder.WriteString(_m.ProjectOld) builder.WriteString(", ") builder.WriteString("team=") - builder.WriteString(w.Team) + builder.WriteString(_m.Team) builder.WriteString(", ") builder.WriteString("runs_count=") - builder.WriteString(fmt.Sprintf("%v", w.RunsCount)) + builder.WriteString(fmt.Sprintf("%v", _m.RunsCount)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(w.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(w.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(w.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("public=") - builder.WriteString(fmt.Sprintf("%v", w.Public)) + builder.WriteString(fmt.Sprintf("%v", _m.Public)) builder.WriteString(", ") builder.WriteString("organization_id=") - builder.WriteString(fmt.Sprintf("%v", w.OrganizationID)) + builder.WriteString(fmt.Sprintf("%v", _m.OrganizationID)) builder.WriteString(", ") builder.WriteString("project_id=") - builder.WriteString(fmt.Sprintf("%v", w.ProjectID)) + builder.WriteString(fmt.Sprintf("%v", _m.ProjectID)) builder.WriteString(", ") - if v := w.LatestRun; v != nil { + if v := _m.LatestRun; v != nil { builder.WriteString("latest_run=") builder.WriteString(fmt.Sprintf("%v", *v)) } builder.WriteString(", ") builder.WriteString("description=") 
- builder.WriteString(w.Description) + builder.WriteString(_m.Description) builder.WriteString(", ") builder.WriteString("metadata=") - builder.WriteString(fmt.Sprintf("%v", w.Metadata)) + builder.WriteString(fmt.Sprintf("%v", _m.Metadata)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/workflow_create.go b/app/controlplane/pkg/data/ent/workflow_create.go index 9bd5de132..68f04b4bd 100644 --- a/app/controlplane/pkg/data/ent/workflow_create.go +++ b/app/controlplane/pkg/data/ent/workflow_create.go @@ -32,283 +32,283 @@ type WorkflowCreate struct { } // SetName sets the "name" field. -func (wc *WorkflowCreate) SetName(s string) *WorkflowCreate { - wc.mutation.SetName(s) - return wc +func (_c *WorkflowCreate) SetName(v string) *WorkflowCreate { + _c.mutation.SetName(v) + return _c } // SetProjectOld sets the "project_old" field. -func (wc *WorkflowCreate) SetProjectOld(s string) *WorkflowCreate { - wc.mutation.SetProjectOld(s) - return wc +func (_c *WorkflowCreate) SetProjectOld(v string) *WorkflowCreate { + _c.mutation.SetProjectOld(v) + return _c } // SetNillableProjectOld sets the "project_old" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillableProjectOld(s *string) *WorkflowCreate { - if s != nil { - wc.SetProjectOld(*s) +func (_c *WorkflowCreate) SetNillableProjectOld(v *string) *WorkflowCreate { + if v != nil { + _c.SetProjectOld(*v) } - return wc + return _c } // SetTeam sets the "team" field. -func (wc *WorkflowCreate) SetTeam(s string) *WorkflowCreate { - wc.mutation.SetTeam(s) - return wc +func (_c *WorkflowCreate) SetTeam(v string) *WorkflowCreate { + _c.mutation.SetTeam(v) + return _c } // SetNillableTeam sets the "team" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillableTeam(s *string) *WorkflowCreate { - if s != nil { - wc.SetTeam(*s) +func (_c *WorkflowCreate) SetNillableTeam(v *string) *WorkflowCreate { + if v != nil { + _c.SetTeam(*v) } - return wc + return _c } // SetRunsCount sets the "runs_count" field. -func (wc *WorkflowCreate) SetRunsCount(i int) *WorkflowCreate { - wc.mutation.SetRunsCount(i) - return wc +func (_c *WorkflowCreate) SetRunsCount(v int) *WorkflowCreate { + _c.mutation.SetRunsCount(v) + return _c } // SetNillableRunsCount sets the "runs_count" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillableRunsCount(i *int) *WorkflowCreate { - if i != nil { - wc.SetRunsCount(*i) +func (_c *WorkflowCreate) SetNillableRunsCount(v *int) *WorkflowCreate { + if v != nil { + _c.SetRunsCount(*v) } - return wc + return _c } // SetCreatedAt sets the "created_at" field. -func (wc *WorkflowCreate) SetCreatedAt(t time.Time) *WorkflowCreate { - wc.mutation.SetCreatedAt(t) - return wc +func (_c *WorkflowCreate) SetCreatedAt(v time.Time) *WorkflowCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillableCreatedAt(t *time.Time) *WorkflowCreate { - if t != nil { - wc.SetCreatedAt(*t) +func (_c *WorkflowCreate) SetNillableCreatedAt(v *time.Time) *WorkflowCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return wc + return _c } // SetUpdatedAt sets the "updated_at" field. 
-func (wc *WorkflowCreate) SetUpdatedAt(t time.Time) *WorkflowCreate { - wc.mutation.SetUpdatedAt(t) - return wc +func (_c *WorkflowCreate) SetUpdatedAt(v time.Time) *WorkflowCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillableUpdatedAt(t *time.Time) *WorkflowCreate { - if t != nil { - wc.SetUpdatedAt(*t) +func (_c *WorkflowCreate) SetNillableUpdatedAt(v *time.Time) *WorkflowCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return wc + return _c } // SetDeletedAt sets the "deleted_at" field. -func (wc *WorkflowCreate) SetDeletedAt(t time.Time) *WorkflowCreate { - wc.mutation.SetDeletedAt(t) - return wc +func (_c *WorkflowCreate) SetDeletedAt(v time.Time) *WorkflowCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillableDeletedAt(t *time.Time) *WorkflowCreate { - if t != nil { - wc.SetDeletedAt(*t) +func (_c *WorkflowCreate) SetNillableDeletedAt(v *time.Time) *WorkflowCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return wc + return _c } // SetPublic sets the "public" field. -func (wc *WorkflowCreate) SetPublic(b bool) *WorkflowCreate { - wc.mutation.SetPublic(b) - return wc +func (_c *WorkflowCreate) SetPublic(v bool) *WorkflowCreate { + _c.mutation.SetPublic(v) + return _c } // SetNillablePublic sets the "public" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillablePublic(b *bool) *WorkflowCreate { - if b != nil { - wc.SetPublic(*b) +func (_c *WorkflowCreate) SetNillablePublic(v *bool) *WorkflowCreate { + if v != nil { + _c.SetPublic(*v) } - return wc + return _c } // SetOrganizationID sets the "organization_id" field. -func (wc *WorkflowCreate) SetOrganizationID(u uuid.UUID) *WorkflowCreate { - wc.mutation.SetOrganizationID(u) - return wc +func (_c *WorkflowCreate) SetOrganizationID(v uuid.UUID) *WorkflowCreate { + _c.mutation.SetOrganizationID(v) + return _c } // SetProjectID sets the "project_id" field. -func (wc *WorkflowCreate) SetProjectID(u uuid.UUID) *WorkflowCreate { - wc.mutation.SetProjectID(u) - return wc +func (_c *WorkflowCreate) SetProjectID(v uuid.UUID) *WorkflowCreate { + _c.mutation.SetProjectID(v) + return _c } // SetLatestRun sets the "latest_run" field. -func (wc *WorkflowCreate) SetLatestRun(u uuid.UUID) *WorkflowCreate { - wc.mutation.SetLatestRun(u) - return wc +func (_c *WorkflowCreate) SetLatestRun(v uuid.UUID) *WorkflowCreate { + _c.mutation.SetLatestRun(v) + return _c } // SetNillableLatestRun sets the "latest_run" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillableLatestRun(u *uuid.UUID) *WorkflowCreate { - if u != nil { - wc.SetLatestRun(*u) +func (_c *WorkflowCreate) SetNillableLatestRun(v *uuid.UUID) *WorkflowCreate { + if v != nil { + _c.SetLatestRun(*v) } - return wc + return _c } // SetDescription sets the "description" field. -func (wc *WorkflowCreate) SetDescription(s string) *WorkflowCreate { - wc.mutation.SetDescription(s) - return wc +func (_c *WorkflowCreate) SetDescription(v string) *WorkflowCreate { + _c.mutation.SetDescription(v) + return _c } // SetNillableDescription sets the "description" field if the given value is not nil. 
-func (wc *WorkflowCreate) SetNillableDescription(s *string) *WorkflowCreate { - if s != nil { - wc.SetDescription(*s) +func (_c *WorkflowCreate) SetNillableDescription(v *string) *WorkflowCreate { + if v != nil { + _c.SetDescription(*v) } - return wc + return _c } // SetMetadata sets the "metadata" field. -func (wc *WorkflowCreate) SetMetadata(m map[string]interface{}) *WorkflowCreate { - wc.mutation.SetMetadata(m) - return wc +func (_c *WorkflowCreate) SetMetadata(v map[string]interface{}) *WorkflowCreate { + _c.mutation.SetMetadata(v) + return _c } // SetID sets the "id" field. -func (wc *WorkflowCreate) SetID(u uuid.UUID) *WorkflowCreate { - wc.mutation.SetID(u) - return wc +func (_c *WorkflowCreate) SetID(v uuid.UUID) *WorkflowCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (wc *WorkflowCreate) SetNillableID(u *uuid.UUID) *WorkflowCreate { - if u != nil { - wc.SetID(*u) +func (_c *WorkflowCreate) SetNillableID(v *uuid.UUID) *WorkflowCreate { + if v != nil { + _c.SetID(*v) } - return wc + return _c } // AddRobotaccountIDs adds the "robotaccounts" edge to the RobotAccount entity by IDs. -func (wc *WorkflowCreate) AddRobotaccountIDs(ids ...uuid.UUID) *WorkflowCreate { - wc.mutation.AddRobotaccountIDs(ids...) - return wc +func (_c *WorkflowCreate) AddRobotaccountIDs(ids ...uuid.UUID) *WorkflowCreate { + _c.mutation.AddRobotaccountIDs(ids...) + return _c } // AddRobotaccounts adds the "robotaccounts" edges to the RobotAccount entity. -func (wc *WorkflowCreate) AddRobotaccounts(r ...*RobotAccount) *WorkflowCreate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_c *WorkflowCreate) AddRobotaccounts(v ...*RobotAccount) *WorkflowCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wc.AddRobotaccountIDs(ids...) + return _c.AddRobotaccountIDs(ids...) } // AddWorkflowrunIDs adds the "workflowruns" edge to the WorkflowRun entity by IDs. -func (wc *WorkflowCreate) AddWorkflowrunIDs(ids ...uuid.UUID) *WorkflowCreate { - wc.mutation.AddWorkflowrunIDs(ids...) - return wc +func (_c *WorkflowCreate) AddWorkflowrunIDs(ids ...uuid.UUID) *WorkflowCreate { + _c.mutation.AddWorkflowrunIDs(ids...) + return _c } // AddWorkflowruns adds the "workflowruns" edges to the WorkflowRun entity. -func (wc *WorkflowCreate) AddWorkflowruns(w ...*WorkflowRun) *WorkflowCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *WorkflowCreate) AddWorkflowruns(v ...*WorkflowRun) *WorkflowCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wc.AddWorkflowrunIDs(ids...) + return _c.AddWorkflowrunIDs(ids...) } // SetOrganization sets the "organization" edge to the Organization entity. -func (wc *WorkflowCreate) SetOrganization(o *Organization) *WorkflowCreate { - return wc.SetOrganizationID(o.ID) +func (_c *WorkflowCreate) SetOrganization(v *Organization) *WorkflowCreate { + return _c.SetOrganizationID(v.ID) } // SetContractID sets the "contract" edge to the WorkflowContract entity by ID. -func (wc *WorkflowCreate) SetContractID(id uuid.UUID) *WorkflowCreate { - wc.mutation.SetContractID(id) - return wc +func (_c *WorkflowCreate) SetContractID(id uuid.UUID) *WorkflowCreate { + _c.mutation.SetContractID(id) + return _c } // SetContract sets the "contract" edge to the WorkflowContract entity. 
-func (wc *WorkflowCreate) SetContract(w *WorkflowContract) *WorkflowCreate { - return wc.SetContractID(w.ID) +func (_c *WorkflowCreate) SetContract(v *WorkflowContract) *WorkflowCreate { + return _c.SetContractID(v.ID) } // AddIntegrationAttachmentIDs adds the "integration_attachments" edge to the IntegrationAttachment entity by IDs. -func (wc *WorkflowCreate) AddIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowCreate { - wc.mutation.AddIntegrationAttachmentIDs(ids...) - return wc +func (_c *WorkflowCreate) AddIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowCreate { + _c.mutation.AddIntegrationAttachmentIDs(ids...) + return _c } // AddIntegrationAttachments adds the "integration_attachments" edges to the IntegrationAttachment entity. -func (wc *WorkflowCreate) AddIntegrationAttachments(i ...*IntegrationAttachment) *WorkflowCreate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_c *WorkflowCreate) AddIntegrationAttachments(v ...*IntegrationAttachment) *WorkflowCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wc.AddIntegrationAttachmentIDs(ids...) + return _c.AddIntegrationAttachmentIDs(ids...) } // SetProject sets the "project" edge to the Project entity. -func (wc *WorkflowCreate) SetProject(p *Project) *WorkflowCreate { - return wc.SetProjectID(p.ID) +func (_c *WorkflowCreate) SetProject(v *Project) *WorkflowCreate { + return _c.SetProjectID(v.ID) } // SetLatestWorkflowRunID sets the "latest_workflow_run" edge to the WorkflowRun entity by ID. -func (wc *WorkflowCreate) SetLatestWorkflowRunID(id uuid.UUID) *WorkflowCreate { - wc.mutation.SetLatestWorkflowRunID(id) - return wc +func (_c *WorkflowCreate) SetLatestWorkflowRunID(id uuid.UUID) *WorkflowCreate { + _c.mutation.SetLatestWorkflowRunID(id) + return _c } // SetNillableLatestWorkflowRunID sets the "latest_workflow_run" edge to the WorkflowRun entity by ID if the given value is not nil. -func (wc *WorkflowCreate) SetNillableLatestWorkflowRunID(id *uuid.UUID) *WorkflowCreate { +func (_c *WorkflowCreate) SetNillableLatestWorkflowRunID(id *uuid.UUID) *WorkflowCreate { if id != nil { - wc = wc.SetLatestWorkflowRunID(*id) + _c = _c.SetLatestWorkflowRunID(*id) } - return wc + return _c } // SetLatestWorkflowRun sets the "latest_workflow_run" edge to the WorkflowRun entity. -func (wc *WorkflowCreate) SetLatestWorkflowRun(w *WorkflowRun) *WorkflowCreate { - return wc.SetLatestWorkflowRunID(w.ID) +func (_c *WorkflowCreate) SetLatestWorkflowRun(v *WorkflowRun) *WorkflowCreate { + return _c.SetLatestWorkflowRunID(v.ID) } // AddReferrerIDs adds the "referrers" edge to the Referrer entity by IDs. -func (wc *WorkflowCreate) AddReferrerIDs(ids ...uuid.UUID) *WorkflowCreate { - wc.mutation.AddReferrerIDs(ids...) - return wc +func (_c *WorkflowCreate) AddReferrerIDs(ids ...uuid.UUID) *WorkflowCreate { + _c.mutation.AddReferrerIDs(ids...) + return _c } // AddReferrers adds the "referrers" edges to the Referrer entity. -func (wc *WorkflowCreate) AddReferrers(r ...*Referrer) *WorkflowCreate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_c *WorkflowCreate) AddReferrers(v ...*Referrer) *WorkflowCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wc.AddReferrerIDs(ids...) + return _c.AddReferrerIDs(ids...) } // Mutation returns the WorkflowMutation object of the builder. 
-func (wc *WorkflowCreate) Mutation() *WorkflowMutation { - return wc.mutation +func (_c *WorkflowCreate) Mutation() *WorkflowMutation { + return _c.mutation } // Save creates the Workflow in the database. -func (wc *WorkflowCreate) Save(ctx context.Context) (*Workflow, error) { - wc.defaults() - return withHooks(ctx, wc.sqlSave, wc.mutation, wc.hooks) +func (_c *WorkflowCreate) Save(ctx context.Context) (*Workflow, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (wc *WorkflowCreate) SaveX(ctx context.Context) *Workflow { - v, err := wc.Save(ctx) +func (_c *WorkflowCreate) SaveX(ctx context.Context) *Workflow { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -316,83 +316,83 @@ func (wc *WorkflowCreate) SaveX(ctx context.Context) *Workflow { } // Exec executes the query. -func (wc *WorkflowCreate) Exec(ctx context.Context) error { - _, err := wc.Save(ctx) +func (_c *WorkflowCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wc *WorkflowCreate) ExecX(ctx context.Context) { - if err := wc.Exec(ctx); err != nil { +func (_c *WorkflowCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (wc *WorkflowCreate) defaults() { - if _, ok := wc.mutation.RunsCount(); !ok { +func (_c *WorkflowCreate) defaults() { + if _, ok := _c.mutation.RunsCount(); !ok { v := workflow.DefaultRunsCount - wc.mutation.SetRunsCount(v) + _c.mutation.SetRunsCount(v) } - if _, ok := wc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { v := workflow.DefaultCreatedAt() - wc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := wc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := workflow.DefaultUpdatedAt() - wc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := wc.mutation.Public(); !ok { + if _, ok := _c.mutation.Public(); !ok { v := workflow.DefaultPublic - wc.mutation.SetPublic(v) + _c.mutation.SetPublic(v) } - if _, ok := wc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := workflow.DefaultID() - wc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (wc *WorkflowCreate) check() error { - if _, ok := wc.mutation.Name(); !ok { +func (_c *WorkflowCreate) check() error { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Workflow.name"`)} } - if _, ok := wc.mutation.RunsCount(); !ok { + if _, ok := _c.mutation.RunsCount(); !ok { return &ValidationError{Name: "runs_count", err: errors.New(`ent: missing required field "Workflow.runs_count"`)} } - if _, ok := wc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Workflow.created_at"`)} } - if _, ok := wc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Workflow.updated_at"`)} } - if _, ok := wc.mutation.Public(); !ok { + if _, ok := _c.mutation.Public(); !ok { return &ValidationError{Name: "public", err: errors.New(`ent: missing required field "Workflow.public"`)} } - if _, ok := wc.mutation.OrganizationID(); !ok { + if _, ok := _c.mutation.OrganizationID(); !ok { return &ValidationError{Name: "organization_id", err: errors.New(`ent: missing required field "Workflow.organization_id"`)} } - if _, ok := wc.mutation.ProjectID(); !ok { + if _, ok := _c.mutation.ProjectID(); !ok { return &ValidationError{Name: "project_id", err: errors.New(`ent: missing required field "Workflow.project_id"`)} } - if len(wc.mutation.OrganizationIDs()) == 0 { + if len(_c.mutation.OrganizationIDs()) == 0 { return &ValidationError{Name: "organization", err: errors.New(`ent: missing required edge "Workflow.organization"`)} } - if len(wc.mutation.ContractIDs()) == 0 { + if len(_c.mutation.ContractIDs()) == 0 { return &ValidationError{Name: "contract", err: errors.New(`ent: missing required edge "Workflow.contract"`)} } - if len(wc.mutation.ProjectIDs()) == 0 { + if len(_c.mutation.ProjectIDs()) == 0 { return &ValidationError{Name: "project", err: errors.New(`ent: missing required edge "Workflow.project"`)} } return nil } -func (wc *WorkflowCreate) sqlSave(ctx context.Context) (*Workflow, error) { - if err := wc.check(); err != nil { +func (_c *WorkflowCreate) sqlSave(ctx context.Context) (*Workflow, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := wc.createSpec() - if err := sqlgraph.CreateNode(ctx, wc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -405,62 +405,62 @@ func (wc *WorkflowCreate) sqlSave(ctx context.Context) (*Workflow, error) { return nil, err } } - wc.mutation.id = &_node.ID - wc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { +func (_c *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { var ( - _node = &Workflow{config: wc.config} + _node = &Workflow{config: _c.config} _spec = sqlgraph.NewCreateSpec(workflow.Table, sqlgraph.NewFieldSpec(workflow.FieldID, field.TypeUUID)) ) - _spec.OnConflict = wc.conflict - if id, ok := wc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := wc.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { 
_spec.SetField(workflow.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := wc.mutation.ProjectOld(); ok { + if value, ok := _c.mutation.ProjectOld(); ok { _spec.SetField(workflow.FieldProjectOld, field.TypeString, value) _node.ProjectOld = value } - if value, ok := wc.mutation.Team(); ok { + if value, ok := _c.mutation.Team(); ok { _spec.SetField(workflow.FieldTeam, field.TypeString, value) _node.Team = value } - if value, ok := wc.mutation.RunsCount(); ok { + if value, ok := _c.mutation.RunsCount(); ok { _spec.SetField(workflow.FieldRunsCount, field.TypeInt, value) _node.RunsCount = value } - if value, ok := wc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(workflow.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := wc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(workflow.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := wc.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(workflow.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if value, ok := wc.mutation.Public(); ok { + if value, ok := _c.mutation.Public(); ok { _spec.SetField(workflow.FieldPublic, field.TypeBool, value) _node.Public = value } - if value, ok := wc.mutation.Description(); ok { + if value, ok := _c.mutation.Description(); ok { _spec.SetField(workflow.FieldDescription, field.TypeString, value) _node.Description = value } - if value, ok := wc.mutation.Metadata(); ok { + if value, ok := _c.mutation.Metadata(); ok { _spec.SetField(workflow.FieldMetadata, field.TypeJSON, value) _node.Metadata = value } - if nodes := wc.mutation.RobotaccountsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.RobotaccountsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -476,7 +476,7 @@ func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := wc.mutation.WorkflowrunsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowrunsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -492,7 +492,7 @@ func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := wc.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -509,7 +509,7 @@ func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { _node.OrganizationID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := wc.mutation.ContractIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ContractIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -526,7 +526,7 @@ func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { _node.workflow_contract = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := wc.mutation.IntegrationAttachmentsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.IntegrationAttachmentsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -542,7 +542,7 @@ func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := wc.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ProjectIDs(); len(nodes) > 0 { edge := 
&sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -559,7 +559,7 @@ func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { _node.ProjectID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := wc.mutation.LatestWorkflowRunIDs(); len(nodes) > 0 { + if nodes := _c.mutation.LatestWorkflowRunIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -576,7 +576,7 @@ func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { _node.LatestRun = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := wc.mutation.ReferrersIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ReferrersIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -611,10 +611,10 @@ func (wc *WorkflowCreate) createSpec() (*Workflow, *sqlgraph.CreateSpec) { // SetName(v+v). // }). // Exec(ctx) -func (wc *WorkflowCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowUpsertOne { - wc.conflict = opts +func (_c *WorkflowCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowUpsertOne { + _c.conflict = opts return &WorkflowUpsertOne{ - create: wc, + create: _c, } } @@ -624,10 +624,10 @@ func (wc *WorkflowCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowUpsert // client.Workflow.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (wc *WorkflowCreate) OnConflictColumns(columns ...string) *WorkflowUpsertOne { - wc.conflict = append(wc.conflict, sql.ConflictColumns(columns...)) +func (_c *WorkflowCreate) OnConflictColumns(columns ...string) *WorkflowUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &WorkflowUpsertOne{ - create: wc, + create: _c, } } @@ -1122,16 +1122,16 @@ type WorkflowCreateBulk struct { } // Save creates the Workflow entities in the database. -func (wcb *WorkflowCreateBulk) Save(ctx context.Context) ([]*Workflow, error) { - if wcb.err != nil { - return nil, wcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(wcb.builders)) - nodes := make([]*Workflow, len(wcb.builders)) - mutators := make([]Mutator, len(wcb.builders)) - for i := range wcb.builders { +func (_c *WorkflowCreateBulk) Save(ctx context.Context) ([]*Workflow, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Workflow, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := wcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*WorkflowMutation) @@ -1145,12 +1145,12 @@ func (wcb *WorkflowCreateBulk) Save(ctx context.Context) ([]*Workflow, error) { var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, wcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = wcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, wcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -1170,7 +1170,7 @@ func (wcb *WorkflowCreateBulk) Save(ctx context.Context) ([]*Workflow, error) { }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, wcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -1178,8 +1178,8 @@ func (wcb *WorkflowCreateBulk) Save(ctx context.Context) ([]*Workflow, error) { } // SaveX is like Save, but panics if an error occurs. -func (wcb *WorkflowCreateBulk) SaveX(ctx context.Context) []*Workflow { - v, err := wcb.Save(ctx) +func (_c *WorkflowCreateBulk) SaveX(ctx context.Context) []*Workflow { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -1187,14 +1187,14 @@ func (wcb *WorkflowCreateBulk) SaveX(ctx context.Context) []*Workflow { } // Exec executes the query. -func (wcb *WorkflowCreateBulk) Exec(ctx context.Context) error { - _, err := wcb.Save(ctx) +func (_c *WorkflowCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wcb *WorkflowCreateBulk) ExecX(ctx context.Context) { - if err := wcb.Exec(ctx); err != nil { +func (_c *WorkflowCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -1214,10 +1214,10 @@ func (wcb *WorkflowCreateBulk) ExecX(ctx context.Context) { // SetName(v+v). // }). // Exec(ctx) -func (wcb *WorkflowCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowUpsertBulk { - wcb.conflict = opts +func (_c *WorkflowCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowUpsertBulk { + _c.conflict = opts return &WorkflowUpsertBulk{ - create: wcb, + create: _c, } } @@ -1227,10 +1227,10 @@ func (wcb *WorkflowCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowU // client.Workflow.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (wcb *WorkflowCreateBulk) OnConflictColumns(columns ...string) *WorkflowUpsertBulk { - wcb.conflict = append(wcb.conflict, sql.ConflictColumns(columns...)) +func (_c *WorkflowCreateBulk) OnConflictColumns(columns ...string) *WorkflowUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &WorkflowUpsertBulk{ - create: wcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/workflow_delete.go b/app/controlplane/pkg/data/ent/workflow_delete.go index e45a926f3..27ccd0b76 100644 --- a/app/controlplane/pkg/data/ent/workflow_delete.go +++ b/app/controlplane/pkg/data/ent/workflow_delete.go @@ -20,56 +20,56 @@ type WorkflowDelete struct { } // Where appends a list predicates to the WorkflowDelete builder. -func (wd *WorkflowDelete) Where(ps ...predicate.Workflow) *WorkflowDelete { - wd.mutation.Where(ps...) - return wd +func (_d *WorkflowDelete) Where(ps ...predicate.Workflow) *WorkflowDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (wd *WorkflowDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, wd.sqlExec, wd.mutation, wd.hooks) +func (_d *WorkflowDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. 
-func (wd *WorkflowDelete) ExecX(ctx context.Context) int { - n, err := wd.Exec(ctx) +func (_d *WorkflowDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (wd *WorkflowDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *WorkflowDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(workflow.Table, sqlgraph.NewFieldSpec(workflow.FieldID, field.TypeUUID)) - if ps := wd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, wd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - wd.mutation.done = true + _d.mutation.done = true return affected, err } // WorkflowDeleteOne is the builder for deleting a single Workflow entity. type WorkflowDeleteOne struct { - wd *WorkflowDelete + _d *WorkflowDelete } // Where appends a list predicates to the WorkflowDelete builder. -func (wdo *WorkflowDeleteOne) Where(ps ...predicate.Workflow) *WorkflowDeleteOne { - wdo.wd.mutation.Where(ps...) - return wdo +func (_d *WorkflowDeleteOne) Where(ps ...predicate.Workflow) *WorkflowDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (wdo *WorkflowDeleteOne) Exec(ctx context.Context) error { - n, err := wdo.wd.Exec(ctx) +func (_d *WorkflowDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (wdo *WorkflowDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (wdo *WorkflowDeleteOne) ExecX(ctx context.Context) { - if err := wdo.Exec(ctx); err != nil { +func (_d *WorkflowDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/workflow_query.go b/app/controlplane/pkg/data/ent/workflow_query.go index f39c164e9..d9ffe5e88 100644 --- a/app/controlplane/pkg/data/ent/workflow_query.go +++ b/app/controlplane/pkg/data/ent/workflow_query.go @@ -48,44 +48,44 @@ type WorkflowQuery struct { } // Where adds a new predicate for the WorkflowQuery builder. -func (wq *WorkflowQuery) Where(ps ...predicate.Workflow) *WorkflowQuery { - wq.predicates = append(wq.predicates, ps...) - return wq +func (_q *WorkflowQuery) Where(ps ...predicate.Workflow) *WorkflowQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (wq *WorkflowQuery) Limit(limit int) *WorkflowQuery { - wq.ctx.Limit = &limit - return wq +func (_q *WorkflowQuery) Limit(limit int) *WorkflowQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (wq *WorkflowQuery) Offset(offset int) *WorkflowQuery { - wq.ctx.Offset = &offset - return wq +func (_q *WorkflowQuery) Offset(offset int) *WorkflowQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
-func (wq *WorkflowQuery) Unique(unique bool) *WorkflowQuery { - wq.ctx.Unique = &unique - return wq +func (_q *WorkflowQuery) Unique(unique bool) *WorkflowQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (wq *WorkflowQuery) Order(o ...workflow.OrderOption) *WorkflowQuery { - wq.order = append(wq.order, o...) - return wq +func (_q *WorkflowQuery) Order(o ...workflow.OrderOption) *WorkflowQuery { + _q.order = append(_q.order, o...) + return _q } // QueryRobotaccounts chains the current query on the "robotaccounts" edge. -func (wq *WorkflowQuery) QueryRobotaccounts() *RobotAccountQuery { - query := (&RobotAccountClient{config: wq.config}).Query() +func (_q *WorkflowQuery) QueryRobotaccounts() *RobotAccountQuery { + query := (&RobotAccountClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -94,20 +94,20 @@ func (wq *WorkflowQuery) QueryRobotaccounts() *RobotAccountQuery { sqlgraph.To(robotaccount.Table, robotaccount.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, workflow.RobotaccountsTable, workflow.RobotaccountsColumn), ) - fromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryWorkflowruns chains the current query on the "workflowruns" edge. -func (wq *WorkflowQuery) QueryWorkflowruns() *WorkflowRunQuery { - query := (&WorkflowRunClient{config: wq.config}).Query() +func (_q *WorkflowQuery) QueryWorkflowruns() *WorkflowRunQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -116,20 +116,20 @@ func (wq *WorkflowQuery) QueryWorkflowruns() *WorkflowRunQuery { sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, workflow.WorkflowrunsTable, workflow.WorkflowrunsColumn), ) - fromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryOrganization chains the current query on the "organization" edge. 
-func (wq *WorkflowQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: wq.config}).Query() +func (_q *WorkflowQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -138,20 +138,20 @@ func (wq *WorkflowQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflow.OrganizationTable, workflow.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryContract chains the current query on the "contract" edge. -func (wq *WorkflowQuery) QueryContract() *WorkflowContractQuery { - query := (&WorkflowContractClient{config: wq.config}).Query() +func (_q *WorkflowQuery) QueryContract() *WorkflowContractQuery { + query := (&WorkflowContractClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -160,20 +160,20 @@ func (wq *WorkflowQuery) QueryContract() *WorkflowContractQuery { sqlgraph.To(workflowcontract.Table, workflowcontract.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, workflow.ContractTable, workflow.ContractColumn), ) - fromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryIntegrationAttachments chains the current query on the "integration_attachments" edge. -func (wq *WorkflowQuery) QueryIntegrationAttachments() *IntegrationAttachmentQuery { - query := (&IntegrationAttachmentClient{config: wq.config}).Query() +func (_q *WorkflowQuery) QueryIntegrationAttachments() *IntegrationAttachmentQuery { + query := (&IntegrationAttachmentClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -182,20 +182,20 @@ func (wq *WorkflowQuery) QueryIntegrationAttachments() *IntegrationAttachmentQue sqlgraph.To(integrationattachment.Table, integrationattachment.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, workflow.IntegrationAttachmentsTable, workflow.IntegrationAttachmentsColumn), ) - fromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryProject chains the current query on the "project" edge. 
-func (wq *WorkflowQuery) QueryProject() *ProjectQuery { - query := (&ProjectClient{config: wq.config}).Query() +func (_q *WorkflowQuery) QueryProject() *ProjectQuery { + query := (&ProjectClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -204,20 +204,20 @@ func (wq *WorkflowQuery) QueryProject() *ProjectQuery { sqlgraph.To(project.Table, project.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflow.ProjectTable, workflow.ProjectColumn), ) - fromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryLatestWorkflowRun chains the current query on the "latest_workflow_run" edge. -func (wq *WorkflowQuery) QueryLatestWorkflowRun() *WorkflowRunQuery { - query := (&WorkflowRunClient{config: wq.config}).Query() +func (_q *WorkflowQuery) QueryLatestWorkflowRun() *WorkflowRunQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -226,20 +226,20 @@ func (wq *WorkflowQuery) QueryLatestWorkflowRun() *WorkflowRunQuery { sqlgraph.To(workflowrun.Table, workflowrun.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, workflow.LatestWorkflowRunTable, workflow.LatestWorkflowRunColumn), ) - fromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryReferrers chains the current query on the "referrers" edge. -func (wq *WorkflowQuery) QueryReferrers() *ReferrerQuery { - query := (&ReferrerClient{config: wq.config}).Query() +func (_q *WorkflowQuery) QueryReferrers() *ReferrerQuery { + query := (&ReferrerClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -248,7 +248,7 @@ func (wq *WorkflowQuery) QueryReferrers() *ReferrerQuery { sqlgraph.To(referrer.Table, referrer.FieldID), sqlgraph.Edge(sqlgraph.M2M, true, workflow.ReferrersTable, workflow.ReferrersPrimaryKey...), ) - fromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -256,8 +256,8 @@ func (wq *WorkflowQuery) QueryReferrers() *ReferrerQuery { // First returns the first Workflow entity from the query. // Returns a *NotFoundError when no Workflow was found. 
-func (wq *WorkflowQuery) First(ctx context.Context) (*Workflow, error) { - nodes, err := wq.Limit(1).All(setContextOp(ctx, wq.ctx, ent.OpQueryFirst)) +func (_q *WorkflowQuery) First(ctx context.Context) (*Workflow, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -268,8 +268,8 @@ func (wq *WorkflowQuery) First(ctx context.Context) (*Workflow, error) { } // FirstX is like First, but panics if an error occurs. -func (wq *WorkflowQuery) FirstX(ctx context.Context) *Workflow { - node, err := wq.First(ctx) +func (_q *WorkflowQuery) FirstX(ctx context.Context) *Workflow { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -278,9 +278,9 @@ func (wq *WorkflowQuery) FirstX(ctx context.Context) *Workflow { // FirstID returns the first Workflow ID from the query. // Returns a *NotFoundError when no Workflow ID was found. -func (wq *WorkflowQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *WorkflowQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = wq.Limit(1).IDs(setContextOp(ctx, wq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -291,8 +291,8 @@ func (wq *WorkflowQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) } // FirstIDX is like FirstID, but panics if an error occurs. -func (wq *WorkflowQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := wq.FirstID(ctx) +func (_q *WorkflowQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -302,8 +302,8 @@ func (wq *WorkflowQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single Workflow entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one Workflow entity is found. // Returns a *NotFoundError when no Workflow entities are found. -func (wq *WorkflowQuery) Only(ctx context.Context) (*Workflow, error) { - nodes, err := wq.Limit(2).All(setContextOp(ctx, wq.ctx, ent.OpQueryOnly)) +func (_q *WorkflowQuery) Only(ctx context.Context) (*Workflow, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -318,8 +318,8 @@ func (wq *WorkflowQuery) Only(ctx context.Context) (*Workflow, error) { } // OnlyX is like Only, but panics if an error occurs. -func (wq *WorkflowQuery) OnlyX(ctx context.Context) *Workflow { - node, err := wq.Only(ctx) +func (_q *WorkflowQuery) OnlyX(ctx context.Context) *Workflow { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -329,9 +329,9 @@ func (wq *WorkflowQuery) OnlyX(ctx context.Context) *Workflow { // OnlyID is like Only, but returns the only Workflow ID in the query. // Returns a *NotSingularError when more than one Workflow ID is found. // Returns a *NotFoundError when no entities are found. 
-func (wq *WorkflowQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *WorkflowQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = wq.Limit(2).IDs(setContextOp(ctx, wq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -346,8 +346,8 @@ func (wq *WorkflowQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (wq *WorkflowQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := wq.OnlyID(ctx) +func (_q *WorkflowQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -355,18 +355,18 @@ func (wq *WorkflowQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of Workflows. -func (wq *WorkflowQuery) All(ctx context.Context) ([]*Workflow, error) { - ctx = setContextOp(ctx, wq.ctx, ent.OpQueryAll) - if err := wq.prepareQuery(ctx); err != nil { +func (_q *WorkflowQuery) All(ctx context.Context) ([]*Workflow, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*Workflow, *WorkflowQuery]() - return withInterceptors[[]*Workflow](ctx, wq, qr, wq.inters) + return withInterceptors[[]*Workflow](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (wq *WorkflowQuery) AllX(ctx context.Context) []*Workflow { - nodes, err := wq.All(ctx) +func (_q *WorkflowQuery) AllX(ctx context.Context) []*Workflow { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -374,20 +374,20 @@ func (wq *WorkflowQuery) AllX(ctx context.Context) []*Workflow { } // IDs executes the query and returns a list of Workflow IDs. -func (wq *WorkflowQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if wq.ctx.Unique == nil && wq.path != nil { - wq.Unique(true) +func (_q *WorkflowQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, wq.ctx, ent.OpQueryIDs) - if err = wq.Select(workflow.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(workflow.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (wq *WorkflowQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := wq.IDs(ctx) +func (_q *WorkflowQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -395,17 +395,17 @@ func (wq *WorkflowQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (wq *WorkflowQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, wq.ctx, ent.OpQueryCount) - if err := wq.prepareQuery(ctx); err != nil { +func (_q *WorkflowQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, wq, querierCount[*WorkflowQuery](), wq.inters) + return withInterceptors[int](ctx, _q, querierCount[*WorkflowQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. 
-func (wq *WorkflowQuery) CountX(ctx context.Context) int { - count, err := wq.Count(ctx) +func (_q *WorkflowQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -413,9 +413,9 @@ func (wq *WorkflowQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (wq *WorkflowQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, wq.ctx, ent.OpQueryExist) - switch _, err := wq.FirstID(ctx); { +func (_q *WorkflowQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -426,8 +426,8 @@ func (wq *WorkflowQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (wq *WorkflowQuery) ExistX(ctx context.Context) bool { - exist, err := wq.Exist(ctx) +func (_q *WorkflowQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -436,117 +436,117 @@ func (wq *WorkflowQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the WorkflowQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (wq *WorkflowQuery) Clone() *WorkflowQuery { - if wq == nil { +func (_q *WorkflowQuery) Clone() *WorkflowQuery { + if _q == nil { return nil } return &WorkflowQuery{ - config: wq.config, - ctx: wq.ctx.Clone(), - order: append([]workflow.OrderOption{}, wq.order...), - inters: append([]Interceptor{}, wq.inters...), - predicates: append([]predicate.Workflow{}, wq.predicates...), - withRobotaccounts: wq.withRobotaccounts.Clone(), - withWorkflowruns: wq.withWorkflowruns.Clone(), - withOrganization: wq.withOrganization.Clone(), - withContract: wq.withContract.Clone(), - withIntegrationAttachments: wq.withIntegrationAttachments.Clone(), - withProject: wq.withProject.Clone(), - withLatestWorkflowRun: wq.withLatestWorkflowRun.Clone(), - withReferrers: wq.withReferrers.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]workflow.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Workflow{}, _q.predicates...), + withRobotaccounts: _q.withRobotaccounts.Clone(), + withWorkflowruns: _q.withWorkflowruns.Clone(), + withOrganization: _q.withOrganization.Clone(), + withContract: _q.withContract.Clone(), + withIntegrationAttachments: _q.withIntegrationAttachments.Clone(), + withProject: _q.withProject.Clone(), + withLatestWorkflowRun: _q.withLatestWorkflowRun.Clone(), + withReferrers: _q.withReferrers.Clone(), // clone intermediate query. - sql: wq.sql.Clone(), - path: wq.path, - modifiers: append([]func(*sql.Selector){}, wq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithRobotaccounts tells the query-builder to eager-load the nodes that are connected to // the "robotaccounts" edge. The optional arguments are used to configure the query builder of the edge. 
-func (wq *WorkflowQuery) WithRobotaccounts(opts ...func(*RobotAccountQuery)) *WorkflowQuery { - query := (&RobotAccountClient{config: wq.config}).Query() +func (_q *WorkflowQuery) WithRobotaccounts(opts ...func(*RobotAccountQuery)) *WorkflowQuery { + query := (&RobotAccountClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wq.withRobotaccounts = query - return wq + _q.withRobotaccounts = query + return _q } // WithWorkflowruns tells the query-builder to eager-load the nodes that are connected to // the "workflowruns" edge. The optional arguments are used to configure the query builder of the edge. -func (wq *WorkflowQuery) WithWorkflowruns(opts ...func(*WorkflowRunQuery)) *WorkflowQuery { - query := (&WorkflowRunClient{config: wq.config}).Query() +func (_q *WorkflowQuery) WithWorkflowruns(opts ...func(*WorkflowRunQuery)) *WorkflowQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wq.withWorkflowruns = query - return wq + _q.withWorkflowruns = query + return _q } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (wq *WorkflowQuery) WithOrganization(opts ...func(*OrganizationQuery)) *WorkflowQuery { - query := (&OrganizationClient{config: wq.config}).Query() +func (_q *WorkflowQuery) WithOrganization(opts ...func(*OrganizationQuery)) *WorkflowQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wq.withOrganization = query - return wq + _q.withOrganization = query + return _q } // WithContract tells the query-builder to eager-load the nodes that are connected to // the "contract" edge. The optional arguments are used to configure the query builder of the edge. -func (wq *WorkflowQuery) WithContract(opts ...func(*WorkflowContractQuery)) *WorkflowQuery { - query := (&WorkflowContractClient{config: wq.config}).Query() +func (_q *WorkflowQuery) WithContract(opts ...func(*WorkflowContractQuery)) *WorkflowQuery { + query := (&WorkflowContractClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wq.withContract = query - return wq + _q.withContract = query + return _q } // WithIntegrationAttachments tells the query-builder to eager-load the nodes that are connected to // the "integration_attachments" edge. The optional arguments are used to configure the query builder of the edge. -func (wq *WorkflowQuery) WithIntegrationAttachments(opts ...func(*IntegrationAttachmentQuery)) *WorkflowQuery { - query := (&IntegrationAttachmentClient{config: wq.config}).Query() +func (_q *WorkflowQuery) WithIntegrationAttachments(opts ...func(*IntegrationAttachmentQuery)) *WorkflowQuery { + query := (&IntegrationAttachmentClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wq.withIntegrationAttachments = query - return wq + _q.withIntegrationAttachments = query + return _q } // WithProject tells the query-builder to eager-load the nodes that are connected to // the "project" edge. The optional arguments are used to configure the query builder of the edge. 
-func (wq *WorkflowQuery) WithProject(opts ...func(*ProjectQuery)) *WorkflowQuery { - query := (&ProjectClient{config: wq.config}).Query() +func (_q *WorkflowQuery) WithProject(opts ...func(*ProjectQuery)) *WorkflowQuery { + query := (&ProjectClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wq.withProject = query - return wq + _q.withProject = query + return _q } // WithLatestWorkflowRun tells the query-builder to eager-load the nodes that are connected to // the "latest_workflow_run" edge. The optional arguments are used to configure the query builder of the edge. -func (wq *WorkflowQuery) WithLatestWorkflowRun(opts ...func(*WorkflowRunQuery)) *WorkflowQuery { - query := (&WorkflowRunClient{config: wq.config}).Query() +func (_q *WorkflowQuery) WithLatestWorkflowRun(opts ...func(*WorkflowRunQuery)) *WorkflowQuery { + query := (&WorkflowRunClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wq.withLatestWorkflowRun = query - return wq + _q.withLatestWorkflowRun = query + return _q } // WithReferrers tells the query-builder to eager-load the nodes that are connected to // the "referrers" edge. The optional arguments are used to configure the query builder of the edge. -func (wq *WorkflowQuery) WithReferrers(opts ...func(*ReferrerQuery)) *WorkflowQuery { - query := (&ReferrerClient{config: wq.config}).Query() +func (_q *WorkflowQuery) WithReferrers(opts ...func(*ReferrerQuery)) *WorkflowQuery { + query := (&ReferrerClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wq.withReferrers = query - return wq + _q.withReferrers = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -563,10 +563,10 @@ func (wq *WorkflowQuery) WithReferrers(opts ...func(*ReferrerQuery)) *WorkflowQu // GroupBy(workflow.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (wq *WorkflowQuery) GroupBy(field string, fields ...string) *WorkflowGroupBy { - wq.ctx.Fields = append([]string{field}, fields...) - grbuild := &WorkflowGroupBy{build: wq} - grbuild.flds = &wq.ctx.Fields +func (_q *WorkflowQuery) GroupBy(field string, fields ...string) *WorkflowGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &WorkflowGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = workflow.Label grbuild.scan = grbuild.Scan return grbuild @@ -584,62 +584,62 @@ func (wq *WorkflowQuery) GroupBy(field string, fields ...string) *WorkflowGroupB // client.Workflow.Query(). // Select(workflow.FieldName). // Scan(ctx, &v) -func (wq *WorkflowQuery) Select(fields ...string) *WorkflowSelect { - wq.ctx.Fields = append(wq.ctx.Fields, fields...) - sbuild := &WorkflowSelect{WorkflowQuery: wq} +func (_q *WorkflowQuery) Select(fields ...string) *WorkflowSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &WorkflowSelect{WorkflowQuery: _q} sbuild.label = workflow.Label - sbuild.flds, sbuild.scan = &wq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a WorkflowSelect configured with the given aggregations. -func (wq *WorkflowQuery) Aggregate(fns ...AggregateFunc) *WorkflowSelect { - return wq.Select().Aggregate(fns...) +func (_q *WorkflowQuery) Aggregate(fns ...AggregateFunc) *WorkflowSelect { + return _q.Select().Aggregate(fns...) 
} -func (wq *WorkflowQuery) prepareQuery(ctx context.Context) error { - for _, inter := range wq.inters { +func (_q *WorkflowQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, wq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range wq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !workflow.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if wq.path != nil { - prev, err := wq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - wq.sql = prev + _q.sql = prev } return nil } -func (wq *WorkflowQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Workflow, error) { +func (_q *WorkflowQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Workflow, error) { var ( nodes = []*Workflow{} - withFKs = wq.withFKs - _spec = wq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [8]bool{ - wq.withRobotaccounts != nil, - wq.withWorkflowruns != nil, - wq.withOrganization != nil, - wq.withContract != nil, - wq.withIntegrationAttachments != nil, - wq.withProject != nil, - wq.withLatestWorkflowRun != nil, - wq.withReferrers != nil, + _q.withRobotaccounts != nil, + _q.withWorkflowruns != nil, + _q.withOrganization != nil, + _q.withContract != nil, + _q.withIntegrationAttachments != nil, + _q.withProject != nil, + _q.withLatestWorkflowRun != nil, + _q.withReferrers != nil, } ) - if wq.withContract != nil { + if _q.withContract != nil { withFKs = true } if withFKs { @@ -649,51 +649,51 @@ func (wq *WorkflowQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Wor return (*Workflow).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &Workflow{config: wq.config} + node := &Workflow{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(wq.modifiers) > 0 { - _spec.Modifiers = wq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, wq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := wq.withRobotaccounts; query != nil { - if err := wq.loadRobotaccounts(ctx, query, nodes, + if query := _q.withRobotaccounts; query != nil { + if err := _q.loadRobotaccounts(ctx, query, nodes, func(n *Workflow) { n.Edges.Robotaccounts = []*RobotAccount{} }, func(n *Workflow, e *RobotAccount) { n.Edges.Robotaccounts = append(n.Edges.Robotaccounts, e) }); err != nil { return nil, err } } - if query := wq.withWorkflowruns; query != nil { - if err := wq.loadWorkflowruns(ctx, query, nodes, + if query := _q.withWorkflowruns; query != nil { + if err := _q.loadWorkflowruns(ctx, query, nodes, func(n *Workflow) { n.Edges.Workflowruns = []*WorkflowRun{} }, func(n *Workflow, e *WorkflowRun) { n.Edges.Workflowruns = append(n.Edges.Workflowruns, e) }); err != nil { return nil, err } } - if query := wq.withOrganization; query != nil { - if err := wq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *Workflow, e 
*Organization) { n.Edges.Organization = e }); err != nil { return nil, err } } - if query := wq.withContract; query != nil { - if err := wq.loadContract(ctx, query, nodes, nil, + if query := _q.withContract; query != nil { + if err := _q.loadContract(ctx, query, nodes, nil, func(n *Workflow, e *WorkflowContract) { n.Edges.Contract = e }); err != nil { return nil, err } } - if query := wq.withIntegrationAttachments; query != nil { - if err := wq.loadIntegrationAttachments(ctx, query, nodes, + if query := _q.withIntegrationAttachments; query != nil { + if err := _q.loadIntegrationAttachments(ctx, query, nodes, func(n *Workflow) { n.Edges.IntegrationAttachments = []*IntegrationAttachment{} }, func(n *Workflow, e *IntegrationAttachment) { n.Edges.IntegrationAttachments = append(n.Edges.IntegrationAttachments, e) @@ -701,20 +701,20 @@ func (wq *WorkflowQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Wor return nil, err } } - if query := wq.withProject; query != nil { - if err := wq.loadProject(ctx, query, nodes, nil, + if query := _q.withProject; query != nil { + if err := _q.loadProject(ctx, query, nodes, nil, func(n *Workflow, e *Project) { n.Edges.Project = e }); err != nil { return nil, err } } - if query := wq.withLatestWorkflowRun; query != nil { - if err := wq.loadLatestWorkflowRun(ctx, query, nodes, nil, + if query := _q.withLatestWorkflowRun; query != nil { + if err := _q.loadLatestWorkflowRun(ctx, query, nodes, nil, func(n *Workflow, e *WorkflowRun) { n.Edges.LatestWorkflowRun = e }); err != nil { return nil, err } } - if query := wq.withReferrers; query != nil { - if err := wq.loadReferrers(ctx, query, nodes, + if query := _q.withReferrers; query != nil { + if err := _q.loadReferrers(ctx, query, nodes, func(n *Workflow) { n.Edges.Referrers = []*Referrer{} }, func(n *Workflow, e *Referrer) { n.Edges.Referrers = append(n.Edges.Referrers, e) }); err != nil { return nil, err @@ -723,7 +723,7 @@ func (wq *WorkflowQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Wor return nodes, nil } -func (wq *WorkflowQuery) loadRobotaccounts(ctx context.Context, query *RobotAccountQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *RobotAccount)) error { +func (_q *WorkflowQuery) loadRobotaccounts(ctx context.Context, query *RobotAccountQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *RobotAccount)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Workflow) for i := range nodes { @@ -754,7 +754,7 @@ func (wq *WorkflowQuery) loadRobotaccounts(ctx context.Context, query *RobotAcco } return nil } -func (wq *WorkflowQuery) loadWorkflowruns(ctx context.Context, query *WorkflowRunQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *WorkflowRun)) error { +func (_q *WorkflowQuery) loadWorkflowruns(ctx context.Context, query *WorkflowRunQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *WorkflowRun)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Workflow) for i := range nodes { @@ -785,7 +785,7 @@ func (wq *WorkflowQuery) loadWorkflowruns(ctx context.Context, query *WorkflowRu } return nil } -func (wq *WorkflowQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *Organization)) error { +func (_q *WorkflowQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *Organization)) error { 
ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Workflow) for i := range nodes { @@ -814,7 +814,7 @@ func (wq *WorkflowQuery) loadOrganization(ctx context.Context, query *Organizati } return nil } -func (wq *WorkflowQuery) loadContract(ctx context.Context, query *WorkflowContractQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *WorkflowContract)) error { +func (_q *WorkflowQuery) loadContract(ctx context.Context, query *WorkflowContractQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *WorkflowContract)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Workflow) for i := range nodes { @@ -846,7 +846,7 @@ func (wq *WorkflowQuery) loadContract(ctx context.Context, query *WorkflowContra } return nil } -func (wq *WorkflowQuery) loadIntegrationAttachments(ctx context.Context, query *IntegrationAttachmentQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *IntegrationAttachment)) error { +func (_q *WorkflowQuery) loadIntegrationAttachments(ctx context.Context, query *IntegrationAttachmentQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *IntegrationAttachment)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Workflow) for i := range nodes { @@ -877,7 +877,7 @@ func (wq *WorkflowQuery) loadIntegrationAttachments(ctx context.Context, query * } return nil } -func (wq *WorkflowQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *Project)) error { +func (_q *WorkflowQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *Project)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Workflow) for i := range nodes { @@ -906,7 +906,7 @@ func (wq *WorkflowQuery) loadProject(ctx context.Context, query *ProjectQuery, n } return nil } -func (wq *WorkflowQuery) loadLatestWorkflowRun(ctx context.Context, query *WorkflowRunQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *WorkflowRun)) error { +func (_q *WorkflowQuery) loadLatestWorkflowRun(ctx context.Context, query *WorkflowRunQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *WorkflowRun)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Workflow) for i := range nodes { @@ -938,7 +938,7 @@ func (wq *WorkflowQuery) loadLatestWorkflowRun(ctx context.Context, query *Workf } return nil } -func (wq *WorkflowQuery) loadReferrers(ctx context.Context, query *ReferrerQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *Referrer)) error { +func (_q *WorkflowQuery) loadReferrers(ctx context.Context, query *ReferrerQuery, nodes []*Workflow, init func(*Workflow), assign func(*Workflow, *Referrer)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[uuid.UUID]*Workflow) nids := make(map[uuid.UUID]map[*Workflow]struct{}) @@ -1000,27 +1000,27 @@ func (wq *WorkflowQuery) loadReferrers(ctx context.Context, query *ReferrerQuery return nil } -func (wq *WorkflowQuery) sqlCount(ctx context.Context) (int, error) { - _spec := wq.querySpec() - if len(wq.modifiers) > 0 { - _spec.Modifiers = wq.modifiers +func (_q *WorkflowQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = wq.ctx.Fields - if len(wq.ctx.Fields) > 0 { - 
_spec.Unique = wq.ctx.Unique != nil && *wq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, wq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (wq *WorkflowQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *WorkflowQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(workflow.Table, workflow.Columns, sqlgraph.NewFieldSpec(workflow.FieldID, field.TypeUUID)) - _spec.From = wq.sql - if unique := wq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if wq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := wq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, workflow.FieldID) for i := range fields { @@ -1028,30 +1028,30 @@ func (wq *WorkflowQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if wq.withOrganization != nil { + if _q.withOrganization != nil { _spec.Node.AddColumnOnce(workflow.FieldOrganizationID) } - if wq.withProject != nil { + if _q.withProject != nil { _spec.Node.AddColumnOnce(workflow.FieldProjectID) } - if wq.withLatestWorkflowRun != nil { + if _q.withLatestWorkflowRun != nil { _spec.Node.AddColumnOnce(workflow.FieldLatestRun) } } - if ps := wq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := wq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := wq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := wq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -1061,36 +1061,36 @@ func (wq *WorkflowQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (wq *WorkflowQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(wq.driver.Dialect()) +func (_q *WorkflowQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(workflow.Table) - columns := wq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = workflow.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if wq.sql != nil { - selector = wq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if wq.ctx.Unique != nil && *wq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range wq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range wq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range wq.order { + for _, p := range _q.order { p(selector) } - if offset := wq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. 
selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := wq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -1099,33 +1099,33 @@ func (wq *WorkflowQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (wq *WorkflowQuery) ForUpdate(opts ...sql.LockOption) *WorkflowQuery { - if wq.driver.Dialect() == dialect.Postgres { - wq.Unique(false) +func (_q *WorkflowQuery) ForUpdate(opts ...sql.LockOption) *WorkflowQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - wq.modifiers = append(wq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return wq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (wq *WorkflowQuery) ForShare(opts ...sql.LockOption) *WorkflowQuery { - if wq.driver.Dialect() == dialect.Postgres { - wq.Unique(false) +func (_q *WorkflowQuery) ForShare(opts ...sql.LockOption) *WorkflowQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - wq.modifiers = append(wq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return wq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (wq *WorkflowQuery) Modify(modifiers ...func(s *sql.Selector)) *WorkflowSelect { - wq.modifiers = append(wq.modifiers, modifiers...) - return wq.Select() +func (_q *WorkflowQuery) Modify(modifiers ...func(s *sql.Selector)) *WorkflowSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // WorkflowGroupBy is the group-by builder for Workflow entities. @@ -1135,41 +1135,41 @@ type WorkflowGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (wgb *WorkflowGroupBy) Aggregate(fns ...AggregateFunc) *WorkflowGroupBy { - wgb.fns = append(wgb.fns, fns...) - return wgb +func (_g *WorkflowGroupBy) Aggregate(fns ...AggregateFunc) *WorkflowGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (wgb *WorkflowGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, wgb.build.ctx, ent.OpQueryGroupBy) - if err := wgb.build.prepareQuery(ctx); err != nil { +func (_g *WorkflowGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*WorkflowQuery, *WorkflowGroupBy](ctx, wgb.build, wgb, wgb.build.inters, v) + return scanWithInterceptors[*WorkflowQuery, *WorkflowGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (wgb *WorkflowGroupBy) sqlScan(ctx context.Context, root *WorkflowQuery, v any) error { +func (_g *WorkflowGroupBy) sqlScan(ctx context.Context, root *WorkflowQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(wgb.fns)) - for _, fn := range wgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*wgb.flds)+len(wgb.fns)) - for _, f := range *wgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*wgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := wgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -1183,27 +1183,27 @@ type WorkflowSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (ws *WorkflowSelect) Aggregate(fns ...AggregateFunc) *WorkflowSelect { - ws.fns = append(ws.fns, fns...) - return ws +func (_s *WorkflowSelect) Aggregate(fns ...AggregateFunc) *WorkflowSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (ws *WorkflowSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ws.ctx, ent.OpQuerySelect) - if err := ws.prepareQuery(ctx); err != nil { +func (_s *WorkflowSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*WorkflowQuery, *WorkflowSelect](ctx, ws.WorkflowQuery, ws, ws.inters, v) + return scanWithInterceptors[*WorkflowQuery, *WorkflowSelect](ctx, _s.WorkflowQuery, _s, _s.inters, v) } -func (ws *WorkflowSelect) sqlScan(ctx context.Context, root *WorkflowQuery, v any) error { +func (_s *WorkflowSelect) sqlScan(ctx context.Context, root *WorkflowQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ws.fns)) - for _, fn := range ws.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*ws.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -1211,7 +1211,7 @@ func (ws *WorkflowSelect) sqlScan(ctx context.Context, root *WorkflowQuery, v an } rows := &sql.Rows{} query, args := selector.Query() - if err := ws.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -1219,7 +1219,7 @@ func (ws *WorkflowSelect) sqlScan(ctx context.Context, root *WorkflowQuery, v an } // Modify adds a query modifier for attaching custom logic to queries. -func (ws *WorkflowSelect) Modify(modifiers ...func(s *sql.Selector)) *WorkflowSelect { - ws.modifiers = append(ws.modifiers, modifiers...) - return ws +func (_s *WorkflowSelect) Modify(modifiers ...func(s *sql.Selector)) *WorkflowSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/workflow_update.go b/app/controlplane/pkg/data/ent/workflow_update.go index e231371a4..01f5b2291 100644 --- a/app/controlplane/pkg/data/ent/workflow_update.go +++ b/app/controlplane/pkg/data/ent/workflow_update.go @@ -32,421 +32,421 @@ type WorkflowUpdate struct { } // Where appends a list predicates to the WorkflowUpdate builder. -func (wu *WorkflowUpdate) Where(ps ...predicate.Workflow) *WorkflowUpdate { - wu.mutation.Where(ps...) - return wu +func (_u *WorkflowUpdate) Where(ps ...predicate.Workflow) *WorkflowUpdate { + _u.mutation.Where(ps...) + return _u } // SetProjectOld sets the "project_old" field. -func (wu *WorkflowUpdate) SetProjectOld(s string) *WorkflowUpdate { - wu.mutation.SetProjectOld(s) - return wu +func (_u *WorkflowUpdate) SetProjectOld(v string) *WorkflowUpdate { + _u.mutation.SetProjectOld(v) + return _u } // SetNillableProjectOld sets the "project_old" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableProjectOld(s *string) *WorkflowUpdate { - if s != nil { - wu.SetProjectOld(*s) +func (_u *WorkflowUpdate) SetNillableProjectOld(v *string) *WorkflowUpdate { + if v != nil { + _u.SetProjectOld(*v) } - return wu + return _u } // ClearProjectOld clears the value of the "project_old" field. -func (wu *WorkflowUpdate) ClearProjectOld() *WorkflowUpdate { - wu.mutation.ClearProjectOld() - return wu +func (_u *WorkflowUpdate) ClearProjectOld() *WorkflowUpdate { + _u.mutation.ClearProjectOld() + return _u } // SetTeam sets the "team" field. -func (wu *WorkflowUpdate) SetTeam(s string) *WorkflowUpdate { - wu.mutation.SetTeam(s) - return wu +func (_u *WorkflowUpdate) SetTeam(v string) *WorkflowUpdate { + _u.mutation.SetTeam(v) + return _u } // SetNillableTeam sets the "team" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableTeam(s *string) *WorkflowUpdate { - if s != nil { - wu.SetTeam(*s) +func (_u *WorkflowUpdate) SetNillableTeam(v *string) *WorkflowUpdate { + if v != nil { + _u.SetTeam(*v) } - return wu + return _u } // ClearTeam clears the value of the "team" field. -func (wu *WorkflowUpdate) ClearTeam() *WorkflowUpdate { - wu.mutation.ClearTeam() - return wu +func (_u *WorkflowUpdate) ClearTeam() *WorkflowUpdate { + _u.mutation.ClearTeam() + return _u } // SetRunsCount sets the "runs_count" field. -func (wu *WorkflowUpdate) SetRunsCount(i int) *WorkflowUpdate { - wu.mutation.ResetRunsCount() - wu.mutation.SetRunsCount(i) - return wu +func (_u *WorkflowUpdate) SetRunsCount(v int) *WorkflowUpdate { + _u.mutation.ResetRunsCount() + _u.mutation.SetRunsCount(v) + return _u } // SetNillableRunsCount sets the "runs_count" field if the given value is not nil. 
-func (wu *WorkflowUpdate) SetNillableRunsCount(i *int) *WorkflowUpdate { - if i != nil { - wu.SetRunsCount(*i) +func (_u *WorkflowUpdate) SetNillableRunsCount(v *int) *WorkflowUpdate { + if v != nil { + _u.SetRunsCount(*v) } - return wu + return _u } -// AddRunsCount adds i to the "runs_count" field. -func (wu *WorkflowUpdate) AddRunsCount(i int) *WorkflowUpdate { - wu.mutation.AddRunsCount(i) - return wu +// AddRunsCount adds value to the "runs_count" field. +func (_u *WorkflowUpdate) AddRunsCount(v int) *WorkflowUpdate { + _u.mutation.AddRunsCount(v) + return _u } // SetUpdatedAt sets the "updated_at" field. -func (wu *WorkflowUpdate) SetUpdatedAt(t time.Time) *WorkflowUpdate { - wu.mutation.SetUpdatedAt(t) - return wu +func (_u *WorkflowUpdate) SetUpdatedAt(v time.Time) *WorkflowUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableUpdatedAt(t *time.Time) *WorkflowUpdate { - if t != nil { - wu.SetUpdatedAt(*t) +func (_u *WorkflowUpdate) SetNillableUpdatedAt(v *time.Time) *WorkflowUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return wu + return _u } // SetDeletedAt sets the "deleted_at" field. -func (wu *WorkflowUpdate) SetDeletedAt(t time.Time) *WorkflowUpdate { - wu.mutation.SetDeletedAt(t) - return wu +func (_u *WorkflowUpdate) SetDeletedAt(v time.Time) *WorkflowUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableDeletedAt(t *time.Time) *WorkflowUpdate { - if t != nil { - wu.SetDeletedAt(*t) +func (_u *WorkflowUpdate) SetNillableDeletedAt(v *time.Time) *WorkflowUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return wu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (wu *WorkflowUpdate) ClearDeletedAt() *WorkflowUpdate { - wu.mutation.ClearDeletedAt() - return wu +func (_u *WorkflowUpdate) ClearDeletedAt() *WorkflowUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetPublic sets the "public" field. -func (wu *WorkflowUpdate) SetPublic(b bool) *WorkflowUpdate { - wu.mutation.SetPublic(b) - return wu +func (_u *WorkflowUpdate) SetPublic(v bool) *WorkflowUpdate { + _u.mutation.SetPublic(v) + return _u } // SetNillablePublic sets the "public" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillablePublic(b *bool) *WorkflowUpdate { - if b != nil { - wu.SetPublic(*b) +func (_u *WorkflowUpdate) SetNillablePublic(v *bool) *WorkflowUpdate { + if v != nil { + _u.SetPublic(*v) } - return wu + return _u } // SetOrganizationID sets the "organization_id" field. -func (wu *WorkflowUpdate) SetOrganizationID(u uuid.UUID) *WorkflowUpdate { - wu.mutation.SetOrganizationID(u) - return wu +func (_u *WorkflowUpdate) SetOrganizationID(v uuid.UUID) *WorkflowUpdate { + _u.mutation.SetOrganizationID(v) + return _u } // SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableOrganizationID(u *uuid.UUID) *WorkflowUpdate { - if u != nil { - wu.SetOrganizationID(*u) +func (_u *WorkflowUpdate) SetNillableOrganizationID(v *uuid.UUID) *WorkflowUpdate { + if v != nil { + _u.SetOrganizationID(*v) } - return wu + return _u } // SetProjectID sets the "project_id" field. 
-func (wu *WorkflowUpdate) SetProjectID(u uuid.UUID) *WorkflowUpdate { - wu.mutation.SetProjectID(u) - return wu +func (_u *WorkflowUpdate) SetProjectID(v uuid.UUID) *WorkflowUpdate { + _u.mutation.SetProjectID(v) + return _u } // SetNillableProjectID sets the "project_id" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableProjectID(u *uuid.UUID) *WorkflowUpdate { - if u != nil { - wu.SetProjectID(*u) +func (_u *WorkflowUpdate) SetNillableProjectID(v *uuid.UUID) *WorkflowUpdate { + if v != nil { + _u.SetProjectID(*v) } - return wu + return _u } // SetLatestRun sets the "latest_run" field. -func (wu *WorkflowUpdate) SetLatestRun(u uuid.UUID) *WorkflowUpdate { - wu.mutation.SetLatestRun(u) - return wu +func (_u *WorkflowUpdate) SetLatestRun(v uuid.UUID) *WorkflowUpdate { + _u.mutation.SetLatestRun(v) + return _u } // SetNillableLatestRun sets the "latest_run" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableLatestRun(u *uuid.UUID) *WorkflowUpdate { - if u != nil { - wu.SetLatestRun(*u) +func (_u *WorkflowUpdate) SetNillableLatestRun(v *uuid.UUID) *WorkflowUpdate { + if v != nil { + _u.SetLatestRun(*v) } - return wu + return _u } // ClearLatestRun clears the value of the "latest_run" field. -func (wu *WorkflowUpdate) ClearLatestRun() *WorkflowUpdate { - wu.mutation.ClearLatestRun() - return wu +func (_u *WorkflowUpdate) ClearLatestRun() *WorkflowUpdate { + _u.mutation.ClearLatestRun() + return _u } // SetDescription sets the "description" field. -func (wu *WorkflowUpdate) SetDescription(s string) *WorkflowUpdate { - wu.mutation.SetDescription(s) - return wu +func (_u *WorkflowUpdate) SetDescription(v string) *WorkflowUpdate { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableDescription(s *string) *WorkflowUpdate { - if s != nil { - wu.SetDescription(*s) +func (_u *WorkflowUpdate) SetNillableDescription(v *string) *WorkflowUpdate { + if v != nil { + _u.SetDescription(*v) } - return wu + return _u } // ClearDescription clears the value of the "description" field. -func (wu *WorkflowUpdate) ClearDescription() *WorkflowUpdate { - wu.mutation.ClearDescription() - return wu +func (_u *WorkflowUpdate) ClearDescription() *WorkflowUpdate { + _u.mutation.ClearDescription() + return _u } // SetMetadata sets the "metadata" field. -func (wu *WorkflowUpdate) SetMetadata(m map[string]interface{}) *WorkflowUpdate { - wu.mutation.SetMetadata(m) - return wu +func (_u *WorkflowUpdate) SetMetadata(v map[string]interface{}) *WorkflowUpdate { + _u.mutation.SetMetadata(v) + return _u } // ClearMetadata clears the value of the "metadata" field. -func (wu *WorkflowUpdate) ClearMetadata() *WorkflowUpdate { - wu.mutation.ClearMetadata() - return wu +func (_u *WorkflowUpdate) ClearMetadata() *WorkflowUpdate { + _u.mutation.ClearMetadata() + return _u } // AddRobotaccountIDs adds the "robotaccounts" edge to the RobotAccount entity by IDs. -func (wu *WorkflowUpdate) AddRobotaccountIDs(ids ...uuid.UUID) *WorkflowUpdate { - wu.mutation.AddRobotaccountIDs(ids...) - return wu +func (_u *WorkflowUpdate) AddRobotaccountIDs(ids ...uuid.UUID) *WorkflowUpdate { + _u.mutation.AddRobotaccountIDs(ids...) + return _u } // AddRobotaccounts adds the "robotaccounts" edges to the RobotAccount entity. 
-func (wu *WorkflowUpdate) AddRobotaccounts(r ...*RobotAccount) *WorkflowUpdate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *WorkflowUpdate) AddRobotaccounts(v ...*RobotAccount) *WorkflowUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wu.AddRobotaccountIDs(ids...) + return _u.AddRobotaccountIDs(ids...) } // AddWorkflowrunIDs adds the "workflowruns" edge to the WorkflowRun entity by IDs. -func (wu *WorkflowUpdate) AddWorkflowrunIDs(ids ...uuid.UUID) *WorkflowUpdate { - wu.mutation.AddWorkflowrunIDs(ids...) - return wu +func (_u *WorkflowUpdate) AddWorkflowrunIDs(ids ...uuid.UUID) *WorkflowUpdate { + _u.mutation.AddWorkflowrunIDs(ids...) + return _u } // AddWorkflowruns adds the "workflowruns" edges to the WorkflowRun entity. -func (wu *WorkflowUpdate) AddWorkflowruns(w ...*WorkflowRun) *WorkflowUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowUpdate) AddWorkflowruns(v ...*WorkflowRun) *WorkflowUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wu.AddWorkflowrunIDs(ids...) + return _u.AddWorkflowrunIDs(ids...) } // SetOrganization sets the "organization" edge to the Organization entity. -func (wu *WorkflowUpdate) SetOrganization(o *Organization) *WorkflowUpdate { - return wu.SetOrganizationID(o.ID) +func (_u *WorkflowUpdate) SetOrganization(v *Organization) *WorkflowUpdate { + return _u.SetOrganizationID(v.ID) } // SetContractID sets the "contract" edge to the WorkflowContract entity by ID. -func (wu *WorkflowUpdate) SetContractID(id uuid.UUID) *WorkflowUpdate { - wu.mutation.SetContractID(id) - return wu +func (_u *WorkflowUpdate) SetContractID(id uuid.UUID) *WorkflowUpdate { + _u.mutation.SetContractID(id) + return _u } // SetContract sets the "contract" edge to the WorkflowContract entity. -func (wu *WorkflowUpdate) SetContract(w *WorkflowContract) *WorkflowUpdate { - return wu.SetContractID(w.ID) +func (_u *WorkflowUpdate) SetContract(v *WorkflowContract) *WorkflowUpdate { + return _u.SetContractID(v.ID) } // AddIntegrationAttachmentIDs adds the "integration_attachments" edge to the IntegrationAttachment entity by IDs. -func (wu *WorkflowUpdate) AddIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowUpdate { - wu.mutation.AddIntegrationAttachmentIDs(ids...) - return wu +func (_u *WorkflowUpdate) AddIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowUpdate { + _u.mutation.AddIntegrationAttachmentIDs(ids...) + return _u } // AddIntegrationAttachments adds the "integration_attachments" edges to the IntegrationAttachment entity. -func (wu *WorkflowUpdate) AddIntegrationAttachments(i ...*IntegrationAttachment) *WorkflowUpdate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *WorkflowUpdate) AddIntegrationAttachments(v ...*IntegrationAttachment) *WorkflowUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wu.AddIntegrationAttachmentIDs(ids...) + return _u.AddIntegrationAttachmentIDs(ids...) } // SetProject sets the "project" edge to the Project entity. -func (wu *WorkflowUpdate) SetProject(p *Project) *WorkflowUpdate { - return wu.SetProjectID(p.ID) +func (_u *WorkflowUpdate) SetProject(v *Project) *WorkflowUpdate { + return _u.SetProjectID(v.ID) } // SetLatestWorkflowRunID sets the "latest_workflow_run" edge to the WorkflowRun entity by ID. 
-func (wu *WorkflowUpdate) SetLatestWorkflowRunID(id uuid.UUID) *WorkflowUpdate { - wu.mutation.SetLatestWorkflowRunID(id) - return wu +func (_u *WorkflowUpdate) SetLatestWorkflowRunID(id uuid.UUID) *WorkflowUpdate { + _u.mutation.SetLatestWorkflowRunID(id) + return _u } // SetNillableLatestWorkflowRunID sets the "latest_workflow_run" edge to the WorkflowRun entity by ID if the given value is not nil. -func (wu *WorkflowUpdate) SetNillableLatestWorkflowRunID(id *uuid.UUID) *WorkflowUpdate { +func (_u *WorkflowUpdate) SetNillableLatestWorkflowRunID(id *uuid.UUID) *WorkflowUpdate { if id != nil { - wu = wu.SetLatestWorkflowRunID(*id) + _u = _u.SetLatestWorkflowRunID(*id) } - return wu + return _u } // SetLatestWorkflowRun sets the "latest_workflow_run" edge to the WorkflowRun entity. -func (wu *WorkflowUpdate) SetLatestWorkflowRun(w *WorkflowRun) *WorkflowUpdate { - return wu.SetLatestWorkflowRunID(w.ID) +func (_u *WorkflowUpdate) SetLatestWorkflowRun(v *WorkflowRun) *WorkflowUpdate { + return _u.SetLatestWorkflowRunID(v.ID) } // AddReferrerIDs adds the "referrers" edge to the Referrer entity by IDs. -func (wu *WorkflowUpdate) AddReferrerIDs(ids ...uuid.UUID) *WorkflowUpdate { - wu.mutation.AddReferrerIDs(ids...) - return wu +func (_u *WorkflowUpdate) AddReferrerIDs(ids ...uuid.UUID) *WorkflowUpdate { + _u.mutation.AddReferrerIDs(ids...) + return _u } // AddReferrers adds the "referrers" edges to the Referrer entity. -func (wu *WorkflowUpdate) AddReferrers(r ...*Referrer) *WorkflowUpdate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *WorkflowUpdate) AddReferrers(v ...*Referrer) *WorkflowUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wu.AddReferrerIDs(ids...) + return _u.AddReferrerIDs(ids...) } // Mutation returns the WorkflowMutation object of the builder. -func (wu *WorkflowUpdate) Mutation() *WorkflowMutation { - return wu.mutation +func (_u *WorkflowUpdate) Mutation() *WorkflowMutation { + return _u.mutation } // ClearRobotaccounts clears all "robotaccounts" edges to the RobotAccount entity. -func (wu *WorkflowUpdate) ClearRobotaccounts() *WorkflowUpdate { - wu.mutation.ClearRobotaccounts() - return wu +func (_u *WorkflowUpdate) ClearRobotaccounts() *WorkflowUpdate { + _u.mutation.ClearRobotaccounts() + return _u } // RemoveRobotaccountIDs removes the "robotaccounts" edge to RobotAccount entities by IDs. -func (wu *WorkflowUpdate) RemoveRobotaccountIDs(ids ...uuid.UUID) *WorkflowUpdate { - wu.mutation.RemoveRobotaccountIDs(ids...) - return wu +func (_u *WorkflowUpdate) RemoveRobotaccountIDs(ids ...uuid.UUID) *WorkflowUpdate { + _u.mutation.RemoveRobotaccountIDs(ids...) + return _u } // RemoveRobotaccounts removes "robotaccounts" edges to RobotAccount entities. -func (wu *WorkflowUpdate) RemoveRobotaccounts(r ...*RobotAccount) *WorkflowUpdate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *WorkflowUpdate) RemoveRobotaccounts(v ...*RobotAccount) *WorkflowUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wu.RemoveRobotaccountIDs(ids...) + return _u.RemoveRobotaccountIDs(ids...) } // ClearWorkflowruns clears all "workflowruns" edges to the WorkflowRun entity. 
-func (wu *WorkflowUpdate) ClearWorkflowruns() *WorkflowUpdate { - wu.mutation.ClearWorkflowruns() - return wu +func (_u *WorkflowUpdate) ClearWorkflowruns() *WorkflowUpdate { + _u.mutation.ClearWorkflowruns() + return _u } // RemoveWorkflowrunIDs removes the "workflowruns" edge to WorkflowRun entities by IDs. -func (wu *WorkflowUpdate) RemoveWorkflowrunIDs(ids ...uuid.UUID) *WorkflowUpdate { - wu.mutation.RemoveWorkflowrunIDs(ids...) - return wu +func (_u *WorkflowUpdate) RemoveWorkflowrunIDs(ids ...uuid.UUID) *WorkflowUpdate { + _u.mutation.RemoveWorkflowrunIDs(ids...) + return _u } // RemoveWorkflowruns removes "workflowruns" edges to WorkflowRun entities. -func (wu *WorkflowUpdate) RemoveWorkflowruns(w ...*WorkflowRun) *WorkflowUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowUpdate) RemoveWorkflowruns(v ...*WorkflowRun) *WorkflowUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wu.RemoveWorkflowrunIDs(ids...) + return _u.RemoveWorkflowrunIDs(ids...) } // ClearOrganization clears the "organization" edge to the Organization entity. -func (wu *WorkflowUpdate) ClearOrganization() *WorkflowUpdate { - wu.mutation.ClearOrganization() - return wu +func (_u *WorkflowUpdate) ClearOrganization() *WorkflowUpdate { + _u.mutation.ClearOrganization() + return _u } // ClearContract clears the "contract" edge to the WorkflowContract entity. -func (wu *WorkflowUpdate) ClearContract() *WorkflowUpdate { - wu.mutation.ClearContract() - return wu +func (_u *WorkflowUpdate) ClearContract() *WorkflowUpdate { + _u.mutation.ClearContract() + return _u } // ClearIntegrationAttachments clears all "integration_attachments" edges to the IntegrationAttachment entity. -func (wu *WorkflowUpdate) ClearIntegrationAttachments() *WorkflowUpdate { - wu.mutation.ClearIntegrationAttachments() - return wu +func (_u *WorkflowUpdate) ClearIntegrationAttachments() *WorkflowUpdate { + _u.mutation.ClearIntegrationAttachments() + return _u } // RemoveIntegrationAttachmentIDs removes the "integration_attachments" edge to IntegrationAttachment entities by IDs. -func (wu *WorkflowUpdate) RemoveIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowUpdate { - wu.mutation.RemoveIntegrationAttachmentIDs(ids...) - return wu +func (_u *WorkflowUpdate) RemoveIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowUpdate { + _u.mutation.RemoveIntegrationAttachmentIDs(ids...) + return _u } // RemoveIntegrationAttachments removes "integration_attachments" edges to IntegrationAttachment entities. -func (wu *WorkflowUpdate) RemoveIntegrationAttachments(i ...*IntegrationAttachment) *WorkflowUpdate { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *WorkflowUpdate) RemoveIntegrationAttachments(v ...*IntegrationAttachment) *WorkflowUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wu.RemoveIntegrationAttachmentIDs(ids...) + return _u.RemoveIntegrationAttachmentIDs(ids...) } // ClearProject clears the "project" edge to the Project entity. -func (wu *WorkflowUpdate) ClearProject() *WorkflowUpdate { - wu.mutation.ClearProject() - return wu +func (_u *WorkflowUpdate) ClearProject() *WorkflowUpdate { + _u.mutation.ClearProject() + return _u } // ClearLatestWorkflowRun clears the "latest_workflow_run" edge to the WorkflowRun entity. 
-func (wu *WorkflowUpdate) ClearLatestWorkflowRun() *WorkflowUpdate { - wu.mutation.ClearLatestWorkflowRun() - return wu +func (_u *WorkflowUpdate) ClearLatestWorkflowRun() *WorkflowUpdate { + _u.mutation.ClearLatestWorkflowRun() + return _u } // ClearReferrers clears all "referrers" edges to the Referrer entity. -func (wu *WorkflowUpdate) ClearReferrers() *WorkflowUpdate { - wu.mutation.ClearReferrers() - return wu +func (_u *WorkflowUpdate) ClearReferrers() *WorkflowUpdate { + _u.mutation.ClearReferrers() + return _u } // RemoveReferrerIDs removes the "referrers" edge to Referrer entities by IDs. -func (wu *WorkflowUpdate) RemoveReferrerIDs(ids ...uuid.UUID) *WorkflowUpdate { - wu.mutation.RemoveReferrerIDs(ids...) - return wu +func (_u *WorkflowUpdate) RemoveReferrerIDs(ids ...uuid.UUID) *WorkflowUpdate { + _u.mutation.RemoveReferrerIDs(ids...) + return _u } // RemoveReferrers removes "referrers" edges to Referrer entities. -func (wu *WorkflowUpdate) RemoveReferrers(r ...*Referrer) *WorkflowUpdate { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *WorkflowUpdate) RemoveReferrers(v ...*Referrer) *WorkflowUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wu.RemoveReferrerIDs(ids...) + return _u.RemoveReferrerIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (wu *WorkflowUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, wu.sqlSave, wu.mutation, wu.hooks) +func (_u *WorkflowUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (wu *WorkflowUpdate) SaveX(ctx context.Context) int { - affected, err := wu.Save(ctx) +func (_u *WorkflowUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -454,93 +454,93 @@ func (wu *WorkflowUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (wu *WorkflowUpdate) Exec(ctx context.Context) error { - _, err := wu.Save(ctx) +func (_u *WorkflowUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wu *WorkflowUpdate) ExecX(ctx context.Context) { - if err := wu.Exec(ctx); err != nil { +func (_u *WorkflowUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (wu *WorkflowUpdate) check() error { - if wu.mutation.OrganizationCleared() && len(wu.mutation.OrganizationIDs()) > 0 { +func (_u *WorkflowUpdate) check() error { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Workflow.organization"`) } - if wu.mutation.ContractCleared() && len(wu.mutation.ContractIDs()) > 0 { + if _u.mutation.ContractCleared() && len(_u.mutation.ContractIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Workflow.contract"`) } - if wu.mutation.ProjectCleared() && len(wu.mutation.ProjectIDs()) > 0 { + if _u.mutation.ProjectCleared() && len(_u.mutation.ProjectIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Workflow.project"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. 
-func (wu *WorkflowUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowUpdate { - wu.modifiers = append(wu.modifiers, modifiers...) - return wu +func (_u *WorkflowUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := wu.check(); err != nil { - return n, err +func (_u *WorkflowUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(workflow.Table, workflow.Columns, sqlgraph.NewFieldSpec(workflow.FieldID, field.TypeUUID)) - if ps := wu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := wu.mutation.ProjectOld(); ok { + if value, ok := _u.mutation.ProjectOld(); ok { _spec.SetField(workflow.FieldProjectOld, field.TypeString, value) } - if wu.mutation.ProjectOldCleared() { + if _u.mutation.ProjectOldCleared() { _spec.ClearField(workflow.FieldProjectOld, field.TypeString) } - if value, ok := wu.mutation.Team(); ok { + if value, ok := _u.mutation.Team(); ok { _spec.SetField(workflow.FieldTeam, field.TypeString, value) } - if wu.mutation.TeamCleared() { + if _u.mutation.TeamCleared() { _spec.ClearField(workflow.FieldTeam, field.TypeString) } - if value, ok := wu.mutation.RunsCount(); ok { + if value, ok := _u.mutation.RunsCount(); ok { _spec.SetField(workflow.FieldRunsCount, field.TypeInt, value) } - if value, ok := wu.mutation.AddedRunsCount(); ok { + if value, ok := _u.mutation.AddedRunsCount(); ok { _spec.AddField(workflow.FieldRunsCount, field.TypeInt, value) } - if value, ok := wu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(workflow.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := wu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(workflow.FieldDeletedAt, field.TypeTime, value) } - if wu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(workflow.FieldDeletedAt, field.TypeTime) } - if value, ok := wu.mutation.Public(); ok { + if value, ok := _u.mutation.Public(); ok { _spec.SetField(workflow.FieldPublic, field.TypeBool, value) } - if value, ok := wu.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(workflow.FieldDescription, field.TypeString, value) } - if wu.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(workflow.FieldDescription, field.TypeString) } - if value, ok := wu.mutation.Metadata(); ok { + if value, ok := _u.mutation.Metadata(); ok { _spec.SetField(workflow.FieldMetadata, field.TypeJSON, value) } - if wu.mutation.MetadataCleared() { + if _u.mutation.MetadataCleared() { _spec.ClearField(workflow.FieldMetadata, field.TypeJSON) } - if wu.mutation.RobotaccountsCleared() { + if _u.mutation.RobotaccountsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -553,7 +553,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.RemovedRobotaccountsIDs(); len(nodes) > 0 && !wu.mutation.RobotaccountsCleared() { + if nodes := _u.mutation.RemovedRobotaccountsIDs(); len(nodes) > 0 && !_u.mutation.RobotaccountsCleared() { edge := 
&sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -569,7 +569,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.RobotaccountsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.RobotaccountsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -585,7 +585,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wu.mutation.WorkflowrunsCleared() { + if _u.mutation.WorkflowrunsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -598,7 +598,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.RemovedWorkflowrunsIDs(); len(nodes) > 0 && !wu.mutation.WorkflowrunsCleared() { + if nodes := _u.mutation.RemovedWorkflowrunsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowrunsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -614,7 +614,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.WorkflowrunsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowrunsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -630,7 +630,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wu.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -643,7 +643,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -659,7 +659,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wu.mutation.ContractCleared() { + if _u.mutation.ContractCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -672,7 +672,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.ContractIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ContractIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -688,7 +688,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wu.mutation.IntegrationAttachmentsCleared() { + if _u.mutation.IntegrationAttachmentsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -701,7 +701,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.RemovedIntegrationAttachmentsIDs(); len(nodes) > 0 && !wu.mutation.IntegrationAttachmentsCleared() { + if nodes := _u.mutation.RemovedIntegrationAttachmentsIDs(); len(nodes) > 0 && !_u.mutation.IntegrationAttachmentsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -717,7 +717,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx 
context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.IntegrationAttachmentsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.IntegrationAttachmentsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -733,7 +733,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wu.mutation.ProjectCleared() { + if _u.mutation.ProjectCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -746,7 +746,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -762,7 +762,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wu.mutation.LatestWorkflowRunCleared() { + if _u.mutation.LatestWorkflowRunCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -775,7 +775,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.LatestWorkflowRunIDs(); len(nodes) > 0 { + if nodes := _u.mutation.LatestWorkflowRunIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -791,7 +791,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wu.mutation.ReferrersCleared() { + if _u.mutation.ReferrersCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -804,7 +804,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.RemovedReferrersIDs(); len(nodes) > 0 && !wu.mutation.ReferrersCleared() { + if nodes := _u.mutation.RemovedReferrersIDs(); len(nodes) > 0 && !_u.mutation.ReferrersCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -820,7 +820,7 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wu.mutation.ReferrersIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ReferrersIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -836,8 +836,8 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(wu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, wu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{workflow.Label} } else if sqlgraph.IsConstraintError(err) { @@ -845,8 +845,8 @@ func (wu *WorkflowUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - wu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // WorkflowUpdateOne is the builder for updating a single Workflow entity. @@ -859,428 +859,428 @@ type WorkflowUpdateOne struct { } // SetProjectOld sets the "project_old" field. 
-func (wuo *WorkflowUpdateOne) SetProjectOld(s string) *WorkflowUpdateOne { - wuo.mutation.SetProjectOld(s) - return wuo +func (_u *WorkflowUpdateOne) SetProjectOld(v string) *WorkflowUpdateOne { + _u.mutation.SetProjectOld(v) + return _u } // SetNillableProjectOld sets the "project_old" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillableProjectOld(s *string) *WorkflowUpdateOne { - if s != nil { - wuo.SetProjectOld(*s) +func (_u *WorkflowUpdateOne) SetNillableProjectOld(v *string) *WorkflowUpdateOne { + if v != nil { + _u.SetProjectOld(*v) } - return wuo + return _u } // ClearProjectOld clears the value of the "project_old" field. -func (wuo *WorkflowUpdateOne) ClearProjectOld() *WorkflowUpdateOne { - wuo.mutation.ClearProjectOld() - return wuo +func (_u *WorkflowUpdateOne) ClearProjectOld() *WorkflowUpdateOne { + _u.mutation.ClearProjectOld() + return _u } // SetTeam sets the "team" field. -func (wuo *WorkflowUpdateOne) SetTeam(s string) *WorkflowUpdateOne { - wuo.mutation.SetTeam(s) - return wuo +func (_u *WorkflowUpdateOne) SetTeam(v string) *WorkflowUpdateOne { + _u.mutation.SetTeam(v) + return _u } // SetNillableTeam sets the "team" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillableTeam(s *string) *WorkflowUpdateOne { - if s != nil { - wuo.SetTeam(*s) +func (_u *WorkflowUpdateOne) SetNillableTeam(v *string) *WorkflowUpdateOne { + if v != nil { + _u.SetTeam(*v) } - return wuo + return _u } // ClearTeam clears the value of the "team" field. -func (wuo *WorkflowUpdateOne) ClearTeam() *WorkflowUpdateOne { - wuo.mutation.ClearTeam() - return wuo +func (_u *WorkflowUpdateOne) ClearTeam() *WorkflowUpdateOne { + _u.mutation.ClearTeam() + return _u } // SetRunsCount sets the "runs_count" field. -func (wuo *WorkflowUpdateOne) SetRunsCount(i int) *WorkflowUpdateOne { - wuo.mutation.ResetRunsCount() - wuo.mutation.SetRunsCount(i) - return wuo +func (_u *WorkflowUpdateOne) SetRunsCount(v int) *WorkflowUpdateOne { + _u.mutation.ResetRunsCount() + _u.mutation.SetRunsCount(v) + return _u } // SetNillableRunsCount sets the "runs_count" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillableRunsCount(i *int) *WorkflowUpdateOne { - if i != nil { - wuo.SetRunsCount(*i) +func (_u *WorkflowUpdateOne) SetNillableRunsCount(v *int) *WorkflowUpdateOne { + if v != nil { + _u.SetRunsCount(*v) } - return wuo + return _u } -// AddRunsCount adds i to the "runs_count" field. -func (wuo *WorkflowUpdateOne) AddRunsCount(i int) *WorkflowUpdateOne { - wuo.mutation.AddRunsCount(i) - return wuo +// AddRunsCount adds value to the "runs_count" field. +func (_u *WorkflowUpdateOne) AddRunsCount(v int) *WorkflowUpdateOne { + _u.mutation.AddRunsCount(v) + return _u } // SetUpdatedAt sets the "updated_at" field. -func (wuo *WorkflowUpdateOne) SetUpdatedAt(t time.Time) *WorkflowUpdateOne { - wuo.mutation.SetUpdatedAt(t) - return wuo +func (_u *WorkflowUpdateOne) SetUpdatedAt(v time.Time) *WorkflowUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillableUpdatedAt(t *time.Time) *WorkflowUpdateOne { - if t != nil { - wuo.SetUpdatedAt(*t) +func (_u *WorkflowUpdateOne) SetNillableUpdatedAt(v *time.Time) *WorkflowUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return wuo + return _u } // SetDeletedAt sets the "deleted_at" field. 
-func (wuo *WorkflowUpdateOne) SetDeletedAt(t time.Time) *WorkflowUpdateOne { - wuo.mutation.SetDeletedAt(t) - return wuo +func (_u *WorkflowUpdateOne) SetDeletedAt(v time.Time) *WorkflowUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillableDeletedAt(t *time.Time) *WorkflowUpdateOne { - if t != nil { - wuo.SetDeletedAt(*t) +func (_u *WorkflowUpdateOne) SetNillableDeletedAt(v *time.Time) *WorkflowUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return wuo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (wuo *WorkflowUpdateOne) ClearDeletedAt() *WorkflowUpdateOne { - wuo.mutation.ClearDeletedAt() - return wuo +func (_u *WorkflowUpdateOne) ClearDeletedAt() *WorkflowUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetPublic sets the "public" field. -func (wuo *WorkflowUpdateOne) SetPublic(b bool) *WorkflowUpdateOne { - wuo.mutation.SetPublic(b) - return wuo +func (_u *WorkflowUpdateOne) SetPublic(v bool) *WorkflowUpdateOne { + _u.mutation.SetPublic(v) + return _u } // SetNillablePublic sets the "public" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillablePublic(b *bool) *WorkflowUpdateOne { - if b != nil { - wuo.SetPublic(*b) +func (_u *WorkflowUpdateOne) SetNillablePublic(v *bool) *WorkflowUpdateOne { + if v != nil { + _u.SetPublic(*v) } - return wuo + return _u } // SetOrganizationID sets the "organization_id" field. -func (wuo *WorkflowUpdateOne) SetOrganizationID(u uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.SetOrganizationID(u) - return wuo +func (_u *WorkflowUpdateOne) SetOrganizationID(v uuid.UUID) *WorkflowUpdateOne { + _u.mutation.SetOrganizationID(v) + return _u } // SetNillableOrganizationID sets the "organization_id" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillableOrganizationID(u *uuid.UUID) *WorkflowUpdateOne { - if u != nil { - wuo.SetOrganizationID(*u) +func (_u *WorkflowUpdateOne) SetNillableOrganizationID(v *uuid.UUID) *WorkflowUpdateOne { + if v != nil { + _u.SetOrganizationID(*v) } - return wuo + return _u } // SetProjectID sets the "project_id" field. -func (wuo *WorkflowUpdateOne) SetProjectID(u uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.SetProjectID(u) - return wuo +func (_u *WorkflowUpdateOne) SetProjectID(v uuid.UUID) *WorkflowUpdateOne { + _u.mutation.SetProjectID(v) + return _u } // SetNillableProjectID sets the "project_id" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillableProjectID(u *uuid.UUID) *WorkflowUpdateOne { - if u != nil { - wuo.SetProjectID(*u) +func (_u *WorkflowUpdateOne) SetNillableProjectID(v *uuid.UUID) *WorkflowUpdateOne { + if v != nil { + _u.SetProjectID(*v) } - return wuo + return _u } // SetLatestRun sets the "latest_run" field. -func (wuo *WorkflowUpdateOne) SetLatestRun(u uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.SetLatestRun(u) - return wuo +func (_u *WorkflowUpdateOne) SetLatestRun(v uuid.UUID) *WorkflowUpdateOne { + _u.mutation.SetLatestRun(v) + return _u } // SetNillableLatestRun sets the "latest_run" field if the given value is not nil. 
-func (wuo *WorkflowUpdateOne) SetNillableLatestRun(u *uuid.UUID) *WorkflowUpdateOne { - if u != nil { - wuo.SetLatestRun(*u) +func (_u *WorkflowUpdateOne) SetNillableLatestRun(v *uuid.UUID) *WorkflowUpdateOne { + if v != nil { + _u.SetLatestRun(*v) } - return wuo + return _u } // ClearLatestRun clears the value of the "latest_run" field. -func (wuo *WorkflowUpdateOne) ClearLatestRun() *WorkflowUpdateOne { - wuo.mutation.ClearLatestRun() - return wuo +func (_u *WorkflowUpdateOne) ClearLatestRun() *WorkflowUpdateOne { + _u.mutation.ClearLatestRun() + return _u } // SetDescription sets the "description" field. -func (wuo *WorkflowUpdateOne) SetDescription(s string) *WorkflowUpdateOne { - wuo.mutation.SetDescription(s) - return wuo +func (_u *WorkflowUpdateOne) SetDescription(v string) *WorkflowUpdateOne { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (wuo *WorkflowUpdateOne) SetNillableDescription(s *string) *WorkflowUpdateOne { - if s != nil { - wuo.SetDescription(*s) +func (_u *WorkflowUpdateOne) SetNillableDescription(v *string) *WorkflowUpdateOne { + if v != nil { + _u.SetDescription(*v) } - return wuo + return _u } // ClearDescription clears the value of the "description" field. -func (wuo *WorkflowUpdateOne) ClearDescription() *WorkflowUpdateOne { - wuo.mutation.ClearDescription() - return wuo +func (_u *WorkflowUpdateOne) ClearDescription() *WorkflowUpdateOne { + _u.mutation.ClearDescription() + return _u } // SetMetadata sets the "metadata" field. -func (wuo *WorkflowUpdateOne) SetMetadata(m map[string]interface{}) *WorkflowUpdateOne { - wuo.mutation.SetMetadata(m) - return wuo +func (_u *WorkflowUpdateOne) SetMetadata(v map[string]interface{}) *WorkflowUpdateOne { + _u.mutation.SetMetadata(v) + return _u } // ClearMetadata clears the value of the "metadata" field. -func (wuo *WorkflowUpdateOne) ClearMetadata() *WorkflowUpdateOne { - wuo.mutation.ClearMetadata() - return wuo +func (_u *WorkflowUpdateOne) ClearMetadata() *WorkflowUpdateOne { + _u.mutation.ClearMetadata() + return _u } // AddRobotaccountIDs adds the "robotaccounts" edge to the RobotAccount entity by IDs. -func (wuo *WorkflowUpdateOne) AddRobotaccountIDs(ids ...uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.AddRobotaccountIDs(ids...) - return wuo +func (_u *WorkflowUpdateOne) AddRobotaccountIDs(ids ...uuid.UUID) *WorkflowUpdateOne { + _u.mutation.AddRobotaccountIDs(ids...) + return _u } // AddRobotaccounts adds the "robotaccounts" edges to the RobotAccount entity. -func (wuo *WorkflowUpdateOne) AddRobotaccounts(r ...*RobotAccount) *WorkflowUpdateOne { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *WorkflowUpdateOne) AddRobotaccounts(v ...*RobotAccount) *WorkflowUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wuo.AddRobotaccountIDs(ids...) + return _u.AddRobotaccountIDs(ids...) } // AddWorkflowrunIDs adds the "workflowruns" edge to the WorkflowRun entity by IDs. -func (wuo *WorkflowUpdateOne) AddWorkflowrunIDs(ids ...uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.AddWorkflowrunIDs(ids...) - return wuo +func (_u *WorkflowUpdateOne) AddWorkflowrunIDs(ids ...uuid.UUID) *WorkflowUpdateOne { + _u.mutation.AddWorkflowrunIDs(ids...) + return _u } // AddWorkflowruns adds the "workflowruns" edges to the WorkflowRun entity. 
-func (wuo *WorkflowUpdateOne) AddWorkflowruns(w ...*WorkflowRun) *WorkflowUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowUpdateOne) AddWorkflowruns(v ...*WorkflowRun) *WorkflowUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wuo.AddWorkflowrunIDs(ids...) + return _u.AddWorkflowrunIDs(ids...) } // SetOrganization sets the "organization" edge to the Organization entity. -func (wuo *WorkflowUpdateOne) SetOrganization(o *Organization) *WorkflowUpdateOne { - return wuo.SetOrganizationID(o.ID) +func (_u *WorkflowUpdateOne) SetOrganization(v *Organization) *WorkflowUpdateOne { + return _u.SetOrganizationID(v.ID) } // SetContractID sets the "contract" edge to the WorkflowContract entity by ID. -func (wuo *WorkflowUpdateOne) SetContractID(id uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.SetContractID(id) - return wuo +func (_u *WorkflowUpdateOne) SetContractID(id uuid.UUID) *WorkflowUpdateOne { + _u.mutation.SetContractID(id) + return _u } // SetContract sets the "contract" edge to the WorkflowContract entity. -func (wuo *WorkflowUpdateOne) SetContract(w *WorkflowContract) *WorkflowUpdateOne { - return wuo.SetContractID(w.ID) +func (_u *WorkflowUpdateOne) SetContract(v *WorkflowContract) *WorkflowUpdateOne { + return _u.SetContractID(v.ID) } // AddIntegrationAttachmentIDs adds the "integration_attachments" edge to the IntegrationAttachment entity by IDs. -func (wuo *WorkflowUpdateOne) AddIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.AddIntegrationAttachmentIDs(ids...) - return wuo +func (_u *WorkflowUpdateOne) AddIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowUpdateOne { + _u.mutation.AddIntegrationAttachmentIDs(ids...) + return _u } // AddIntegrationAttachments adds the "integration_attachments" edges to the IntegrationAttachment entity. -func (wuo *WorkflowUpdateOne) AddIntegrationAttachments(i ...*IntegrationAttachment) *WorkflowUpdateOne { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *WorkflowUpdateOne) AddIntegrationAttachments(v ...*IntegrationAttachment) *WorkflowUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wuo.AddIntegrationAttachmentIDs(ids...) + return _u.AddIntegrationAttachmentIDs(ids...) } // SetProject sets the "project" edge to the Project entity. -func (wuo *WorkflowUpdateOne) SetProject(p *Project) *WorkflowUpdateOne { - return wuo.SetProjectID(p.ID) +func (_u *WorkflowUpdateOne) SetProject(v *Project) *WorkflowUpdateOne { + return _u.SetProjectID(v.ID) } // SetLatestWorkflowRunID sets the "latest_workflow_run" edge to the WorkflowRun entity by ID. -func (wuo *WorkflowUpdateOne) SetLatestWorkflowRunID(id uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.SetLatestWorkflowRunID(id) - return wuo +func (_u *WorkflowUpdateOne) SetLatestWorkflowRunID(id uuid.UUID) *WorkflowUpdateOne { + _u.mutation.SetLatestWorkflowRunID(id) + return _u } // SetNillableLatestWorkflowRunID sets the "latest_workflow_run" edge to the WorkflowRun entity by ID if the given value is not nil. 
-func (wuo *WorkflowUpdateOne) SetNillableLatestWorkflowRunID(id *uuid.UUID) *WorkflowUpdateOne { +func (_u *WorkflowUpdateOne) SetNillableLatestWorkflowRunID(id *uuid.UUID) *WorkflowUpdateOne { if id != nil { - wuo = wuo.SetLatestWorkflowRunID(*id) + _u = _u.SetLatestWorkflowRunID(*id) } - return wuo + return _u } // SetLatestWorkflowRun sets the "latest_workflow_run" edge to the WorkflowRun entity. -func (wuo *WorkflowUpdateOne) SetLatestWorkflowRun(w *WorkflowRun) *WorkflowUpdateOne { - return wuo.SetLatestWorkflowRunID(w.ID) +func (_u *WorkflowUpdateOne) SetLatestWorkflowRun(v *WorkflowRun) *WorkflowUpdateOne { + return _u.SetLatestWorkflowRunID(v.ID) } // AddReferrerIDs adds the "referrers" edge to the Referrer entity by IDs. -func (wuo *WorkflowUpdateOne) AddReferrerIDs(ids ...uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.AddReferrerIDs(ids...) - return wuo +func (_u *WorkflowUpdateOne) AddReferrerIDs(ids ...uuid.UUID) *WorkflowUpdateOne { + _u.mutation.AddReferrerIDs(ids...) + return _u } // AddReferrers adds the "referrers" edges to the Referrer entity. -func (wuo *WorkflowUpdateOne) AddReferrers(r ...*Referrer) *WorkflowUpdateOne { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *WorkflowUpdateOne) AddReferrers(v ...*Referrer) *WorkflowUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wuo.AddReferrerIDs(ids...) + return _u.AddReferrerIDs(ids...) } // Mutation returns the WorkflowMutation object of the builder. -func (wuo *WorkflowUpdateOne) Mutation() *WorkflowMutation { - return wuo.mutation +func (_u *WorkflowUpdateOne) Mutation() *WorkflowMutation { + return _u.mutation } // ClearRobotaccounts clears all "robotaccounts" edges to the RobotAccount entity. -func (wuo *WorkflowUpdateOne) ClearRobotaccounts() *WorkflowUpdateOne { - wuo.mutation.ClearRobotaccounts() - return wuo +func (_u *WorkflowUpdateOne) ClearRobotaccounts() *WorkflowUpdateOne { + _u.mutation.ClearRobotaccounts() + return _u } // RemoveRobotaccountIDs removes the "robotaccounts" edge to RobotAccount entities by IDs. -func (wuo *WorkflowUpdateOne) RemoveRobotaccountIDs(ids ...uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.RemoveRobotaccountIDs(ids...) - return wuo +func (_u *WorkflowUpdateOne) RemoveRobotaccountIDs(ids ...uuid.UUID) *WorkflowUpdateOne { + _u.mutation.RemoveRobotaccountIDs(ids...) + return _u } // RemoveRobotaccounts removes "robotaccounts" edges to RobotAccount entities. -func (wuo *WorkflowUpdateOne) RemoveRobotaccounts(r ...*RobotAccount) *WorkflowUpdateOne { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *WorkflowUpdateOne) RemoveRobotaccounts(v ...*RobotAccount) *WorkflowUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wuo.RemoveRobotaccountIDs(ids...) + return _u.RemoveRobotaccountIDs(ids...) } // ClearWorkflowruns clears all "workflowruns" edges to the WorkflowRun entity. -func (wuo *WorkflowUpdateOne) ClearWorkflowruns() *WorkflowUpdateOne { - wuo.mutation.ClearWorkflowruns() - return wuo +func (_u *WorkflowUpdateOne) ClearWorkflowruns() *WorkflowUpdateOne { + _u.mutation.ClearWorkflowruns() + return _u } // RemoveWorkflowrunIDs removes the "workflowruns" edge to WorkflowRun entities by IDs. -func (wuo *WorkflowUpdateOne) RemoveWorkflowrunIDs(ids ...uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.RemoveWorkflowrunIDs(ids...) 
- return wuo +func (_u *WorkflowUpdateOne) RemoveWorkflowrunIDs(ids ...uuid.UUID) *WorkflowUpdateOne { + _u.mutation.RemoveWorkflowrunIDs(ids...) + return _u } // RemoveWorkflowruns removes "workflowruns" edges to WorkflowRun entities. -func (wuo *WorkflowUpdateOne) RemoveWorkflowruns(w ...*WorkflowRun) *WorkflowUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowUpdateOne) RemoveWorkflowruns(v ...*WorkflowRun) *WorkflowUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wuo.RemoveWorkflowrunIDs(ids...) + return _u.RemoveWorkflowrunIDs(ids...) } // ClearOrganization clears the "organization" edge to the Organization entity. -func (wuo *WorkflowUpdateOne) ClearOrganization() *WorkflowUpdateOne { - wuo.mutation.ClearOrganization() - return wuo +func (_u *WorkflowUpdateOne) ClearOrganization() *WorkflowUpdateOne { + _u.mutation.ClearOrganization() + return _u } // ClearContract clears the "contract" edge to the WorkflowContract entity. -func (wuo *WorkflowUpdateOne) ClearContract() *WorkflowUpdateOne { - wuo.mutation.ClearContract() - return wuo +func (_u *WorkflowUpdateOne) ClearContract() *WorkflowUpdateOne { + _u.mutation.ClearContract() + return _u } // ClearIntegrationAttachments clears all "integration_attachments" edges to the IntegrationAttachment entity. -func (wuo *WorkflowUpdateOne) ClearIntegrationAttachments() *WorkflowUpdateOne { - wuo.mutation.ClearIntegrationAttachments() - return wuo +func (_u *WorkflowUpdateOne) ClearIntegrationAttachments() *WorkflowUpdateOne { + _u.mutation.ClearIntegrationAttachments() + return _u } // RemoveIntegrationAttachmentIDs removes the "integration_attachments" edge to IntegrationAttachment entities by IDs. -func (wuo *WorkflowUpdateOne) RemoveIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.RemoveIntegrationAttachmentIDs(ids...) - return wuo +func (_u *WorkflowUpdateOne) RemoveIntegrationAttachmentIDs(ids ...uuid.UUID) *WorkflowUpdateOne { + _u.mutation.RemoveIntegrationAttachmentIDs(ids...) + return _u } // RemoveIntegrationAttachments removes "integration_attachments" edges to IntegrationAttachment entities. -func (wuo *WorkflowUpdateOne) RemoveIntegrationAttachments(i ...*IntegrationAttachment) *WorkflowUpdateOne { - ids := make([]uuid.UUID, len(i)) - for j := range i { - ids[j] = i[j].ID +func (_u *WorkflowUpdateOne) RemoveIntegrationAttachments(v ...*IntegrationAttachment) *WorkflowUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wuo.RemoveIntegrationAttachmentIDs(ids...) + return _u.RemoveIntegrationAttachmentIDs(ids...) } // ClearProject clears the "project" edge to the Project entity. -func (wuo *WorkflowUpdateOne) ClearProject() *WorkflowUpdateOne { - wuo.mutation.ClearProject() - return wuo +func (_u *WorkflowUpdateOne) ClearProject() *WorkflowUpdateOne { + _u.mutation.ClearProject() + return _u } // ClearLatestWorkflowRun clears the "latest_workflow_run" edge to the WorkflowRun entity. -func (wuo *WorkflowUpdateOne) ClearLatestWorkflowRun() *WorkflowUpdateOne { - wuo.mutation.ClearLatestWorkflowRun() - return wuo +func (_u *WorkflowUpdateOne) ClearLatestWorkflowRun() *WorkflowUpdateOne { + _u.mutation.ClearLatestWorkflowRun() + return _u } // ClearReferrers clears all "referrers" edges to the Referrer entity. 
-func (wuo *WorkflowUpdateOne) ClearReferrers() *WorkflowUpdateOne { - wuo.mutation.ClearReferrers() - return wuo +func (_u *WorkflowUpdateOne) ClearReferrers() *WorkflowUpdateOne { + _u.mutation.ClearReferrers() + return _u } // RemoveReferrerIDs removes the "referrers" edge to Referrer entities by IDs. -func (wuo *WorkflowUpdateOne) RemoveReferrerIDs(ids ...uuid.UUID) *WorkflowUpdateOne { - wuo.mutation.RemoveReferrerIDs(ids...) - return wuo +func (_u *WorkflowUpdateOne) RemoveReferrerIDs(ids ...uuid.UUID) *WorkflowUpdateOne { + _u.mutation.RemoveReferrerIDs(ids...) + return _u } // RemoveReferrers removes "referrers" edges to Referrer entities. -func (wuo *WorkflowUpdateOne) RemoveReferrers(r ...*Referrer) *WorkflowUpdateOne { - ids := make([]uuid.UUID, len(r)) - for i := range r { - ids[i] = r[i].ID +func (_u *WorkflowUpdateOne) RemoveReferrers(v ...*Referrer) *WorkflowUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wuo.RemoveReferrerIDs(ids...) + return _u.RemoveReferrerIDs(ids...) } // Where appends a list predicates to the WorkflowUpdate builder. -func (wuo *WorkflowUpdateOne) Where(ps ...predicate.Workflow) *WorkflowUpdateOne { - wuo.mutation.Where(ps...) - return wuo +func (_u *WorkflowUpdateOne) Where(ps ...predicate.Workflow) *WorkflowUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (wuo *WorkflowUpdateOne) Select(field string, fields ...string) *WorkflowUpdateOne { - wuo.fields = append([]string{field}, fields...) - return wuo +func (_u *WorkflowUpdateOne) Select(field string, fields ...string) *WorkflowUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated Workflow entity. -func (wuo *WorkflowUpdateOne) Save(ctx context.Context) (*Workflow, error) { - return withHooks(ctx, wuo.sqlSave, wuo.mutation, wuo.hooks) +func (_u *WorkflowUpdateOne) Save(ctx context.Context) (*Workflow, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (wuo *WorkflowUpdateOne) SaveX(ctx context.Context) *Workflow { - node, err := wuo.Save(ctx) +func (_u *WorkflowUpdateOne) SaveX(ctx context.Context) *Workflow { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -1288,49 +1288,49 @@ func (wuo *WorkflowUpdateOne) SaveX(ctx context.Context) *Workflow { } // Exec executes the query on the entity. -func (wuo *WorkflowUpdateOne) Exec(ctx context.Context) error { - _, err := wuo.Save(ctx) +func (_u *WorkflowUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wuo *WorkflowUpdateOne) ExecX(ctx context.Context) { - if err := wuo.Exec(ctx); err != nil { +func (_u *WorkflowUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. 
-func (wuo *WorkflowUpdateOne) check() error { - if wuo.mutation.OrganizationCleared() && len(wuo.mutation.OrganizationIDs()) > 0 { +func (_u *WorkflowUpdateOne) check() error { + if _u.mutation.OrganizationCleared() && len(_u.mutation.OrganizationIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Workflow.organization"`) } - if wuo.mutation.ContractCleared() && len(wuo.mutation.ContractIDs()) > 0 { + if _u.mutation.ContractCleared() && len(_u.mutation.ContractIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Workflow.contract"`) } - if wuo.mutation.ProjectCleared() && len(wuo.mutation.ProjectIDs()) > 0 { + if _u.mutation.ProjectCleared() && len(_u.mutation.ProjectIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "Workflow.project"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (wuo *WorkflowUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowUpdateOne { - wuo.modifiers = append(wuo.modifiers, modifiers...) - return wuo +func (_u *WorkflowUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err error) { - if err := wuo.check(); err != nil { +func (_u *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(workflow.Table, workflow.Columns, sqlgraph.NewFieldSpec(workflow.FieldID, field.TypeUUID)) - id, ok := wuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Workflow.id" for update`)} } _spec.Node.ID.Value = id - if fields := wuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, workflow.FieldID) for _, f := range fields { @@ -1342,56 +1342,56 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } } } - if ps := wuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := wuo.mutation.ProjectOld(); ok { + if value, ok := _u.mutation.ProjectOld(); ok { _spec.SetField(workflow.FieldProjectOld, field.TypeString, value) } - if wuo.mutation.ProjectOldCleared() { + if _u.mutation.ProjectOldCleared() { _spec.ClearField(workflow.FieldProjectOld, field.TypeString) } - if value, ok := wuo.mutation.Team(); ok { + if value, ok := _u.mutation.Team(); ok { _spec.SetField(workflow.FieldTeam, field.TypeString, value) } - if wuo.mutation.TeamCleared() { + if _u.mutation.TeamCleared() { _spec.ClearField(workflow.FieldTeam, field.TypeString) } - if value, ok := wuo.mutation.RunsCount(); ok { + if value, ok := _u.mutation.RunsCount(); ok { _spec.SetField(workflow.FieldRunsCount, field.TypeInt, value) } - if value, ok := wuo.mutation.AddedRunsCount(); ok { + if value, ok := _u.mutation.AddedRunsCount(); ok { _spec.AddField(workflow.FieldRunsCount, field.TypeInt, value) } - if value, ok := wuo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(workflow.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := wuo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { 
_spec.SetField(workflow.FieldDeletedAt, field.TypeTime, value) } - if wuo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(workflow.FieldDeletedAt, field.TypeTime) } - if value, ok := wuo.mutation.Public(); ok { + if value, ok := _u.mutation.Public(); ok { _spec.SetField(workflow.FieldPublic, field.TypeBool, value) } - if value, ok := wuo.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(workflow.FieldDescription, field.TypeString, value) } - if wuo.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(workflow.FieldDescription, field.TypeString) } - if value, ok := wuo.mutation.Metadata(); ok { + if value, ok := _u.mutation.Metadata(); ok { _spec.SetField(workflow.FieldMetadata, field.TypeJSON, value) } - if wuo.mutation.MetadataCleared() { + if _u.mutation.MetadataCleared() { _spec.ClearField(workflow.FieldMetadata, field.TypeJSON) } - if wuo.mutation.RobotaccountsCleared() { + if _u.mutation.RobotaccountsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1404,7 +1404,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.RemovedRobotaccountsIDs(); len(nodes) > 0 && !wuo.mutation.RobotaccountsCleared() { + if nodes := _u.mutation.RemovedRobotaccountsIDs(); len(nodes) > 0 && !_u.mutation.RobotaccountsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1420,7 +1420,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.RobotaccountsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.RobotaccountsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1436,7 +1436,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wuo.mutation.WorkflowrunsCleared() { + if _u.mutation.WorkflowrunsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1449,7 +1449,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.RemovedWorkflowrunsIDs(); len(nodes) > 0 && !wuo.mutation.WorkflowrunsCleared() { + if nodes := _u.mutation.RemovedWorkflowrunsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowrunsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1465,7 +1465,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.WorkflowrunsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowrunsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -1481,7 +1481,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wuo.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -1494,7 +1494,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); 
len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -1510,7 +1510,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wuo.mutation.ContractCleared() { + if _u.mutation.ContractCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -1523,7 +1523,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.ContractIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ContractIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -1539,7 +1539,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wuo.mutation.IntegrationAttachmentsCleared() { + if _u.mutation.IntegrationAttachmentsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -1552,7 +1552,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.RemovedIntegrationAttachmentsIDs(); len(nodes) > 0 && !wuo.mutation.IntegrationAttachmentsCleared() { + if nodes := _u.mutation.RemovedIntegrationAttachmentsIDs(); len(nodes) > 0 && !_u.mutation.IntegrationAttachmentsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -1568,7 +1568,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.IntegrationAttachmentsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.IntegrationAttachmentsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -1584,7 +1584,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wuo.mutation.ProjectCleared() { + if _u.mutation.ProjectCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -1597,7 +1597,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.ProjectIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ProjectIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -1613,7 +1613,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wuo.mutation.LatestWorkflowRunCleared() { + if _u.mutation.LatestWorkflowRunCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -1626,7 +1626,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.LatestWorkflowRunIDs(); len(nodes) > 0 { + if nodes := _u.mutation.LatestWorkflowRunIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -1642,7 +1642,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wuo.mutation.ReferrersCleared() { + if _u.mutation.ReferrersCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -1655,7 +1655,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node 
*Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.RemovedReferrersIDs(); len(nodes) > 0 && !wuo.mutation.ReferrersCleared() { + if nodes := _u.mutation.RemovedReferrersIDs(); len(nodes) > 0 && !_u.mutation.ReferrersCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -1671,7 +1671,7 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wuo.mutation.ReferrersIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ReferrersIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: true, @@ -1687,11 +1687,11 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(wuo.modifiers...) - _node = &Workflow{config: wuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &Workflow{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, wuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{workflow.Label} } else if sqlgraph.IsConstraintError(err) { @@ -1699,6 +1699,6 @@ func (wuo *WorkflowUpdateOne) sqlSave(ctx context.Context) (_node *Workflow, err } return nil, err } - wuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/workflowcontract.go b/app/controlplane/pkg/data/ent/workflowcontract.go index ded8d5814..078c8cac4 100644 --- a/app/controlplane/pkg/data/ent/workflowcontract.go +++ b/app/controlplane/pkg/data/ent/workflowcontract.go @@ -105,7 +105,7 @@ func (*WorkflowContract) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the WorkflowContract fields. 
-func (wc *WorkflowContract) assignValues(columns []string, values []any) error { +func (_m *WorkflowContract) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -115,59 +115,59 @@ func (wc *WorkflowContract) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - wc.ID = *value + _m.ID = *value } case workflowcontract.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - wc.Name = value.String + _m.Name = value.String } case workflowcontract.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - wc.CreatedAt = value.Time + _m.CreatedAt = value.Time } case workflowcontract.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - wc.UpdatedAt = value.Time + _m.UpdatedAt = value.Time } case workflowcontract.FieldDeletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) } else if value.Valid { - wc.DeletedAt = value.Time + _m.DeletedAt = value.Time } case workflowcontract.FieldDescription: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field description", values[i]) } else if value.Valid { - wc.Description = value.String + _m.Description = value.String } case workflowcontract.FieldScopedResourceType: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field scoped_resource_type", values[i]) } else if value.Valid { - wc.ScopedResourceType = biz.ContractScope(value.String) + _m.ScopedResourceType = biz.ContractScope(value.String) } case workflowcontract.FieldScopedResourceID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field scoped_resource_id", values[i]) } else if value != nil { - wc.ScopedResourceID = *value + _m.ScopedResourceID = *value } case workflowcontract.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field organization_workflow_contracts", values[i]) } else if value.Valid { - wc.organization_workflow_contracts = new(uuid.UUID) - *wc.organization_workflow_contracts = *value.S.(*uuid.UUID) + _m.organization_workflow_contracts = new(uuid.UUID) + *_m.organization_workflow_contracts = *value.S.(*uuid.UUID) } default: - wc.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -175,68 +175,68 @@ func (wc *WorkflowContract) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the WorkflowContract. // This includes values selected through modifiers, order, etc. -func (wc *WorkflowContract) Value(name string) (ent.Value, error) { - return wc.selectValues.Get(name) +func (_m *WorkflowContract) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryVersions queries the "versions" edge of the WorkflowContract entity. 
-func (wc *WorkflowContract) QueryVersions() *WorkflowContractVersionQuery { - return NewWorkflowContractClient(wc.config).QueryVersions(wc) +func (_m *WorkflowContract) QueryVersions() *WorkflowContractVersionQuery { + return NewWorkflowContractClient(_m.config).QueryVersions(_m) } // QueryOrganization queries the "organization" edge of the WorkflowContract entity. -func (wc *WorkflowContract) QueryOrganization() *OrganizationQuery { - return NewWorkflowContractClient(wc.config).QueryOrganization(wc) +func (_m *WorkflowContract) QueryOrganization() *OrganizationQuery { + return NewWorkflowContractClient(_m.config).QueryOrganization(_m) } // QueryWorkflows queries the "workflows" edge of the WorkflowContract entity. -func (wc *WorkflowContract) QueryWorkflows() *WorkflowQuery { - return NewWorkflowContractClient(wc.config).QueryWorkflows(wc) +func (_m *WorkflowContract) QueryWorkflows() *WorkflowQuery { + return NewWorkflowContractClient(_m.config).QueryWorkflows(_m) } // Update returns a builder for updating this WorkflowContract. // Note that you need to call WorkflowContract.Unwrap() before calling this method if this WorkflowContract // was returned from a transaction, and the transaction was committed or rolled back. -func (wc *WorkflowContract) Update() *WorkflowContractUpdateOne { - return NewWorkflowContractClient(wc.config).UpdateOne(wc) +func (_m *WorkflowContract) Update() *WorkflowContractUpdateOne { + return NewWorkflowContractClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the WorkflowContract entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (wc *WorkflowContract) Unwrap() *WorkflowContract { - _tx, ok := wc.config.driver.(*txDriver) +func (_m *WorkflowContract) Unwrap() *WorkflowContract { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: WorkflowContract is not a transactional entity") } - wc.config.driver = _tx.drv - return wc + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. 
-func (wc *WorkflowContract) String() string { +func (_m *WorkflowContract) String() string { var builder strings.Builder builder.WriteString("WorkflowContract(") - builder.WriteString(fmt.Sprintf("id=%v, ", wc.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("name=") - builder.WriteString(wc.Name) + builder.WriteString(_m.Name) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(wc.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("updated_at=") - builder.WriteString(wc.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("deleted_at=") - builder.WriteString(wc.DeletedAt.Format(time.ANSIC)) + builder.WriteString(_m.DeletedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("description=") - builder.WriteString(wc.Description) + builder.WriteString(_m.Description) builder.WriteString(", ") builder.WriteString("scoped_resource_type=") - builder.WriteString(fmt.Sprintf("%v", wc.ScopedResourceType)) + builder.WriteString(fmt.Sprintf("%v", _m.ScopedResourceType)) builder.WriteString(", ") builder.WriteString("scoped_resource_id=") - builder.WriteString(fmt.Sprintf("%v", wc.ScopedResourceID)) + builder.WriteString(fmt.Sprintf("%v", _m.ScopedResourceID)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/workflowcontract_create.go b/app/controlplane/pkg/data/ent/workflowcontract_create.go index e17de188a..60bbe77ee 100644 --- a/app/controlplane/pkg/data/ent/workflowcontract_create.go +++ b/app/controlplane/pkg/data/ent/workflowcontract_create.go @@ -29,172 +29,172 @@ type WorkflowContractCreate struct { } // SetName sets the "name" field. -func (wcc *WorkflowContractCreate) SetName(s string) *WorkflowContractCreate { - wcc.mutation.SetName(s) - return wcc +func (_c *WorkflowContractCreate) SetName(v string) *WorkflowContractCreate { + _c.mutation.SetName(v) + return _c } // SetCreatedAt sets the "created_at" field. -func (wcc *WorkflowContractCreate) SetCreatedAt(t time.Time) *WorkflowContractCreate { - wcc.mutation.SetCreatedAt(t) - return wcc +func (_c *WorkflowContractCreate) SetCreatedAt(v time.Time) *WorkflowContractCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (wcc *WorkflowContractCreate) SetNillableCreatedAt(t *time.Time) *WorkflowContractCreate { - if t != nil { - wcc.SetCreatedAt(*t) +func (_c *WorkflowContractCreate) SetNillableCreatedAt(v *time.Time) *WorkflowContractCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return wcc + return _c } // SetUpdatedAt sets the "updated_at" field. -func (wcc *WorkflowContractCreate) SetUpdatedAt(t time.Time) *WorkflowContractCreate { - wcc.mutation.SetUpdatedAt(t) - return wcc +func (_c *WorkflowContractCreate) SetUpdatedAt(v time.Time) *WorkflowContractCreate { + _c.mutation.SetUpdatedAt(v) + return _c } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (wcc *WorkflowContractCreate) SetNillableUpdatedAt(t *time.Time) *WorkflowContractCreate { - if t != nil { - wcc.SetUpdatedAt(*t) +func (_c *WorkflowContractCreate) SetNillableUpdatedAt(v *time.Time) *WorkflowContractCreate { + if v != nil { + _c.SetUpdatedAt(*v) } - return wcc + return _c } // SetDeletedAt sets the "deleted_at" field. 
-func (wcc *WorkflowContractCreate) SetDeletedAt(t time.Time) *WorkflowContractCreate { - wcc.mutation.SetDeletedAt(t) - return wcc +func (_c *WorkflowContractCreate) SetDeletedAt(v time.Time) *WorkflowContractCreate { + _c.mutation.SetDeletedAt(v) + return _c } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (wcc *WorkflowContractCreate) SetNillableDeletedAt(t *time.Time) *WorkflowContractCreate { - if t != nil { - wcc.SetDeletedAt(*t) +func (_c *WorkflowContractCreate) SetNillableDeletedAt(v *time.Time) *WorkflowContractCreate { + if v != nil { + _c.SetDeletedAt(*v) } - return wcc + return _c } // SetDescription sets the "description" field. -func (wcc *WorkflowContractCreate) SetDescription(s string) *WorkflowContractCreate { - wcc.mutation.SetDescription(s) - return wcc +func (_c *WorkflowContractCreate) SetDescription(v string) *WorkflowContractCreate { + _c.mutation.SetDescription(v) + return _c } // SetNillableDescription sets the "description" field if the given value is not nil. -func (wcc *WorkflowContractCreate) SetNillableDescription(s *string) *WorkflowContractCreate { - if s != nil { - wcc.SetDescription(*s) +func (_c *WorkflowContractCreate) SetNillableDescription(v *string) *WorkflowContractCreate { + if v != nil { + _c.SetDescription(*v) } - return wcc + return _c } // SetScopedResourceType sets the "scoped_resource_type" field. -func (wcc *WorkflowContractCreate) SetScopedResourceType(bs biz.ContractScope) *WorkflowContractCreate { - wcc.mutation.SetScopedResourceType(bs) - return wcc +func (_c *WorkflowContractCreate) SetScopedResourceType(v biz.ContractScope) *WorkflowContractCreate { + _c.mutation.SetScopedResourceType(v) + return _c } // SetNillableScopedResourceType sets the "scoped_resource_type" field if the given value is not nil. -func (wcc *WorkflowContractCreate) SetNillableScopedResourceType(bs *biz.ContractScope) *WorkflowContractCreate { - if bs != nil { - wcc.SetScopedResourceType(*bs) +func (_c *WorkflowContractCreate) SetNillableScopedResourceType(v *biz.ContractScope) *WorkflowContractCreate { + if v != nil { + _c.SetScopedResourceType(*v) } - return wcc + return _c } // SetScopedResourceID sets the "scoped_resource_id" field. -func (wcc *WorkflowContractCreate) SetScopedResourceID(u uuid.UUID) *WorkflowContractCreate { - wcc.mutation.SetScopedResourceID(u) - return wcc +func (_c *WorkflowContractCreate) SetScopedResourceID(v uuid.UUID) *WorkflowContractCreate { + _c.mutation.SetScopedResourceID(v) + return _c } // SetNillableScopedResourceID sets the "scoped_resource_id" field if the given value is not nil. -func (wcc *WorkflowContractCreate) SetNillableScopedResourceID(u *uuid.UUID) *WorkflowContractCreate { - if u != nil { - wcc.SetScopedResourceID(*u) +func (_c *WorkflowContractCreate) SetNillableScopedResourceID(v *uuid.UUID) *WorkflowContractCreate { + if v != nil { + _c.SetScopedResourceID(*v) } - return wcc + return _c } // SetID sets the "id" field. -func (wcc *WorkflowContractCreate) SetID(u uuid.UUID) *WorkflowContractCreate { - wcc.mutation.SetID(u) - return wcc +func (_c *WorkflowContractCreate) SetID(v uuid.UUID) *WorkflowContractCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. 
-func (wcc *WorkflowContractCreate) SetNillableID(u *uuid.UUID) *WorkflowContractCreate { - if u != nil { - wcc.SetID(*u) +func (_c *WorkflowContractCreate) SetNillableID(v *uuid.UUID) *WorkflowContractCreate { + if v != nil { + _c.SetID(*v) } - return wcc + return _c } // AddVersionIDs adds the "versions" edge to the WorkflowContractVersion entity by IDs. -func (wcc *WorkflowContractCreate) AddVersionIDs(ids ...uuid.UUID) *WorkflowContractCreate { - wcc.mutation.AddVersionIDs(ids...) - return wcc +func (_c *WorkflowContractCreate) AddVersionIDs(ids ...uuid.UUID) *WorkflowContractCreate { + _c.mutation.AddVersionIDs(ids...) + return _c } // AddVersions adds the "versions" edges to the WorkflowContractVersion entity. -func (wcc *WorkflowContractCreate) AddVersions(w ...*WorkflowContractVersion) *WorkflowContractCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *WorkflowContractCreate) AddVersions(v ...*WorkflowContractVersion) *WorkflowContractCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcc.AddVersionIDs(ids...) + return _c.AddVersionIDs(ids...) } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (wcc *WorkflowContractCreate) SetOrganizationID(id uuid.UUID) *WorkflowContractCreate { - wcc.mutation.SetOrganizationID(id) - return wcc +func (_c *WorkflowContractCreate) SetOrganizationID(id uuid.UUID) *WorkflowContractCreate { + _c.mutation.SetOrganizationID(id) + return _c } // SetNillableOrganizationID sets the "organization" edge to the Organization entity by ID if the given value is not nil. -func (wcc *WorkflowContractCreate) SetNillableOrganizationID(id *uuid.UUID) *WorkflowContractCreate { +func (_c *WorkflowContractCreate) SetNillableOrganizationID(id *uuid.UUID) *WorkflowContractCreate { if id != nil { - wcc = wcc.SetOrganizationID(*id) + _c = _c.SetOrganizationID(*id) } - return wcc + return _c } // SetOrganization sets the "organization" edge to the Organization entity. -func (wcc *WorkflowContractCreate) SetOrganization(o *Organization) *WorkflowContractCreate { - return wcc.SetOrganizationID(o.ID) +func (_c *WorkflowContractCreate) SetOrganization(v *Organization) *WorkflowContractCreate { + return _c.SetOrganizationID(v.ID) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (wcc *WorkflowContractCreate) AddWorkflowIDs(ids ...uuid.UUID) *WorkflowContractCreate { - wcc.mutation.AddWorkflowIDs(ids...) - return wcc +func (_c *WorkflowContractCreate) AddWorkflowIDs(ids ...uuid.UUID) *WorkflowContractCreate { + _c.mutation.AddWorkflowIDs(ids...) + return _c } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (wcc *WorkflowContractCreate) AddWorkflows(w ...*Workflow) *WorkflowContractCreate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_c *WorkflowContractCreate) AddWorkflows(v ...*Workflow) *WorkflowContractCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcc.AddWorkflowIDs(ids...) + return _c.AddWorkflowIDs(ids...) } // Mutation returns the WorkflowContractMutation object of the builder. -func (wcc *WorkflowContractCreate) Mutation() *WorkflowContractMutation { - return wcc.mutation +func (_c *WorkflowContractCreate) Mutation() *WorkflowContractMutation { + return _c.mutation } // Save creates the WorkflowContract in the database. 
-func (wcc *WorkflowContractCreate) Save(ctx context.Context) (*WorkflowContract, error) { - wcc.defaults() - return withHooks(ctx, wcc.sqlSave, wcc.mutation, wcc.hooks) +func (_c *WorkflowContractCreate) Save(ctx context.Context) (*WorkflowContract, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (wcc *WorkflowContractCreate) SaveX(ctx context.Context) *WorkflowContract { - v, err := wcc.Save(ctx) +func (_c *WorkflowContractCreate) SaveX(ctx context.Context) *WorkflowContract { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -202,46 +202,46 @@ func (wcc *WorkflowContractCreate) SaveX(ctx context.Context) *WorkflowContract } // Exec executes the query. -func (wcc *WorkflowContractCreate) Exec(ctx context.Context) error { - _, err := wcc.Save(ctx) +func (_c *WorkflowContractCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wcc *WorkflowContractCreate) ExecX(ctx context.Context) { - if err := wcc.Exec(ctx); err != nil { +func (_c *WorkflowContractCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (wcc *WorkflowContractCreate) defaults() { - if _, ok := wcc.mutation.CreatedAt(); !ok { +func (_c *WorkflowContractCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := workflowcontract.DefaultCreatedAt() - wcc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := wcc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { v := workflowcontract.DefaultUpdatedAt() - wcc.mutation.SetUpdatedAt(v) + _c.mutation.SetUpdatedAt(v) } - if _, ok := wcc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := workflowcontract.DefaultID() - wcc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (wcc *WorkflowContractCreate) check() error { - if _, ok := wcc.mutation.Name(); !ok { +func (_c *WorkflowContractCreate) check() error { + if _, ok := _c.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "WorkflowContract.name"`)} } - if _, ok := wcc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "WorkflowContract.created_at"`)} } - if _, ok := wcc.mutation.UpdatedAt(); !ok { + if _, ok := _c.mutation.UpdatedAt(); !ok { return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "WorkflowContract.updated_at"`)} } - if v, ok := wcc.mutation.ScopedResourceType(); ok { + if v, ok := _c.mutation.ScopedResourceType(); ok { if err := workflowcontract.ScopedResourceTypeValidator(v); err != nil { return &ValidationError{Name: "scoped_resource_type", err: fmt.Errorf(`ent: validator failed for field "WorkflowContract.scoped_resource_type": %w`, err)} } @@ -249,12 +249,12 @@ func (wcc *WorkflowContractCreate) check() error { return nil } -func (wcc *WorkflowContractCreate) sqlSave(ctx context.Context) (*WorkflowContract, error) { - if err := wcc.check(); err != nil { +func (_c *WorkflowContractCreate) sqlSave(ctx context.Context) (*WorkflowContract, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := wcc.createSpec() - if err := sqlgraph.CreateNode(ctx, wcc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -267,50 +267,50 @@ func (wcc *WorkflowContractCreate) sqlSave(ctx context.Context) (*WorkflowContra return nil, err } } - wcc.mutation.id = &_node.ID - wcc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (wcc *WorkflowContractCreate) createSpec() (*WorkflowContract, *sqlgraph.CreateSpec) { +func (_c *WorkflowContractCreate) createSpec() (*WorkflowContract, *sqlgraph.CreateSpec) { var ( - _node = &WorkflowContract{config: wcc.config} + _node = &WorkflowContract{config: _c.config} _spec = sqlgraph.NewCreateSpec(workflowcontract.Table, sqlgraph.NewFieldSpec(workflowcontract.FieldID, field.TypeUUID)) ) - _spec.OnConflict = wcc.conflict - if id, ok := wcc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := wcc.mutation.Name(); ok { + if value, ok := _c.mutation.Name(); ok { _spec.SetField(workflowcontract.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := wcc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(workflowcontract.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := wcc.mutation.UpdatedAt(); ok { + if value, ok := _c.mutation.UpdatedAt(); ok { _spec.SetField(workflowcontract.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = value } - if value, ok := wcc.mutation.DeletedAt(); ok { + if value, ok := _c.mutation.DeletedAt(); ok { _spec.SetField(workflowcontract.FieldDeletedAt, field.TypeTime, value) _node.DeletedAt = value } - if value, ok := wcc.mutation.Description(); ok { + if value, ok := _c.mutation.Description(); ok { _spec.SetField(workflowcontract.FieldDescription, field.TypeString, value) _node.Description = value } - if value, ok := 
wcc.mutation.ScopedResourceType(); ok { + if value, ok := _c.mutation.ScopedResourceType(); ok { _spec.SetField(workflowcontract.FieldScopedResourceType, field.TypeEnum, value) _node.ScopedResourceType = value } - if value, ok := wcc.mutation.ScopedResourceID(); ok { + if value, ok := _c.mutation.ScopedResourceID(); ok { _spec.SetField(workflowcontract.FieldScopedResourceID, field.TypeUUID, value) _node.ScopedResourceID = value } - if nodes := wcc.mutation.VersionsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.VersionsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -326,7 +326,7 @@ func (wcc *WorkflowContractCreate) createSpec() (*WorkflowContract, *sqlgraph.Cr } _spec.Edges = append(_spec.Edges, edge) } - if nodes := wcc.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _c.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -343,7 +343,7 @@ func (wcc *WorkflowContractCreate) createSpec() (*WorkflowContract, *sqlgraph.Cr _node.organization_workflow_contracts = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := wcc.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -378,10 +378,10 @@ func (wcc *WorkflowContractCreate) createSpec() (*WorkflowContract, *sqlgraph.Cr // SetName(v+v). // }). // Exec(ctx) -func (wcc *WorkflowContractCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowContractUpsertOne { - wcc.conflict = opts +func (_c *WorkflowContractCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowContractUpsertOne { + _c.conflict = opts return &WorkflowContractUpsertOne{ - create: wcc, + create: _c, } } @@ -391,10 +391,10 @@ func (wcc *WorkflowContractCreate) OnConflict(opts ...sql.ConflictOption) *Workf // client.WorkflowContract.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (wcc *WorkflowContractCreate) OnConflictColumns(columns ...string) *WorkflowContractUpsertOne { - wcc.conflict = append(wcc.conflict, sql.ConflictColumns(columns...)) +func (_c *WorkflowContractCreate) OnConflictColumns(columns ...string) *WorkflowContractUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &WorkflowContractUpsertOne{ - create: wcc, + create: _c, } } @@ -694,16 +694,16 @@ type WorkflowContractCreateBulk struct { } // Save creates the WorkflowContract entities in the database. 
-func (wccb *WorkflowContractCreateBulk) Save(ctx context.Context) ([]*WorkflowContract, error) { - if wccb.err != nil { - return nil, wccb.err - } - specs := make([]*sqlgraph.CreateSpec, len(wccb.builders)) - nodes := make([]*WorkflowContract, len(wccb.builders)) - mutators := make([]Mutator, len(wccb.builders)) - for i := range wccb.builders { +func (_c *WorkflowContractCreateBulk) Save(ctx context.Context) ([]*WorkflowContract, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*WorkflowContract, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := wccb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*WorkflowContractMutation) @@ -717,12 +717,12 @@ func (wccb *WorkflowContractCreateBulk) Save(ctx context.Context) ([]*WorkflowCo var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, wccb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = wccb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, wccb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -742,7 +742,7 @@ func (wccb *WorkflowContractCreateBulk) Save(ctx context.Context) ([]*WorkflowCo }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, wccb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -750,8 +750,8 @@ func (wccb *WorkflowContractCreateBulk) Save(ctx context.Context) ([]*WorkflowCo } // SaveX is like Save, but panics if an error occurs. -func (wccb *WorkflowContractCreateBulk) SaveX(ctx context.Context) []*WorkflowContract { - v, err := wccb.Save(ctx) +func (_c *WorkflowContractCreateBulk) SaveX(ctx context.Context) []*WorkflowContract { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -759,14 +759,14 @@ func (wccb *WorkflowContractCreateBulk) SaveX(ctx context.Context) []*WorkflowCo } // Exec executes the query. -func (wccb *WorkflowContractCreateBulk) Exec(ctx context.Context) error { - _, err := wccb.Save(ctx) +func (_c *WorkflowContractCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wccb *WorkflowContractCreateBulk) ExecX(ctx context.Context) { - if err := wccb.Exec(ctx); err != nil { +func (_c *WorkflowContractCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -786,10 +786,10 @@ func (wccb *WorkflowContractCreateBulk) ExecX(ctx context.Context) { // SetName(v+v). // }). 
// Exec(ctx) -func (wccb *WorkflowContractCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowContractUpsertBulk { - wccb.conflict = opts +func (_c *WorkflowContractCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowContractUpsertBulk { + _c.conflict = opts return &WorkflowContractUpsertBulk{ - create: wccb, + create: _c, } } @@ -799,10 +799,10 @@ func (wccb *WorkflowContractCreateBulk) OnConflict(opts ...sql.ConflictOption) * // client.WorkflowContract.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (wccb *WorkflowContractCreateBulk) OnConflictColumns(columns ...string) *WorkflowContractUpsertBulk { - wccb.conflict = append(wccb.conflict, sql.ConflictColumns(columns...)) +func (_c *WorkflowContractCreateBulk) OnConflictColumns(columns ...string) *WorkflowContractUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &WorkflowContractUpsertBulk{ - create: wccb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/workflowcontract_delete.go b/app/controlplane/pkg/data/ent/workflowcontract_delete.go index 11437e98e..154696e6b 100644 --- a/app/controlplane/pkg/data/ent/workflowcontract_delete.go +++ b/app/controlplane/pkg/data/ent/workflowcontract_delete.go @@ -20,56 +20,56 @@ type WorkflowContractDelete struct { } // Where appends a list predicates to the WorkflowContractDelete builder. -func (wcd *WorkflowContractDelete) Where(ps ...predicate.WorkflowContract) *WorkflowContractDelete { - wcd.mutation.Where(ps...) - return wcd +func (_d *WorkflowContractDelete) Where(ps ...predicate.WorkflowContract) *WorkflowContractDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (wcd *WorkflowContractDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, wcd.sqlExec, wcd.mutation, wcd.hooks) +func (_d *WorkflowContractDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (wcd *WorkflowContractDelete) ExecX(ctx context.Context) int { - n, err := wcd.Exec(ctx) +func (_d *WorkflowContractDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (wcd *WorkflowContractDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *WorkflowContractDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(workflowcontract.Table, sqlgraph.NewFieldSpec(workflowcontract.FieldID, field.TypeUUID)) - if ps := wcd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, wcd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - wcd.mutation.done = true + _d.mutation.done = true return affected, err } // WorkflowContractDeleteOne is the builder for deleting a single WorkflowContract entity. type WorkflowContractDeleteOne struct { - wcd *WorkflowContractDelete + _d *WorkflowContractDelete } // Where appends a list predicates to the WorkflowContractDelete builder. -func (wcdo *WorkflowContractDeleteOne) Where(ps ...predicate.WorkflowContract) *WorkflowContractDeleteOne { - wcdo.wcd.mutation.Where(ps...) 
- return wcdo +func (_d *WorkflowContractDeleteOne) Where(ps ...predicate.WorkflowContract) *WorkflowContractDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (wcdo *WorkflowContractDeleteOne) Exec(ctx context.Context) error { - n, err := wcdo.wcd.Exec(ctx) +func (_d *WorkflowContractDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (wcdo *WorkflowContractDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (wcdo *WorkflowContractDeleteOne) ExecX(ctx context.Context) { - if err := wcdo.Exec(ctx); err != nil { +func (_d *WorkflowContractDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/workflowcontract_query.go b/app/controlplane/pkg/data/ent/workflowcontract_query.go index 1d2e2652c..7a2969b82 100644 --- a/app/controlplane/pkg/data/ent/workflowcontract_query.go +++ b/app/controlplane/pkg/data/ent/workflowcontract_query.go @@ -39,44 +39,44 @@ type WorkflowContractQuery struct { } // Where adds a new predicate for the WorkflowContractQuery builder. -func (wcq *WorkflowContractQuery) Where(ps ...predicate.WorkflowContract) *WorkflowContractQuery { - wcq.predicates = append(wcq.predicates, ps...) - return wcq +func (_q *WorkflowContractQuery) Where(ps ...predicate.WorkflowContract) *WorkflowContractQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (wcq *WorkflowContractQuery) Limit(limit int) *WorkflowContractQuery { - wcq.ctx.Limit = &limit - return wcq +func (_q *WorkflowContractQuery) Limit(limit int) *WorkflowContractQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (wcq *WorkflowContractQuery) Offset(offset int) *WorkflowContractQuery { - wcq.ctx.Offset = &offset - return wcq +func (_q *WorkflowContractQuery) Offset(offset int) *WorkflowContractQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (wcq *WorkflowContractQuery) Unique(unique bool) *WorkflowContractQuery { - wcq.ctx.Unique = &unique - return wcq +func (_q *WorkflowContractQuery) Unique(unique bool) *WorkflowContractQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (wcq *WorkflowContractQuery) Order(o ...workflowcontract.OrderOption) *WorkflowContractQuery { - wcq.order = append(wcq.order, o...) - return wcq +func (_q *WorkflowContractQuery) Order(o ...workflowcontract.OrderOption) *WorkflowContractQuery { + _q.order = append(_q.order, o...) + return _q } // QueryVersions chains the current query on the "versions" edge. 
-func (wcq *WorkflowContractQuery) QueryVersions() *WorkflowContractVersionQuery { - query := (&WorkflowContractVersionClient{config: wcq.config}).Query() +func (_q *WorkflowContractQuery) QueryVersions() *WorkflowContractVersionQuery { + query := (&WorkflowContractVersionClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wcq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wcq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -85,20 +85,20 @@ func (wcq *WorkflowContractQuery) QueryVersions() *WorkflowContractVersionQuery sqlgraph.To(workflowcontractversion.Table, workflowcontractversion.FieldID), sqlgraph.Edge(sqlgraph.O2M, false, workflowcontract.VersionsTable, workflowcontract.VersionsColumn), ) - fromU = sqlgraph.SetNeighbors(wcq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryOrganization chains the current query on the "organization" edge. -func (wcq *WorkflowContractQuery) QueryOrganization() *OrganizationQuery { - query := (&OrganizationClient{config: wcq.config}).Query() +func (_q *WorkflowContractQuery) QueryOrganization() *OrganizationQuery { + query := (&OrganizationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wcq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wcq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -107,20 +107,20 @@ func (wcq *WorkflowContractQuery) QueryOrganization() *OrganizationQuery { sqlgraph.To(organization.Table, organization.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflowcontract.OrganizationTable, workflowcontract.OrganizationColumn), ) - fromU = sqlgraph.SetNeighbors(wcq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryWorkflows chains the current query on the "workflows" edge. -func (wcq *WorkflowContractQuery) QueryWorkflows() *WorkflowQuery { - query := (&WorkflowClient{config: wcq.config}).Query() +func (_q *WorkflowContractQuery) QueryWorkflows() *WorkflowQuery { + query := (&WorkflowClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wcq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wcq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -129,7 +129,7 @@ func (wcq *WorkflowContractQuery) QueryWorkflows() *WorkflowQuery { sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.O2M, true, workflowcontract.WorkflowsTable, workflowcontract.WorkflowsColumn), ) - fromU = sqlgraph.SetNeighbors(wcq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -137,8 +137,8 @@ func (wcq *WorkflowContractQuery) QueryWorkflows() *WorkflowQuery { // First returns the first WorkflowContract entity from the query. // Returns a *NotFoundError when no WorkflowContract was found. 
-func (wcq *WorkflowContractQuery) First(ctx context.Context) (*WorkflowContract, error) { - nodes, err := wcq.Limit(1).All(setContextOp(ctx, wcq.ctx, ent.OpQueryFirst)) +func (_q *WorkflowContractQuery) First(ctx context.Context) (*WorkflowContract, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -149,8 +149,8 @@ func (wcq *WorkflowContractQuery) First(ctx context.Context) (*WorkflowContract, } // FirstX is like First, but panics if an error occurs. -func (wcq *WorkflowContractQuery) FirstX(ctx context.Context) *WorkflowContract { - node, err := wcq.First(ctx) +func (_q *WorkflowContractQuery) FirstX(ctx context.Context) *WorkflowContract { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -159,9 +159,9 @@ func (wcq *WorkflowContractQuery) FirstX(ctx context.Context) *WorkflowContract // FirstID returns the first WorkflowContract ID from the query. // Returns a *NotFoundError when no WorkflowContract ID was found. -func (wcq *WorkflowContractQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *WorkflowContractQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = wcq.Limit(1).IDs(setContextOp(ctx, wcq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -172,8 +172,8 @@ func (wcq *WorkflowContractQuery) FirstID(ctx context.Context) (id uuid.UUID, er } // FirstIDX is like FirstID, but panics if an error occurs. -func (wcq *WorkflowContractQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := wcq.FirstID(ctx) +func (_q *WorkflowContractQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -183,8 +183,8 @@ func (wcq *WorkflowContractQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single WorkflowContract entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one WorkflowContract entity is found. // Returns a *NotFoundError when no WorkflowContract entities are found. -func (wcq *WorkflowContractQuery) Only(ctx context.Context) (*WorkflowContract, error) { - nodes, err := wcq.Limit(2).All(setContextOp(ctx, wcq.ctx, ent.OpQueryOnly)) +func (_q *WorkflowContractQuery) Only(ctx context.Context) (*WorkflowContract, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -199,8 +199,8 @@ func (wcq *WorkflowContractQuery) Only(ctx context.Context) (*WorkflowContract, } // OnlyX is like Only, but panics if an error occurs. -func (wcq *WorkflowContractQuery) OnlyX(ctx context.Context) *WorkflowContract { - node, err := wcq.Only(ctx) +func (_q *WorkflowContractQuery) OnlyX(ctx context.Context) *WorkflowContract { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -210,9 +210,9 @@ func (wcq *WorkflowContractQuery) OnlyX(ctx context.Context) *WorkflowContract { // OnlyID is like Only, but returns the only WorkflowContract ID in the query. // Returns a *NotSingularError when more than one WorkflowContract ID is found. // Returns a *NotFoundError when no entities are found. 
-func (wcq *WorkflowContractQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *WorkflowContractQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = wcq.Limit(2).IDs(setContextOp(ctx, wcq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -227,8 +227,8 @@ func (wcq *WorkflowContractQuery) OnlyID(ctx context.Context) (id uuid.UUID, err } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (wcq *WorkflowContractQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := wcq.OnlyID(ctx) +func (_q *WorkflowContractQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -236,18 +236,18 @@ func (wcq *WorkflowContractQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of WorkflowContracts. -func (wcq *WorkflowContractQuery) All(ctx context.Context) ([]*WorkflowContract, error) { - ctx = setContextOp(ctx, wcq.ctx, ent.OpQueryAll) - if err := wcq.prepareQuery(ctx); err != nil { +func (_q *WorkflowContractQuery) All(ctx context.Context) ([]*WorkflowContract, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*WorkflowContract, *WorkflowContractQuery]() - return withInterceptors[[]*WorkflowContract](ctx, wcq, qr, wcq.inters) + return withInterceptors[[]*WorkflowContract](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (wcq *WorkflowContractQuery) AllX(ctx context.Context) []*WorkflowContract { - nodes, err := wcq.All(ctx) +func (_q *WorkflowContractQuery) AllX(ctx context.Context) []*WorkflowContract { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -255,20 +255,20 @@ func (wcq *WorkflowContractQuery) AllX(ctx context.Context) []*WorkflowContract } // IDs executes the query and returns a list of WorkflowContract IDs. -func (wcq *WorkflowContractQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if wcq.ctx.Unique == nil && wcq.path != nil { - wcq.Unique(true) +func (_q *WorkflowContractQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, wcq.ctx, ent.OpQueryIDs) - if err = wcq.Select(workflowcontract.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(workflowcontract.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (wcq *WorkflowContractQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := wcq.IDs(ctx) +func (_q *WorkflowContractQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -276,17 +276,17 @@ func (wcq *WorkflowContractQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. 
-func (wcq *WorkflowContractQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, wcq.ctx, ent.OpQueryCount) - if err := wcq.prepareQuery(ctx); err != nil { +func (_q *WorkflowContractQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, wcq, querierCount[*WorkflowContractQuery](), wcq.inters) + return withInterceptors[int](ctx, _q, querierCount[*WorkflowContractQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (wcq *WorkflowContractQuery) CountX(ctx context.Context) int { - count, err := wcq.Count(ctx) +func (_q *WorkflowContractQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -294,9 +294,9 @@ func (wcq *WorkflowContractQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (wcq *WorkflowContractQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, wcq.ctx, ent.OpQueryExist) - switch _, err := wcq.FirstID(ctx); { +func (_q *WorkflowContractQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -307,8 +307,8 @@ func (wcq *WorkflowContractQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (wcq *WorkflowContractQuery) ExistX(ctx context.Context) bool { - exist, err := wcq.Exist(ctx) +func (_q *WorkflowContractQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -317,57 +317,57 @@ func (wcq *WorkflowContractQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the WorkflowContractQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. -func (wcq *WorkflowContractQuery) Clone() *WorkflowContractQuery { - if wcq == nil { +func (_q *WorkflowContractQuery) Clone() *WorkflowContractQuery { + if _q == nil { return nil } return &WorkflowContractQuery{ - config: wcq.config, - ctx: wcq.ctx.Clone(), - order: append([]workflowcontract.OrderOption{}, wcq.order...), - inters: append([]Interceptor{}, wcq.inters...), - predicates: append([]predicate.WorkflowContract{}, wcq.predicates...), - withVersions: wcq.withVersions.Clone(), - withOrganization: wcq.withOrganization.Clone(), - withWorkflows: wcq.withWorkflows.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]workflowcontract.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.WorkflowContract{}, _q.predicates...), + withVersions: _q.withVersions.Clone(), + withOrganization: _q.withOrganization.Clone(), + withWorkflows: _q.withWorkflows.Clone(), // clone intermediate query. - sql: wcq.sql.Clone(), - path: wcq.path, - modifiers: append([]func(*sql.Selector){}, wcq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithVersions tells the query-builder to eager-load the nodes that are connected to // the "versions" edge. The optional arguments are used to configure the query builder of the edge. 
-func (wcq *WorkflowContractQuery) WithVersions(opts ...func(*WorkflowContractVersionQuery)) *WorkflowContractQuery { - query := (&WorkflowContractVersionClient{config: wcq.config}).Query() +func (_q *WorkflowContractQuery) WithVersions(opts ...func(*WorkflowContractVersionQuery)) *WorkflowContractQuery { + query := (&WorkflowContractVersionClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wcq.withVersions = query - return wcq + _q.withVersions = query + return _q } // WithOrganization tells the query-builder to eager-load the nodes that are connected to // the "organization" edge. The optional arguments are used to configure the query builder of the edge. -func (wcq *WorkflowContractQuery) WithOrganization(opts ...func(*OrganizationQuery)) *WorkflowContractQuery { - query := (&OrganizationClient{config: wcq.config}).Query() +func (_q *WorkflowContractQuery) WithOrganization(opts ...func(*OrganizationQuery)) *WorkflowContractQuery { + query := (&OrganizationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wcq.withOrganization = query - return wcq + _q.withOrganization = query + return _q } // WithWorkflows tells the query-builder to eager-load the nodes that are connected to // the "workflows" edge. The optional arguments are used to configure the query builder of the edge. -func (wcq *WorkflowContractQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *WorkflowContractQuery { - query := (&WorkflowClient{config: wcq.config}).Query() +func (_q *WorkflowContractQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *WorkflowContractQuery { + query := (&WorkflowClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wcq.withWorkflows = query - return wcq + _q.withWorkflows = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -384,10 +384,10 @@ func (wcq *WorkflowContractQuery) WithWorkflows(opts ...func(*WorkflowQuery)) *W // GroupBy(workflowcontract.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (wcq *WorkflowContractQuery) GroupBy(field string, fields ...string) *WorkflowContractGroupBy { - wcq.ctx.Fields = append([]string{field}, fields...) - grbuild := &WorkflowContractGroupBy{build: wcq} - grbuild.flds = &wcq.ctx.Fields +func (_q *WorkflowContractQuery) GroupBy(field string, fields ...string) *WorkflowContractGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &WorkflowContractGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = workflowcontract.Label grbuild.scan = grbuild.Scan return grbuild @@ -405,57 +405,57 @@ func (wcq *WorkflowContractQuery) GroupBy(field string, fields ...string) *Workf // client.WorkflowContract.Query(). // Select(workflowcontract.FieldName). // Scan(ctx, &v) -func (wcq *WorkflowContractQuery) Select(fields ...string) *WorkflowContractSelect { - wcq.ctx.Fields = append(wcq.ctx.Fields, fields...) - sbuild := &WorkflowContractSelect{WorkflowContractQuery: wcq} +func (_q *WorkflowContractQuery) Select(fields ...string) *WorkflowContractSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &WorkflowContractSelect{WorkflowContractQuery: _q} sbuild.label = workflowcontract.Label - sbuild.flds, sbuild.scan = &wcq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a WorkflowContractSelect configured with the given aggregations. 
-func (wcq *WorkflowContractQuery) Aggregate(fns ...AggregateFunc) *WorkflowContractSelect { - return wcq.Select().Aggregate(fns...) +func (_q *WorkflowContractQuery) Aggregate(fns ...AggregateFunc) *WorkflowContractSelect { + return _q.Select().Aggregate(fns...) } -func (wcq *WorkflowContractQuery) prepareQuery(ctx context.Context) error { - for _, inter := range wcq.inters { +func (_q *WorkflowContractQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, wcq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range wcq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !workflowcontract.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if wcq.path != nil { - prev, err := wcq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - wcq.sql = prev + _q.sql = prev } return nil } -func (wcq *WorkflowContractQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*WorkflowContract, error) { +func (_q *WorkflowContractQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*WorkflowContract, error) { var ( nodes = []*WorkflowContract{} - withFKs = wcq.withFKs - _spec = wcq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [3]bool{ - wcq.withVersions != nil, - wcq.withOrganization != nil, - wcq.withWorkflows != nil, + _q.withVersions != nil, + _q.withOrganization != nil, + _q.withWorkflows != nil, } ) - if wcq.withOrganization != nil { + if _q.withOrganization != nil { withFKs = true } if withFKs { @@ -465,38 +465,38 @@ func (wcq *WorkflowContractQuery) sqlAll(ctx context.Context, hooks ...queryHook return (*WorkflowContract).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &WorkflowContract{config: wcq.config} + node := &WorkflowContract{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(wcq.modifiers) > 0 { - _spec.Modifiers = wcq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, wcq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := wcq.withVersions; query != nil { - if err := wcq.loadVersions(ctx, query, nodes, + if query := _q.withVersions; query != nil { + if err := _q.loadVersions(ctx, query, nodes, func(n *WorkflowContract) { n.Edges.Versions = []*WorkflowContractVersion{} }, func(n *WorkflowContract, e *WorkflowContractVersion) { n.Edges.Versions = append(n.Edges.Versions, e) }); err != nil { return nil, err } } - if query := wcq.withOrganization; query != nil { - if err := wcq.loadOrganization(ctx, query, nodes, nil, + if query := _q.withOrganization; query != nil { + if err := _q.loadOrganization(ctx, query, nodes, nil, func(n *WorkflowContract, e *Organization) { n.Edges.Organization = e }); err != nil { return nil, err } } - if query := wcq.withWorkflows; query != nil { - if err := wcq.loadWorkflows(ctx, query, nodes, + if query := _q.withWorkflows; query != nil { + if err := _q.loadWorkflows(ctx, query, nodes, func(n *WorkflowContract) { n.Edges.Workflows = 
[]*Workflow{} }, func(n *WorkflowContract, e *Workflow) { n.Edges.Workflows = append(n.Edges.Workflows, e) }); err != nil { return nil, err @@ -505,7 +505,7 @@ func (wcq *WorkflowContractQuery) sqlAll(ctx context.Context, hooks ...queryHook return nodes, nil } -func (wcq *WorkflowContractQuery) loadVersions(ctx context.Context, query *WorkflowContractVersionQuery, nodes []*WorkflowContract, init func(*WorkflowContract), assign func(*WorkflowContract, *WorkflowContractVersion)) error { +func (_q *WorkflowContractQuery) loadVersions(ctx context.Context, query *WorkflowContractVersionQuery, nodes []*WorkflowContract, init func(*WorkflowContract), assign func(*WorkflowContract, *WorkflowContractVersion)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*WorkflowContract) for i := range nodes { @@ -536,7 +536,7 @@ func (wcq *WorkflowContractQuery) loadVersions(ctx context.Context, query *Workf } return nil } -func (wcq *WorkflowContractQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*WorkflowContract, init func(*WorkflowContract), assign func(*WorkflowContract, *Organization)) error { +func (_q *WorkflowContractQuery) loadOrganization(ctx context.Context, query *OrganizationQuery, nodes []*WorkflowContract, init func(*WorkflowContract), assign func(*WorkflowContract, *Organization)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*WorkflowContract) for i := range nodes { @@ -568,7 +568,7 @@ func (wcq *WorkflowContractQuery) loadOrganization(ctx context.Context, query *O } return nil } -func (wcq *WorkflowContractQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, nodes []*WorkflowContract, init func(*WorkflowContract), assign func(*WorkflowContract, *Workflow)) error { +func (_q *WorkflowContractQuery) loadWorkflows(ctx context.Context, query *WorkflowQuery, nodes []*WorkflowContract, init func(*WorkflowContract), assign func(*WorkflowContract, *Workflow)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*WorkflowContract) for i := range nodes { @@ -600,27 +600,27 @@ func (wcq *WorkflowContractQuery) loadWorkflows(ctx context.Context, query *Work return nil } -func (wcq *WorkflowContractQuery) sqlCount(ctx context.Context) (int, error) { - _spec := wcq.querySpec() - if len(wcq.modifiers) > 0 { - _spec.Modifiers = wcq.modifiers +func (_q *WorkflowContractQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = wcq.ctx.Fields - if len(wcq.ctx.Fields) > 0 { - _spec.Unique = wcq.ctx.Unique != nil && *wcq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, wcq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (wcq *WorkflowContractQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *WorkflowContractQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(workflowcontract.Table, workflowcontract.Columns, sqlgraph.NewFieldSpec(workflowcontract.FieldID, field.TypeUUID)) - _spec.From = wcq.sql - if unique := wcq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if wcq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := wcq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { 
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, workflowcontract.FieldID) for i := range fields { @@ -629,20 +629,20 @@ func (wcq *WorkflowContractQuery) querySpec() *sqlgraph.QuerySpec { } } } - if ps := wcq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := wcq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := wcq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := wcq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -652,36 +652,36 @@ func (wcq *WorkflowContractQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (wcq *WorkflowContractQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(wcq.driver.Dialect()) +func (_q *WorkflowContractQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(workflowcontract.Table) - columns := wcq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = workflowcontract.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if wcq.sql != nil { - selector = wcq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) } - if wcq.ctx.Unique != nil && *wcq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range wcq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range wcq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range wcq.order { + for _, p := range _q.order { p(selector) } - if offset := wcq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := wcq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -690,33 +690,33 @@ func (wcq *WorkflowContractQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (wcq *WorkflowContractQuery) ForUpdate(opts ...sql.LockOption) *WorkflowContractQuery { - if wcq.driver.Dialect() == dialect.Postgres { - wcq.Unique(false) +func (_q *WorkflowContractQuery) ForUpdate(opts ...sql.LockOption) *WorkflowContractQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - wcq.modifiers = append(wcq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return wcq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. 
-func (wcq *WorkflowContractQuery) ForShare(opts ...sql.LockOption) *WorkflowContractQuery { - if wcq.driver.Dialect() == dialect.Postgres { - wcq.Unique(false) +func (_q *WorkflowContractQuery) ForShare(opts ...sql.LockOption) *WorkflowContractQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - wcq.modifiers = append(wcq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return wcq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (wcq *WorkflowContractQuery) Modify(modifiers ...func(s *sql.Selector)) *WorkflowContractSelect { - wcq.modifiers = append(wcq.modifiers, modifiers...) - return wcq.Select() +func (_q *WorkflowContractQuery) Modify(modifiers ...func(s *sql.Selector)) *WorkflowContractSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // WorkflowContractGroupBy is the group-by builder for WorkflowContract entities. @@ -726,41 +726,41 @@ type WorkflowContractGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (wcgb *WorkflowContractGroupBy) Aggregate(fns ...AggregateFunc) *WorkflowContractGroupBy { - wcgb.fns = append(wcgb.fns, fns...) - return wcgb +func (_g *WorkflowContractGroupBy) Aggregate(fns ...AggregateFunc) *WorkflowContractGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. -func (wcgb *WorkflowContractGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, wcgb.build.ctx, ent.OpQueryGroupBy) - if err := wcgb.build.prepareQuery(ctx); err != nil { +func (_g *WorkflowContractGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*WorkflowContractQuery, *WorkflowContractGroupBy](ctx, wcgb.build, wcgb, wcgb.build.inters, v) + return scanWithInterceptors[*WorkflowContractQuery, *WorkflowContractGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (wcgb *WorkflowContractGroupBy) sqlScan(ctx context.Context, root *WorkflowContractQuery, v any) error { +func (_g *WorkflowContractGroupBy) sqlScan(ctx context.Context, root *WorkflowContractQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(wcgb.fns)) - for _, fn := range wcgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*wcgb.flds)+len(wcgb.fns)) - for _, f := range *wcgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*wcgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := wcgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -774,27 +774,27 @@ type WorkflowContractSelect struct { } // Aggregate adds the given aggregation functions to the selector query. 
-func (wcs *WorkflowContractSelect) Aggregate(fns ...AggregateFunc) *WorkflowContractSelect { - wcs.fns = append(wcs.fns, fns...) - return wcs +func (_s *WorkflowContractSelect) Aggregate(fns ...AggregateFunc) *WorkflowContractSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (wcs *WorkflowContractSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, wcs.ctx, ent.OpQuerySelect) - if err := wcs.prepareQuery(ctx); err != nil { +func (_s *WorkflowContractSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*WorkflowContractQuery, *WorkflowContractSelect](ctx, wcs.WorkflowContractQuery, wcs, wcs.inters, v) + return scanWithInterceptors[*WorkflowContractQuery, *WorkflowContractSelect](ctx, _s.WorkflowContractQuery, _s, _s.inters, v) } -func (wcs *WorkflowContractSelect) sqlScan(ctx context.Context, root *WorkflowContractQuery, v any) error { +func (_s *WorkflowContractSelect) sqlScan(ctx context.Context, root *WorkflowContractQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(wcs.fns)) - for _, fn := range wcs.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*wcs.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -802,7 +802,7 @@ func (wcs *WorkflowContractSelect) sqlScan(ctx context.Context, root *WorkflowCo } rows := &sql.Rows{} query, args := selector.Query() - if err := wcs.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -810,7 +810,7 @@ func (wcs *WorkflowContractSelect) sqlScan(ctx context.Context, root *WorkflowCo } // Modify adds a query modifier for attaching custom logic to queries. -func (wcs *WorkflowContractSelect) Modify(modifiers ...func(s *sql.Selector)) *WorkflowContractSelect { - wcs.modifiers = append(wcs.modifiers, modifiers...) - return wcs +func (_s *WorkflowContractSelect) Modify(modifiers ...func(s *sql.Selector)) *WorkflowContractSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/workflowcontract_update.go b/app/controlplane/pkg/data/ent/workflowcontract_update.go index ee5bdcf2e..61e369803 100644 --- a/app/controlplane/pkg/data/ent/workflowcontract_update.go +++ b/app/controlplane/pkg/data/ent/workflowcontract_update.go @@ -29,215 +29,215 @@ type WorkflowContractUpdate struct { } // Where appends a list predicates to the WorkflowContractUpdate builder. -func (wcu *WorkflowContractUpdate) Where(ps ...predicate.WorkflowContract) *WorkflowContractUpdate { - wcu.mutation.Where(ps...) - return wcu +func (_u *WorkflowContractUpdate) Where(ps ...predicate.WorkflowContract) *WorkflowContractUpdate { + _u.mutation.Where(ps...) + return _u } // SetUpdatedAt sets the "updated_at" field. 
-func (wcu *WorkflowContractUpdate) SetUpdatedAt(t time.Time) *WorkflowContractUpdate { - wcu.mutation.SetUpdatedAt(t) - return wcu +func (_u *WorkflowContractUpdate) SetUpdatedAt(v time.Time) *WorkflowContractUpdate { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (wcu *WorkflowContractUpdate) SetNillableUpdatedAt(t *time.Time) *WorkflowContractUpdate { - if t != nil { - wcu.SetUpdatedAt(*t) +func (_u *WorkflowContractUpdate) SetNillableUpdatedAt(v *time.Time) *WorkflowContractUpdate { + if v != nil { + _u.SetUpdatedAt(*v) } - return wcu + return _u } // SetDeletedAt sets the "deleted_at" field. -func (wcu *WorkflowContractUpdate) SetDeletedAt(t time.Time) *WorkflowContractUpdate { - wcu.mutation.SetDeletedAt(t) - return wcu +func (_u *WorkflowContractUpdate) SetDeletedAt(v time.Time) *WorkflowContractUpdate { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. -func (wcu *WorkflowContractUpdate) SetNillableDeletedAt(t *time.Time) *WorkflowContractUpdate { - if t != nil { - wcu.SetDeletedAt(*t) +func (_u *WorkflowContractUpdate) SetNillableDeletedAt(v *time.Time) *WorkflowContractUpdate { + if v != nil { + _u.SetDeletedAt(*v) } - return wcu + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (wcu *WorkflowContractUpdate) ClearDeletedAt() *WorkflowContractUpdate { - wcu.mutation.ClearDeletedAt() - return wcu +func (_u *WorkflowContractUpdate) ClearDeletedAt() *WorkflowContractUpdate { + _u.mutation.ClearDeletedAt() + return _u } // SetDescription sets the "description" field. -func (wcu *WorkflowContractUpdate) SetDescription(s string) *WorkflowContractUpdate { - wcu.mutation.SetDescription(s) - return wcu +func (_u *WorkflowContractUpdate) SetDescription(v string) *WorkflowContractUpdate { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (wcu *WorkflowContractUpdate) SetNillableDescription(s *string) *WorkflowContractUpdate { - if s != nil { - wcu.SetDescription(*s) +func (_u *WorkflowContractUpdate) SetNillableDescription(v *string) *WorkflowContractUpdate { + if v != nil { + _u.SetDescription(*v) } - return wcu + return _u } // ClearDescription clears the value of the "description" field. -func (wcu *WorkflowContractUpdate) ClearDescription() *WorkflowContractUpdate { - wcu.mutation.ClearDescription() - return wcu +func (_u *WorkflowContractUpdate) ClearDescription() *WorkflowContractUpdate { + _u.mutation.ClearDescription() + return _u } // SetScopedResourceType sets the "scoped_resource_type" field. -func (wcu *WorkflowContractUpdate) SetScopedResourceType(bs biz.ContractScope) *WorkflowContractUpdate { - wcu.mutation.SetScopedResourceType(bs) - return wcu +func (_u *WorkflowContractUpdate) SetScopedResourceType(v biz.ContractScope) *WorkflowContractUpdate { + _u.mutation.SetScopedResourceType(v) + return _u } // SetNillableScopedResourceType sets the "scoped_resource_type" field if the given value is not nil. 
-func (wcu *WorkflowContractUpdate) SetNillableScopedResourceType(bs *biz.ContractScope) *WorkflowContractUpdate { - if bs != nil { - wcu.SetScopedResourceType(*bs) +func (_u *WorkflowContractUpdate) SetNillableScopedResourceType(v *biz.ContractScope) *WorkflowContractUpdate { + if v != nil { + _u.SetScopedResourceType(*v) } - return wcu + return _u } // ClearScopedResourceType clears the value of the "scoped_resource_type" field. -func (wcu *WorkflowContractUpdate) ClearScopedResourceType() *WorkflowContractUpdate { - wcu.mutation.ClearScopedResourceType() - return wcu +func (_u *WorkflowContractUpdate) ClearScopedResourceType() *WorkflowContractUpdate { + _u.mutation.ClearScopedResourceType() + return _u } // SetScopedResourceID sets the "scoped_resource_id" field. -func (wcu *WorkflowContractUpdate) SetScopedResourceID(u uuid.UUID) *WorkflowContractUpdate { - wcu.mutation.SetScopedResourceID(u) - return wcu +func (_u *WorkflowContractUpdate) SetScopedResourceID(v uuid.UUID) *WorkflowContractUpdate { + _u.mutation.SetScopedResourceID(v) + return _u } // SetNillableScopedResourceID sets the "scoped_resource_id" field if the given value is not nil. -func (wcu *WorkflowContractUpdate) SetNillableScopedResourceID(u *uuid.UUID) *WorkflowContractUpdate { - if u != nil { - wcu.SetScopedResourceID(*u) +func (_u *WorkflowContractUpdate) SetNillableScopedResourceID(v *uuid.UUID) *WorkflowContractUpdate { + if v != nil { + _u.SetScopedResourceID(*v) } - return wcu + return _u } // ClearScopedResourceID clears the value of the "scoped_resource_id" field. -func (wcu *WorkflowContractUpdate) ClearScopedResourceID() *WorkflowContractUpdate { - wcu.mutation.ClearScopedResourceID() - return wcu +func (_u *WorkflowContractUpdate) ClearScopedResourceID() *WorkflowContractUpdate { + _u.mutation.ClearScopedResourceID() + return _u } // AddVersionIDs adds the "versions" edge to the WorkflowContractVersion entity by IDs. -func (wcu *WorkflowContractUpdate) AddVersionIDs(ids ...uuid.UUID) *WorkflowContractUpdate { - wcu.mutation.AddVersionIDs(ids...) - return wcu +func (_u *WorkflowContractUpdate) AddVersionIDs(ids ...uuid.UUID) *WorkflowContractUpdate { + _u.mutation.AddVersionIDs(ids...) + return _u } // AddVersions adds the "versions" edges to the WorkflowContractVersion entity. -func (wcu *WorkflowContractUpdate) AddVersions(w ...*WorkflowContractVersion) *WorkflowContractUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowContractUpdate) AddVersions(v ...*WorkflowContractVersion) *WorkflowContractUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcu.AddVersionIDs(ids...) + return _u.AddVersionIDs(ids...) } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (wcu *WorkflowContractUpdate) SetOrganizationID(id uuid.UUID) *WorkflowContractUpdate { - wcu.mutation.SetOrganizationID(id) - return wcu +func (_u *WorkflowContractUpdate) SetOrganizationID(id uuid.UUID) *WorkflowContractUpdate { + _u.mutation.SetOrganizationID(id) + return _u } // SetNillableOrganizationID sets the "organization" edge to the Organization entity by ID if the given value is not nil. 
-func (wcu *WorkflowContractUpdate) SetNillableOrganizationID(id *uuid.UUID) *WorkflowContractUpdate { +func (_u *WorkflowContractUpdate) SetNillableOrganizationID(id *uuid.UUID) *WorkflowContractUpdate { if id != nil { - wcu = wcu.SetOrganizationID(*id) + _u = _u.SetOrganizationID(*id) } - return wcu + return _u } // SetOrganization sets the "organization" edge to the Organization entity. -func (wcu *WorkflowContractUpdate) SetOrganization(o *Organization) *WorkflowContractUpdate { - return wcu.SetOrganizationID(o.ID) +func (_u *WorkflowContractUpdate) SetOrganization(v *Organization) *WorkflowContractUpdate { + return _u.SetOrganizationID(v.ID) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (wcu *WorkflowContractUpdate) AddWorkflowIDs(ids ...uuid.UUID) *WorkflowContractUpdate { - wcu.mutation.AddWorkflowIDs(ids...) - return wcu +func (_u *WorkflowContractUpdate) AddWorkflowIDs(ids ...uuid.UUID) *WorkflowContractUpdate { + _u.mutation.AddWorkflowIDs(ids...) + return _u } // AddWorkflows adds the "workflows" edges to the Workflow entity. -func (wcu *WorkflowContractUpdate) AddWorkflows(w ...*Workflow) *WorkflowContractUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowContractUpdate) AddWorkflows(v ...*Workflow) *WorkflowContractUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcu.AddWorkflowIDs(ids...) + return _u.AddWorkflowIDs(ids...) } // Mutation returns the WorkflowContractMutation object of the builder. -func (wcu *WorkflowContractUpdate) Mutation() *WorkflowContractMutation { - return wcu.mutation +func (_u *WorkflowContractUpdate) Mutation() *WorkflowContractMutation { + return _u.mutation } // ClearVersions clears all "versions" edges to the WorkflowContractVersion entity. -func (wcu *WorkflowContractUpdate) ClearVersions() *WorkflowContractUpdate { - wcu.mutation.ClearVersions() - return wcu +func (_u *WorkflowContractUpdate) ClearVersions() *WorkflowContractUpdate { + _u.mutation.ClearVersions() + return _u } // RemoveVersionIDs removes the "versions" edge to WorkflowContractVersion entities by IDs. -func (wcu *WorkflowContractUpdate) RemoveVersionIDs(ids ...uuid.UUID) *WorkflowContractUpdate { - wcu.mutation.RemoveVersionIDs(ids...) - return wcu +func (_u *WorkflowContractUpdate) RemoveVersionIDs(ids ...uuid.UUID) *WorkflowContractUpdate { + _u.mutation.RemoveVersionIDs(ids...) + return _u } // RemoveVersions removes "versions" edges to WorkflowContractVersion entities. -func (wcu *WorkflowContractUpdate) RemoveVersions(w ...*WorkflowContractVersion) *WorkflowContractUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowContractUpdate) RemoveVersions(v ...*WorkflowContractVersion) *WorkflowContractUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcu.RemoveVersionIDs(ids...) + return _u.RemoveVersionIDs(ids...) } // ClearOrganization clears the "organization" edge to the Organization entity. -func (wcu *WorkflowContractUpdate) ClearOrganization() *WorkflowContractUpdate { - wcu.mutation.ClearOrganization() - return wcu +func (_u *WorkflowContractUpdate) ClearOrganization() *WorkflowContractUpdate { + _u.mutation.ClearOrganization() + return _u } // ClearWorkflows clears all "workflows" edges to the Workflow entity. 
-func (wcu *WorkflowContractUpdate) ClearWorkflows() *WorkflowContractUpdate { - wcu.mutation.ClearWorkflows() - return wcu +func (_u *WorkflowContractUpdate) ClearWorkflows() *WorkflowContractUpdate { + _u.mutation.ClearWorkflows() + return _u } // RemoveWorkflowIDs removes the "workflows" edge to Workflow entities by IDs. -func (wcu *WorkflowContractUpdate) RemoveWorkflowIDs(ids ...uuid.UUID) *WorkflowContractUpdate { - wcu.mutation.RemoveWorkflowIDs(ids...) - return wcu +func (_u *WorkflowContractUpdate) RemoveWorkflowIDs(ids ...uuid.UUID) *WorkflowContractUpdate { + _u.mutation.RemoveWorkflowIDs(ids...) + return _u } // RemoveWorkflows removes "workflows" edges to Workflow entities. -func (wcu *WorkflowContractUpdate) RemoveWorkflows(w ...*Workflow) *WorkflowContractUpdate { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowContractUpdate) RemoveWorkflows(v ...*Workflow) *WorkflowContractUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcu.RemoveWorkflowIDs(ids...) + return _u.RemoveWorkflowIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. -func (wcu *WorkflowContractUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, wcu.sqlSave, wcu.mutation, wcu.hooks) +func (_u *WorkflowContractUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (wcu *WorkflowContractUpdate) SaveX(ctx context.Context) int { - affected, err := wcu.Save(ctx) +func (_u *WorkflowContractUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -245,21 +245,21 @@ func (wcu *WorkflowContractUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (wcu *WorkflowContractUpdate) Exec(ctx context.Context) error { - _, err := wcu.Save(ctx) +func (_u *WorkflowContractUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wcu *WorkflowContractUpdate) ExecX(ctx context.Context) { - if err := wcu.Exec(ctx); err != nil { +func (_u *WorkflowContractUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (wcu *WorkflowContractUpdate) check() error { - if v, ok := wcu.mutation.ScopedResourceType(); ok { +func (_u *WorkflowContractUpdate) check() error { + if v, ok := _u.mutation.ScopedResourceType(); ok { if err := workflowcontract.ScopedResourceTypeValidator(v); err != nil { return &ValidationError{Name: "scoped_resource_type", err: fmt.Errorf(`ent: validator failed for field "WorkflowContract.scoped_resource_type": %w`, err)} } @@ -268,51 +268,51 @@ func (wcu *WorkflowContractUpdate) check() error { } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (wcu *WorkflowContractUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowContractUpdate { - wcu.modifiers = append(wcu.modifiers, modifiers...) - return wcu +func (_u *WorkflowContractUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowContractUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := wcu.check(); err != nil { - return n, err +func (_u *WorkflowContractUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(workflowcontract.Table, workflowcontract.Columns, sqlgraph.NewFieldSpec(workflowcontract.FieldID, field.TypeUUID)) - if ps := wcu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := wcu.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(workflowcontract.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := wcu.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(workflowcontract.FieldDeletedAt, field.TypeTime, value) } - if wcu.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(workflowcontract.FieldDeletedAt, field.TypeTime) } - if value, ok := wcu.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(workflowcontract.FieldDescription, field.TypeString, value) } - if wcu.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(workflowcontract.FieldDescription, field.TypeString) } - if value, ok := wcu.mutation.ScopedResourceType(); ok { + if value, ok := _u.mutation.ScopedResourceType(); ok { _spec.SetField(workflowcontract.FieldScopedResourceType, field.TypeEnum, value) } - if wcu.mutation.ScopedResourceTypeCleared() { + if _u.mutation.ScopedResourceTypeCleared() { _spec.ClearField(workflowcontract.FieldScopedResourceType, field.TypeEnum) } - if value, ok := wcu.mutation.ScopedResourceID(); ok { + if value, ok := _u.mutation.ScopedResourceID(); ok { _spec.SetField(workflowcontract.FieldScopedResourceID, field.TypeUUID, value) } - if wcu.mutation.ScopedResourceIDCleared() { + if _u.mutation.ScopedResourceIDCleared() { _spec.ClearField(workflowcontract.FieldScopedResourceID, field.TypeUUID) } - if wcu.mutation.VersionsCleared() { + if _u.mutation.VersionsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -325,7 +325,7 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcu.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !wcu.mutation.VersionsCleared() { + if nodes := _u.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !_u.mutation.VersionsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -341,7 +341,7 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcu.mutation.VersionsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.VersionsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -357,7 +357,7 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wcu.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -370,7 +370,7 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := 
wcu.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -386,7 +386,7 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wcu.mutation.WorkflowsCleared() { + if _u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -399,7 +399,7 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcu.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !wcu.mutation.WorkflowsCleared() { + if nodes := _u.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -415,7 +415,7 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcu.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -431,8 +431,8 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(wcu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, wcu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{workflowcontract.Label} } else if sqlgraph.IsConstraintError(err) { @@ -440,8 +440,8 @@ func (wcu *WorkflowContractUpdate) sqlSave(ctx context.Context) (n int, err erro } return 0, err } - wcu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // WorkflowContractUpdateOne is the builder for updating a single WorkflowContract entity. @@ -454,222 +454,222 @@ type WorkflowContractUpdateOne struct { } // SetUpdatedAt sets the "updated_at" field. -func (wcuo *WorkflowContractUpdateOne) SetUpdatedAt(t time.Time) *WorkflowContractUpdateOne { - wcuo.mutation.SetUpdatedAt(t) - return wcuo +func (_u *WorkflowContractUpdateOne) SetUpdatedAt(v time.Time) *WorkflowContractUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u } // SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (wcuo *WorkflowContractUpdateOne) SetNillableUpdatedAt(t *time.Time) *WorkflowContractUpdateOne { - if t != nil { - wcuo.SetUpdatedAt(*t) +func (_u *WorkflowContractUpdateOne) SetNillableUpdatedAt(v *time.Time) *WorkflowContractUpdateOne { + if v != nil { + _u.SetUpdatedAt(*v) } - return wcuo + return _u } // SetDeletedAt sets the "deleted_at" field. -func (wcuo *WorkflowContractUpdateOne) SetDeletedAt(t time.Time) *WorkflowContractUpdateOne { - wcuo.mutation.SetDeletedAt(t) - return wcuo +func (_u *WorkflowContractUpdateOne) SetDeletedAt(v time.Time) *WorkflowContractUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. 
-func (wcuo *WorkflowContractUpdateOne) SetNillableDeletedAt(t *time.Time) *WorkflowContractUpdateOne { - if t != nil { - wcuo.SetDeletedAt(*t) +func (_u *WorkflowContractUpdateOne) SetNillableDeletedAt(v *time.Time) *WorkflowContractUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) } - return wcuo + return _u } // ClearDeletedAt clears the value of the "deleted_at" field. -func (wcuo *WorkflowContractUpdateOne) ClearDeletedAt() *WorkflowContractUpdateOne { - wcuo.mutation.ClearDeletedAt() - return wcuo +func (_u *WorkflowContractUpdateOne) ClearDeletedAt() *WorkflowContractUpdateOne { + _u.mutation.ClearDeletedAt() + return _u } // SetDescription sets the "description" field. -func (wcuo *WorkflowContractUpdateOne) SetDescription(s string) *WorkflowContractUpdateOne { - wcuo.mutation.SetDescription(s) - return wcuo +func (_u *WorkflowContractUpdateOne) SetDescription(v string) *WorkflowContractUpdateOne { + _u.mutation.SetDescription(v) + return _u } // SetNillableDescription sets the "description" field if the given value is not nil. -func (wcuo *WorkflowContractUpdateOne) SetNillableDescription(s *string) *WorkflowContractUpdateOne { - if s != nil { - wcuo.SetDescription(*s) +func (_u *WorkflowContractUpdateOne) SetNillableDescription(v *string) *WorkflowContractUpdateOne { + if v != nil { + _u.SetDescription(*v) } - return wcuo + return _u } // ClearDescription clears the value of the "description" field. -func (wcuo *WorkflowContractUpdateOne) ClearDescription() *WorkflowContractUpdateOne { - wcuo.mutation.ClearDescription() - return wcuo +func (_u *WorkflowContractUpdateOne) ClearDescription() *WorkflowContractUpdateOne { + _u.mutation.ClearDescription() + return _u } // SetScopedResourceType sets the "scoped_resource_type" field. -func (wcuo *WorkflowContractUpdateOne) SetScopedResourceType(bs biz.ContractScope) *WorkflowContractUpdateOne { - wcuo.mutation.SetScopedResourceType(bs) - return wcuo +func (_u *WorkflowContractUpdateOne) SetScopedResourceType(v biz.ContractScope) *WorkflowContractUpdateOne { + _u.mutation.SetScopedResourceType(v) + return _u } // SetNillableScopedResourceType sets the "scoped_resource_type" field if the given value is not nil. -func (wcuo *WorkflowContractUpdateOne) SetNillableScopedResourceType(bs *biz.ContractScope) *WorkflowContractUpdateOne { - if bs != nil { - wcuo.SetScopedResourceType(*bs) +func (_u *WorkflowContractUpdateOne) SetNillableScopedResourceType(v *biz.ContractScope) *WorkflowContractUpdateOne { + if v != nil { + _u.SetScopedResourceType(*v) } - return wcuo + return _u } // ClearScopedResourceType clears the value of the "scoped_resource_type" field. -func (wcuo *WorkflowContractUpdateOne) ClearScopedResourceType() *WorkflowContractUpdateOne { - wcuo.mutation.ClearScopedResourceType() - return wcuo +func (_u *WorkflowContractUpdateOne) ClearScopedResourceType() *WorkflowContractUpdateOne { + _u.mutation.ClearScopedResourceType() + return _u } // SetScopedResourceID sets the "scoped_resource_id" field. -func (wcuo *WorkflowContractUpdateOne) SetScopedResourceID(u uuid.UUID) *WorkflowContractUpdateOne { - wcuo.mutation.SetScopedResourceID(u) - return wcuo +func (_u *WorkflowContractUpdateOne) SetScopedResourceID(v uuid.UUID) *WorkflowContractUpdateOne { + _u.mutation.SetScopedResourceID(v) + return _u } // SetNillableScopedResourceID sets the "scoped_resource_id" field if the given value is not nil. 
-func (wcuo *WorkflowContractUpdateOne) SetNillableScopedResourceID(u *uuid.UUID) *WorkflowContractUpdateOne { - if u != nil { - wcuo.SetScopedResourceID(*u) +func (_u *WorkflowContractUpdateOne) SetNillableScopedResourceID(v *uuid.UUID) *WorkflowContractUpdateOne { + if v != nil { + _u.SetScopedResourceID(*v) } - return wcuo + return _u } // ClearScopedResourceID clears the value of the "scoped_resource_id" field. -func (wcuo *WorkflowContractUpdateOne) ClearScopedResourceID() *WorkflowContractUpdateOne { - wcuo.mutation.ClearScopedResourceID() - return wcuo +func (_u *WorkflowContractUpdateOne) ClearScopedResourceID() *WorkflowContractUpdateOne { + _u.mutation.ClearScopedResourceID() + return _u } // AddVersionIDs adds the "versions" edge to the WorkflowContractVersion entity by IDs. -func (wcuo *WorkflowContractUpdateOne) AddVersionIDs(ids ...uuid.UUID) *WorkflowContractUpdateOne { - wcuo.mutation.AddVersionIDs(ids...) - return wcuo +func (_u *WorkflowContractUpdateOne) AddVersionIDs(ids ...uuid.UUID) *WorkflowContractUpdateOne { + _u.mutation.AddVersionIDs(ids...) + return _u } // AddVersions adds the "versions" edges to the WorkflowContractVersion entity. -func (wcuo *WorkflowContractUpdateOne) AddVersions(w ...*WorkflowContractVersion) *WorkflowContractUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowContractUpdateOne) AddVersions(v ...*WorkflowContractVersion) *WorkflowContractUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcuo.AddVersionIDs(ids...) + return _u.AddVersionIDs(ids...) } // SetOrganizationID sets the "organization" edge to the Organization entity by ID. -func (wcuo *WorkflowContractUpdateOne) SetOrganizationID(id uuid.UUID) *WorkflowContractUpdateOne { - wcuo.mutation.SetOrganizationID(id) - return wcuo +func (_u *WorkflowContractUpdateOne) SetOrganizationID(id uuid.UUID) *WorkflowContractUpdateOne { + _u.mutation.SetOrganizationID(id) + return _u } // SetNillableOrganizationID sets the "organization" edge to the Organization entity by ID if the given value is not nil. -func (wcuo *WorkflowContractUpdateOne) SetNillableOrganizationID(id *uuid.UUID) *WorkflowContractUpdateOne { +func (_u *WorkflowContractUpdateOne) SetNillableOrganizationID(id *uuid.UUID) *WorkflowContractUpdateOne { if id != nil { - wcuo = wcuo.SetOrganizationID(*id) + _u = _u.SetOrganizationID(*id) } - return wcuo + return _u } // SetOrganization sets the "organization" edge to the Organization entity. -func (wcuo *WorkflowContractUpdateOne) SetOrganization(o *Organization) *WorkflowContractUpdateOne { - return wcuo.SetOrganizationID(o.ID) +func (_u *WorkflowContractUpdateOne) SetOrganization(v *Organization) *WorkflowContractUpdateOne { + return _u.SetOrganizationID(v.ID) } // AddWorkflowIDs adds the "workflows" edge to the Workflow entity by IDs. -func (wcuo *WorkflowContractUpdateOne) AddWorkflowIDs(ids ...uuid.UUID) *WorkflowContractUpdateOne { - wcuo.mutation.AddWorkflowIDs(ids...) - return wcuo +func (_u *WorkflowContractUpdateOne) AddWorkflowIDs(ids ...uuid.UUID) *WorkflowContractUpdateOne { + _u.mutation.AddWorkflowIDs(ids...) + return _u } // AddWorkflows adds the "workflows" edges to the Workflow entity. 
-func (wcuo *WorkflowContractUpdateOne) AddWorkflows(w ...*Workflow) *WorkflowContractUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowContractUpdateOne) AddWorkflows(v ...*Workflow) *WorkflowContractUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcuo.AddWorkflowIDs(ids...) + return _u.AddWorkflowIDs(ids...) } // Mutation returns the WorkflowContractMutation object of the builder. -func (wcuo *WorkflowContractUpdateOne) Mutation() *WorkflowContractMutation { - return wcuo.mutation +func (_u *WorkflowContractUpdateOne) Mutation() *WorkflowContractMutation { + return _u.mutation } // ClearVersions clears all "versions" edges to the WorkflowContractVersion entity. -func (wcuo *WorkflowContractUpdateOne) ClearVersions() *WorkflowContractUpdateOne { - wcuo.mutation.ClearVersions() - return wcuo +func (_u *WorkflowContractUpdateOne) ClearVersions() *WorkflowContractUpdateOne { + _u.mutation.ClearVersions() + return _u } // RemoveVersionIDs removes the "versions" edge to WorkflowContractVersion entities by IDs. -func (wcuo *WorkflowContractUpdateOne) RemoveVersionIDs(ids ...uuid.UUID) *WorkflowContractUpdateOne { - wcuo.mutation.RemoveVersionIDs(ids...) - return wcuo +func (_u *WorkflowContractUpdateOne) RemoveVersionIDs(ids ...uuid.UUID) *WorkflowContractUpdateOne { + _u.mutation.RemoveVersionIDs(ids...) + return _u } // RemoveVersions removes "versions" edges to WorkflowContractVersion entities. -func (wcuo *WorkflowContractUpdateOne) RemoveVersions(w ...*WorkflowContractVersion) *WorkflowContractUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowContractUpdateOne) RemoveVersions(v ...*WorkflowContractVersion) *WorkflowContractUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcuo.RemoveVersionIDs(ids...) + return _u.RemoveVersionIDs(ids...) } // ClearOrganization clears the "organization" edge to the Organization entity. -func (wcuo *WorkflowContractUpdateOne) ClearOrganization() *WorkflowContractUpdateOne { - wcuo.mutation.ClearOrganization() - return wcuo +func (_u *WorkflowContractUpdateOne) ClearOrganization() *WorkflowContractUpdateOne { + _u.mutation.ClearOrganization() + return _u } // ClearWorkflows clears all "workflows" edges to the Workflow entity. -func (wcuo *WorkflowContractUpdateOne) ClearWorkflows() *WorkflowContractUpdateOne { - wcuo.mutation.ClearWorkflows() - return wcuo +func (_u *WorkflowContractUpdateOne) ClearWorkflows() *WorkflowContractUpdateOne { + _u.mutation.ClearWorkflows() + return _u } // RemoveWorkflowIDs removes the "workflows" edge to Workflow entities by IDs. -func (wcuo *WorkflowContractUpdateOne) RemoveWorkflowIDs(ids ...uuid.UUID) *WorkflowContractUpdateOne { - wcuo.mutation.RemoveWorkflowIDs(ids...) - return wcuo +func (_u *WorkflowContractUpdateOne) RemoveWorkflowIDs(ids ...uuid.UUID) *WorkflowContractUpdateOne { + _u.mutation.RemoveWorkflowIDs(ids...) + return _u } // RemoveWorkflows removes "workflows" edges to Workflow entities. -func (wcuo *WorkflowContractUpdateOne) RemoveWorkflows(w ...*Workflow) *WorkflowContractUpdateOne { - ids := make([]uuid.UUID, len(w)) - for i := range w { - ids[i] = w[i].ID +func (_u *WorkflowContractUpdateOne) RemoveWorkflows(v ...*Workflow) *WorkflowContractUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wcuo.RemoveWorkflowIDs(ids...) 
+ return _u.RemoveWorkflowIDs(ids...) } // Where appends a list predicates to the WorkflowContractUpdate builder. -func (wcuo *WorkflowContractUpdateOne) Where(ps ...predicate.WorkflowContract) *WorkflowContractUpdateOne { - wcuo.mutation.Where(ps...) - return wcuo +func (_u *WorkflowContractUpdateOne) Where(ps ...predicate.WorkflowContract) *WorkflowContractUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (wcuo *WorkflowContractUpdateOne) Select(field string, fields ...string) *WorkflowContractUpdateOne { - wcuo.fields = append([]string{field}, fields...) - return wcuo +func (_u *WorkflowContractUpdateOne) Select(field string, fields ...string) *WorkflowContractUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated WorkflowContract entity. -func (wcuo *WorkflowContractUpdateOne) Save(ctx context.Context) (*WorkflowContract, error) { - return withHooks(ctx, wcuo.sqlSave, wcuo.mutation, wcuo.hooks) +func (_u *WorkflowContractUpdateOne) Save(ctx context.Context) (*WorkflowContract, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (wcuo *WorkflowContractUpdateOne) SaveX(ctx context.Context) *WorkflowContract { - node, err := wcuo.Save(ctx) +func (_u *WorkflowContractUpdateOne) SaveX(ctx context.Context) *WorkflowContract { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -677,21 +677,21 @@ func (wcuo *WorkflowContractUpdateOne) SaveX(ctx context.Context) *WorkflowContr } // Exec executes the query on the entity. -func (wcuo *WorkflowContractUpdateOne) Exec(ctx context.Context) error { - _, err := wcuo.Save(ctx) +func (_u *WorkflowContractUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wcuo *WorkflowContractUpdateOne) ExecX(ctx context.Context) { - if err := wcuo.Exec(ctx); err != nil { +func (_u *WorkflowContractUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (wcuo *WorkflowContractUpdateOne) check() error { - if v, ok := wcuo.mutation.ScopedResourceType(); ok { +func (_u *WorkflowContractUpdateOne) check() error { + if v, ok := _u.mutation.ScopedResourceType(); ok { if err := workflowcontract.ScopedResourceTypeValidator(v); err != nil { return &ValidationError{Name: "scoped_resource_type", err: fmt.Errorf(`ent: validator failed for field "WorkflowContract.scoped_resource_type": %w`, err)} } @@ -700,22 +700,22 @@ func (wcuo *WorkflowContractUpdateOne) check() error { } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (wcuo *WorkflowContractUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowContractUpdateOne { - wcuo.modifiers = append(wcuo.modifiers, modifiers...) - return wcuo +func (_u *WorkflowContractUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowContractUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowContract, err error) { - if err := wcuo.check(); err != nil { +func (_u *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowContract, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(workflowcontract.Table, workflowcontract.Columns, sqlgraph.NewFieldSpec(workflowcontract.FieldID, field.TypeUUID)) - id, ok := wcuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "WorkflowContract.id" for update`)} } _spec.Node.ID.Value = id - if fields := wcuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, workflowcontract.FieldID) for _, f := range fields { @@ -727,41 +727,41 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } } } - if ps := wcuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := wcuo.mutation.UpdatedAt(); ok { + if value, ok := _u.mutation.UpdatedAt(); ok { _spec.SetField(workflowcontract.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := wcuo.mutation.DeletedAt(); ok { + if value, ok := _u.mutation.DeletedAt(); ok { _spec.SetField(workflowcontract.FieldDeletedAt, field.TypeTime, value) } - if wcuo.mutation.DeletedAtCleared() { + if _u.mutation.DeletedAtCleared() { _spec.ClearField(workflowcontract.FieldDeletedAt, field.TypeTime) } - if value, ok := wcuo.mutation.Description(); ok { + if value, ok := _u.mutation.Description(); ok { _spec.SetField(workflowcontract.FieldDescription, field.TypeString, value) } - if wcuo.mutation.DescriptionCleared() { + if _u.mutation.DescriptionCleared() { _spec.ClearField(workflowcontract.FieldDescription, field.TypeString) } - if value, ok := wcuo.mutation.ScopedResourceType(); ok { + if value, ok := _u.mutation.ScopedResourceType(); ok { _spec.SetField(workflowcontract.FieldScopedResourceType, field.TypeEnum, value) } - if wcuo.mutation.ScopedResourceTypeCleared() { + if _u.mutation.ScopedResourceTypeCleared() { _spec.ClearField(workflowcontract.FieldScopedResourceType, field.TypeEnum) } - if value, ok := wcuo.mutation.ScopedResourceID(); ok { + if value, ok := _u.mutation.ScopedResourceID(); ok { _spec.SetField(workflowcontract.FieldScopedResourceID, field.TypeUUID, value) } - if wcuo.mutation.ScopedResourceIDCleared() { + if _u.mutation.ScopedResourceIDCleared() { _spec.ClearField(workflowcontract.FieldScopedResourceID, field.TypeUUID) } - if wcuo.mutation.VersionsCleared() { + if _u.mutation.VersionsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -774,7 +774,7 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcuo.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !wcuo.mutation.VersionsCleared() { + if nodes := _u.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !_u.mutation.VersionsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -790,7 +790,7 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcuo.mutation.VersionsIDs(); len(nodes) > 0 { + if 
nodes := _u.mutation.VersionsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: false, @@ -806,7 +806,7 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wcuo.mutation.OrganizationCleared() { + if _u.mutation.OrganizationCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -819,7 +819,7 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcuo.mutation.OrganizationIDs(); len(nodes) > 0 { + if nodes := _u.mutation.OrganizationIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -835,7 +835,7 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wcuo.mutation.WorkflowsCleared() { + if _u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -848,7 +848,7 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcuo.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !wcuo.mutation.WorkflowsCleared() { + if nodes := _u.mutation.RemovedWorkflowsIDs(); len(nodes) > 0 && !_u.mutation.WorkflowsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -864,7 +864,7 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcuo.mutation.WorkflowsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.WorkflowsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, @@ -880,11 +880,11 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(wcuo.modifiers...) - _node = &WorkflowContract{config: wcuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &WorkflowContract{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, wcuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{workflowcontract.Label} } else if sqlgraph.IsConstraintError(err) { @@ -892,6 +892,6 @@ func (wcuo *WorkflowContractUpdateOne) sqlSave(ctx context.Context) (_node *Work } return nil, err } - wcuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/workflowcontractversion.go b/app/controlplane/pkg/data/ent/workflowcontractversion.go index 7fbaddf9a..4332024bc 100644 --- a/app/controlplane/pkg/data/ent/workflowcontractversion.go +++ b/app/controlplane/pkg/data/ent/workflowcontractversion.go @@ -83,7 +83,7 @@ func (*WorkflowContractVersion) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the WorkflowContractVersion fields. 
-func (wcv *WorkflowContractVersion) assignValues(columns []string, values []any) error { +func (_m *WorkflowContractVersion) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -93,47 +93,47 @@ func (wcv *WorkflowContractVersion) assignValues(columns []string, values []any) if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - wcv.ID = *value + _m.ID = *value } case workflowcontractversion.FieldBody: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field body", values[i]) } else if value != nil { - wcv.Body = *value + _m.Body = *value } case workflowcontractversion.FieldRawBody: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field raw_body", values[i]) } else if value != nil { - wcv.RawBody = *value + _m.RawBody = *value } case workflowcontractversion.FieldRawBodyFormat: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field raw_body_format", values[i]) } else if value.Valid { - wcv.RawBodyFormat = unmarshal.RawFormat(value.String) + _m.RawBodyFormat = unmarshal.RawFormat(value.String) } case workflowcontractversion.FieldRevision: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field revision", values[i]) } else if value.Valid { - wcv.Revision = int(value.Int64) + _m.Revision = int(value.Int64) } case workflowcontractversion.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - wcv.CreatedAt = value.Time + _m.CreatedAt = value.Time } case workflowcontractversion.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field workflow_contract_versions", values[i]) } else if value.Valid { - wcv.workflow_contract_versions = new(uuid.UUID) - *wcv.workflow_contract_versions = *value.S.(*uuid.UUID) + _m.workflow_contract_versions = new(uuid.UUID) + *_m.workflow_contract_versions = *value.S.(*uuid.UUID) } default: - wcv.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -141,52 +141,52 @@ func (wcv *WorkflowContractVersion) assignValues(columns []string, values []any) // Value returns the ent.Value that was dynamically selected and assigned to the WorkflowContractVersion. // This includes values selected through modifiers, order, etc. -func (wcv *WorkflowContractVersion) Value(name string) (ent.Value, error) { - return wcv.selectValues.Get(name) +func (_m *WorkflowContractVersion) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryContract queries the "contract" edge of the WorkflowContractVersion entity. -func (wcv *WorkflowContractVersion) QueryContract() *WorkflowContractQuery { - return NewWorkflowContractVersionClient(wcv.config).QueryContract(wcv) +func (_m *WorkflowContractVersion) QueryContract() *WorkflowContractQuery { + return NewWorkflowContractVersionClient(_m.config).QueryContract(_m) } // Update returns a builder for updating this WorkflowContractVersion. // Note that you need to call WorkflowContractVersion.Unwrap() before calling this method if this WorkflowContractVersion // was returned from a transaction, and the transaction was committed or rolled back. 
-func (wcv *WorkflowContractVersion) Update() *WorkflowContractVersionUpdateOne { - return NewWorkflowContractVersionClient(wcv.config).UpdateOne(wcv) +func (_m *WorkflowContractVersion) Update() *WorkflowContractVersionUpdateOne { + return NewWorkflowContractVersionClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the WorkflowContractVersion entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (wcv *WorkflowContractVersion) Unwrap() *WorkflowContractVersion { - _tx, ok := wcv.config.driver.(*txDriver) +func (_m *WorkflowContractVersion) Unwrap() *WorkflowContractVersion { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: WorkflowContractVersion is not a transactional entity") } - wcv.config.driver = _tx.drv - return wcv + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (wcv *WorkflowContractVersion) String() string { +func (_m *WorkflowContractVersion) String() string { var builder strings.Builder builder.WriteString("WorkflowContractVersion(") - builder.WriteString(fmt.Sprintf("id=%v, ", wcv.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("body=") - builder.WriteString(fmt.Sprintf("%v", wcv.Body)) + builder.WriteString(fmt.Sprintf("%v", _m.Body)) builder.WriteString(", ") builder.WriteString("raw_body=") - builder.WriteString(fmt.Sprintf("%v", wcv.RawBody)) + builder.WriteString(fmt.Sprintf("%v", _m.RawBody)) builder.WriteString(", ") builder.WriteString("raw_body_format=") - builder.WriteString(fmt.Sprintf("%v", wcv.RawBodyFormat)) + builder.WriteString(fmt.Sprintf("%v", _m.RawBodyFormat)) builder.WriteString(", ") builder.WriteString("revision=") - builder.WriteString(fmt.Sprintf("%v", wcv.Revision)) + builder.WriteString(fmt.Sprintf("%v", _m.Revision)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(wcv.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteByte(')') return builder.String() } diff --git a/app/controlplane/pkg/data/ent/workflowcontractversion_create.go b/app/controlplane/pkg/data/ent/workflowcontractversion_create.go index 3d38bf82f..e6e372265 100644 --- a/app/controlplane/pkg/data/ent/workflowcontractversion_create.go +++ b/app/controlplane/pkg/data/ent/workflowcontractversion_create.go @@ -27,98 +27,98 @@ type WorkflowContractVersionCreate struct { } // SetBody sets the "body" field. -func (wcvc *WorkflowContractVersionCreate) SetBody(b []byte) *WorkflowContractVersionCreate { - wcvc.mutation.SetBody(b) - return wcvc +func (_c *WorkflowContractVersionCreate) SetBody(v []byte) *WorkflowContractVersionCreate { + _c.mutation.SetBody(v) + return _c } // SetRawBody sets the "raw_body" field. -func (wcvc *WorkflowContractVersionCreate) SetRawBody(b []byte) *WorkflowContractVersionCreate { - wcvc.mutation.SetRawBody(b) - return wcvc +func (_c *WorkflowContractVersionCreate) SetRawBody(v []byte) *WorkflowContractVersionCreate { + _c.mutation.SetRawBody(v) + return _c } // SetRawBodyFormat sets the "raw_body_format" field. -func (wcvc *WorkflowContractVersionCreate) SetRawBodyFormat(uf unmarshal.RawFormat) *WorkflowContractVersionCreate { - wcvc.mutation.SetRawBodyFormat(uf) - return wcvc +func (_c *WorkflowContractVersionCreate) SetRawBodyFormat(v unmarshal.RawFormat) *WorkflowContractVersionCreate { + _c.mutation.SetRawBodyFormat(v) + return _c } // SetRevision sets the "revision" field. 
-func (wcvc *WorkflowContractVersionCreate) SetRevision(i int) *WorkflowContractVersionCreate { - wcvc.mutation.SetRevision(i) - return wcvc +func (_c *WorkflowContractVersionCreate) SetRevision(v int) *WorkflowContractVersionCreate { + _c.mutation.SetRevision(v) + return _c } // SetNillableRevision sets the "revision" field if the given value is not nil. -func (wcvc *WorkflowContractVersionCreate) SetNillableRevision(i *int) *WorkflowContractVersionCreate { - if i != nil { - wcvc.SetRevision(*i) +func (_c *WorkflowContractVersionCreate) SetNillableRevision(v *int) *WorkflowContractVersionCreate { + if v != nil { + _c.SetRevision(*v) } - return wcvc + return _c } // SetCreatedAt sets the "created_at" field. -func (wcvc *WorkflowContractVersionCreate) SetCreatedAt(t time.Time) *WorkflowContractVersionCreate { - wcvc.mutation.SetCreatedAt(t) - return wcvc +func (_c *WorkflowContractVersionCreate) SetCreatedAt(v time.Time) *WorkflowContractVersionCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (wcvc *WorkflowContractVersionCreate) SetNillableCreatedAt(t *time.Time) *WorkflowContractVersionCreate { - if t != nil { - wcvc.SetCreatedAt(*t) +func (_c *WorkflowContractVersionCreate) SetNillableCreatedAt(v *time.Time) *WorkflowContractVersionCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return wcvc + return _c } // SetID sets the "id" field. -func (wcvc *WorkflowContractVersionCreate) SetID(u uuid.UUID) *WorkflowContractVersionCreate { - wcvc.mutation.SetID(u) - return wcvc +func (_c *WorkflowContractVersionCreate) SetID(v uuid.UUID) *WorkflowContractVersionCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (wcvc *WorkflowContractVersionCreate) SetNillableID(u *uuid.UUID) *WorkflowContractVersionCreate { - if u != nil { - wcvc.SetID(*u) +func (_c *WorkflowContractVersionCreate) SetNillableID(v *uuid.UUID) *WorkflowContractVersionCreate { + if v != nil { + _c.SetID(*v) } - return wcvc + return _c } // SetContractID sets the "contract" edge to the WorkflowContract entity by ID. -func (wcvc *WorkflowContractVersionCreate) SetContractID(id uuid.UUID) *WorkflowContractVersionCreate { - wcvc.mutation.SetContractID(id) - return wcvc +func (_c *WorkflowContractVersionCreate) SetContractID(id uuid.UUID) *WorkflowContractVersionCreate { + _c.mutation.SetContractID(id) + return _c } // SetNillableContractID sets the "contract" edge to the WorkflowContract entity by ID if the given value is not nil. -func (wcvc *WorkflowContractVersionCreate) SetNillableContractID(id *uuid.UUID) *WorkflowContractVersionCreate { +func (_c *WorkflowContractVersionCreate) SetNillableContractID(id *uuid.UUID) *WorkflowContractVersionCreate { if id != nil { - wcvc = wcvc.SetContractID(*id) + _c = _c.SetContractID(*id) } - return wcvc + return _c } // SetContract sets the "contract" edge to the WorkflowContract entity. -func (wcvc *WorkflowContractVersionCreate) SetContract(w *WorkflowContract) *WorkflowContractVersionCreate { - return wcvc.SetContractID(w.ID) +func (_c *WorkflowContractVersionCreate) SetContract(v *WorkflowContract) *WorkflowContractVersionCreate { + return _c.SetContractID(v.ID) } // Mutation returns the WorkflowContractVersionMutation object of the builder. 
-func (wcvc *WorkflowContractVersionCreate) Mutation() *WorkflowContractVersionMutation { - return wcvc.mutation +func (_c *WorkflowContractVersionCreate) Mutation() *WorkflowContractVersionMutation { + return _c.mutation } // Save creates the WorkflowContractVersion in the database. -func (wcvc *WorkflowContractVersionCreate) Save(ctx context.Context) (*WorkflowContractVersion, error) { - wcvc.defaults() - return withHooks(ctx, wcvc.sqlSave, wcvc.mutation, wcvc.hooks) +func (_c *WorkflowContractVersionCreate) Save(ctx context.Context) (*WorkflowContractVersion, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (wcvc *WorkflowContractVersionCreate) SaveX(ctx context.Context) *WorkflowContractVersion { - v, err := wcvc.Save(ctx) +func (_c *WorkflowContractVersionCreate) SaveX(ctx context.Context) *WorkflowContractVersion { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -126,67 +126,67 @@ func (wcvc *WorkflowContractVersionCreate) SaveX(ctx context.Context) *WorkflowC } // Exec executes the query. -func (wcvc *WorkflowContractVersionCreate) Exec(ctx context.Context) error { - _, err := wcvc.Save(ctx) +func (_c *WorkflowContractVersionCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wcvc *WorkflowContractVersionCreate) ExecX(ctx context.Context) { - if err := wcvc.Exec(ctx); err != nil { +func (_c *WorkflowContractVersionCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (wcvc *WorkflowContractVersionCreate) defaults() { - if _, ok := wcvc.mutation.Revision(); !ok { +func (_c *WorkflowContractVersionCreate) defaults() { + if _, ok := _c.mutation.Revision(); !ok { v := workflowcontractversion.DefaultRevision - wcvc.mutation.SetRevision(v) + _c.mutation.SetRevision(v) } - if _, ok := wcvc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { v := workflowcontractversion.DefaultCreatedAt() - wcvc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := wcvc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := workflowcontractversion.DefaultID() - wcvc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. 
-func (wcvc *WorkflowContractVersionCreate) check() error { - if _, ok := wcvc.mutation.RawBody(); !ok { +func (_c *WorkflowContractVersionCreate) check() error { + if _, ok := _c.mutation.RawBody(); !ok { return &ValidationError{Name: "raw_body", err: errors.New(`ent: missing required field "WorkflowContractVersion.raw_body"`)} } - if v, ok := wcvc.mutation.RawBody(); ok { + if v, ok := _c.mutation.RawBody(); ok { if err := workflowcontractversion.RawBodyValidator(v); err != nil { return &ValidationError{Name: "raw_body", err: fmt.Errorf(`ent: validator failed for field "WorkflowContractVersion.raw_body": %w`, err)} } } - if _, ok := wcvc.mutation.RawBodyFormat(); !ok { + if _, ok := _c.mutation.RawBodyFormat(); !ok { return &ValidationError{Name: "raw_body_format", err: errors.New(`ent: missing required field "WorkflowContractVersion.raw_body_format"`)} } - if v, ok := wcvc.mutation.RawBodyFormat(); ok { + if v, ok := _c.mutation.RawBodyFormat(); ok { if err := workflowcontractversion.RawBodyFormatValidator(v); err != nil { return &ValidationError{Name: "raw_body_format", err: fmt.Errorf(`ent: validator failed for field "WorkflowContractVersion.raw_body_format": %w`, err)} } } - if _, ok := wcvc.mutation.Revision(); !ok { + if _, ok := _c.mutation.Revision(); !ok { return &ValidationError{Name: "revision", err: errors.New(`ent: missing required field "WorkflowContractVersion.revision"`)} } - if _, ok := wcvc.mutation.CreatedAt(); !ok { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "WorkflowContractVersion.created_at"`)} } return nil } -func (wcvc *WorkflowContractVersionCreate) sqlSave(ctx context.Context) (*WorkflowContractVersion, error) { - if err := wcvc.check(); err != nil { +func (_c *WorkflowContractVersionCreate) sqlSave(ctx context.Context) (*WorkflowContractVersion, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := wcvc.createSpec() - if err := sqlgraph.CreateNode(ctx, wcvc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -199,42 +199,42 @@ func (wcvc *WorkflowContractVersionCreate) sqlSave(ctx context.Context) (*Workfl return nil, err } } - wcvc.mutation.id = &_node.ID - wcvc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (wcvc *WorkflowContractVersionCreate) createSpec() (*WorkflowContractVersion, *sqlgraph.CreateSpec) { +func (_c *WorkflowContractVersionCreate) createSpec() (*WorkflowContractVersion, *sqlgraph.CreateSpec) { var ( - _node = &WorkflowContractVersion{config: wcvc.config} + _node = &WorkflowContractVersion{config: _c.config} _spec = sqlgraph.NewCreateSpec(workflowcontractversion.Table, sqlgraph.NewFieldSpec(workflowcontractversion.FieldID, field.TypeUUID)) ) - _spec.OnConflict = wcvc.conflict - if id, ok := wcvc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := wcvc.mutation.Body(); ok { + if value, ok := _c.mutation.Body(); ok { _spec.SetField(workflowcontractversion.FieldBody, field.TypeBytes, value) _node.Body = value } - if value, ok := wcvc.mutation.RawBody(); ok { + if value, ok := _c.mutation.RawBody(); ok { _spec.SetField(workflowcontractversion.FieldRawBody, field.TypeBytes, value) _node.RawBody = value } - if 
value, ok := wcvc.mutation.RawBodyFormat(); ok { + if value, ok := _c.mutation.RawBodyFormat(); ok { _spec.SetField(workflowcontractversion.FieldRawBodyFormat, field.TypeEnum, value) _node.RawBodyFormat = value } - if value, ok := wcvc.mutation.Revision(); ok { + if value, ok := _c.mutation.Revision(); ok { _spec.SetField(workflowcontractversion.FieldRevision, field.TypeInt, value) _node.Revision = value } - if value, ok := wcvc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(workflowcontractversion.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if nodes := wcvc.mutation.ContractIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ContractIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -270,10 +270,10 @@ func (wcvc *WorkflowContractVersionCreate) createSpec() (*WorkflowContractVersio // SetBody(v+v). // }). // Exec(ctx) -func (wcvc *WorkflowContractVersionCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowContractVersionUpsertOne { - wcvc.conflict = opts +func (_c *WorkflowContractVersionCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowContractVersionUpsertOne { + _c.conflict = opts return &WorkflowContractVersionUpsertOne{ - create: wcvc, + create: _c, } } @@ -283,10 +283,10 @@ func (wcvc *WorkflowContractVersionCreate) OnConflict(opts ...sql.ConflictOption // client.WorkflowContractVersion.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (wcvc *WorkflowContractVersionCreate) OnConflictColumns(columns ...string) *WorkflowContractVersionUpsertOne { - wcvc.conflict = append(wcvc.conflict, sql.ConflictColumns(columns...)) +func (_c *WorkflowContractVersionCreate) OnConflictColumns(columns ...string) *WorkflowContractVersionUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &WorkflowContractVersionUpsertOne{ - create: wcvc, + create: _c, } } @@ -436,16 +436,16 @@ type WorkflowContractVersionCreateBulk struct { } // Save creates the WorkflowContractVersion entities in the database. 
-func (wcvcb *WorkflowContractVersionCreateBulk) Save(ctx context.Context) ([]*WorkflowContractVersion, error) { - if wcvcb.err != nil { - return nil, wcvcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(wcvcb.builders)) - nodes := make([]*WorkflowContractVersion, len(wcvcb.builders)) - mutators := make([]Mutator, len(wcvcb.builders)) - for i := range wcvcb.builders { +func (_c *WorkflowContractVersionCreateBulk) Save(ctx context.Context) ([]*WorkflowContractVersion, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*WorkflowContractVersion, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := wcvcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*WorkflowContractVersionMutation) @@ -459,12 +459,12 @@ func (wcvcb *WorkflowContractVersionCreateBulk) Save(ctx context.Context) ([]*Wo var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, wcvcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = wcvcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, wcvcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -484,7 +484,7 @@ func (wcvcb *WorkflowContractVersionCreateBulk) Save(ctx context.Context) ([]*Wo }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, wcvcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -492,8 +492,8 @@ func (wcvcb *WorkflowContractVersionCreateBulk) Save(ctx context.Context) ([]*Wo } // SaveX is like Save, but panics if an error occurs. -func (wcvcb *WorkflowContractVersionCreateBulk) SaveX(ctx context.Context) []*WorkflowContractVersion { - v, err := wcvcb.Save(ctx) +func (_c *WorkflowContractVersionCreateBulk) SaveX(ctx context.Context) []*WorkflowContractVersion { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -501,14 +501,14 @@ func (wcvcb *WorkflowContractVersionCreateBulk) SaveX(ctx context.Context) []*Wo } // Exec executes the query. -func (wcvcb *WorkflowContractVersionCreateBulk) Exec(ctx context.Context) error { - _, err := wcvcb.Save(ctx) +func (_c *WorkflowContractVersionCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wcvcb *WorkflowContractVersionCreateBulk) ExecX(ctx context.Context) { - if err := wcvcb.Exec(ctx); err != nil { +func (_c *WorkflowContractVersionCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -528,10 +528,10 @@ func (wcvcb *WorkflowContractVersionCreateBulk) ExecX(ctx context.Context) { // SetBody(v+v). // }). 
// Exec(ctx) -func (wcvcb *WorkflowContractVersionCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowContractVersionUpsertBulk { - wcvcb.conflict = opts +func (_c *WorkflowContractVersionCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowContractVersionUpsertBulk { + _c.conflict = opts return &WorkflowContractVersionUpsertBulk{ - create: wcvcb, + create: _c, } } @@ -541,10 +541,10 @@ func (wcvcb *WorkflowContractVersionCreateBulk) OnConflict(opts ...sql.ConflictO // client.WorkflowContractVersion.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (wcvcb *WorkflowContractVersionCreateBulk) OnConflictColumns(columns ...string) *WorkflowContractVersionUpsertBulk { - wcvcb.conflict = append(wcvcb.conflict, sql.ConflictColumns(columns...)) +func (_c *WorkflowContractVersionCreateBulk) OnConflictColumns(columns ...string) *WorkflowContractVersionUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &WorkflowContractVersionUpsertBulk{ - create: wcvcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/workflowcontractversion_delete.go b/app/controlplane/pkg/data/ent/workflowcontractversion_delete.go index 72308824c..2062e9565 100644 --- a/app/controlplane/pkg/data/ent/workflowcontractversion_delete.go +++ b/app/controlplane/pkg/data/ent/workflowcontractversion_delete.go @@ -20,56 +20,56 @@ type WorkflowContractVersionDelete struct { } // Where appends a list predicates to the WorkflowContractVersionDelete builder. -func (wcvd *WorkflowContractVersionDelete) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionDelete { - wcvd.mutation.Where(ps...) - return wcvd +func (_d *WorkflowContractVersionDelete) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (wcvd *WorkflowContractVersionDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, wcvd.sqlExec, wcvd.mutation, wcvd.hooks) +func (_d *WorkflowContractVersionDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (wcvd *WorkflowContractVersionDelete) ExecX(ctx context.Context) int { - n, err := wcvd.Exec(ctx) +func (_d *WorkflowContractVersionDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (wcvd *WorkflowContractVersionDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *WorkflowContractVersionDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(workflowcontractversion.Table, sqlgraph.NewFieldSpec(workflowcontractversion.FieldID, field.TypeUUID)) - if ps := wcvd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, wcvd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - wcvd.mutation.done = true + _d.mutation.done = true return affected, err } // WorkflowContractVersionDeleteOne is the builder for deleting a single WorkflowContractVersion entity. 
type WorkflowContractVersionDeleteOne struct { - wcvd *WorkflowContractVersionDelete + _d *WorkflowContractVersionDelete } // Where appends a list predicates to the WorkflowContractVersionDelete builder. -func (wcvdo *WorkflowContractVersionDeleteOne) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionDeleteOne { - wcvdo.wcvd.mutation.Where(ps...) - return wcvdo +func (_d *WorkflowContractVersionDeleteOne) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (wcvdo *WorkflowContractVersionDeleteOne) Exec(ctx context.Context) error { - n, err := wcvdo.wcvd.Exec(ctx) +func (_d *WorkflowContractVersionDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (wcvdo *WorkflowContractVersionDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. -func (wcvdo *WorkflowContractVersionDeleteOne) ExecX(ctx context.Context) { - if err := wcvdo.Exec(ctx); err != nil { +func (_d *WorkflowContractVersionDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/workflowcontractversion_query.go b/app/controlplane/pkg/data/ent/workflowcontractversion_query.go index ff8c77da5..915e257ec 100644 --- a/app/controlplane/pkg/data/ent/workflowcontractversion_query.go +++ b/app/controlplane/pkg/data/ent/workflowcontractversion_query.go @@ -34,44 +34,44 @@ type WorkflowContractVersionQuery struct { } // Where adds a new predicate for the WorkflowContractVersionQuery builder. -func (wcvq *WorkflowContractVersionQuery) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionQuery { - wcvq.predicates = append(wcvq.predicates, ps...) - return wcvq +func (_q *WorkflowContractVersionQuery) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (wcvq *WorkflowContractVersionQuery) Limit(limit int) *WorkflowContractVersionQuery { - wcvq.ctx.Limit = &limit - return wcvq +func (_q *WorkflowContractVersionQuery) Limit(limit int) *WorkflowContractVersionQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (wcvq *WorkflowContractVersionQuery) Offset(offset int) *WorkflowContractVersionQuery { - wcvq.ctx.Offset = &offset - return wcvq +func (_q *WorkflowContractVersionQuery) Offset(offset int) *WorkflowContractVersionQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (wcvq *WorkflowContractVersionQuery) Unique(unique bool) *WorkflowContractVersionQuery { - wcvq.ctx.Unique = &unique - return wcvq +func (_q *WorkflowContractVersionQuery) Unique(unique bool) *WorkflowContractVersionQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (wcvq *WorkflowContractVersionQuery) Order(o ...workflowcontractversion.OrderOption) *WorkflowContractVersionQuery { - wcvq.order = append(wcvq.order, o...) - return wcvq +func (_q *WorkflowContractVersionQuery) Order(o ...workflowcontractversion.OrderOption) *WorkflowContractVersionQuery { + _q.order = append(_q.order, o...) 
+ return _q } // QueryContract chains the current query on the "contract" edge. -func (wcvq *WorkflowContractVersionQuery) QueryContract() *WorkflowContractQuery { - query := (&WorkflowContractClient{config: wcvq.config}).Query() +func (_q *WorkflowContractVersionQuery) QueryContract() *WorkflowContractQuery { + query := (&WorkflowContractClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wcvq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wcvq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -80,7 +80,7 @@ func (wcvq *WorkflowContractVersionQuery) QueryContract() *WorkflowContractQuery sqlgraph.To(workflowcontract.Table, workflowcontract.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflowcontractversion.ContractTable, workflowcontractversion.ContractColumn), ) - fromU = sqlgraph.SetNeighbors(wcvq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -88,8 +88,8 @@ func (wcvq *WorkflowContractVersionQuery) QueryContract() *WorkflowContractQuery // First returns the first WorkflowContractVersion entity from the query. // Returns a *NotFoundError when no WorkflowContractVersion was found. -func (wcvq *WorkflowContractVersionQuery) First(ctx context.Context) (*WorkflowContractVersion, error) { - nodes, err := wcvq.Limit(1).All(setContextOp(ctx, wcvq.ctx, ent.OpQueryFirst)) +func (_q *WorkflowContractVersionQuery) First(ctx context.Context) (*WorkflowContractVersion, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -100,8 +100,8 @@ func (wcvq *WorkflowContractVersionQuery) First(ctx context.Context) (*WorkflowC } // FirstX is like First, but panics if an error occurs. -func (wcvq *WorkflowContractVersionQuery) FirstX(ctx context.Context) *WorkflowContractVersion { - node, err := wcvq.First(ctx) +func (_q *WorkflowContractVersionQuery) FirstX(ctx context.Context) *WorkflowContractVersion { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -110,9 +110,9 @@ func (wcvq *WorkflowContractVersionQuery) FirstX(ctx context.Context) *WorkflowC // FirstID returns the first WorkflowContractVersion ID from the query. // Returns a *NotFoundError when no WorkflowContractVersion ID was found. -func (wcvq *WorkflowContractVersionQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *WorkflowContractVersionQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = wcvq.Limit(1).IDs(setContextOp(ctx, wcvq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -123,8 +123,8 @@ func (wcvq *WorkflowContractVersionQuery) FirstID(ctx context.Context) (id uuid. } // FirstIDX is like FirstID, but panics if an error occurs. -func (wcvq *WorkflowContractVersionQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := wcvq.FirstID(ctx) +func (_q *WorkflowContractVersionQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -134,8 +134,8 @@ func (wcvq *WorkflowContractVersionQuery) FirstIDX(ctx context.Context) uuid.UUI // Only returns a single WorkflowContractVersion entity found by the query, ensuring it only returns one. 
// Returns a *NotSingularError when more than one WorkflowContractVersion entity is found. // Returns a *NotFoundError when no WorkflowContractVersion entities are found. -func (wcvq *WorkflowContractVersionQuery) Only(ctx context.Context) (*WorkflowContractVersion, error) { - nodes, err := wcvq.Limit(2).All(setContextOp(ctx, wcvq.ctx, ent.OpQueryOnly)) +func (_q *WorkflowContractVersionQuery) Only(ctx context.Context) (*WorkflowContractVersion, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -150,8 +150,8 @@ func (wcvq *WorkflowContractVersionQuery) Only(ctx context.Context) (*WorkflowCo } // OnlyX is like Only, but panics if an error occurs. -func (wcvq *WorkflowContractVersionQuery) OnlyX(ctx context.Context) *WorkflowContractVersion { - node, err := wcvq.Only(ctx) +func (_q *WorkflowContractVersionQuery) OnlyX(ctx context.Context) *WorkflowContractVersion { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -161,9 +161,9 @@ func (wcvq *WorkflowContractVersionQuery) OnlyX(ctx context.Context) *WorkflowCo // OnlyID is like Only, but returns the only WorkflowContractVersion ID in the query. // Returns a *NotSingularError when more than one WorkflowContractVersion ID is found. // Returns a *NotFoundError when no entities are found. -func (wcvq *WorkflowContractVersionQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *WorkflowContractVersionQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = wcvq.Limit(2).IDs(setContextOp(ctx, wcvq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -178,8 +178,8 @@ func (wcvq *WorkflowContractVersionQuery) OnlyID(ctx context.Context) (id uuid.U } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (wcvq *WorkflowContractVersionQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := wcvq.OnlyID(ctx) +func (_q *WorkflowContractVersionQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -187,18 +187,18 @@ func (wcvq *WorkflowContractVersionQuery) OnlyIDX(ctx context.Context) uuid.UUID } // All executes the query and returns a list of WorkflowContractVersions. -func (wcvq *WorkflowContractVersionQuery) All(ctx context.Context) ([]*WorkflowContractVersion, error) { - ctx = setContextOp(ctx, wcvq.ctx, ent.OpQueryAll) - if err := wcvq.prepareQuery(ctx); err != nil { +func (_q *WorkflowContractVersionQuery) All(ctx context.Context) ([]*WorkflowContractVersion, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*WorkflowContractVersion, *WorkflowContractVersionQuery]() - return withInterceptors[[]*WorkflowContractVersion](ctx, wcvq, qr, wcvq.inters) + return withInterceptors[[]*WorkflowContractVersion](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (wcvq *WorkflowContractVersionQuery) AllX(ctx context.Context) []*WorkflowContractVersion { - nodes, err := wcvq.All(ctx) +func (_q *WorkflowContractVersionQuery) AllX(ctx context.Context) []*WorkflowContractVersion { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -206,20 +206,20 @@ func (wcvq *WorkflowContractVersionQuery) AllX(ctx context.Context) []*WorkflowC } // IDs executes the query and returns a list of WorkflowContractVersion IDs. 
-func (wcvq *WorkflowContractVersionQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if wcvq.ctx.Unique == nil && wcvq.path != nil { - wcvq.Unique(true) +func (_q *WorkflowContractVersionQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, wcvq.ctx, ent.OpQueryIDs) - if err = wcvq.Select(workflowcontractversion.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(workflowcontractversion.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (wcvq *WorkflowContractVersionQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := wcvq.IDs(ctx) +func (_q *WorkflowContractVersionQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -227,17 +227,17 @@ func (wcvq *WorkflowContractVersionQuery) IDsX(ctx context.Context) []uuid.UUID } // Count returns the count of the given query. -func (wcvq *WorkflowContractVersionQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, wcvq.ctx, ent.OpQueryCount) - if err := wcvq.prepareQuery(ctx); err != nil { +func (_q *WorkflowContractVersionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, wcvq, querierCount[*WorkflowContractVersionQuery](), wcvq.inters) + return withInterceptors[int](ctx, _q, querierCount[*WorkflowContractVersionQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (wcvq *WorkflowContractVersionQuery) CountX(ctx context.Context) int { - count, err := wcvq.Count(ctx) +func (_q *WorkflowContractVersionQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -245,9 +245,9 @@ func (wcvq *WorkflowContractVersionQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (wcvq *WorkflowContractVersionQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, wcvq.ctx, ent.OpQueryExist) - switch _, err := wcvq.FirstID(ctx); { +func (_q *WorkflowContractVersionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -258,8 +258,8 @@ func (wcvq *WorkflowContractVersionQuery) Exist(ctx context.Context) (bool, erro } // ExistX is like Exist, but panics if an error occurs. -func (wcvq *WorkflowContractVersionQuery) ExistX(ctx context.Context) bool { - exist, err := wcvq.Exist(ctx) +func (_q *WorkflowContractVersionQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -268,33 +268,33 @@ func (wcvq *WorkflowContractVersionQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the WorkflowContractVersionQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
-func (wcvq *WorkflowContractVersionQuery) Clone() *WorkflowContractVersionQuery { - if wcvq == nil { +func (_q *WorkflowContractVersionQuery) Clone() *WorkflowContractVersionQuery { + if _q == nil { return nil } return &WorkflowContractVersionQuery{ - config: wcvq.config, - ctx: wcvq.ctx.Clone(), - order: append([]workflowcontractversion.OrderOption{}, wcvq.order...), - inters: append([]Interceptor{}, wcvq.inters...), - predicates: append([]predicate.WorkflowContractVersion{}, wcvq.predicates...), - withContract: wcvq.withContract.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]workflowcontractversion.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.WorkflowContractVersion{}, _q.predicates...), + withContract: _q.withContract.Clone(), // clone intermediate query. - sql: wcvq.sql.Clone(), - path: wcvq.path, - modifiers: append([]func(*sql.Selector){}, wcvq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithContract tells the query-builder to eager-load the nodes that are connected to // the "contract" edge. The optional arguments are used to configure the query builder of the edge. -func (wcvq *WorkflowContractVersionQuery) WithContract(opts ...func(*WorkflowContractQuery)) *WorkflowContractVersionQuery { - query := (&WorkflowContractClient{config: wcvq.config}).Query() +func (_q *WorkflowContractVersionQuery) WithContract(opts ...func(*WorkflowContractQuery)) *WorkflowContractVersionQuery { + query := (&WorkflowContractClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wcvq.withContract = query - return wcvq + _q.withContract = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -311,10 +311,10 @@ func (wcvq *WorkflowContractVersionQuery) WithContract(opts ...func(*WorkflowCon // GroupBy(workflowcontractversion.FieldBody). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (wcvq *WorkflowContractVersionQuery) GroupBy(field string, fields ...string) *WorkflowContractVersionGroupBy { - wcvq.ctx.Fields = append([]string{field}, fields...) - grbuild := &WorkflowContractVersionGroupBy{build: wcvq} - grbuild.flds = &wcvq.ctx.Fields +func (_q *WorkflowContractVersionQuery) GroupBy(field string, fields ...string) *WorkflowContractVersionGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &WorkflowContractVersionGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = workflowcontractversion.Label grbuild.scan = grbuild.Scan return grbuild @@ -332,55 +332,55 @@ func (wcvq *WorkflowContractVersionQuery) GroupBy(field string, fields ...string // client.WorkflowContractVersion.Query(). // Select(workflowcontractversion.FieldBody). // Scan(ctx, &v) -func (wcvq *WorkflowContractVersionQuery) Select(fields ...string) *WorkflowContractVersionSelect { - wcvq.ctx.Fields = append(wcvq.ctx.Fields, fields...) - sbuild := &WorkflowContractVersionSelect{WorkflowContractVersionQuery: wcvq} +func (_q *WorkflowContractVersionQuery) Select(fields ...string) *WorkflowContractVersionSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+ sbuild := &WorkflowContractVersionSelect{WorkflowContractVersionQuery: _q} sbuild.label = workflowcontractversion.Label - sbuild.flds, sbuild.scan = &wcvq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a WorkflowContractVersionSelect configured with the given aggregations. -func (wcvq *WorkflowContractVersionQuery) Aggregate(fns ...AggregateFunc) *WorkflowContractVersionSelect { - return wcvq.Select().Aggregate(fns...) +func (_q *WorkflowContractVersionQuery) Aggregate(fns ...AggregateFunc) *WorkflowContractVersionSelect { + return _q.Select().Aggregate(fns...) } -func (wcvq *WorkflowContractVersionQuery) prepareQuery(ctx context.Context) error { - for _, inter := range wcvq.inters { +func (_q *WorkflowContractVersionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, wcvq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range wcvq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !workflowcontractversion.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if wcvq.path != nil { - prev, err := wcvq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - wcvq.sql = prev + _q.sql = prev } return nil } -func (wcvq *WorkflowContractVersionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*WorkflowContractVersion, error) { +func (_q *WorkflowContractVersionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*WorkflowContractVersion, error) { var ( nodes = []*WorkflowContractVersion{} - withFKs = wcvq.withFKs - _spec = wcvq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [1]bool{ - wcvq.withContract != nil, + _q.withContract != nil, } ) - if wcvq.withContract != nil { + if _q.withContract != nil { withFKs = true } if withFKs { @@ -390,25 +390,25 @@ func (wcvq *WorkflowContractVersionQuery) sqlAll(ctx context.Context, hooks ...q return (*WorkflowContractVersion).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &WorkflowContractVersion{config: wcvq.config} + node := &WorkflowContractVersion{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(wcvq.modifiers) > 0 { - _spec.Modifiers = wcvq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, wcvq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := wcvq.withContract; query != nil { - if err := wcvq.loadContract(ctx, query, nodes, nil, + if query := _q.withContract; query != nil { + if err := _q.loadContract(ctx, query, nodes, nil, func(n *WorkflowContractVersion, e *WorkflowContract) { n.Edges.Contract = e }); err != nil { return nil, err } @@ -416,7 +416,7 @@ func (wcvq *WorkflowContractVersionQuery) sqlAll(ctx context.Context, hooks ...q return nodes, nil } -func (wcvq *WorkflowContractVersionQuery) loadContract(ctx context.Context, query *WorkflowContractQuery, nodes []*WorkflowContractVersion, init func(*WorkflowContractVersion), assign 
func(*WorkflowContractVersion, *WorkflowContract)) error { +func (_q *WorkflowContractVersionQuery) loadContract(ctx context.Context, query *WorkflowContractQuery, nodes []*WorkflowContractVersion, init func(*WorkflowContractVersion), assign func(*WorkflowContractVersion, *WorkflowContract)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*WorkflowContractVersion) for i := range nodes { @@ -449,27 +449,27 @@ func (wcvq *WorkflowContractVersionQuery) loadContract(ctx context.Context, quer return nil } -func (wcvq *WorkflowContractVersionQuery) sqlCount(ctx context.Context) (int, error) { - _spec := wcvq.querySpec() - if len(wcvq.modifiers) > 0 { - _spec.Modifiers = wcvq.modifiers +func (_q *WorkflowContractVersionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = wcvq.ctx.Fields - if len(wcvq.ctx.Fields) > 0 { - _spec.Unique = wcvq.ctx.Unique != nil && *wcvq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, wcvq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (wcvq *WorkflowContractVersionQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *WorkflowContractVersionQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(workflowcontractversion.Table, workflowcontractversion.Columns, sqlgraph.NewFieldSpec(workflowcontractversion.FieldID, field.TypeUUID)) - _spec.From = wcvq.sql - if unique := wcvq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if wcvq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := wcvq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, workflowcontractversion.FieldID) for i := range fields { @@ -478,20 +478,20 @@ func (wcvq *WorkflowContractVersionQuery) querySpec() *sqlgraph.QuerySpec { } } } - if ps := wcvq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := wcvq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := wcvq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := wcvq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -501,36 +501,36 @@ func (wcvq *WorkflowContractVersionQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (wcvq *WorkflowContractVersionQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(wcvq.driver.Dialect()) +func (_q *WorkflowContractVersionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(workflowcontractversion.Table) - columns := wcvq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = workflowcontractversion.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if wcvq.sql != nil { - selector = wcvq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) 
} - if wcvq.ctx.Unique != nil && *wcvq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range wcvq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range wcvq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range wcvq.order { + for _, p := range _q.order { p(selector) } - if offset := wcvq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := wcvq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -539,33 +539,33 @@ func (wcvq *WorkflowContractVersionQuery) sqlQuery(ctx context.Context) *sql.Sel // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (wcvq *WorkflowContractVersionQuery) ForUpdate(opts ...sql.LockOption) *WorkflowContractVersionQuery { - if wcvq.driver.Dialect() == dialect.Postgres { - wcvq.Unique(false) +func (_q *WorkflowContractVersionQuery) ForUpdate(opts ...sql.LockOption) *WorkflowContractVersionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - wcvq.modifiers = append(wcvq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return wcvq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (wcvq *WorkflowContractVersionQuery) ForShare(opts ...sql.LockOption) *WorkflowContractVersionQuery { - if wcvq.driver.Dialect() == dialect.Postgres { - wcvq.Unique(false) +func (_q *WorkflowContractVersionQuery) ForShare(opts ...sql.LockOption) *WorkflowContractVersionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - wcvq.modifiers = append(wcvq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return wcvq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (wcvq *WorkflowContractVersionQuery) Modify(modifiers ...func(s *sql.Selector)) *WorkflowContractVersionSelect { - wcvq.modifiers = append(wcvq.modifiers, modifiers...) - return wcvq.Select() +func (_q *WorkflowContractVersionQuery) Modify(modifiers ...func(s *sql.Selector)) *WorkflowContractVersionSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // WorkflowContractVersionGroupBy is the group-by builder for WorkflowContractVersion entities. @@ -575,41 +575,41 @@ type WorkflowContractVersionGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (wcvgb *WorkflowContractVersionGroupBy) Aggregate(fns ...AggregateFunc) *WorkflowContractVersionGroupBy { - wcvgb.fns = append(wcvgb.fns, fns...) - return wcvgb +func (_g *WorkflowContractVersionGroupBy) Aggregate(fns ...AggregateFunc) *WorkflowContractVersionGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (wcvgb *WorkflowContractVersionGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, wcvgb.build.ctx, ent.OpQueryGroupBy) - if err := wcvgb.build.prepareQuery(ctx); err != nil { +func (_g *WorkflowContractVersionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*WorkflowContractVersionQuery, *WorkflowContractVersionGroupBy](ctx, wcvgb.build, wcvgb, wcvgb.build.inters, v) + return scanWithInterceptors[*WorkflowContractVersionQuery, *WorkflowContractVersionGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (wcvgb *WorkflowContractVersionGroupBy) sqlScan(ctx context.Context, root *WorkflowContractVersionQuery, v any) error { +func (_g *WorkflowContractVersionGroupBy) sqlScan(ctx context.Context, root *WorkflowContractVersionQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(wcvgb.fns)) - for _, fn := range wcvgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*wcvgb.flds)+len(wcvgb.fns)) - for _, f := range *wcvgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*wcvgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := wcvgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -623,27 +623,27 @@ type WorkflowContractVersionSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (wcvs *WorkflowContractVersionSelect) Aggregate(fns ...AggregateFunc) *WorkflowContractVersionSelect { - wcvs.fns = append(wcvs.fns, fns...) - return wcvs +func (_s *WorkflowContractVersionSelect) Aggregate(fns ...AggregateFunc) *WorkflowContractVersionSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. 
-func (wcvs *WorkflowContractVersionSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, wcvs.ctx, ent.OpQuerySelect) - if err := wcvs.prepareQuery(ctx); err != nil { +func (_s *WorkflowContractVersionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*WorkflowContractVersionQuery, *WorkflowContractVersionSelect](ctx, wcvs.WorkflowContractVersionQuery, wcvs, wcvs.inters, v) + return scanWithInterceptors[*WorkflowContractVersionQuery, *WorkflowContractVersionSelect](ctx, _s.WorkflowContractVersionQuery, _s, _s.inters, v) } -func (wcvs *WorkflowContractVersionSelect) sqlScan(ctx context.Context, root *WorkflowContractVersionQuery, v any) error { +func (_s *WorkflowContractVersionSelect) sqlScan(ctx context.Context, root *WorkflowContractVersionQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(wcvs.fns)) - for _, fn := range wcvs.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*wcvs.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) case n != 0 && len(aggregation) > 0: @@ -651,7 +651,7 @@ func (wcvs *WorkflowContractVersionSelect) sqlScan(ctx context.Context, root *Wo } rows := &sql.Rows{} query, args := selector.Query() - if err := wcvs.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -659,7 +659,7 @@ func (wcvs *WorkflowContractVersionSelect) sqlScan(ctx context.Context, root *Wo } // Modify adds a query modifier for attaching custom logic to queries. -func (wcvs *WorkflowContractVersionSelect) Modify(modifiers ...func(s *sql.Selector)) *WorkflowContractVersionSelect { - wcvs.modifiers = append(wcvs.modifiers, modifiers...) - return wcvs +func (_s *WorkflowContractVersionSelect) Modify(modifiers ...func(s *sql.Selector)) *WorkflowContractVersionSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/workflowcontractversion_update.go b/app/controlplane/pkg/data/ent/workflowcontractversion_update.go index aeab3c607..c81449f3d 100644 --- a/app/controlplane/pkg/data/ent/workflowcontractversion_update.go +++ b/app/controlplane/pkg/data/ent/workflowcontractversion_update.go @@ -26,63 +26,63 @@ type WorkflowContractVersionUpdate struct { } // Where appends a list predicates to the WorkflowContractVersionUpdate builder. -func (wcvu *WorkflowContractVersionUpdate) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionUpdate { - wcvu.mutation.Where(ps...) - return wcvu +func (_u *WorkflowContractVersionUpdate) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionUpdate { + _u.mutation.Where(ps...) + return _u } // SetRawBodyFormat sets the "raw_body_format" field. -func (wcvu *WorkflowContractVersionUpdate) SetRawBodyFormat(uf unmarshal.RawFormat) *WorkflowContractVersionUpdate { - wcvu.mutation.SetRawBodyFormat(uf) - return wcvu +func (_u *WorkflowContractVersionUpdate) SetRawBodyFormat(v unmarshal.RawFormat) *WorkflowContractVersionUpdate { + _u.mutation.SetRawBodyFormat(v) + return _u } // SetNillableRawBodyFormat sets the "raw_body_format" field if the given value is not nil. 
-func (wcvu *WorkflowContractVersionUpdate) SetNillableRawBodyFormat(uf *unmarshal.RawFormat) *WorkflowContractVersionUpdate { - if uf != nil { - wcvu.SetRawBodyFormat(*uf) +func (_u *WorkflowContractVersionUpdate) SetNillableRawBodyFormat(v *unmarshal.RawFormat) *WorkflowContractVersionUpdate { + if v != nil { + _u.SetRawBodyFormat(*v) } - return wcvu + return _u } // SetContractID sets the "contract" edge to the WorkflowContract entity by ID. -func (wcvu *WorkflowContractVersionUpdate) SetContractID(id uuid.UUID) *WorkflowContractVersionUpdate { - wcvu.mutation.SetContractID(id) - return wcvu +func (_u *WorkflowContractVersionUpdate) SetContractID(id uuid.UUID) *WorkflowContractVersionUpdate { + _u.mutation.SetContractID(id) + return _u } // SetNillableContractID sets the "contract" edge to the WorkflowContract entity by ID if the given value is not nil. -func (wcvu *WorkflowContractVersionUpdate) SetNillableContractID(id *uuid.UUID) *WorkflowContractVersionUpdate { +func (_u *WorkflowContractVersionUpdate) SetNillableContractID(id *uuid.UUID) *WorkflowContractVersionUpdate { if id != nil { - wcvu = wcvu.SetContractID(*id) + _u = _u.SetContractID(*id) } - return wcvu + return _u } // SetContract sets the "contract" edge to the WorkflowContract entity. -func (wcvu *WorkflowContractVersionUpdate) SetContract(w *WorkflowContract) *WorkflowContractVersionUpdate { - return wcvu.SetContractID(w.ID) +func (_u *WorkflowContractVersionUpdate) SetContract(v *WorkflowContract) *WorkflowContractVersionUpdate { + return _u.SetContractID(v.ID) } // Mutation returns the WorkflowContractVersionMutation object of the builder. -func (wcvu *WorkflowContractVersionUpdate) Mutation() *WorkflowContractVersionMutation { - return wcvu.mutation +func (_u *WorkflowContractVersionUpdate) Mutation() *WorkflowContractVersionMutation { + return _u.mutation } // ClearContract clears the "contract" edge to the WorkflowContract entity. -func (wcvu *WorkflowContractVersionUpdate) ClearContract() *WorkflowContractVersionUpdate { - wcvu.mutation.ClearContract() - return wcvu +func (_u *WorkflowContractVersionUpdate) ClearContract() *WorkflowContractVersionUpdate { + _u.mutation.ClearContract() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. -func (wcvu *WorkflowContractVersionUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, wcvu.sqlSave, wcvu.mutation, wcvu.hooks) +func (_u *WorkflowContractVersionUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (wcvu *WorkflowContractVersionUpdate) SaveX(ctx context.Context) int { - affected, err := wcvu.Save(ctx) +func (_u *WorkflowContractVersionUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -90,21 +90,21 @@ func (wcvu *WorkflowContractVersionUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (wcvu *WorkflowContractVersionUpdate) Exec(ctx context.Context) error { - _, err := wcvu.Save(ctx) +func (_u *WorkflowContractVersionUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. 
-func (wcvu *WorkflowContractVersionUpdate) ExecX(ctx context.Context) { - if err := wcvu.Exec(ctx); err != nil { +func (_u *WorkflowContractVersionUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (wcvu *WorkflowContractVersionUpdate) check() error { - if v, ok := wcvu.mutation.RawBodyFormat(); ok { +func (_u *WorkflowContractVersionUpdate) check() error { + if v, ok := _u.mutation.RawBodyFormat(); ok { if err := workflowcontractversion.RawBodyFormatValidator(v); err != nil { return &ValidationError{Name: "raw_body_format", err: fmt.Errorf(`ent: validator failed for field "WorkflowContractVersion.raw_body_format": %w`, err)} } @@ -113,30 +113,30 @@ func (wcvu *WorkflowContractVersionUpdate) check() error { } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (wcvu *WorkflowContractVersionUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowContractVersionUpdate { - wcvu.modifiers = append(wcvu.modifiers, modifiers...) - return wcvu +func (_u *WorkflowContractVersionUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowContractVersionUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (wcvu *WorkflowContractVersionUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := wcvu.check(); err != nil { - return n, err +func (_u *WorkflowContractVersionUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(workflowcontractversion.Table, workflowcontractversion.Columns, sqlgraph.NewFieldSpec(workflowcontractversion.FieldID, field.TypeUUID)) - if ps := wcvu.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if wcvu.mutation.BodyCleared() { + if _u.mutation.BodyCleared() { _spec.ClearField(workflowcontractversion.FieldBody, field.TypeBytes) } - if value, ok := wcvu.mutation.RawBodyFormat(); ok { + if value, ok := _u.mutation.RawBodyFormat(); ok { _spec.SetField(workflowcontractversion.FieldRawBodyFormat, field.TypeEnum, value) } - if wcvu.mutation.ContractCleared() { + if _u.mutation.ContractCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -149,7 +149,7 @@ func (wcvu *WorkflowContractVersionUpdate) sqlSave(ctx context.Context) (n int, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcvu.mutation.ContractIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ContractIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -165,8 +165,8 @@ func (wcvu *WorkflowContractVersionUpdate) sqlSave(ctx context.Context) (n int, } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(wcvu.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, wcvu.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) 
+ if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{workflowcontractversion.Label} } else if sqlgraph.IsConstraintError(err) { @@ -174,8 +174,8 @@ func (wcvu *WorkflowContractVersionUpdate) sqlSave(ctx context.Context) (n int, } return 0, err } - wcvu.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // WorkflowContractVersionUpdateOne is the builder for updating a single WorkflowContractVersion entity. @@ -188,70 +188,70 @@ type WorkflowContractVersionUpdateOne struct { } // SetRawBodyFormat sets the "raw_body_format" field. -func (wcvuo *WorkflowContractVersionUpdateOne) SetRawBodyFormat(uf unmarshal.RawFormat) *WorkflowContractVersionUpdateOne { - wcvuo.mutation.SetRawBodyFormat(uf) - return wcvuo +func (_u *WorkflowContractVersionUpdateOne) SetRawBodyFormat(v unmarshal.RawFormat) *WorkflowContractVersionUpdateOne { + _u.mutation.SetRawBodyFormat(v) + return _u } // SetNillableRawBodyFormat sets the "raw_body_format" field if the given value is not nil. -func (wcvuo *WorkflowContractVersionUpdateOne) SetNillableRawBodyFormat(uf *unmarshal.RawFormat) *WorkflowContractVersionUpdateOne { - if uf != nil { - wcvuo.SetRawBodyFormat(*uf) +func (_u *WorkflowContractVersionUpdateOne) SetNillableRawBodyFormat(v *unmarshal.RawFormat) *WorkflowContractVersionUpdateOne { + if v != nil { + _u.SetRawBodyFormat(*v) } - return wcvuo + return _u } // SetContractID sets the "contract" edge to the WorkflowContract entity by ID. -func (wcvuo *WorkflowContractVersionUpdateOne) SetContractID(id uuid.UUID) *WorkflowContractVersionUpdateOne { - wcvuo.mutation.SetContractID(id) - return wcvuo +func (_u *WorkflowContractVersionUpdateOne) SetContractID(id uuid.UUID) *WorkflowContractVersionUpdateOne { + _u.mutation.SetContractID(id) + return _u } // SetNillableContractID sets the "contract" edge to the WorkflowContract entity by ID if the given value is not nil. -func (wcvuo *WorkflowContractVersionUpdateOne) SetNillableContractID(id *uuid.UUID) *WorkflowContractVersionUpdateOne { +func (_u *WorkflowContractVersionUpdateOne) SetNillableContractID(id *uuid.UUID) *WorkflowContractVersionUpdateOne { if id != nil { - wcvuo = wcvuo.SetContractID(*id) + _u = _u.SetContractID(*id) } - return wcvuo + return _u } // SetContract sets the "contract" edge to the WorkflowContract entity. -func (wcvuo *WorkflowContractVersionUpdateOne) SetContract(w *WorkflowContract) *WorkflowContractVersionUpdateOne { - return wcvuo.SetContractID(w.ID) +func (_u *WorkflowContractVersionUpdateOne) SetContract(v *WorkflowContract) *WorkflowContractVersionUpdateOne { + return _u.SetContractID(v.ID) } // Mutation returns the WorkflowContractVersionMutation object of the builder. -func (wcvuo *WorkflowContractVersionUpdateOne) Mutation() *WorkflowContractVersionMutation { - return wcvuo.mutation +func (_u *WorkflowContractVersionUpdateOne) Mutation() *WorkflowContractVersionMutation { + return _u.mutation } // ClearContract clears the "contract" edge to the WorkflowContract entity. -func (wcvuo *WorkflowContractVersionUpdateOne) ClearContract() *WorkflowContractVersionUpdateOne { - wcvuo.mutation.ClearContract() - return wcvuo +func (_u *WorkflowContractVersionUpdateOne) ClearContract() *WorkflowContractVersionUpdateOne { + _u.mutation.ClearContract() + return _u } // Where appends a list predicates to the WorkflowContractVersionUpdate builder. 
-func (wcvuo *WorkflowContractVersionUpdateOne) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionUpdateOne { - wcvuo.mutation.Where(ps...) - return wcvuo +func (_u *WorkflowContractVersionUpdateOne) Where(ps ...predicate.WorkflowContractVersion) *WorkflowContractVersionUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (wcvuo *WorkflowContractVersionUpdateOne) Select(field string, fields ...string) *WorkflowContractVersionUpdateOne { - wcvuo.fields = append([]string{field}, fields...) - return wcvuo +func (_u *WorkflowContractVersionUpdateOne) Select(field string, fields ...string) *WorkflowContractVersionUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated WorkflowContractVersion entity. -func (wcvuo *WorkflowContractVersionUpdateOne) Save(ctx context.Context) (*WorkflowContractVersion, error) { - return withHooks(ctx, wcvuo.sqlSave, wcvuo.mutation, wcvuo.hooks) +func (_u *WorkflowContractVersionUpdateOne) Save(ctx context.Context) (*WorkflowContractVersion, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (wcvuo *WorkflowContractVersionUpdateOne) SaveX(ctx context.Context) *WorkflowContractVersion { - node, err := wcvuo.Save(ctx) +func (_u *WorkflowContractVersionUpdateOne) SaveX(ctx context.Context) *WorkflowContractVersion { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -259,21 +259,21 @@ func (wcvuo *WorkflowContractVersionUpdateOne) SaveX(ctx context.Context) *Workf } // Exec executes the query on the entity. -func (wcvuo *WorkflowContractVersionUpdateOne) Exec(ctx context.Context) error { - _, err := wcvuo.Save(ctx) +func (_u *WorkflowContractVersionUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wcvuo *WorkflowContractVersionUpdateOne) ExecX(ctx context.Context) { - if err := wcvuo.Exec(ctx); err != nil { +func (_u *WorkflowContractVersionUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (wcvuo *WorkflowContractVersionUpdateOne) check() error { - if v, ok := wcvuo.mutation.RawBodyFormat(); ok { +func (_u *WorkflowContractVersionUpdateOne) check() error { + if v, ok := _u.mutation.RawBodyFormat(); ok { if err := workflowcontractversion.RawBodyFormatValidator(v); err != nil { return &ValidationError{Name: "raw_body_format", err: fmt.Errorf(`ent: validator failed for field "WorkflowContractVersion.raw_body_format": %w`, err)} } @@ -282,22 +282,22 @@ func (wcvuo *WorkflowContractVersionUpdateOne) check() error { } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (wcvuo *WorkflowContractVersionUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowContractVersionUpdateOne { - wcvuo.modifiers = append(wcvuo.modifiers, modifiers...) - return wcvuo +func (_u *WorkflowContractVersionUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowContractVersionUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (wcvuo *WorkflowContractVersionUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowContractVersion, err error) { - if err := wcvuo.check(); err != nil { +func (_u *WorkflowContractVersionUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowContractVersion, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(workflowcontractversion.Table, workflowcontractversion.Columns, sqlgraph.NewFieldSpec(workflowcontractversion.FieldID, field.TypeUUID)) - id, ok := wcvuo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "WorkflowContractVersion.id" for update`)} } _spec.Node.ID.Value = id - if fields := wcvuo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, workflowcontractversion.FieldID) for _, f := range fields { @@ -309,20 +309,20 @@ func (wcvuo *WorkflowContractVersionUpdateOne) sqlSave(ctx context.Context) (_no } } } - if ps := wcvuo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if wcvuo.mutation.BodyCleared() { + if _u.mutation.BodyCleared() { _spec.ClearField(workflowcontractversion.FieldBody, field.TypeBytes) } - if value, ok := wcvuo.mutation.RawBodyFormat(); ok { + if value, ok := _u.mutation.RawBodyFormat(); ok { _spec.SetField(workflowcontractversion.FieldRawBodyFormat, field.TypeEnum, value) } - if wcvuo.mutation.ContractCleared() { + if _u.mutation.ContractCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -335,7 +335,7 @@ func (wcvuo *WorkflowContractVersionUpdateOne) sqlSave(ctx context.Context) (_no } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wcvuo.mutation.ContractIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ContractIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -351,11 +351,11 @@ func (wcvuo *WorkflowContractVersionUpdateOne) sqlSave(ctx context.Context) (_no } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(wcvuo.modifiers...) - _node = &WorkflowContractVersion{config: wcvuo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &WorkflowContractVersion{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, wcvuo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{workflowcontractversion.Label} } else if sqlgraph.IsConstraintError(err) { @@ -363,6 +363,6 @@ func (wcvuo *WorkflowContractVersionUpdateOne) sqlSave(ctx context.Context) (_no } return nil, err } - wcvuo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/data/ent/workflowrun.go b/app/controlplane/pkg/data/ent/workflowrun.go index 565e34443..0e4db9786 100644 --- a/app/controlplane/pkg/data/ent/workflowrun.go +++ b/app/controlplane/pkg/data/ent/workflowrun.go @@ -158,7 +158,7 @@ func (*WorkflowRun) scanValues(columns []string) ([]any, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the WorkflowRun fields. 
-func (wr *WorkflowRun) assignValues(columns []string, values []any) error { +func (_m *WorkflowRun) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -168,49 +168,49 @@ func (wr *WorkflowRun) assignValues(columns []string, values []any) error { if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) } else if value != nil { - wr.ID = *value + _m.ID = *value } case workflowrun.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - wr.CreatedAt = value.Time + _m.CreatedAt = value.Time } case workflowrun.FieldFinishedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field finished_at", values[i]) } else if value.Valid { - wr.FinishedAt = value.Time + _m.FinishedAt = value.Time } case workflowrun.FieldState: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field state", values[i]) } else if value.Valid { - wr.State = biz.WorkflowRunStatus(value.String) + _m.State = biz.WorkflowRunStatus(value.String) } case workflowrun.FieldReason: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field reason", values[i]) } else if value.Valid { - wr.Reason = value.String + _m.Reason = value.String } case workflowrun.FieldRunURL: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field run_url", values[i]) } else if value.Valid { - wr.RunURL = value.String + _m.RunURL = value.String } case workflowrun.FieldRunnerType: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field runner_type", values[i]) } else if value.Valid { - wr.RunnerType = value.String + _m.RunnerType = value.String } case workflowrun.FieldAttestation: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field attestation", values[i]) } else if value != nil && len(*value) > 0 { - if err := json.Unmarshal(*value, &wr.Attestation); err != nil { + if err := json.Unmarshal(*value, &_m.Attestation); err != nil { return fmt.Errorf("unmarshal field attestation: %w", err) } } @@ -218,54 +218,54 @@ func (wr *WorkflowRun) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field attestation_digest", values[i]) } else if value.Valid { - wr.AttestationDigest = value.String + _m.AttestationDigest = value.String } case workflowrun.FieldAttestationState: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field attestation_state", values[i]) } else if value != nil { - wr.AttestationState = *value + _m.AttestationState = *value } case workflowrun.FieldContractRevisionUsed: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field contract_revision_used", values[i]) } else if value.Valid { - wr.ContractRevisionUsed = int(value.Int64) + _m.ContractRevisionUsed = int(value.Int64) } case workflowrun.FieldContractRevisionLatest: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field contract_revision_latest", values[i]) } else if value.Valid { - wr.ContractRevisionLatest = int(value.Int64) + _m.ContractRevisionLatest = 
int(value.Int64) } case workflowrun.FieldVersionID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field version_id", values[i]) } else if value != nil { - wr.VersionID = *value + _m.VersionID = *value } case workflowrun.FieldWorkflowID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field workflow_id", values[i]) } else if value != nil { - wr.WorkflowID = *value + _m.WorkflowID = *value } case workflowrun.FieldHasPolicyViolations: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field has_policy_violations", values[i]) } else if value.Valid { - wr.HasPolicyViolations = new(bool) - *wr.HasPolicyViolations = value.Bool + _m.HasPolicyViolations = new(bool) + *_m.HasPolicyViolations = value.Bool } case workflowrun.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field workflow_run_contract_version", values[i]) } else if value.Valid { - wr.workflow_run_contract_version = new(uuid.UUID) - *wr.workflow_run_contract_version = *value.S.(*uuid.UUID) + _m.workflow_run_contract_version = new(uuid.UUID) + *_m.workflow_run_contract_version = *value.S.(*uuid.UUID) } default: - wr.selectValues.Set(columns[i], values[i]) + _m.selectValues.Set(columns[i], values[i]) } } return nil @@ -273,98 +273,98 @@ func (wr *WorkflowRun) assignValues(columns []string, values []any) error { // Value returns the ent.Value that was dynamically selected and assigned to the WorkflowRun. // This includes values selected through modifiers, order, etc. -func (wr *WorkflowRun) Value(name string) (ent.Value, error) { - return wr.selectValues.Get(name) +func (_m *WorkflowRun) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) } // QueryWorkflow queries the "workflow" edge of the WorkflowRun entity. -func (wr *WorkflowRun) QueryWorkflow() *WorkflowQuery { - return NewWorkflowRunClient(wr.config).QueryWorkflow(wr) +func (_m *WorkflowRun) QueryWorkflow() *WorkflowQuery { + return NewWorkflowRunClient(_m.config).QueryWorkflow(_m) } // QueryContractVersion queries the "contract_version" edge of the WorkflowRun entity. -func (wr *WorkflowRun) QueryContractVersion() *WorkflowContractVersionQuery { - return NewWorkflowRunClient(wr.config).QueryContractVersion(wr) +func (_m *WorkflowRun) QueryContractVersion() *WorkflowContractVersionQuery { + return NewWorkflowRunClient(_m.config).QueryContractVersion(_m) } // QueryCasBackends queries the "cas_backends" edge of the WorkflowRun entity. -func (wr *WorkflowRun) QueryCasBackends() *CASBackendQuery { - return NewWorkflowRunClient(wr.config).QueryCasBackends(wr) +func (_m *WorkflowRun) QueryCasBackends() *CASBackendQuery { + return NewWorkflowRunClient(_m.config).QueryCasBackends(_m) } // QueryVersion queries the "version" edge of the WorkflowRun entity. -func (wr *WorkflowRun) QueryVersion() *ProjectVersionQuery { - return NewWorkflowRunClient(wr.config).QueryVersion(wr) +func (_m *WorkflowRun) QueryVersion() *ProjectVersionQuery { + return NewWorkflowRunClient(_m.config).QueryVersion(_m) } // QueryAttestationBundle queries the "attestation_bundle" edge of the WorkflowRun entity. 
-func (wr *WorkflowRun) QueryAttestationBundle() *AttestationQuery { - return NewWorkflowRunClient(wr.config).QueryAttestationBundle(wr) +func (_m *WorkflowRun) QueryAttestationBundle() *AttestationQuery { + return NewWorkflowRunClient(_m.config).QueryAttestationBundle(_m) } // Update returns a builder for updating this WorkflowRun. // Note that you need to call WorkflowRun.Unwrap() before calling this method if this WorkflowRun // was returned from a transaction, and the transaction was committed or rolled back. -func (wr *WorkflowRun) Update() *WorkflowRunUpdateOne { - return NewWorkflowRunClient(wr.config).UpdateOne(wr) +func (_m *WorkflowRun) Update() *WorkflowRunUpdateOne { + return NewWorkflowRunClient(_m.config).UpdateOne(_m) } // Unwrap unwraps the WorkflowRun entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (wr *WorkflowRun) Unwrap() *WorkflowRun { - _tx, ok := wr.config.driver.(*txDriver) +func (_m *WorkflowRun) Unwrap() *WorkflowRun { + _tx, ok := _m.config.driver.(*txDriver) if !ok { panic("ent: WorkflowRun is not a transactional entity") } - wr.config.driver = _tx.drv - return wr + _m.config.driver = _tx.drv + return _m } // String implements the fmt.Stringer. -func (wr *WorkflowRun) String() string { +func (_m *WorkflowRun) String() string { var builder strings.Builder builder.WriteString("WorkflowRun(") - builder.WriteString(fmt.Sprintf("id=%v, ", wr.ID)) + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) builder.WriteString("created_at=") - builder.WriteString(wr.CreatedAt.Format(time.ANSIC)) + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("finished_at=") - builder.WriteString(wr.FinishedAt.Format(time.ANSIC)) + builder.WriteString(_m.FinishedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("state=") - builder.WriteString(fmt.Sprintf("%v", wr.State)) + builder.WriteString(fmt.Sprintf("%v", _m.State)) builder.WriteString(", ") builder.WriteString("reason=") - builder.WriteString(wr.Reason) + builder.WriteString(_m.Reason) builder.WriteString(", ") builder.WriteString("run_url=") - builder.WriteString(wr.RunURL) + builder.WriteString(_m.RunURL) builder.WriteString(", ") builder.WriteString("runner_type=") - builder.WriteString(wr.RunnerType) + builder.WriteString(_m.RunnerType) builder.WriteString(", ") builder.WriteString("attestation=") - builder.WriteString(fmt.Sprintf("%v", wr.Attestation)) + builder.WriteString(fmt.Sprintf("%v", _m.Attestation)) builder.WriteString(", ") builder.WriteString("attestation_digest=") - builder.WriteString(wr.AttestationDigest) + builder.WriteString(_m.AttestationDigest) builder.WriteString(", ") builder.WriteString("attestation_state=") - builder.WriteString(fmt.Sprintf("%v", wr.AttestationState)) + builder.WriteString(fmt.Sprintf("%v", _m.AttestationState)) builder.WriteString(", ") builder.WriteString("contract_revision_used=") - builder.WriteString(fmt.Sprintf("%v", wr.ContractRevisionUsed)) + builder.WriteString(fmt.Sprintf("%v", _m.ContractRevisionUsed)) builder.WriteString(", ") builder.WriteString("contract_revision_latest=") - builder.WriteString(fmt.Sprintf("%v", wr.ContractRevisionLatest)) + builder.WriteString(fmt.Sprintf("%v", _m.ContractRevisionLatest)) builder.WriteString(", ") builder.WriteString("version_id=") - builder.WriteString(fmt.Sprintf("%v", wr.VersionID)) + builder.WriteString(fmt.Sprintf("%v", _m.VersionID)) 
builder.WriteString(", ") builder.WriteString("workflow_id=") - builder.WriteString(fmt.Sprintf("%v", wr.WorkflowID)) + builder.WriteString(fmt.Sprintf("%v", _m.WorkflowID)) builder.WriteString(", ") - if v := wr.HasPolicyViolations; v != nil { + if v := _m.HasPolicyViolations; v != nil { builder.WriteString("has_policy_violations=") builder.WriteString(fmt.Sprintf("%v", *v)) } diff --git a/app/controlplane/pkg/data/ent/workflowrun_create.go b/app/controlplane/pkg/data/ent/workflowrun_create.go index 2764210cb..2199ebdf2 100644 --- a/app/controlplane/pkg/data/ent/workflowrun_create.go +++ b/app/controlplane/pkg/data/ent/workflowrun_create.go @@ -32,244 +32,244 @@ type WorkflowRunCreate struct { } // SetCreatedAt sets the "created_at" field. -func (wrc *WorkflowRunCreate) SetCreatedAt(t time.Time) *WorkflowRunCreate { - wrc.mutation.SetCreatedAt(t) - return wrc +func (_c *WorkflowRunCreate) SetCreatedAt(v time.Time) *WorkflowRunCreate { + _c.mutation.SetCreatedAt(v) + return _c } // SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableCreatedAt(t *time.Time) *WorkflowRunCreate { - if t != nil { - wrc.SetCreatedAt(*t) +func (_c *WorkflowRunCreate) SetNillableCreatedAt(v *time.Time) *WorkflowRunCreate { + if v != nil { + _c.SetCreatedAt(*v) } - return wrc + return _c } // SetFinishedAt sets the "finished_at" field. -func (wrc *WorkflowRunCreate) SetFinishedAt(t time.Time) *WorkflowRunCreate { - wrc.mutation.SetFinishedAt(t) - return wrc +func (_c *WorkflowRunCreate) SetFinishedAt(v time.Time) *WorkflowRunCreate { + _c.mutation.SetFinishedAt(v) + return _c } // SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableFinishedAt(t *time.Time) *WorkflowRunCreate { - if t != nil { - wrc.SetFinishedAt(*t) +func (_c *WorkflowRunCreate) SetNillableFinishedAt(v *time.Time) *WorkflowRunCreate { + if v != nil { + _c.SetFinishedAt(*v) } - return wrc + return _c } // SetState sets the "state" field. -func (wrc *WorkflowRunCreate) SetState(brs biz.WorkflowRunStatus) *WorkflowRunCreate { - wrc.mutation.SetState(brs) - return wrc +func (_c *WorkflowRunCreate) SetState(v biz.WorkflowRunStatus) *WorkflowRunCreate { + _c.mutation.SetState(v) + return _c } // SetNillableState sets the "state" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableState(brs *biz.WorkflowRunStatus) *WorkflowRunCreate { - if brs != nil { - wrc.SetState(*brs) +func (_c *WorkflowRunCreate) SetNillableState(v *biz.WorkflowRunStatus) *WorkflowRunCreate { + if v != nil { + _c.SetState(*v) } - return wrc + return _c } // SetReason sets the "reason" field. -func (wrc *WorkflowRunCreate) SetReason(s string) *WorkflowRunCreate { - wrc.mutation.SetReason(s) - return wrc +func (_c *WorkflowRunCreate) SetReason(v string) *WorkflowRunCreate { + _c.mutation.SetReason(v) + return _c } // SetNillableReason sets the "reason" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableReason(s *string) *WorkflowRunCreate { - if s != nil { - wrc.SetReason(*s) +func (_c *WorkflowRunCreate) SetNillableReason(v *string) *WorkflowRunCreate { + if v != nil { + _c.SetReason(*v) } - return wrc + return _c } // SetRunURL sets the "run_url" field. 
-func (wrc *WorkflowRunCreate) SetRunURL(s string) *WorkflowRunCreate { - wrc.mutation.SetRunURL(s) - return wrc +func (_c *WorkflowRunCreate) SetRunURL(v string) *WorkflowRunCreate { + _c.mutation.SetRunURL(v) + return _c } // SetNillableRunURL sets the "run_url" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableRunURL(s *string) *WorkflowRunCreate { - if s != nil { - wrc.SetRunURL(*s) +func (_c *WorkflowRunCreate) SetNillableRunURL(v *string) *WorkflowRunCreate { + if v != nil { + _c.SetRunURL(*v) } - return wrc + return _c } // SetRunnerType sets the "runner_type" field. -func (wrc *WorkflowRunCreate) SetRunnerType(s string) *WorkflowRunCreate { - wrc.mutation.SetRunnerType(s) - return wrc +func (_c *WorkflowRunCreate) SetRunnerType(v string) *WorkflowRunCreate { + _c.mutation.SetRunnerType(v) + return _c } // SetNillableRunnerType sets the "runner_type" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableRunnerType(s *string) *WorkflowRunCreate { - if s != nil { - wrc.SetRunnerType(*s) +func (_c *WorkflowRunCreate) SetNillableRunnerType(v *string) *WorkflowRunCreate { + if v != nil { + _c.SetRunnerType(*v) } - return wrc + return _c } // SetAttestation sets the "attestation" field. -func (wrc *WorkflowRunCreate) SetAttestation(d *dsse.Envelope) *WorkflowRunCreate { - wrc.mutation.SetAttestation(d) - return wrc +func (_c *WorkflowRunCreate) SetAttestation(v *dsse.Envelope) *WorkflowRunCreate { + _c.mutation.SetAttestation(v) + return _c } // SetAttestationDigest sets the "attestation_digest" field. -func (wrc *WorkflowRunCreate) SetAttestationDigest(s string) *WorkflowRunCreate { - wrc.mutation.SetAttestationDigest(s) - return wrc +func (_c *WorkflowRunCreate) SetAttestationDigest(v string) *WorkflowRunCreate { + _c.mutation.SetAttestationDigest(v) + return _c } // SetNillableAttestationDigest sets the "attestation_digest" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableAttestationDigest(s *string) *WorkflowRunCreate { - if s != nil { - wrc.SetAttestationDigest(*s) +func (_c *WorkflowRunCreate) SetNillableAttestationDigest(v *string) *WorkflowRunCreate { + if v != nil { + _c.SetAttestationDigest(*v) } - return wrc + return _c } // SetAttestationState sets the "attestation_state" field. -func (wrc *WorkflowRunCreate) SetAttestationState(b []byte) *WorkflowRunCreate { - wrc.mutation.SetAttestationState(b) - return wrc +func (_c *WorkflowRunCreate) SetAttestationState(v []byte) *WorkflowRunCreate { + _c.mutation.SetAttestationState(v) + return _c } // SetContractRevisionUsed sets the "contract_revision_used" field. -func (wrc *WorkflowRunCreate) SetContractRevisionUsed(i int) *WorkflowRunCreate { - wrc.mutation.SetContractRevisionUsed(i) - return wrc +func (_c *WorkflowRunCreate) SetContractRevisionUsed(v int) *WorkflowRunCreate { + _c.mutation.SetContractRevisionUsed(v) + return _c } // SetContractRevisionLatest sets the "contract_revision_latest" field. -func (wrc *WorkflowRunCreate) SetContractRevisionLatest(i int) *WorkflowRunCreate { - wrc.mutation.SetContractRevisionLatest(i) - return wrc +func (_c *WorkflowRunCreate) SetContractRevisionLatest(v int) *WorkflowRunCreate { + _c.mutation.SetContractRevisionLatest(v) + return _c } // SetVersionID sets the "version_id" field. 
-func (wrc *WorkflowRunCreate) SetVersionID(u uuid.UUID) *WorkflowRunCreate { - wrc.mutation.SetVersionID(u) - return wrc +func (_c *WorkflowRunCreate) SetVersionID(v uuid.UUID) *WorkflowRunCreate { + _c.mutation.SetVersionID(v) + return _c } // SetWorkflowID sets the "workflow_id" field. -func (wrc *WorkflowRunCreate) SetWorkflowID(u uuid.UUID) *WorkflowRunCreate { - wrc.mutation.SetWorkflowID(u) - return wrc +func (_c *WorkflowRunCreate) SetWorkflowID(v uuid.UUID) *WorkflowRunCreate { + _c.mutation.SetWorkflowID(v) + return _c } // SetHasPolicyViolations sets the "has_policy_violations" field. -func (wrc *WorkflowRunCreate) SetHasPolicyViolations(b bool) *WorkflowRunCreate { - wrc.mutation.SetHasPolicyViolations(b) - return wrc +func (_c *WorkflowRunCreate) SetHasPolicyViolations(v bool) *WorkflowRunCreate { + _c.mutation.SetHasPolicyViolations(v) + return _c } // SetNillableHasPolicyViolations sets the "has_policy_violations" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableHasPolicyViolations(b *bool) *WorkflowRunCreate { - if b != nil { - wrc.SetHasPolicyViolations(*b) +func (_c *WorkflowRunCreate) SetNillableHasPolicyViolations(v *bool) *WorkflowRunCreate { + if v != nil { + _c.SetHasPolicyViolations(*v) } - return wrc + return _c } // SetID sets the "id" field. -func (wrc *WorkflowRunCreate) SetID(u uuid.UUID) *WorkflowRunCreate { - wrc.mutation.SetID(u) - return wrc +func (_c *WorkflowRunCreate) SetID(v uuid.UUID) *WorkflowRunCreate { + _c.mutation.SetID(v) + return _c } // SetNillableID sets the "id" field if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableID(u *uuid.UUID) *WorkflowRunCreate { - if u != nil { - wrc.SetID(*u) +func (_c *WorkflowRunCreate) SetNillableID(v *uuid.UUID) *WorkflowRunCreate { + if v != nil { + _c.SetID(*v) } - return wrc + return _c } // SetWorkflow sets the "workflow" edge to the Workflow entity. -func (wrc *WorkflowRunCreate) SetWorkflow(w *Workflow) *WorkflowRunCreate { - return wrc.SetWorkflowID(w.ID) +func (_c *WorkflowRunCreate) SetWorkflow(v *Workflow) *WorkflowRunCreate { + return _c.SetWorkflowID(v.ID) } // SetContractVersionID sets the "contract_version" edge to the WorkflowContractVersion entity by ID. -func (wrc *WorkflowRunCreate) SetContractVersionID(id uuid.UUID) *WorkflowRunCreate { - wrc.mutation.SetContractVersionID(id) - return wrc +func (_c *WorkflowRunCreate) SetContractVersionID(id uuid.UUID) *WorkflowRunCreate { + _c.mutation.SetContractVersionID(id) + return _c } // SetNillableContractVersionID sets the "contract_version" edge to the WorkflowContractVersion entity by ID if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableContractVersionID(id *uuid.UUID) *WorkflowRunCreate { +func (_c *WorkflowRunCreate) SetNillableContractVersionID(id *uuid.UUID) *WorkflowRunCreate { if id != nil { - wrc = wrc.SetContractVersionID(*id) + _c = _c.SetContractVersionID(*id) } - return wrc + return _c } // SetContractVersion sets the "contract_version" edge to the WorkflowContractVersion entity. -func (wrc *WorkflowRunCreate) SetContractVersion(w *WorkflowContractVersion) *WorkflowRunCreate { - return wrc.SetContractVersionID(w.ID) +func (_c *WorkflowRunCreate) SetContractVersion(v *WorkflowContractVersion) *WorkflowRunCreate { + return _c.SetContractVersionID(v.ID) } // AddCasBackendIDs adds the "cas_backends" edge to the CASBackend entity by IDs. -func (wrc *WorkflowRunCreate) AddCasBackendIDs(ids ...uuid.UUID) *WorkflowRunCreate { - wrc.mutation.AddCasBackendIDs(ids...) 
- return wrc +func (_c *WorkflowRunCreate) AddCasBackendIDs(ids ...uuid.UUID) *WorkflowRunCreate { + _c.mutation.AddCasBackendIDs(ids...) + return _c } // AddCasBackends adds the "cas_backends" edges to the CASBackend entity. -func (wrc *WorkflowRunCreate) AddCasBackends(c ...*CASBackend) *WorkflowRunCreate { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_c *WorkflowRunCreate) AddCasBackends(v ...*CASBackend) *WorkflowRunCreate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wrc.AddCasBackendIDs(ids...) + return _c.AddCasBackendIDs(ids...) } // SetVersion sets the "version" edge to the ProjectVersion entity. -func (wrc *WorkflowRunCreate) SetVersion(p *ProjectVersion) *WorkflowRunCreate { - return wrc.SetVersionID(p.ID) +func (_c *WorkflowRunCreate) SetVersion(v *ProjectVersion) *WorkflowRunCreate { + return _c.SetVersionID(v.ID) } // SetAttestationBundleID sets the "attestation_bundle" edge to the Attestation entity by ID. -func (wrc *WorkflowRunCreate) SetAttestationBundleID(id uuid.UUID) *WorkflowRunCreate { - wrc.mutation.SetAttestationBundleID(id) - return wrc +func (_c *WorkflowRunCreate) SetAttestationBundleID(id uuid.UUID) *WorkflowRunCreate { + _c.mutation.SetAttestationBundleID(id) + return _c } // SetNillableAttestationBundleID sets the "attestation_bundle" edge to the Attestation entity by ID if the given value is not nil. -func (wrc *WorkflowRunCreate) SetNillableAttestationBundleID(id *uuid.UUID) *WorkflowRunCreate { +func (_c *WorkflowRunCreate) SetNillableAttestationBundleID(id *uuid.UUID) *WorkflowRunCreate { if id != nil { - wrc = wrc.SetAttestationBundleID(*id) + _c = _c.SetAttestationBundleID(*id) } - return wrc + return _c } // SetAttestationBundle sets the "attestation_bundle" edge to the Attestation entity. -func (wrc *WorkflowRunCreate) SetAttestationBundle(a *Attestation) *WorkflowRunCreate { - return wrc.SetAttestationBundleID(a.ID) +func (_c *WorkflowRunCreate) SetAttestationBundle(v *Attestation) *WorkflowRunCreate { + return _c.SetAttestationBundleID(v.ID) } // Mutation returns the WorkflowRunMutation object of the builder. -func (wrc *WorkflowRunCreate) Mutation() *WorkflowRunMutation { - return wrc.mutation +func (_c *WorkflowRunCreate) Mutation() *WorkflowRunMutation { + return _c.mutation } // Save creates the WorkflowRun in the database. -func (wrc *WorkflowRunCreate) Save(ctx context.Context) (*WorkflowRun, error) { - wrc.defaults() - return withHooks(ctx, wrc.sqlSave, wrc.mutation, wrc.hooks) +func (_c *WorkflowRunCreate) Save(ctx context.Context) (*WorkflowRun, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } // SaveX calls Save and panics if Save returns an error. -func (wrc *WorkflowRunCreate) SaveX(ctx context.Context) *WorkflowRun { - v, err := wrc.Save(ctx) +func (_c *WorkflowRunCreate) SaveX(ctx context.Context) *WorkflowRun { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -277,74 +277,74 @@ func (wrc *WorkflowRunCreate) SaveX(ctx context.Context) *WorkflowRun { } // Exec executes the query. -func (wrc *WorkflowRunCreate) Exec(ctx context.Context) error { - _, err := wrc.Save(ctx) +func (_c *WorkflowRunCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. 
-func (wrc *WorkflowRunCreate) ExecX(ctx context.Context) { - if err := wrc.Exec(ctx); err != nil { +func (_c *WorkflowRunCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. -func (wrc *WorkflowRunCreate) defaults() { - if _, ok := wrc.mutation.CreatedAt(); !ok { +func (_c *WorkflowRunCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { v := workflowrun.DefaultCreatedAt() - wrc.mutation.SetCreatedAt(v) + _c.mutation.SetCreatedAt(v) } - if _, ok := wrc.mutation.State(); !ok { + if _, ok := _c.mutation.State(); !ok { v := workflowrun.DefaultState - wrc.mutation.SetState(v) + _c.mutation.SetState(v) } - if _, ok := wrc.mutation.ID(); !ok { + if _, ok := _c.mutation.ID(); !ok { v := workflowrun.DefaultID() - wrc.mutation.SetID(v) + _c.mutation.SetID(v) } } // check runs all checks and user-defined validators on the builder. -func (wrc *WorkflowRunCreate) check() error { - if _, ok := wrc.mutation.CreatedAt(); !ok { +func (_c *WorkflowRunCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "WorkflowRun.created_at"`)} } - if _, ok := wrc.mutation.State(); !ok { + if _, ok := _c.mutation.State(); !ok { return &ValidationError{Name: "state", err: errors.New(`ent: missing required field "WorkflowRun.state"`)} } - if v, ok := wrc.mutation.State(); ok { + if v, ok := _c.mutation.State(); ok { if err := workflowrun.StateValidator(v); err != nil { return &ValidationError{Name: "state", err: fmt.Errorf(`ent: validator failed for field "WorkflowRun.state": %w`, err)} } } - if _, ok := wrc.mutation.ContractRevisionUsed(); !ok { + if _, ok := _c.mutation.ContractRevisionUsed(); !ok { return &ValidationError{Name: "contract_revision_used", err: errors.New(`ent: missing required field "WorkflowRun.contract_revision_used"`)} } - if _, ok := wrc.mutation.ContractRevisionLatest(); !ok { + if _, ok := _c.mutation.ContractRevisionLatest(); !ok { return &ValidationError{Name: "contract_revision_latest", err: errors.New(`ent: missing required field "WorkflowRun.contract_revision_latest"`)} } - if _, ok := wrc.mutation.VersionID(); !ok { + if _, ok := _c.mutation.VersionID(); !ok { return &ValidationError{Name: "version_id", err: errors.New(`ent: missing required field "WorkflowRun.version_id"`)} } - if _, ok := wrc.mutation.WorkflowID(); !ok { + if _, ok := _c.mutation.WorkflowID(); !ok { return &ValidationError{Name: "workflow_id", err: errors.New(`ent: missing required field "WorkflowRun.workflow_id"`)} } - if len(wrc.mutation.WorkflowIDs()) == 0 { + if len(_c.mutation.WorkflowIDs()) == 0 { return &ValidationError{Name: "workflow", err: errors.New(`ent: missing required edge "WorkflowRun.workflow"`)} } - if len(wrc.mutation.VersionIDs()) == 0 { + if len(_c.mutation.VersionIDs()) == 0 { return &ValidationError{Name: "version", err: errors.New(`ent: missing required edge "WorkflowRun.version"`)} } return nil } -func (wrc *WorkflowRunCreate) sqlSave(ctx context.Context) (*WorkflowRun, error) { - if err := wrc.check(); err != nil { +func (_c *WorkflowRunCreate) sqlSave(ctx context.Context) (*WorkflowRun, error) { + if err := _c.check(); err != nil { return nil, err } - _node, _spec := wrc.createSpec() - if err := sqlgraph.CreateNode(ctx, wrc.driver, _spec); err != nil { + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { if 
sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -357,70 +357,70 @@ func (wrc *WorkflowRunCreate) sqlSave(ctx context.Context) (*WorkflowRun, error) return nil, err } } - wrc.mutation.id = &_node.ID - wrc.mutation.done = true + _c.mutation.id = &_node.ID + _c.mutation.done = true return _node, nil } -func (wrc *WorkflowRunCreate) createSpec() (*WorkflowRun, *sqlgraph.CreateSpec) { +func (_c *WorkflowRunCreate) createSpec() (*WorkflowRun, *sqlgraph.CreateSpec) { var ( - _node = &WorkflowRun{config: wrc.config} + _node = &WorkflowRun{config: _c.config} _spec = sqlgraph.NewCreateSpec(workflowrun.Table, sqlgraph.NewFieldSpec(workflowrun.FieldID, field.TypeUUID)) ) - _spec.OnConflict = wrc.conflict - if id, ok := wrc.mutation.ID(); ok { + _spec.OnConflict = _c.conflict + if id, ok := _c.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := wrc.mutation.CreatedAt(); ok { + if value, ok := _c.mutation.CreatedAt(); ok { _spec.SetField(workflowrun.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if value, ok := wrc.mutation.FinishedAt(); ok { + if value, ok := _c.mutation.FinishedAt(); ok { _spec.SetField(workflowrun.FieldFinishedAt, field.TypeTime, value) _node.FinishedAt = value } - if value, ok := wrc.mutation.State(); ok { + if value, ok := _c.mutation.State(); ok { _spec.SetField(workflowrun.FieldState, field.TypeEnum, value) _node.State = value } - if value, ok := wrc.mutation.Reason(); ok { + if value, ok := _c.mutation.Reason(); ok { _spec.SetField(workflowrun.FieldReason, field.TypeString, value) _node.Reason = value } - if value, ok := wrc.mutation.RunURL(); ok { + if value, ok := _c.mutation.RunURL(); ok { _spec.SetField(workflowrun.FieldRunURL, field.TypeString, value) _node.RunURL = value } - if value, ok := wrc.mutation.RunnerType(); ok { + if value, ok := _c.mutation.RunnerType(); ok { _spec.SetField(workflowrun.FieldRunnerType, field.TypeString, value) _node.RunnerType = value } - if value, ok := wrc.mutation.Attestation(); ok { + if value, ok := _c.mutation.Attestation(); ok { _spec.SetField(workflowrun.FieldAttestation, field.TypeJSON, value) _node.Attestation = value } - if value, ok := wrc.mutation.AttestationDigest(); ok { + if value, ok := _c.mutation.AttestationDigest(); ok { _spec.SetField(workflowrun.FieldAttestationDigest, field.TypeString, value) _node.AttestationDigest = value } - if value, ok := wrc.mutation.AttestationState(); ok { + if value, ok := _c.mutation.AttestationState(); ok { _spec.SetField(workflowrun.FieldAttestationState, field.TypeBytes, value) _node.AttestationState = value } - if value, ok := wrc.mutation.ContractRevisionUsed(); ok { + if value, ok := _c.mutation.ContractRevisionUsed(); ok { _spec.SetField(workflowrun.FieldContractRevisionUsed, field.TypeInt, value) _node.ContractRevisionUsed = value } - if value, ok := wrc.mutation.ContractRevisionLatest(); ok { + if value, ok := _c.mutation.ContractRevisionLatest(); ok { _spec.SetField(workflowrun.FieldContractRevisionLatest, field.TypeInt, value) _node.ContractRevisionLatest = value } - if value, ok := wrc.mutation.HasPolicyViolations(); ok { + if value, ok := _c.mutation.HasPolicyViolations(); ok { _spec.SetField(workflowrun.FieldHasPolicyViolations, field.TypeBool, value) _node.HasPolicyViolations = &value } - if nodes := wrc.mutation.WorkflowIDs(); len(nodes) > 0 { + if nodes := _c.mutation.WorkflowIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -437,7 +437,7 @@ func (wrc 
*WorkflowRunCreate) createSpec() (*WorkflowRun, *sqlgraph.CreateSpec) _node.WorkflowID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := wrc.mutation.ContractVersionIDs(); len(nodes) > 0 { + if nodes := _c.mutation.ContractVersionIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -454,7 +454,7 @@ func (wrc *WorkflowRunCreate) createSpec() (*WorkflowRun, *sqlgraph.CreateSpec) _node.workflow_run_contract_version = &nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := wrc.mutation.CasBackendsIDs(); len(nodes) > 0 { + if nodes := _c.mutation.CasBackendsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -470,7 +470,7 @@ func (wrc *WorkflowRunCreate) createSpec() (*WorkflowRun, *sqlgraph.CreateSpec) } _spec.Edges = append(_spec.Edges, edge) } - if nodes := wrc.mutation.VersionIDs(); len(nodes) > 0 { + if nodes := _c.mutation.VersionIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -487,7 +487,7 @@ func (wrc *WorkflowRunCreate) createSpec() (*WorkflowRun, *sqlgraph.CreateSpec) _node.VersionID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } - if nodes := wrc.mutation.AttestationBundleIDs(); len(nodes) > 0 { + if nodes := _c.mutation.AttestationBundleIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2O, Inverse: false, @@ -522,10 +522,10 @@ func (wrc *WorkflowRunCreate) createSpec() (*WorkflowRun, *sqlgraph.CreateSpec) // SetCreatedAt(v+v). // }). // Exec(ctx) -func (wrc *WorkflowRunCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowRunUpsertOne { - wrc.conflict = opts +func (_c *WorkflowRunCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowRunUpsertOne { + _c.conflict = opts return &WorkflowRunUpsertOne{ - create: wrc, + create: _c, } } @@ -535,10 +535,10 @@ func (wrc *WorkflowRunCreate) OnConflict(opts ...sql.ConflictOption) *WorkflowRu // client.WorkflowRun.Create(). // OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (wrc *WorkflowRunCreate) OnConflictColumns(columns ...string) *WorkflowRunUpsertOne { - wrc.conflict = append(wrc.conflict, sql.ConflictColumns(columns...)) +func (_c *WorkflowRunCreate) OnConflictColumns(columns ...string) *WorkflowRunUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &WorkflowRunUpsertOne{ - create: wrc, + create: _c, } } @@ -1098,16 +1098,16 @@ type WorkflowRunCreateBulk struct { } // Save creates the WorkflowRun entities in the database. 
-func (wrcb *WorkflowRunCreateBulk) Save(ctx context.Context) ([]*WorkflowRun, error) { - if wrcb.err != nil { - return nil, wrcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(wrcb.builders)) - nodes := make([]*WorkflowRun, len(wrcb.builders)) - mutators := make([]Mutator, len(wrcb.builders)) - for i := range wrcb.builders { +func (_c *WorkflowRunCreateBulk) Save(ctx context.Context) ([]*WorkflowRun, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*WorkflowRun, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { func(i int, root context.Context) { - builder := wrcb.builders[i] + builder := _c.builders[i] builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*WorkflowRunMutation) @@ -1121,12 +1121,12 @@ func (wrcb *WorkflowRunCreateBulk) Save(ctx context.Context) ([]*WorkflowRun, er var err error nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, wrcb.builders[i+1].mutation) + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) } else { spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - spec.OnConflict = wrcb.conflict + spec.OnConflict = _c.conflict // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, wrcb.driver, spec); err != nil { + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } @@ -1146,7 +1146,7 @@ func (wrcb *WorkflowRunCreateBulk) Save(ctx context.Context) ([]*WorkflowRun, er }(i, ctx) } if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, wrcb.builders[0].mutation); err != nil { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { return nil, err } } @@ -1154,8 +1154,8 @@ func (wrcb *WorkflowRunCreateBulk) Save(ctx context.Context) ([]*WorkflowRun, er } // SaveX is like Save, but panics if an error occurs. -func (wrcb *WorkflowRunCreateBulk) SaveX(ctx context.Context) []*WorkflowRun { - v, err := wrcb.Save(ctx) +func (_c *WorkflowRunCreateBulk) SaveX(ctx context.Context) []*WorkflowRun { + v, err := _c.Save(ctx) if err != nil { panic(err) } @@ -1163,14 +1163,14 @@ func (wrcb *WorkflowRunCreateBulk) SaveX(ctx context.Context) []*WorkflowRun { } // Exec executes the query. -func (wrcb *WorkflowRunCreateBulk) Exec(ctx context.Context) error { - _, err := wrcb.Save(ctx) +func (_c *WorkflowRunCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wrcb *WorkflowRunCreateBulk) ExecX(ctx context.Context) { - if err := wrcb.Exec(ctx); err != nil { +func (_c *WorkflowRunCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { panic(err) } } @@ -1190,10 +1190,10 @@ func (wrcb *WorkflowRunCreateBulk) ExecX(ctx context.Context) { // SetCreatedAt(v+v). // }). // Exec(ctx) -func (wrcb *WorkflowRunCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowRunUpsertBulk { - wrcb.conflict = opts +func (_c *WorkflowRunCreateBulk) OnConflict(opts ...sql.ConflictOption) *WorkflowRunUpsertBulk { + _c.conflict = opts return &WorkflowRunUpsertBulk{ - create: wrcb, + create: _c, } } @@ -1203,10 +1203,10 @@ func (wrcb *WorkflowRunCreateBulk) OnConflict(opts ...sql.ConflictOption) *Workf // client.WorkflowRun.Create(). 
// OnConflict(sql.ConflictColumns(columns...)). // Exec(ctx) -func (wrcb *WorkflowRunCreateBulk) OnConflictColumns(columns ...string) *WorkflowRunUpsertBulk { - wrcb.conflict = append(wrcb.conflict, sql.ConflictColumns(columns...)) +func (_c *WorkflowRunCreateBulk) OnConflictColumns(columns ...string) *WorkflowRunUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) return &WorkflowRunUpsertBulk{ - create: wrcb, + create: _c, } } diff --git a/app/controlplane/pkg/data/ent/workflowrun_delete.go b/app/controlplane/pkg/data/ent/workflowrun_delete.go index b5974790d..1bead5009 100644 --- a/app/controlplane/pkg/data/ent/workflowrun_delete.go +++ b/app/controlplane/pkg/data/ent/workflowrun_delete.go @@ -20,56 +20,56 @@ type WorkflowRunDelete struct { } // Where appends a list predicates to the WorkflowRunDelete builder. -func (wrd *WorkflowRunDelete) Where(ps ...predicate.WorkflowRun) *WorkflowRunDelete { - wrd.mutation.Where(ps...) - return wrd +func (_d *WorkflowRunDelete) Where(ps ...predicate.WorkflowRun) *WorkflowRunDelete { + _d.mutation.Where(ps...) + return _d } // Exec executes the deletion query and returns how many vertices were deleted. -func (wrd *WorkflowRunDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, wrd.sqlExec, wrd.mutation, wrd.hooks) +func (_d *WorkflowRunDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) } // ExecX is like Exec, but panics if an error occurs. -func (wrd *WorkflowRunDelete) ExecX(ctx context.Context) int { - n, err := wrd.Exec(ctx) +func (_d *WorkflowRunDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) if err != nil { panic(err) } return n } -func (wrd *WorkflowRunDelete) sqlExec(ctx context.Context) (int, error) { +func (_d *WorkflowRunDelete) sqlExec(ctx context.Context) (int, error) { _spec := sqlgraph.NewDeleteSpec(workflowrun.Table, sqlgraph.NewFieldSpec(workflowrun.FieldID, field.TypeUUID)) - if ps := wrd.mutation.predicates; len(ps) > 0 { + if ps := _d.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - affected, err := sqlgraph.DeleteNodes(ctx, wrd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } - wrd.mutation.done = true + _d.mutation.done = true return affected, err } // WorkflowRunDeleteOne is the builder for deleting a single WorkflowRun entity. type WorkflowRunDeleteOne struct { - wrd *WorkflowRunDelete + _d *WorkflowRunDelete } // Where appends a list predicates to the WorkflowRunDelete builder. -func (wrdo *WorkflowRunDeleteOne) Where(ps ...predicate.WorkflowRun) *WorkflowRunDeleteOne { - wrdo.wrd.mutation.Where(ps...) - return wrdo +func (_d *WorkflowRunDeleteOne) Where(ps ...predicate.WorkflowRun) *WorkflowRunDeleteOne { + _d._d.mutation.Where(ps...) + return _d } // Exec executes the deletion query. -func (wrdo *WorkflowRunDeleteOne) Exec(ctx context.Context) error { - n, err := wrdo.wrd.Exec(ctx) +func (_d *WorkflowRunDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) switch { case err != nil: return err @@ -81,8 +81,8 @@ func (wrdo *WorkflowRunDeleteOne) Exec(ctx context.Context) error { } // ExecX is like Exec, but panics if an error occurs. 
-func (wrdo *WorkflowRunDeleteOne) ExecX(ctx context.Context) { - if err := wrdo.Exec(ctx); err != nil { +func (_d *WorkflowRunDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { panic(err) } } diff --git a/app/controlplane/pkg/data/ent/workflowrun_query.go b/app/controlplane/pkg/data/ent/workflowrun_query.go index 50f37936e..bc78efe49 100644 --- a/app/controlplane/pkg/data/ent/workflowrun_query.go +++ b/app/controlplane/pkg/data/ent/workflowrun_query.go @@ -43,44 +43,44 @@ type WorkflowRunQuery struct { } // Where adds a new predicate for the WorkflowRunQuery builder. -func (wrq *WorkflowRunQuery) Where(ps ...predicate.WorkflowRun) *WorkflowRunQuery { - wrq.predicates = append(wrq.predicates, ps...) - return wrq +func (_q *WorkflowRunQuery) Where(ps ...predicate.WorkflowRun) *WorkflowRunQuery { + _q.predicates = append(_q.predicates, ps...) + return _q } // Limit the number of records to be returned by this query. -func (wrq *WorkflowRunQuery) Limit(limit int) *WorkflowRunQuery { - wrq.ctx.Limit = &limit - return wrq +func (_q *WorkflowRunQuery) Limit(limit int) *WorkflowRunQuery { + _q.ctx.Limit = &limit + return _q } // Offset to start from. -func (wrq *WorkflowRunQuery) Offset(offset int) *WorkflowRunQuery { - wrq.ctx.Offset = &offset - return wrq +func (_q *WorkflowRunQuery) Offset(offset int) *WorkflowRunQuery { + _q.ctx.Offset = &offset + return _q } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. -func (wrq *WorkflowRunQuery) Unique(unique bool) *WorkflowRunQuery { - wrq.ctx.Unique = &unique - return wrq +func (_q *WorkflowRunQuery) Unique(unique bool) *WorkflowRunQuery { + _q.ctx.Unique = &unique + return _q } // Order specifies how the records should be ordered. -func (wrq *WorkflowRunQuery) Order(o ...workflowrun.OrderOption) *WorkflowRunQuery { - wrq.order = append(wrq.order, o...) - return wrq +func (_q *WorkflowRunQuery) Order(o ...workflowrun.OrderOption) *WorkflowRunQuery { + _q.order = append(_q.order, o...) + return _q } // QueryWorkflow chains the current query on the "workflow" edge. -func (wrq *WorkflowRunQuery) QueryWorkflow() *WorkflowQuery { - query := (&WorkflowClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) QueryWorkflow() *WorkflowQuery { + query := (&WorkflowClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wrq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wrq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -89,20 +89,20 @@ func (wrq *WorkflowRunQuery) QueryWorkflow() *WorkflowQuery { sqlgraph.To(workflow.Table, workflow.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflowrun.WorkflowTable, workflowrun.WorkflowColumn), ) - fromU = sqlgraph.SetNeighbors(wrq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryContractVersion chains the current query on the "contract_version" edge. 
-func (wrq *WorkflowRunQuery) QueryContractVersion() *WorkflowContractVersionQuery { - query := (&WorkflowContractVersionClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) QueryContractVersion() *WorkflowContractVersionQuery { + query := (&WorkflowContractVersionClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wrq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wrq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -111,20 +111,20 @@ func (wrq *WorkflowRunQuery) QueryContractVersion() *WorkflowContractVersionQuer sqlgraph.To(workflowcontractversion.Table, workflowcontractversion.FieldID), sqlgraph.Edge(sqlgraph.M2O, false, workflowrun.ContractVersionTable, workflowrun.ContractVersionColumn), ) - fromU = sqlgraph.SetNeighbors(wrq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryCasBackends chains the current query on the "cas_backends" edge. -func (wrq *WorkflowRunQuery) QueryCasBackends() *CASBackendQuery { - query := (&CASBackendClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) QueryCasBackends() *CASBackendQuery { + query := (&CASBackendClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wrq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wrq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -133,20 +133,20 @@ func (wrq *WorkflowRunQuery) QueryCasBackends() *CASBackendQuery { sqlgraph.To(casbackend.Table, casbackend.FieldID), sqlgraph.Edge(sqlgraph.M2M, false, workflowrun.CasBackendsTable, workflowrun.CasBackendsPrimaryKey...), ) - fromU = sqlgraph.SetNeighbors(wrq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryVersion chains the current query on the "version" edge. -func (wrq *WorkflowRunQuery) QueryVersion() *ProjectVersionQuery { - query := (&ProjectVersionClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) QueryVersion() *ProjectVersionQuery { + query := (&ProjectVersionClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wrq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wrq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -155,20 +155,20 @@ func (wrq *WorkflowRunQuery) QueryVersion() *ProjectVersionQuery { sqlgraph.To(projectversion.Table, projectversion.FieldID), sqlgraph.Edge(sqlgraph.M2O, true, workflowrun.VersionTable, workflowrun.VersionColumn), ) - fromU = sqlgraph.SetNeighbors(wrq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query } // QueryAttestationBundle chains the current query on the "attestation_bundle" edge. 
-func (wrq *WorkflowRunQuery) QueryAttestationBundle() *AttestationQuery { - query := (&AttestationClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) QueryAttestationBundle() *AttestationQuery { + query := (&AttestationClient{config: _q.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := wrq.prepareQuery(ctx); err != nil { + if err := _q.prepareQuery(ctx); err != nil { return nil, err } - selector := wrq.sqlQuery(ctx) + selector := _q.sqlQuery(ctx) if err := selector.Err(); err != nil { return nil, err } @@ -177,7 +177,7 @@ func (wrq *WorkflowRunQuery) QueryAttestationBundle() *AttestationQuery { sqlgraph.To(attestation.Table, attestation.FieldID), sqlgraph.Edge(sqlgraph.O2O, false, workflowrun.AttestationBundleTable, workflowrun.AttestationBundleColumn), ) - fromU = sqlgraph.SetNeighbors(wrq.driver.Dialect(), step) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) return fromU, nil } return query @@ -185,8 +185,8 @@ func (wrq *WorkflowRunQuery) QueryAttestationBundle() *AttestationQuery { // First returns the first WorkflowRun entity from the query. // Returns a *NotFoundError when no WorkflowRun was found. -func (wrq *WorkflowRunQuery) First(ctx context.Context) (*WorkflowRun, error) { - nodes, err := wrq.Limit(1).All(setContextOp(ctx, wrq.ctx, ent.OpQueryFirst)) +func (_q *WorkflowRunQuery) First(ctx context.Context) (*WorkflowRun, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -197,8 +197,8 @@ func (wrq *WorkflowRunQuery) First(ctx context.Context) (*WorkflowRun, error) { } // FirstX is like First, but panics if an error occurs. -func (wrq *WorkflowRunQuery) FirstX(ctx context.Context) *WorkflowRun { - node, err := wrq.First(ctx) +func (_q *WorkflowRunQuery) FirstX(ctx context.Context) *WorkflowRun { + node, err := _q.First(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -207,9 +207,9 @@ func (wrq *WorkflowRunQuery) FirstX(ctx context.Context) *WorkflowRun { // FirstID returns the first WorkflowRun ID from the query. // Returns a *NotFoundError when no WorkflowRun ID was found. -func (wrq *WorkflowRunQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *WorkflowRunQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = wrq.Limit(1).IDs(setContextOp(ctx, wrq.ctx, ent.OpQueryFirstID)); err != nil { + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -220,8 +220,8 @@ func (wrq *WorkflowRunQuery) FirstID(ctx context.Context) (id uuid.UUID, err err } // FirstIDX is like FirstID, but panics if an error occurs. -func (wrq *WorkflowRunQuery) FirstIDX(ctx context.Context) uuid.UUID { - id, err := wrq.FirstID(ctx) +func (_q *WorkflowRunQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := _q.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) } @@ -231,8 +231,8 @@ func (wrq *WorkflowRunQuery) FirstIDX(ctx context.Context) uuid.UUID { // Only returns a single WorkflowRun entity found by the query, ensuring it only returns one. // Returns a *NotSingularError when more than one WorkflowRun entity is found. // Returns a *NotFoundError when no WorkflowRun entities are found. 
-func (wrq *WorkflowRunQuery) Only(ctx context.Context) (*WorkflowRun, error) { - nodes, err := wrq.Limit(2).All(setContextOp(ctx, wrq.ctx, ent.OpQueryOnly)) +func (_q *WorkflowRunQuery) Only(ctx context.Context) (*WorkflowRun, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -247,8 +247,8 @@ func (wrq *WorkflowRunQuery) Only(ctx context.Context) (*WorkflowRun, error) { } // OnlyX is like Only, but panics if an error occurs. -func (wrq *WorkflowRunQuery) OnlyX(ctx context.Context) *WorkflowRun { - node, err := wrq.Only(ctx) +func (_q *WorkflowRunQuery) OnlyX(ctx context.Context) *WorkflowRun { + node, err := _q.Only(ctx) if err != nil { panic(err) } @@ -258,9 +258,9 @@ func (wrq *WorkflowRunQuery) OnlyX(ctx context.Context) *WorkflowRun { // OnlyID is like Only, but returns the only WorkflowRun ID in the query. // Returns a *NotSingularError when more than one WorkflowRun ID is found. // Returns a *NotFoundError when no entities are found. -func (wrq *WorkflowRunQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { +func (_q *WorkflowRunQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = wrq.Limit(2).IDs(setContextOp(ctx, wrq.ctx, ent.OpQueryOnlyID)); err != nil { + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -275,8 +275,8 @@ func (wrq *WorkflowRunQuery) OnlyID(ctx context.Context) (id uuid.UUID, err erro } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (wrq *WorkflowRunQuery) OnlyIDX(ctx context.Context) uuid.UUID { - id, err := wrq.OnlyID(ctx) +func (_q *WorkflowRunQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := _q.OnlyID(ctx) if err != nil { panic(err) } @@ -284,18 +284,18 @@ func (wrq *WorkflowRunQuery) OnlyIDX(ctx context.Context) uuid.UUID { } // All executes the query and returns a list of WorkflowRuns. -func (wrq *WorkflowRunQuery) All(ctx context.Context) ([]*WorkflowRun, error) { - ctx = setContextOp(ctx, wrq.ctx, ent.OpQueryAll) - if err := wrq.prepareQuery(ctx); err != nil { +func (_q *WorkflowRunQuery) All(ctx context.Context) ([]*WorkflowRun, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { return nil, err } qr := querierAll[[]*WorkflowRun, *WorkflowRunQuery]() - return withInterceptors[[]*WorkflowRun](ctx, wrq, qr, wrq.inters) + return withInterceptors[[]*WorkflowRun](ctx, _q, qr, _q.inters) } // AllX is like All, but panics if an error occurs. -func (wrq *WorkflowRunQuery) AllX(ctx context.Context) []*WorkflowRun { - nodes, err := wrq.All(ctx) +func (_q *WorkflowRunQuery) AllX(ctx context.Context) []*WorkflowRun { + nodes, err := _q.All(ctx) if err != nil { panic(err) } @@ -303,20 +303,20 @@ func (wrq *WorkflowRunQuery) AllX(ctx context.Context) []*WorkflowRun { } // IDs executes the query and returns a list of WorkflowRun IDs. 
-func (wrq *WorkflowRunQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { - if wrq.ctx.Unique == nil && wrq.path != nil { - wrq.Unique(true) +func (_q *WorkflowRunQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) } - ctx = setContextOp(ctx, wrq.ctx, ent.OpQueryIDs) - if err = wrq.Select(workflowrun.FieldID).Scan(ctx, &ids); err != nil { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(workflowrun.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil } // IDsX is like IDs, but panics if an error occurs. -func (wrq *WorkflowRunQuery) IDsX(ctx context.Context) []uuid.UUID { - ids, err := wrq.IDs(ctx) +func (_q *WorkflowRunQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := _q.IDs(ctx) if err != nil { panic(err) } @@ -324,17 +324,17 @@ func (wrq *WorkflowRunQuery) IDsX(ctx context.Context) []uuid.UUID { } // Count returns the count of the given query. -func (wrq *WorkflowRunQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, wrq.ctx, ent.OpQueryCount) - if err := wrq.prepareQuery(ctx); err != nil { +func (_q *WorkflowRunQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { return 0, err } - return withInterceptors[int](ctx, wrq, querierCount[*WorkflowRunQuery](), wrq.inters) + return withInterceptors[int](ctx, _q, querierCount[*WorkflowRunQuery](), _q.inters) } // CountX is like Count, but panics if an error occurs. -func (wrq *WorkflowRunQuery) CountX(ctx context.Context) int { - count, err := wrq.Count(ctx) +func (_q *WorkflowRunQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) if err != nil { panic(err) } @@ -342,9 +342,9 @@ func (wrq *WorkflowRunQuery) CountX(ctx context.Context) int { } // Exist returns true if the query has elements in the graph. -func (wrq *WorkflowRunQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, wrq.ctx, ent.OpQueryExist) - switch _, err := wrq.FirstID(ctx); { +func (_q *WorkflowRunQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { case IsNotFound(err): return false, nil case err != nil: @@ -355,8 +355,8 @@ func (wrq *WorkflowRunQuery) Exist(ctx context.Context) (bool, error) { } // ExistX is like Exist, but panics if an error occurs. -func (wrq *WorkflowRunQuery) ExistX(ctx context.Context) bool { - exist, err := wrq.Exist(ctx) +func (_q *WorkflowRunQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) if err != nil { panic(err) } @@ -365,81 +365,81 @@ func (wrq *WorkflowRunQuery) ExistX(ctx context.Context) bool { // Clone returns a duplicate of the WorkflowRunQuery builder, including all associated steps. It can be // used to prepare common query builders and use them differently after the clone is made. 
-func (wrq *WorkflowRunQuery) Clone() *WorkflowRunQuery { - if wrq == nil { +func (_q *WorkflowRunQuery) Clone() *WorkflowRunQuery { + if _q == nil { return nil } return &WorkflowRunQuery{ - config: wrq.config, - ctx: wrq.ctx.Clone(), - order: append([]workflowrun.OrderOption{}, wrq.order...), - inters: append([]Interceptor{}, wrq.inters...), - predicates: append([]predicate.WorkflowRun{}, wrq.predicates...), - withWorkflow: wrq.withWorkflow.Clone(), - withContractVersion: wrq.withContractVersion.Clone(), - withCasBackends: wrq.withCasBackends.Clone(), - withVersion: wrq.withVersion.Clone(), - withAttestationBundle: wrq.withAttestationBundle.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]workflowrun.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.WorkflowRun{}, _q.predicates...), + withWorkflow: _q.withWorkflow.Clone(), + withContractVersion: _q.withContractVersion.Clone(), + withCasBackends: _q.withCasBackends.Clone(), + withVersion: _q.withVersion.Clone(), + withAttestationBundle: _q.withAttestationBundle.Clone(), // clone intermediate query. - sql: wrq.sql.Clone(), - path: wrq.path, - modifiers: append([]func(*sql.Selector){}, wrq.modifiers...), + sql: _q.sql.Clone(), + path: _q.path, + modifiers: append([]func(*sql.Selector){}, _q.modifiers...), } } // WithWorkflow tells the query-builder to eager-load the nodes that are connected to // the "workflow" edge. The optional arguments are used to configure the query builder of the edge. -func (wrq *WorkflowRunQuery) WithWorkflow(opts ...func(*WorkflowQuery)) *WorkflowRunQuery { - query := (&WorkflowClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) WithWorkflow(opts ...func(*WorkflowQuery)) *WorkflowRunQuery { + query := (&WorkflowClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wrq.withWorkflow = query - return wrq + _q.withWorkflow = query + return _q } // WithContractVersion tells the query-builder to eager-load the nodes that are connected to // the "contract_version" edge. The optional arguments are used to configure the query builder of the edge. -func (wrq *WorkflowRunQuery) WithContractVersion(opts ...func(*WorkflowContractVersionQuery)) *WorkflowRunQuery { - query := (&WorkflowContractVersionClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) WithContractVersion(opts ...func(*WorkflowContractVersionQuery)) *WorkflowRunQuery { + query := (&WorkflowContractVersionClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wrq.withContractVersion = query - return wrq + _q.withContractVersion = query + return _q } // WithCasBackends tells the query-builder to eager-load the nodes that are connected to // the "cas_backends" edge. The optional arguments are used to configure the query builder of the edge. -func (wrq *WorkflowRunQuery) WithCasBackends(opts ...func(*CASBackendQuery)) *WorkflowRunQuery { - query := (&CASBackendClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) WithCasBackends(opts ...func(*CASBackendQuery)) *WorkflowRunQuery { + query := (&CASBackendClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wrq.withCasBackends = query - return wrq + _q.withCasBackends = query + return _q } // WithVersion tells the query-builder to eager-load the nodes that are connected to // the "version" edge. The optional arguments are used to configure the query builder of the edge. 
-func (wrq *WorkflowRunQuery) WithVersion(opts ...func(*ProjectVersionQuery)) *WorkflowRunQuery { - query := (&ProjectVersionClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) WithVersion(opts ...func(*ProjectVersionQuery)) *WorkflowRunQuery { + query := (&ProjectVersionClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wrq.withVersion = query - return wrq + _q.withVersion = query + return _q } // WithAttestationBundle tells the query-builder to eager-load the nodes that are connected to // the "attestation_bundle" edge. The optional arguments are used to configure the query builder of the edge. -func (wrq *WorkflowRunQuery) WithAttestationBundle(opts ...func(*AttestationQuery)) *WorkflowRunQuery { - query := (&AttestationClient{config: wrq.config}).Query() +func (_q *WorkflowRunQuery) WithAttestationBundle(opts ...func(*AttestationQuery)) *WorkflowRunQuery { + query := (&AttestationClient{config: _q.config}).Query() for _, opt := range opts { opt(query) } - wrq.withAttestationBundle = query - return wrq + _q.withAttestationBundle = query + return _q } // GroupBy is used to group vertices by one or more fields/columns. @@ -456,10 +456,10 @@ func (wrq *WorkflowRunQuery) WithAttestationBundle(opts ...func(*AttestationQuer // GroupBy(workflowrun.FieldCreatedAt). // Aggregate(ent.Count()). // Scan(ctx, &v) -func (wrq *WorkflowRunQuery) GroupBy(field string, fields ...string) *WorkflowRunGroupBy { - wrq.ctx.Fields = append([]string{field}, fields...) - grbuild := &WorkflowRunGroupBy{build: wrq} - grbuild.flds = &wrq.ctx.Fields +func (_q *WorkflowRunQuery) GroupBy(field string, fields ...string) *WorkflowRunGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &WorkflowRunGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields grbuild.label = workflowrun.Label grbuild.scan = grbuild.Scan return grbuild @@ -477,59 +477,59 @@ func (wrq *WorkflowRunQuery) GroupBy(field string, fields ...string) *WorkflowRu // client.WorkflowRun.Query(). // Select(workflowrun.FieldCreatedAt). // Scan(ctx, &v) -func (wrq *WorkflowRunQuery) Select(fields ...string) *WorkflowRunSelect { - wrq.ctx.Fields = append(wrq.ctx.Fields, fields...) - sbuild := &WorkflowRunSelect{WorkflowRunQuery: wrq} +func (_q *WorkflowRunQuery) Select(fields ...string) *WorkflowRunSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &WorkflowRunSelect{WorkflowRunQuery: _q} sbuild.label = workflowrun.Label - sbuild.flds, sbuild.scan = &wrq.ctx.Fields, sbuild.Scan + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan return sbuild } // Aggregate returns a WorkflowRunSelect configured with the given aggregations. -func (wrq *WorkflowRunQuery) Aggregate(fns ...AggregateFunc) *WorkflowRunSelect { - return wrq.Select().Aggregate(fns...) +func (_q *WorkflowRunQuery) Aggregate(fns ...AggregateFunc) *WorkflowRunSelect { + return _q.Select().Aggregate(fns...) 
} -func (wrq *WorkflowRunQuery) prepareQuery(ctx context.Context) error { - for _, inter := range wrq.inters { +func (_q *WorkflowRunQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { if inter == nil { return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") } if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, wrq); err != nil { + if err := trv.Traverse(ctx, _q); err != nil { return err } } } - for _, f := range wrq.ctx.Fields { + for _, f := range _q.ctx.Fields { if !workflowrun.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } } - if wrq.path != nil { - prev, err := wrq.path(ctx) + if _q.path != nil { + prev, err := _q.path(ctx) if err != nil { return err } - wrq.sql = prev + _q.sql = prev } return nil } -func (wrq *WorkflowRunQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*WorkflowRun, error) { +func (_q *WorkflowRunQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*WorkflowRun, error) { var ( nodes = []*WorkflowRun{} - withFKs = wrq.withFKs - _spec = wrq.querySpec() + withFKs = _q.withFKs + _spec = _q.querySpec() loadedTypes = [5]bool{ - wrq.withWorkflow != nil, - wrq.withContractVersion != nil, - wrq.withCasBackends != nil, - wrq.withVersion != nil, - wrq.withAttestationBundle != nil, + _q.withWorkflow != nil, + _q.withContractVersion != nil, + _q.withCasBackends != nil, + _q.withVersion != nil, + _q.withAttestationBundle != nil, } ) - if wrq.withContractVersion != nil { + if _q.withContractVersion != nil { withFKs = true } if withFKs { @@ -539,50 +539,50 @@ func (wrq *WorkflowRunQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([] return (*WorkflowRun).scanValues(nil, columns) } _spec.Assign = func(columns []string, values []any) error { - node := &WorkflowRun{config: wrq.config} + node := &WorkflowRun{config: _q.config} nodes = append(nodes, node) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } - if len(wrq.modifiers) > 0 { - _spec.Modifiers = wrq.modifiers + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } for i := range hooks { hooks[i](ctx, _spec) } - if err := sqlgraph.QueryNodes(ctx, wrq.driver, _spec); err != nil { + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := wrq.withWorkflow; query != nil { - if err := wrq.loadWorkflow(ctx, query, nodes, nil, + if query := _q.withWorkflow; query != nil { + if err := _q.loadWorkflow(ctx, query, nodes, nil, func(n *WorkflowRun, e *Workflow) { n.Edges.Workflow = e }); err != nil { return nil, err } } - if query := wrq.withContractVersion; query != nil { - if err := wrq.loadContractVersion(ctx, query, nodes, nil, + if query := _q.withContractVersion; query != nil { + if err := _q.loadContractVersion(ctx, query, nodes, nil, func(n *WorkflowRun, e *WorkflowContractVersion) { n.Edges.ContractVersion = e }); err != nil { return nil, err } } - if query := wrq.withCasBackends; query != nil { - if err := wrq.loadCasBackends(ctx, query, nodes, + if query := _q.withCasBackends; query != nil { + if err := _q.loadCasBackends(ctx, query, nodes, func(n *WorkflowRun) { n.Edges.CasBackends = []*CASBackend{} }, func(n *WorkflowRun, e *CASBackend) { n.Edges.CasBackends = append(n.Edges.CasBackends, e) }); err != nil { return nil, err } } - if query := wrq.withVersion; query != nil { - if err := wrq.loadVersion(ctx, query, nodes, nil, + if query := _q.withVersion; 
query != nil { + if err := _q.loadVersion(ctx, query, nodes, nil, func(n *WorkflowRun, e *ProjectVersion) { n.Edges.Version = e }); err != nil { return nil, err } } - if query := wrq.withAttestationBundle; query != nil { - if err := wrq.loadAttestationBundle(ctx, query, nodes, nil, + if query := _q.withAttestationBundle; query != nil { + if err := _q.loadAttestationBundle(ctx, query, nodes, nil, func(n *WorkflowRun, e *Attestation) { n.Edges.AttestationBundle = e }); err != nil { return nil, err } @@ -590,7 +590,7 @@ func (wrq *WorkflowRunQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([] return nodes, nil } -func (wrq *WorkflowRunQuery) loadWorkflow(ctx context.Context, query *WorkflowQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *Workflow)) error { +func (_q *WorkflowRunQuery) loadWorkflow(ctx context.Context, query *WorkflowQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *Workflow)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*WorkflowRun) for i := range nodes { @@ -619,7 +619,7 @@ func (wrq *WorkflowRunQuery) loadWorkflow(ctx context.Context, query *WorkflowQu } return nil } -func (wrq *WorkflowRunQuery) loadContractVersion(ctx context.Context, query *WorkflowContractVersionQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *WorkflowContractVersion)) error { +func (_q *WorkflowRunQuery) loadContractVersion(ctx context.Context, query *WorkflowContractVersionQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *WorkflowContractVersion)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*WorkflowRun) for i := range nodes { @@ -651,7 +651,7 @@ func (wrq *WorkflowRunQuery) loadContractVersion(ctx context.Context, query *Wor } return nil } -func (wrq *WorkflowRunQuery) loadCasBackends(ctx context.Context, query *CASBackendQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *CASBackend)) error { +func (_q *WorkflowRunQuery) loadCasBackends(ctx context.Context, query *CASBackendQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *CASBackend)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[uuid.UUID]*WorkflowRun) nids := make(map[uuid.UUID]map[*WorkflowRun]struct{}) @@ -712,7 +712,7 @@ func (wrq *WorkflowRunQuery) loadCasBackends(ctx context.Context, query *CASBack } return nil } -func (wrq *WorkflowRunQuery) loadVersion(ctx context.Context, query *ProjectVersionQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *ProjectVersion)) error { +func (_q *WorkflowRunQuery) loadVersion(ctx context.Context, query *ProjectVersionQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *ProjectVersion)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*WorkflowRun) for i := range nodes { @@ -741,7 +741,7 @@ func (wrq *WorkflowRunQuery) loadVersion(ctx context.Context, query *ProjectVers } return nil } -func (wrq *WorkflowRunQuery) loadAttestationBundle(ctx context.Context, query *AttestationQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *Attestation)) error { +func (_q *WorkflowRunQuery) loadAttestationBundle(ctx context.Context, query *AttestationQuery, nodes []*WorkflowRun, init func(*WorkflowRun), assign func(*WorkflowRun, *Attestation)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := 
make(map[uuid.UUID]*WorkflowRun) for i := range nodes { @@ -769,27 +769,27 @@ func (wrq *WorkflowRunQuery) loadAttestationBundle(ctx context.Context, query *A return nil } -func (wrq *WorkflowRunQuery) sqlCount(ctx context.Context) (int, error) { - _spec := wrq.querySpec() - if len(wrq.modifiers) > 0 { - _spec.Modifiers = wrq.modifiers +func (_q *WorkflowRunQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers } - _spec.Node.Columns = wrq.ctx.Fields - if len(wrq.ctx.Fields) > 0 { - _spec.Unique = wrq.ctx.Unique != nil && *wrq.ctx.Unique + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique } - return sqlgraph.CountNodes(ctx, wrq.driver, _spec) + return sqlgraph.CountNodes(ctx, _q.driver, _spec) } -func (wrq *WorkflowRunQuery) querySpec() *sqlgraph.QuerySpec { +func (_q *WorkflowRunQuery) querySpec() *sqlgraph.QuerySpec { _spec := sqlgraph.NewQuerySpec(workflowrun.Table, workflowrun.Columns, sqlgraph.NewFieldSpec(workflowrun.FieldID, field.TypeUUID)) - _spec.From = wrq.sql - if unique := wrq.ctx.Unique; unique != nil { + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { _spec.Unique = *unique - } else if wrq.path != nil { + } else if _q.path != nil { _spec.Unique = true } - if fields := wrq.ctx.Fields; len(fields) > 0 { + if fields := _q.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, workflowrun.FieldID) for i := range fields { @@ -797,27 +797,27 @@ func (wrq *WorkflowRunQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if wrq.withWorkflow != nil { + if _q.withWorkflow != nil { _spec.Node.AddColumnOnce(workflowrun.FieldWorkflowID) } - if wrq.withVersion != nil { + if _q.withVersion != nil { _spec.Node.AddColumnOnce(workflowrun.FieldVersionID) } } - if ps := wrq.predicates; len(ps) > 0 { + if ps := _q.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if limit := wrq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := wrq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { _spec.Offset = *offset } - if ps := wrq.order; len(ps) > 0 { + if ps := _q.order; len(ps) > 0 { _spec.Order = func(selector *sql.Selector) { for i := range ps { ps[i](selector) @@ -827,36 +827,36 @@ func (wrq *WorkflowRunQuery) querySpec() *sqlgraph.QuerySpec { return _spec } -func (wrq *WorkflowRunQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(wrq.driver.Dialect()) +func (_q *WorkflowRunQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) t1 := builder.Table(workflowrun.Table) - columns := wrq.ctx.Fields + columns := _q.ctx.Fields if len(columns) == 0 { columns = workflowrun.Columns } selector := builder.Select(t1.Columns(columns...)...).From(t1) - if wrq.sql != nil { - selector = wrq.sql + if _q.sql != nil { + selector = _q.sql selector.Select(selector.Columns(columns...)...) 
} - if wrq.ctx.Unique != nil && *wrq.ctx.Unique { + if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } - for _, m := range wrq.modifiers { + for _, m := range _q.modifiers { m(selector) } - for _, p := range wrq.predicates { + for _, p := range _q.predicates { p(selector) } - for _, p := range wrq.order { + for _, p := range _q.order { p(selector) } - if offset := wrq.ctx.Offset; offset != nil { + if offset := _q.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := wrq.ctx.Limit; limit != nil { + if limit := _q.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -865,33 +865,33 @@ func (wrq *WorkflowRunQuery) sqlQuery(ctx context.Context) *sql.Selector { // ForUpdate locks the selected rows against concurrent updates, and prevent them from being // updated, deleted or "selected ... for update" by other sessions, until the transaction is // either committed or rolled-back. -func (wrq *WorkflowRunQuery) ForUpdate(opts ...sql.LockOption) *WorkflowRunQuery { - if wrq.driver.Dialect() == dialect.Postgres { - wrq.Unique(false) +func (_q *WorkflowRunQuery) ForUpdate(opts ...sql.LockOption) *WorkflowRunQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - wrq.modifiers = append(wrq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForUpdate(opts...) }) - return wrq + return _q } // ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock // on any rows that are read. Other sessions can read the rows, but cannot modify them // until your transaction commits. -func (wrq *WorkflowRunQuery) ForShare(opts ...sql.LockOption) *WorkflowRunQuery { - if wrq.driver.Dialect() == dialect.Postgres { - wrq.Unique(false) +func (_q *WorkflowRunQuery) ForShare(opts ...sql.LockOption) *WorkflowRunQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) } - wrq.modifiers = append(wrq.modifiers, func(s *sql.Selector) { + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { s.ForShare(opts...) }) - return wrq + return _q } // Modify adds a query modifier for attaching custom logic to queries. -func (wrq *WorkflowRunQuery) Modify(modifiers ...func(s *sql.Selector)) *WorkflowRunSelect { - wrq.modifiers = append(wrq.modifiers, modifiers...) - return wrq.Select() +func (_q *WorkflowRunQuery) Modify(modifiers ...func(s *sql.Selector)) *WorkflowRunSelect { + _q.modifiers = append(_q.modifiers, modifiers...) + return _q.Select() } // WorkflowRunGroupBy is the group-by builder for WorkflowRun entities. @@ -901,41 +901,41 @@ type WorkflowRunGroupBy struct { } // Aggregate adds the given aggregation functions to the group-by query. -func (wrgb *WorkflowRunGroupBy) Aggregate(fns ...AggregateFunc) *WorkflowRunGroupBy { - wrgb.fns = append(wrgb.fns, fns...) - return wrgb +func (_g *WorkflowRunGroupBy) Aggregate(fns ...AggregateFunc) *WorkflowRunGroupBy { + _g.fns = append(_g.fns, fns...) + return _g } // Scan applies the selector query and scans the result into the given value. 
-func (wrgb *WorkflowRunGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, wrgb.build.ctx, ent.OpQueryGroupBy) - if err := wrgb.build.prepareQuery(ctx); err != nil { +func (_g *WorkflowRunGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*WorkflowRunQuery, *WorkflowRunGroupBy](ctx, wrgb.build, wrgb, wrgb.build.inters, v) + return scanWithInterceptors[*WorkflowRunQuery, *WorkflowRunGroupBy](ctx, _g.build, _g, _g.build.inters, v) } -func (wrgb *WorkflowRunGroupBy) sqlScan(ctx context.Context, root *WorkflowRunQuery, v any) error { +func (_g *WorkflowRunGroupBy) sqlScan(ctx context.Context, root *WorkflowRunQuery, v any) error { selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(wrgb.fns)) - for _, fn := range wrgb.fns { + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { aggregation = append(aggregation, fn(selector)) } if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*wrgb.flds)+len(wrgb.fns)) - for _, f := range *wrgb.flds { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { columns = append(columns, selector.C(f)) } columns = append(columns, aggregation...) selector.Select(columns...) } - selector.GroupBy(selector.Columns(*wrgb.flds...)...) + selector.GroupBy(selector.Columns(*_g.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := wrgb.build.driver.Query(ctx, query, args, rows); err != nil { + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -949,27 +949,27 @@ type WorkflowRunSelect struct { } // Aggregate adds the given aggregation functions to the selector query. -func (wrs *WorkflowRunSelect) Aggregate(fns ...AggregateFunc) *WorkflowRunSelect { - wrs.fns = append(wrs.fns, fns...) - return wrs +func (_s *WorkflowRunSelect) Aggregate(fns ...AggregateFunc) *WorkflowRunSelect { + _s.fns = append(_s.fns, fns...) + return _s } // Scan applies the selector query and scans the result into the given value. -func (wrs *WorkflowRunSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, wrs.ctx, ent.OpQuerySelect) - if err := wrs.prepareQuery(ctx); err != nil { +func (_s *WorkflowRunSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { return err } - return scanWithInterceptors[*WorkflowRunQuery, *WorkflowRunSelect](ctx, wrs.WorkflowRunQuery, wrs, wrs.inters, v) + return scanWithInterceptors[*WorkflowRunQuery, *WorkflowRunSelect](ctx, _s.WorkflowRunQuery, _s, _s.inters, v) } -func (wrs *WorkflowRunSelect) sqlScan(ctx context.Context, root *WorkflowRunQuery, v any) error { +func (_s *WorkflowRunSelect) sqlScan(ctx context.Context, root *WorkflowRunQuery, v any) error { selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(wrs.fns)) - for _, fn := range wrs.fns { + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { aggregation = append(aggregation, fn(selector)) } - switch n := len(*wrs.selector.flds); { + switch n := len(*_s.selector.flds); { case n == 0 && len(aggregation) > 0: selector.Select(aggregation...) 
case n != 0 && len(aggregation) > 0: @@ -977,7 +977,7 @@ func (wrs *WorkflowRunSelect) sqlScan(ctx context.Context, root *WorkflowRunQuer } rows := &sql.Rows{} query, args := selector.Query() - if err := wrs.driver.Query(ctx, query, args, rows); err != nil { + if err := _s.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() @@ -985,7 +985,7 @@ func (wrs *WorkflowRunSelect) sqlScan(ctx context.Context, root *WorkflowRunQuer } // Modify adds a query modifier for attaching custom logic to queries. -func (wrs *WorkflowRunSelect) Modify(modifiers ...func(s *sql.Selector)) *WorkflowRunSelect { - wrs.modifiers = append(wrs.modifiers, modifiers...) - return wrs +func (_s *WorkflowRunSelect) Modify(modifiers ...func(s *sql.Selector)) *WorkflowRunSelect { + _s.modifiers = append(_s.modifiers, modifiers...) + return _s } diff --git a/app/controlplane/pkg/data/ent/workflowrun_update.go b/app/controlplane/pkg/data/ent/workflowrun_update.go index 9c192433b..df7d06bc1 100644 --- a/app/controlplane/pkg/data/ent/workflowrun_update.go +++ b/app/controlplane/pkg/data/ent/workflowrun_update.go @@ -31,335 +31,335 @@ type WorkflowRunUpdate struct { } // Where appends a list predicates to the WorkflowRunUpdate builder. -func (wru *WorkflowRunUpdate) Where(ps ...predicate.WorkflowRun) *WorkflowRunUpdate { - wru.mutation.Where(ps...) - return wru +func (_u *WorkflowRunUpdate) Where(ps ...predicate.WorkflowRun) *WorkflowRunUpdate { + _u.mutation.Where(ps...) + return _u } // SetFinishedAt sets the "finished_at" field. -func (wru *WorkflowRunUpdate) SetFinishedAt(t time.Time) *WorkflowRunUpdate { - wru.mutation.SetFinishedAt(t) - return wru +func (_u *WorkflowRunUpdate) SetFinishedAt(v time.Time) *WorkflowRunUpdate { + _u.mutation.SetFinishedAt(v) + return _u } // SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableFinishedAt(t *time.Time) *WorkflowRunUpdate { - if t != nil { - wru.SetFinishedAt(*t) +func (_u *WorkflowRunUpdate) SetNillableFinishedAt(v *time.Time) *WorkflowRunUpdate { + if v != nil { + _u.SetFinishedAt(*v) } - return wru + return _u } // ClearFinishedAt clears the value of the "finished_at" field. -func (wru *WorkflowRunUpdate) ClearFinishedAt() *WorkflowRunUpdate { - wru.mutation.ClearFinishedAt() - return wru +func (_u *WorkflowRunUpdate) ClearFinishedAt() *WorkflowRunUpdate { + _u.mutation.ClearFinishedAt() + return _u } // SetState sets the "state" field. -func (wru *WorkflowRunUpdate) SetState(brs biz.WorkflowRunStatus) *WorkflowRunUpdate { - wru.mutation.SetState(brs) - return wru +func (_u *WorkflowRunUpdate) SetState(v biz.WorkflowRunStatus) *WorkflowRunUpdate { + _u.mutation.SetState(v) + return _u } // SetNillableState sets the "state" field if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableState(brs *biz.WorkflowRunStatus) *WorkflowRunUpdate { - if brs != nil { - wru.SetState(*brs) +func (_u *WorkflowRunUpdate) SetNillableState(v *biz.WorkflowRunStatus) *WorkflowRunUpdate { + if v != nil { + _u.SetState(*v) } - return wru + return _u } // SetReason sets the "reason" field. -func (wru *WorkflowRunUpdate) SetReason(s string) *WorkflowRunUpdate { - wru.mutation.SetReason(s) - return wru +func (_u *WorkflowRunUpdate) SetReason(v string) *WorkflowRunUpdate { + _u.mutation.SetReason(v) + return _u } // SetNillableReason sets the "reason" field if the given value is not nil. 
-func (wru *WorkflowRunUpdate) SetNillableReason(s *string) *WorkflowRunUpdate { - if s != nil { - wru.SetReason(*s) +func (_u *WorkflowRunUpdate) SetNillableReason(v *string) *WorkflowRunUpdate { + if v != nil { + _u.SetReason(*v) } - return wru + return _u } // ClearReason clears the value of the "reason" field. -func (wru *WorkflowRunUpdate) ClearReason() *WorkflowRunUpdate { - wru.mutation.ClearReason() - return wru +func (_u *WorkflowRunUpdate) ClearReason() *WorkflowRunUpdate { + _u.mutation.ClearReason() + return _u } // SetRunURL sets the "run_url" field. -func (wru *WorkflowRunUpdate) SetRunURL(s string) *WorkflowRunUpdate { - wru.mutation.SetRunURL(s) - return wru +func (_u *WorkflowRunUpdate) SetRunURL(v string) *WorkflowRunUpdate { + _u.mutation.SetRunURL(v) + return _u } // SetNillableRunURL sets the "run_url" field if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableRunURL(s *string) *WorkflowRunUpdate { - if s != nil { - wru.SetRunURL(*s) +func (_u *WorkflowRunUpdate) SetNillableRunURL(v *string) *WorkflowRunUpdate { + if v != nil { + _u.SetRunURL(*v) } - return wru + return _u } // ClearRunURL clears the value of the "run_url" field. -func (wru *WorkflowRunUpdate) ClearRunURL() *WorkflowRunUpdate { - wru.mutation.ClearRunURL() - return wru +func (_u *WorkflowRunUpdate) ClearRunURL() *WorkflowRunUpdate { + _u.mutation.ClearRunURL() + return _u } // SetRunnerType sets the "runner_type" field. -func (wru *WorkflowRunUpdate) SetRunnerType(s string) *WorkflowRunUpdate { - wru.mutation.SetRunnerType(s) - return wru +func (_u *WorkflowRunUpdate) SetRunnerType(v string) *WorkflowRunUpdate { + _u.mutation.SetRunnerType(v) + return _u } // SetNillableRunnerType sets the "runner_type" field if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableRunnerType(s *string) *WorkflowRunUpdate { - if s != nil { - wru.SetRunnerType(*s) +func (_u *WorkflowRunUpdate) SetNillableRunnerType(v *string) *WorkflowRunUpdate { + if v != nil { + _u.SetRunnerType(*v) } - return wru + return _u } // ClearRunnerType clears the value of the "runner_type" field. -func (wru *WorkflowRunUpdate) ClearRunnerType() *WorkflowRunUpdate { - wru.mutation.ClearRunnerType() - return wru +func (_u *WorkflowRunUpdate) ClearRunnerType() *WorkflowRunUpdate { + _u.mutation.ClearRunnerType() + return _u } // SetAttestation sets the "attestation" field. -func (wru *WorkflowRunUpdate) SetAttestation(d *dsse.Envelope) *WorkflowRunUpdate { - wru.mutation.SetAttestation(d) - return wru +func (_u *WorkflowRunUpdate) SetAttestation(v *dsse.Envelope) *WorkflowRunUpdate { + _u.mutation.SetAttestation(v) + return _u } // ClearAttestation clears the value of the "attestation" field. -func (wru *WorkflowRunUpdate) ClearAttestation() *WorkflowRunUpdate { - wru.mutation.ClearAttestation() - return wru +func (_u *WorkflowRunUpdate) ClearAttestation() *WorkflowRunUpdate { + _u.mutation.ClearAttestation() + return _u } // SetAttestationDigest sets the "attestation_digest" field. -func (wru *WorkflowRunUpdate) SetAttestationDigest(s string) *WorkflowRunUpdate { - wru.mutation.SetAttestationDigest(s) - return wru +func (_u *WorkflowRunUpdate) SetAttestationDigest(v string) *WorkflowRunUpdate { + _u.mutation.SetAttestationDigest(v) + return _u } // SetNillableAttestationDigest sets the "attestation_digest" field if the given value is not nil. 
-func (wru *WorkflowRunUpdate) SetNillableAttestationDigest(s *string) *WorkflowRunUpdate { - if s != nil { - wru.SetAttestationDigest(*s) +func (_u *WorkflowRunUpdate) SetNillableAttestationDigest(v *string) *WorkflowRunUpdate { + if v != nil { + _u.SetAttestationDigest(*v) } - return wru + return _u } // ClearAttestationDigest clears the value of the "attestation_digest" field. -func (wru *WorkflowRunUpdate) ClearAttestationDigest() *WorkflowRunUpdate { - wru.mutation.ClearAttestationDigest() - return wru +func (_u *WorkflowRunUpdate) ClearAttestationDigest() *WorkflowRunUpdate { + _u.mutation.ClearAttestationDigest() + return _u } // SetAttestationState sets the "attestation_state" field. -func (wru *WorkflowRunUpdate) SetAttestationState(b []byte) *WorkflowRunUpdate { - wru.mutation.SetAttestationState(b) - return wru +func (_u *WorkflowRunUpdate) SetAttestationState(v []byte) *WorkflowRunUpdate { + _u.mutation.SetAttestationState(v) + return _u } // ClearAttestationState clears the value of the "attestation_state" field. -func (wru *WorkflowRunUpdate) ClearAttestationState() *WorkflowRunUpdate { - wru.mutation.ClearAttestationState() - return wru +func (_u *WorkflowRunUpdate) ClearAttestationState() *WorkflowRunUpdate { + _u.mutation.ClearAttestationState() + return _u } // SetContractRevisionUsed sets the "contract_revision_used" field. -func (wru *WorkflowRunUpdate) SetContractRevisionUsed(i int) *WorkflowRunUpdate { - wru.mutation.ResetContractRevisionUsed() - wru.mutation.SetContractRevisionUsed(i) - return wru +func (_u *WorkflowRunUpdate) SetContractRevisionUsed(v int) *WorkflowRunUpdate { + _u.mutation.ResetContractRevisionUsed() + _u.mutation.SetContractRevisionUsed(v) + return _u } // SetNillableContractRevisionUsed sets the "contract_revision_used" field if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableContractRevisionUsed(i *int) *WorkflowRunUpdate { - if i != nil { - wru.SetContractRevisionUsed(*i) +func (_u *WorkflowRunUpdate) SetNillableContractRevisionUsed(v *int) *WorkflowRunUpdate { + if v != nil { + _u.SetContractRevisionUsed(*v) } - return wru + return _u } -// AddContractRevisionUsed adds i to the "contract_revision_used" field. -func (wru *WorkflowRunUpdate) AddContractRevisionUsed(i int) *WorkflowRunUpdate { - wru.mutation.AddContractRevisionUsed(i) - return wru +// AddContractRevisionUsed adds value to the "contract_revision_used" field. +func (_u *WorkflowRunUpdate) AddContractRevisionUsed(v int) *WorkflowRunUpdate { + _u.mutation.AddContractRevisionUsed(v) + return _u } // SetContractRevisionLatest sets the "contract_revision_latest" field. -func (wru *WorkflowRunUpdate) SetContractRevisionLatest(i int) *WorkflowRunUpdate { - wru.mutation.ResetContractRevisionLatest() - wru.mutation.SetContractRevisionLatest(i) - return wru +func (_u *WorkflowRunUpdate) SetContractRevisionLatest(v int) *WorkflowRunUpdate { + _u.mutation.ResetContractRevisionLatest() + _u.mutation.SetContractRevisionLatest(v) + return _u } // SetNillableContractRevisionLatest sets the "contract_revision_latest" field if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableContractRevisionLatest(i *int) *WorkflowRunUpdate { - if i != nil { - wru.SetContractRevisionLatest(*i) +func (_u *WorkflowRunUpdate) SetNillableContractRevisionLatest(v *int) *WorkflowRunUpdate { + if v != nil { + _u.SetContractRevisionLatest(*v) } - return wru + return _u } -// AddContractRevisionLatest adds i to the "contract_revision_latest" field. 
-func (wru *WorkflowRunUpdate) AddContractRevisionLatest(i int) *WorkflowRunUpdate { - wru.mutation.AddContractRevisionLatest(i) - return wru +// AddContractRevisionLatest adds value to the "contract_revision_latest" field. +func (_u *WorkflowRunUpdate) AddContractRevisionLatest(v int) *WorkflowRunUpdate { + _u.mutation.AddContractRevisionLatest(v) + return _u } // SetVersionID sets the "version_id" field. -func (wru *WorkflowRunUpdate) SetVersionID(u uuid.UUID) *WorkflowRunUpdate { - wru.mutation.SetVersionID(u) - return wru +func (_u *WorkflowRunUpdate) SetVersionID(v uuid.UUID) *WorkflowRunUpdate { + _u.mutation.SetVersionID(v) + return _u } // SetNillableVersionID sets the "version_id" field if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableVersionID(u *uuid.UUID) *WorkflowRunUpdate { - if u != nil { - wru.SetVersionID(*u) +func (_u *WorkflowRunUpdate) SetNillableVersionID(v *uuid.UUID) *WorkflowRunUpdate { + if v != nil { + _u.SetVersionID(*v) } - return wru + return _u } // SetHasPolicyViolations sets the "has_policy_violations" field. -func (wru *WorkflowRunUpdate) SetHasPolicyViolations(b bool) *WorkflowRunUpdate { - wru.mutation.SetHasPolicyViolations(b) - return wru +func (_u *WorkflowRunUpdate) SetHasPolicyViolations(v bool) *WorkflowRunUpdate { + _u.mutation.SetHasPolicyViolations(v) + return _u } // SetNillableHasPolicyViolations sets the "has_policy_violations" field if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableHasPolicyViolations(b *bool) *WorkflowRunUpdate { - if b != nil { - wru.SetHasPolicyViolations(*b) +func (_u *WorkflowRunUpdate) SetNillableHasPolicyViolations(v *bool) *WorkflowRunUpdate { + if v != nil { + _u.SetHasPolicyViolations(*v) } - return wru + return _u } // ClearHasPolicyViolations clears the value of the "has_policy_violations" field. -func (wru *WorkflowRunUpdate) ClearHasPolicyViolations() *WorkflowRunUpdate { - wru.mutation.ClearHasPolicyViolations() - return wru +func (_u *WorkflowRunUpdate) ClearHasPolicyViolations() *WorkflowRunUpdate { + _u.mutation.ClearHasPolicyViolations() + return _u } // SetContractVersionID sets the "contract_version" edge to the WorkflowContractVersion entity by ID. -func (wru *WorkflowRunUpdate) SetContractVersionID(id uuid.UUID) *WorkflowRunUpdate { - wru.mutation.SetContractVersionID(id) - return wru +func (_u *WorkflowRunUpdate) SetContractVersionID(id uuid.UUID) *WorkflowRunUpdate { + _u.mutation.SetContractVersionID(id) + return _u } // SetNillableContractVersionID sets the "contract_version" edge to the WorkflowContractVersion entity by ID if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableContractVersionID(id *uuid.UUID) *WorkflowRunUpdate { +func (_u *WorkflowRunUpdate) SetNillableContractVersionID(id *uuid.UUID) *WorkflowRunUpdate { if id != nil { - wru = wru.SetContractVersionID(*id) + _u = _u.SetContractVersionID(*id) } - return wru + return _u } // SetContractVersion sets the "contract_version" edge to the WorkflowContractVersion entity. -func (wru *WorkflowRunUpdate) SetContractVersion(w *WorkflowContractVersion) *WorkflowRunUpdate { - return wru.SetContractVersionID(w.ID) +func (_u *WorkflowRunUpdate) SetContractVersion(v *WorkflowContractVersion) *WorkflowRunUpdate { + return _u.SetContractVersionID(v.ID) } // AddCasBackendIDs adds the "cas_backends" edge to the CASBackend entity by IDs. -func (wru *WorkflowRunUpdate) AddCasBackendIDs(ids ...uuid.UUID) *WorkflowRunUpdate { - wru.mutation.AddCasBackendIDs(ids...) 
- return wru +func (_u *WorkflowRunUpdate) AddCasBackendIDs(ids ...uuid.UUID) *WorkflowRunUpdate { + _u.mutation.AddCasBackendIDs(ids...) + return _u } // AddCasBackends adds the "cas_backends" edges to the CASBackend entity. -func (wru *WorkflowRunUpdate) AddCasBackends(c ...*CASBackend) *WorkflowRunUpdate { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_u *WorkflowRunUpdate) AddCasBackends(v ...*CASBackend) *WorkflowRunUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wru.AddCasBackendIDs(ids...) + return _u.AddCasBackendIDs(ids...) } // SetVersion sets the "version" edge to the ProjectVersion entity. -func (wru *WorkflowRunUpdate) SetVersion(p *ProjectVersion) *WorkflowRunUpdate { - return wru.SetVersionID(p.ID) +func (_u *WorkflowRunUpdate) SetVersion(v *ProjectVersion) *WorkflowRunUpdate { + return _u.SetVersionID(v.ID) } // SetAttestationBundleID sets the "attestation_bundle" edge to the Attestation entity by ID. -func (wru *WorkflowRunUpdate) SetAttestationBundleID(id uuid.UUID) *WorkflowRunUpdate { - wru.mutation.SetAttestationBundleID(id) - return wru +func (_u *WorkflowRunUpdate) SetAttestationBundleID(id uuid.UUID) *WorkflowRunUpdate { + _u.mutation.SetAttestationBundleID(id) + return _u } // SetNillableAttestationBundleID sets the "attestation_bundle" edge to the Attestation entity by ID if the given value is not nil. -func (wru *WorkflowRunUpdate) SetNillableAttestationBundleID(id *uuid.UUID) *WorkflowRunUpdate { +func (_u *WorkflowRunUpdate) SetNillableAttestationBundleID(id *uuid.UUID) *WorkflowRunUpdate { if id != nil { - wru = wru.SetAttestationBundleID(*id) + _u = _u.SetAttestationBundleID(*id) } - return wru + return _u } // SetAttestationBundle sets the "attestation_bundle" edge to the Attestation entity. -func (wru *WorkflowRunUpdate) SetAttestationBundle(a *Attestation) *WorkflowRunUpdate { - return wru.SetAttestationBundleID(a.ID) +func (_u *WorkflowRunUpdate) SetAttestationBundle(v *Attestation) *WorkflowRunUpdate { + return _u.SetAttestationBundleID(v.ID) } // Mutation returns the WorkflowRunMutation object of the builder. -func (wru *WorkflowRunUpdate) Mutation() *WorkflowRunMutation { - return wru.mutation +func (_u *WorkflowRunUpdate) Mutation() *WorkflowRunMutation { + return _u.mutation } // ClearContractVersion clears the "contract_version" edge to the WorkflowContractVersion entity. -func (wru *WorkflowRunUpdate) ClearContractVersion() *WorkflowRunUpdate { - wru.mutation.ClearContractVersion() - return wru +func (_u *WorkflowRunUpdate) ClearContractVersion() *WorkflowRunUpdate { + _u.mutation.ClearContractVersion() + return _u } // ClearCasBackends clears all "cas_backends" edges to the CASBackend entity. -func (wru *WorkflowRunUpdate) ClearCasBackends() *WorkflowRunUpdate { - wru.mutation.ClearCasBackends() - return wru +func (_u *WorkflowRunUpdate) ClearCasBackends() *WorkflowRunUpdate { + _u.mutation.ClearCasBackends() + return _u } // RemoveCasBackendIDs removes the "cas_backends" edge to CASBackend entities by IDs. -func (wru *WorkflowRunUpdate) RemoveCasBackendIDs(ids ...uuid.UUID) *WorkflowRunUpdate { - wru.mutation.RemoveCasBackendIDs(ids...) - return wru +func (_u *WorkflowRunUpdate) RemoveCasBackendIDs(ids ...uuid.UUID) *WorkflowRunUpdate { + _u.mutation.RemoveCasBackendIDs(ids...) + return _u } // RemoveCasBackends removes "cas_backends" edges to CASBackend entities. 
-func (wru *WorkflowRunUpdate) RemoveCasBackends(c ...*CASBackend) *WorkflowRunUpdate { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_u *WorkflowRunUpdate) RemoveCasBackends(v ...*CASBackend) *WorkflowRunUpdate { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wru.RemoveCasBackendIDs(ids...) + return _u.RemoveCasBackendIDs(ids...) } // ClearVersion clears the "version" edge to the ProjectVersion entity. -func (wru *WorkflowRunUpdate) ClearVersion() *WorkflowRunUpdate { - wru.mutation.ClearVersion() - return wru +func (_u *WorkflowRunUpdate) ClearVersion() *WorkflowRunUpdate { + _u.mutation.ClearVersion() + return _u } // ClearAttestationBundle clears the "attestation_bundle" edge to the Attestation entity. -func (wru *WorkflowRunUpdate) ClearAttestationBundle() *WorkflowRunUpdate { - wru.mutation.ClearAttestationBundle() - return wru +func (_u *WorkflowRunUpdate) ClearAttestationBundle() *WorkflowRunUpdate { + _u.mutation.ClearAttestationBundle() + return _u } // Save executes the query and returns the number of nodes affected by the update operation. -func (wru *WorkflowRunUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, wru.sqlSave, wru.mutation, wru.hooks) +func (_u *WorkflowRunUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. -func (wru *WorkflowRunUpdate) SaveX(ctx context.Context) int { - affected, err := wru.Save(ctx) +func (_u *WorkflowRunUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) if err != nil { panic(err) } @@ -367,116 +367,116 @@ func (wru *WorkflowRunUpdate) SaveX(ctx context.Context) int { } // Exec executes the query. -func (wru *WorkflowRunUpdate) Exec(ctx context.Context) error { - _, err := wru.Save(ctx) +func (_u *WorkflowRunUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wru *WorkflowRunUpdate) ExecX(ctx context.Context) { - if err := wru.Exec(ctx); err != nil { +func (_u *WorkflowRunUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (wru *WorkflowRunUpdate) check() error { - if v, ok := wru.mutation.State(); ok { +func (_u *WorkflowRunUpdate) check() error { + if v, ok := _u.mutation.State(); ok { if err := workflowrun.StateValidator(v); err != nil { return &ValidationError{Name: "state", err: fmt.Errorf(`ent: validator failed for field "WorkflowRun.state": %w`, err)} } } - if wru.mutation.WorkflowCleared() && len(wru.mutation.WorkflowIDs()) > 0 { + if _u.mutation.WorkflowCleared() && len(_u.mutation.WorkflowIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "WorkflowRun.workflow"`) } - if wru.mutation.VersionCleared() && len(wru.mutation.VersionIDs()) > 0 { + if _u.mutation.VersionCleared() && len(_u.mutation.VersionIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "WorkflowRun.version"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (wru *WorkflowRunUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowRunUpdate { - wru.modifiers = append(wru.modifiers, modifiers...) 
- return wru +func (_u *WorkflowRunUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowRunUpdate { + _u.modifiers = append(_u.modifiers, modifiers...) + return _u } -func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := wru.check(); err != nil { - return n, err +func (_u *WorkflowRunUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err } _spec := sqlgraph.NewUpdateSpec(workflowrun.Table, workflowrun.Columns, sqlgraph.NewFieldSpec(workflowrun.FieldID, field.TypeUUID)) - if ps := wru.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := wru.mutation.FinishedAt(); ok { + if value, ok := _u.mutation.FinishedAt(); ok { _spec.SetField(workflowrun.FieldFinishedAt, field.TypeTime, value) } - if wru.mutation.FinishedAtCleared() { + if _u.mutation.FinishedAtCleared() { _spec.ClearField(workflowrun.FieldFinishedAt, field.TypeTime) } - if value, ok := wru.mutation.State(); ok { + if value, ok := _u.mutation.State(); ok { _spec.SetField(workflowrun.FieldState, field.TypeEnum, value) } - if value, ok := wru.mutation.Reason(); ok { + if value, ok := _u.mutation.Reason(); ok { _spec.SetField(workflowrun.FieldReason, field.TypeString, value) } - if wru.mutation.ReasonCleared() { + if _u.mutation.ReasonCleared() { _spec.ClearField(workflowrun.FieldReason, field.TypeString) } - if value, ok := wru.mutation.RunURL(); ok { + if value, ok := _u.mutation.RunURL(); ok { _spec.SetField(workflowrun.FieldRunURL, field.TypeString, value) } - if wru.mutation.RunURLCleared() { + if _u.mutation.RunURLCleared() { _spec.ClearField(workflowrun.FieldRunURL, field.TypeString) } - if value, ok := wru.mutation.RunnerType(); ok { + if value, ok := _u.mutation.RunnerType(); ok { _spec.SetField(workflowrun.FieldRunnerType, field.TypeString, value) } - if wru.mutation.RunnerTypeCleared() { + if _u.mutation.RunnerTypeCleared() { _spec.ClearField(workflowrun.FieldRunnerType, field.TypeString) } - if value, ok := wru.mutation.Attestation(); ok { + if value, ok := _u.mutation.Attestation(); ok { _spec.SetField(workflowrun.FieldAttestation, field.TypeJSON, value) } - if wru.mutation.AttestationCleared() { + if _u.mutation.AttestationCleared() { _spec.ClearField(workflowrun.FieldAttestation, field.TypeJSON) } - if value, ok := wru.mutation.AttestationDigest(); ok { + if value, ok := _u.mutation.AttestationDigest(); ok { _spec.SetField(workflowrun.FieldAttestationDigest, field.TypeString, value) } - if wru.mutation.AttestationDigestCleared() { + if _u.mutation.AttestationDigestCleared() { _spec.ClearField(workflowrun.FieldAttestationDigest, field.TypeString) } - if value, ok := wru.mutation.AttestationState(); ok { + if value, ok := _u.mutation.AttestationState(); ok { _spec.SetField(workflowrun.FieldAttestationState, field.TypeBytes, value) } - if wru.mutation.AttestationStateCleared() { + if _u.mutation.AttestationStateCleared() { _spec.ClearField(workflowrun.FieldAttestationState, field.TypeBytes) } - if value, ok := wru.mutation.ContractRevisionUsed(); ok { + if value, ok := _u.mutation.ContractRevisionUsed(); ok { _spec.SetField(workflowrun.FieldContractRevisionUsed, field.TypeInt, value) } - if value, ok := wru.mutation.AddedContractRevisionUsed(); ok { + if value, ok := _u.mutation.AddedContractRevisionUsed(); ok { _spec.AddField(workflowrun.FieldContractRevisionUsed, 
field.TypeInt, value) } - if value, ok := wru.mutation.ContractRevisionLatest(); ok { + if value, ok := _u.mutation.ContractRevisionLatest(); ok { _spec.SetField(workflowrun.FieldContractRevisionLatest, field.TypeInt, value) } - if value, ok := wru.mutation.AddedContractRevisionLatest(); ok { + if value, ok := _u.mutation.AddedContractRevisionLatest(); ok { _spec.AddField(workflowrun.FieldContractRevisionLatest, field.TypeInt, value) } - if value, ok := wru.mutation.HasPolicyViolations(); ok { + if value, ok := _u.mutation.HasPolicyViolations(); ok { _spec.SetField(workflowrun.FieldHasPolicyViolations, field.TypeBool, value) } - if wru.mutation.HasPolicyViolationsCleared() { + if _u.mutation.HasPolicyViolationsCleared() { _spec.ClearField(workflowrun.FieldHasPolicyViolations, field.TypeBool) } - if wru.mutation.ContractVersionCleared() { + if _u.mutation.ContractVersionCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -489,7 +489,7 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wru.mutation.ContractVersionIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ContractVersionIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -505,7 +505,7 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wru.mutation.CasBackendsCleared() { + if _u.mutation.CasBackendsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -518,7 +518,7 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wru.mutation.RemovedCasBackendsIDs(); len(nodes) > 0 && !wru.mutation.CasBackendsCleared() { + if nodes := _u.mutation.RemovedCasBackendsIDs(); len(nodes) > 0 && !_u.mutation.CasBackendsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -534,7 +534,7 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wru.mutation.CasBackendsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.CasBackendsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -550,7 +550,7 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wru.mutation.VersionCleared() { + if _u.mutation.VersionCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -563,7 +563,7 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wru.mutation.VersionIDs(); len(nodes) > 0 { + if nodes := _u.mutation.VersionIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -579,7 +579,7 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wru.mutation.AttestationBundleCleared() { + if _u.mutation.AttestationBundleCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2O, Inverse: false, @@ -592,7 +592,7 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wru.mutation.AttestationBundleIDs(); len(nodes) > 0 { + if nodes := 
_u.mutation.AttestationBundleIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2O, Inverse: false, @@ -608,8 +608,8 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(wru.modifiers...) - if n, err = sqlgraph.UpdateNodes(ctx, wru.driver, _spec); err != nil { + _spec.AddModifiers(_u.modifiers...) + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{workflowrun.Label} } else if sqlgraph.IsConstraintError(err) { @@ -617,8 +617,8 @@ func (wru *WorkflowRunUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } - wru.mutation.done = true - return n, nil + _u.mutation.done = true + return _node, nil } // WorkflowRunUpdateOne is the builder for updating a single WorkflowRun entity. @@ -631,342 +631,342 @@ type WorkflowRunUpdateOne struct { } // SetFinishedAt sets the "finished_at" field. -func (wruo *WorkflowRunUpdateOne) SetFinishedAt(t time.Time) *WorkflowRunUpdateOne { - wruo.mutation.SetFinishedAt(t) - return wruo +func (_u *WorkflowRunUpdateOne) SetFinishedAt(v time.Time) *WorkflowRunUpdateOne { + _u.mutation.SetFinishedAt(v) + return _u } // SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableFinishedAt(t *time.Time) *WorkflowRunUpdateOne { - if t != nil { - wruo.SetFinishedAt(*t) +func (_u *WorkflowRunUpdateOne) SetNillableFinishedAt(v *time.Time) *WorkflowRunUpdateOne { + if v != nil { + _u.SetFinishedAt(*v) } - return wruo + return _u } // ClearFinishedAt clears the value of the "finished_at" field. -func (wruo *WorkflowRunUpdateOne) ClearFinishedAt() *WorkflowRunUpdateOne { - wruo.mutation.ClearFinishedAt() - return wruo +func (_u *WorkflowRunUpdateOne) ClearFinishedAt() *WorkflowRunUpdateOne { + _u.mutation.ClearFinishedAt() + return _u } // SetState sets the "state" field. -func (wruo *WorkflowRunUpdateOne) SetState(brs biz.WorkflowRunStatus) *WorkflowRunUpdateOne { - wruo.mutation.SetState(brs) - return wruo +func (_u *WorkflowRunUpdateOne) SetState(v biz.WorkflowRunStatus) *WorkflowRunUpdateOne { + _u.mutation.SetState(v) + return _u } // SetNillableState sets the "state" field if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableState(brs *biz.WorkflowRunStatus) *WorkflowRunUpdateOne { - if brs != nil { - wruo.SetState(*brs) +func (_u *WorkflowRunUpdateOne) SetNillableState(v *biz.WorkflowRunStatus) *WorkflowRunUpdateOne { + if v != nil { + _u.SetState(*v) } - return wruo + return _u } // SetReason sets the "reason" field. -func (wruo *WorkflowRunUpdateOne) SetReason(s string) *WorkflowRunUpdateOne { - wruo.mutation.SetReason(s) - return wruo +func (_u *WorkflowRunUpdateOne) SetReason(v string) *WorkflowRunUpdateOne { + _u.mutation.SetReason(v) + return _u } // SetNillableReason sets the "reason" field if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableReason(s *string) *WorkflowRunUpdateOne { - if s != nil { - wruo.SetReason(*s) +func (_u *WorkflowRunUpdateOne) SetNillableReason(v *string) *WorkflowRunUpdateOne { + if v != nil { + _u.SetReason(*v) } - return wruo + return _u } // ClearReason clears the value of the "reason" field. 
-func (wruo *WorkflowRunUpdateOne) ClearReason() *WorkflowRunUpdateOne { - wruo.mutation.ClearReason() - return wruo +func (_u *WorkflowRunUpdateOne) ClearReason() *WorkflowRunUpdateOne { + _u.mutation.ClearReason() + return _u } // SetRunURL sets the "run_url" field. -func (wruo *WorkflowRunUpdateOne) SetRunURL(s string) *WorkflowRunUpdateOne { - wruo.mutation.SetRunURL(s) - return wruo +func (_u *WorkflowRunUpdateOne) SetRunURL(v string) *WorkflowRunUpdateOne { + _u.mutation.SetRunURL(v) + return _u } // SetNillableRunURL sets the "run_url" field if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableRunURL(s *string) *WorkflowRunUpdateOne { - if s != nil { - wruo.SetRunURL(*s) +func (_u *WorkflowRunUpdateOne) SetNillableRunURL(v *string) *WorkflowRunUpdateOne { + if v != nil { + _u.SetRunURL(*v) } - return wruo + return _u } // ClearRunURL clears the value of the "run_url" field. -func (wruo *WorkflowRunUpdateOne) ClearRunURL() *WorkflowRunUpdateOne { - wruo.mutation.ClearRunURL() - return wruo +func (_u *WorkflowRunUpdateOne) ClearRunURL() *WorkflowRunUpdateOne { + _u.mutation.ClearRunURL() + return _u } // SetRunnerType sets the "runner_type" field. -func (wruo *WorkflowRunUpdateOne) SetRunnerType(s string) *WorkflowRunUpdateOne { - wruo.mutation.SetRunnerType(s) - return wruo +func (_u *WorkflowRunUpdateOne) SetRunnerType(v string) *WorkflowRunUpdateOne { + _u.mutation.SetRunnerType(v) + return _u } // SetNillableRunnerType sets the "runner_type" field if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableRunnerType(s *string) *WorkflowRunUpdateOne { - if s != nil { - wruo.SetRunnerType(*s) +func (_u *WorkflowRunUpdateOne) SetNillableRunnerType(v *string) *WorkflowRunUpdateOne { + if v != nil { + _u.SetRunnerType(*v) } - return wruo + return _u } // ClearRunnerType clears the value of the "runner_type" field. -func (wruo *WorkflowRunUpdateOne) ClearRunnerType() *WorkflowRunUpdateOne { - wruo.mutation.ClearRunnerType() - return wruo +func (_u *WorkflowRunUpdateOne) ClearRunnerType() *WorkflowRunUpdateOne { + _u.mutation.ClearRunnerType() + return _u } // SetAttestation sets the "attestation" field. -func (wruo *WorkflowRunUpdateOne) SetAttestation(d *dsse.Envelope) *WorkflowRunUpdateOne { - wruo.mutation.SetAttestation(d) - return wruo +func (_u *WorkflowRunUpdateOne) SetAttestation(v *dsse.Envelope) *WorkflowRunUpdateOne { + _u.mutation.SetAttestation(v) + return _u } // ClearAttestation clears the value of the "attestation" field. -func (wruo *WorkflowRunUpdateOne) ClearAttestation() *WorkflowRunUpdateOne { - wruo.mutation.ClearAttestation() - return wruo +func (_u *WorkflowRunUpdateOne) ClearAttestation() *WorkflowRunUpdateOne { + _u.mutation.ClearAttestation() + return _u } // SetAttestationDigest sets the "attestation_digest" field. -func (wruo *WorkflowRunUpdateOne) SetAttestationDigest(s string) *WorkflowRunUpdateOne { - wruo.mutation.SetAttestationDigest(s) - return wruo +func (_u *WorkflowRunUpdateOne) SetAttestationDigest(v string) *WorkflowRunUpdateOne { + _u.mutation.SetAttestationDigest(v) + return _u } // SetNillableAttestationDigest sets the "attestation_digest" field if the given value is not nil. 
-func (wruo *WorkflowRunUpdateOne) SetNillableAttestationDigest(s *string) *WorkflowRunUpdateOne { - if s != nil { - wruo.SetAttestationDigest(*s) +func (_u *WorkflowRunUpdateOne) SetNillableAttestationDigest(v *string) *WorkflowRunUpdateOne { + if v != nil { + _u.SetAttestationDigest(*v) } - return wruo + return _u } // ClearAttestationDigest clears the value of the "attestation_digest" field. -func (wruo *WorkflowRunUpdateOne) ClearAttestationDigest() *WorkflowRunUpdateOne { - wruo.mutation.ClearAttestationDigest() - return wruo +func (_u *WorkflowRunUpdateOne) ClearAttestationDigest() *WorkflowRunUpdateOne { + _u.mutation.ClearAttestationDigest() + return _u } // SetAttestationState sets the "attestation_state" field. -func (wruo *WorkflowRunUpdateOne) SetAttestationState(b []byte) *WorkflowRunUpdateOne { - wruo.mutation.SetAttestationState(b) - return wruo +func (_u *WorkflowRunUpdateOne) SetAttestationState(v []byte) *WorkflowRunUpdateOne { + _u.mutation.SetAttestationState(v) + return _u } // ClearAttestationState clears the value of the "attestation_state" field. -func (wruo *WorkflowRunUpdateOne) ClearAttestationState() *WorkflowRunUpdateOne { - wruo.mutation.ClearAttestationState() - return wruo +func (_u *WorkflowRunUpdateOne) ClearAttestationState() *WorkflowRunUpdateOne { + _u.mutation.ClearAttestationState() + return _u } // SetContractRevisionUsed sets the "contract_revision_used" field. -func (wruo *WorkflowRunUpdateOne) SetContractRevisionUsed(i int) *WorkflowRunUpdateOne { - wruo.mutation.ResetContractRevisionUsed() - wruo.mutation.SetContractRevisionUsed(i) - return wruo +func (_u *WorkflowRunUpdateOne) SetContractRevisionUsed(v int) *WorkflowRunUpdateOne { + _u.mutation.ResetContractRevisionUsed() + _u.mutation.SetContractRevisionUsed(v) + return _u } // SetNillableContractRevisionUsed sets the "contract_revision_used" field if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableContractRevisionUsed(i *int) *WorkflowRunUpdateOne { - if i != nil { - wruo.SetContractRevisionUsed(*i) +func (_u *WorkflowRunUpdateOne) SetNillableContractRevisionUsed(v *int) *WorkflowRunUpdateOne { + if v != nil { + _u.SetContractRevisionUsed(*v) } - return wruo + return _u } -// AddContractRevisionUsed adds i to the "contract_revision_used" field. -func (wruo *WorkflowRunUpdateOne) AddContractRevisionUsed(i int) *WorkflowRunUpdateOne { - wruo.mutation.AddContractRevisionUsed(i) - return wruo +// AddContractRevisionUsed adds value to the "contract_revision_used" field. +func (_u *WorkflowRunUpdateOne) AddContractRevisionUsed(v int) *WorkflowRunUpdateOne { + _u.mutation.AddContractRevisionUsed(v) + return _u } // SetContractRevisionLatest sets the "contract_revision_latest" field. -func (wruo *WorkflowRunUpdateOne) SetContractRevisionLatest(i int) *WorkflowRunUpdateOne { - wruo.mutation.ResetContractRevisionLatest() - wruo.mutation.SetContractRevisionLatest(i) - return wruo +func (_u *WorkflowRunUpdateOne) SetContractRevisionLatest(v int) *WorkflowRunUpdateOne { + _u.mutation.ResetContractRevisionLatest() + _u.mutation.SetContractRevisionLatest(v) + return _u } // SetNillableContractRevisionLatest sets the "contract_revision_latest" field if the given value is not nil. 
-func (wruo *WorkflowRunUpdateOne) SetNillableContractRevisionLatest(i *int) *WorkflowRunUpdateOne { - if i != nil { - wruo.SetContractRevisionLatest(*i) +func (_u *WorkflowRunUpdateOne) SetNillableContractRevisionLatest(v *int) *WorkflowRunUpdateOne { + if v != nil { + _u.SetContractRevisionLatest(*v) } - return wruo + return _u } -// AddContractRevisionLatest adds i to the "contract_revision_latest" field. -func (wruo *WorkflowRunUpdateOne) AddContractRevisionLatest(i int) *WorkflowRunUpdateOne { - wruo.mutation.AddContractRevisionLatest(i) - return wruo +// AddContractRevisionLatest adds value to the "contract_revision_latest" field. +func (_u *WorkflowRunUpdateOne) AddContractRevisionLatest(v int) *WorkflowRunUpdateOne { + _u.mutation.AddContractRevisionLatest(v) + return _u } // SetVersionID sets the "version_id" field. -func (wruo *WorkflowRunUpdateOne) SetVersionID(u uuid.UUID) *WorkflowRunUpdateOne { - wruo.mutation.SetVersionID(u) - return wruo +func (_u *WorkflowRunUpdateOne) SetVersionID(v uuid.UUID) *WorkflowRunUpdateOne { + _u.mutation.SetVersionID(v) + return _u } // SetNillableVersionID sets the "version_id" field if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableVersionID(u *uuid.UUID) *WorkflowRunUpdateOne { - if u != nil { - wruo.SetVersionID(*u) +func (_u *WorkflowRunUpdateOne) SetNillableVersionID(v *uuid.UUID) *WorkflowRunUpdateOne { + if v != nil { + _u.SetVersionID(*v) } - return wruo + return _u } // SetHasPolicyViolations sets the "has_policy_violations" field. -func (wruo *WorkflowRunUpdateOne) SetHasPolicyViolations(b bool) *WorkflowRunUpdateOne { - wruo.mutation.SetHasPolicyViolations(b) - return wruo +func (_u *WorkflowRunUpdateOne) SetHasPolicyViolations(v bool) *WorkflowRunUpdateOne { + _u.mutation.SetHasPolicyViolations(v) + return _u } // SetNillableHasPolicyViolations sets the "has_policy_violations" field if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableHasPolicyViolations(b *bool) *WorkflowRunUpdateOne { - if b != nil { - wruo.SetHasPolicyViolations(*b) +func (_u *WorkflowRunUpdateOne) SetNillableHasPolicyViolations(v *bool) *WorkflowRunUpdateOne { + if v != nil { + _u.SetHasPolicyViolations(*v) } - return wruo + return _u } // ClearHasPolicyViolations clears the value of the "has_policy_violations" field. -func (wruo *WorkflowRunUpdateOne) ClearHasPolicyViolations() *WorkflowRunUpdateOne { - wruo.mutation.ClearHasPolicyViolations() - return wruo +func (_u *WorkflowRunUpdateOne) ClearHasPolicyViolations() *WorkflowRunUpdateOne { + _u.mutation.ClearHasPolicyViolations() + return _u } // SetContractVersionID sets the "contract_version" edge to the WorkflowContractVersion entity by ID. -func (wruo *WorkflowRunUpdateOne) SetContractVersionID(id uuid.UUID) *WorkflowRunUpdateOne { - wruo.mutation.SetContractVersionID(id) - return wruo +func (_u *WorkflowRunUpdateOne) SetContractVersionID(id uuid.UUID) *WorkflowRunUpdateOne { + _u.mutation.SetContractVersionID(id) + return _u } // SetNillableContractVersionID sets the "contract_version" edge to the WorkflowContractVersion entity by ID if the given value is not nil. 
-func (wruo *WorkflowRunUpdateOne) SetNillableContractVersionID(id *uuid.UUID) *WorkflowRunUpdateOne { +func (_u *WorkflowRunUpdateOne) SetNillableContractVersionID(id *uuid.UUID) *WorkflowRunUpdateOne { if id != nil { - wruo = wruo.SetContractVersionID(*id) + _u = _u.SetContractVersionID(*id) } - return wruo + return _u } // SetContractVersion sets the "contract_version" edge to the WorkflowContractVersion entity. -func (wruo *WorkflowRunUpdateOne) SetContractVersion(w *WorkflowContractVersion) *WorkflowRunUpdateOne { - return wruo.SetContractVersionID(w.ID) +func (_u *WorkflowRunUpdateOne) SetContractVersion(v *WorkflowContractVersion) *WorkflowRunUpdateOne { + return _u.SetContractVersionID(v.ID) } // AddCasBackendIDs adds the "cas_backends" edge to the CASBackend entity by IDs. -func (wruo *WorkflowRunUpdateOne) AddCasBackendIDs(ids ...uuid.UUID) *WorkflowRunUpdateOne { - wruo.mutation.AddCasBackendIDs(ids...) - return wruo +func (_u *WorkflowRunUpdateOne) AddCasBackendIDs(ids ...uuid.UUID) *WorkflowRunUpdateOne { + _u.mutation.AddCasBackendIDs(ids...) + return _u } // AddCasBackends adds the "cas_backends" edges to the CASBackend entity. -func (wruo *WorkflowRunUpdateOne) AddCasBackends(c ...*CASBackend) *WorkflowRunUpdateOne { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_u *WorkflowRunUpdateOne) AddCasBackends(v ...*CASBackend) *WorkflowRunUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wruo.AddCasBackendIDs(ids...) + return _u.AddCasBackendIDs(ids...) } // SetVersion sets the "version" edge to the ProjectVersion entity. -func (wruo *WorkflowRunUpdateOne) SetVersion(p *ProjectVersion) *WorkflowRunUpdateOne { - return wruo.SetVersionID(p.ID) +func (_u *WorkflowRunUpdateOne) SetVersion(v *ProjectVersion) *WorkflowRunUpdateOne { + return _u.SetVersionID(v.ID) } // SetAttestationBundleID sets the "attestation_bundle" edge to the Attestation entity by ID. -func (wruo *WorkflowRunUpdateOne) SetAttestationBundleID(id uuid.UUID) *WorkflowRunUpdateOne { - wruo.mutation.SetAttestationBundleID(id) - return wruo +func (_u *WorkflowRunUpdateOne) SetAttestationBundleID(id uuid.UUID) *WorkflowRunUpdateOne { + _u.mutation.SetAttestationBundleID(id) + return _u } // SetNillableAttestationBundleID sets the "attestation_bundle" edge to the Attestation entity by ID if the given value is not nil. -func (wruo *WorkflowRunUpdateOne) SetNillableAttestationBundleID(id *uuid.UUID) *WorkflowRunUpdateOne { +func (_u *WorkflowRunUpdateOne) SetNillableAttestationBundleID(id *uuid.UUID) *WorkflowRunUpdateOne { if id != nil { - wruo = wruo.SetAttestationBundleID(*id) + _u = _u.SetAttestationBundleID(*id) } - return wruo + return _u } // SetAttestationBundle sets the "attestation_bundle" edge to the Attestation entity. -func (wruo *WorkflowRunUpdateOne) SetAttestationBundle(a *Attestation) *WorkflowRunUpdateOne { - return wruo.SetAttestationBundleID(a.ID) +func (_u *WorkflowRunUpdateOne) SetAttestationBundle(v *Attestation) *WorkflowRunUpdateOne { + return _u.SetAttestationBundleID(v.ID) } // Mutation returns the WorkflowRunMutation object of the builder. -func (wruo *WorkflowRunUpdateOne) Mutation() *WorkflowRunMutation { - return wruo.mutation +func (_u *WorkflowRunUpdateOne) Mutation() *WorkflowRunMutation { + return _u.mutation } // ClearContractVersion clears the "contract_version" edge to the WorkflowContractVersion entity. 
-func (wruo *WorkflowRunUpdateOne) ClearContractVersion() *WorkflowRunUpdateOne { - wruo.mutation.ClearContractVersion() - return wruo +func (_u *WorkflowRunUpdateOne) ClearContractVersion() *WorkflowRunUpdateOne { + _u.mutation.ClearContractVersion() + return _u } // ClearCasBackends clears all "cas_backends" edges to the CASBackend entity. -func (wruo *WorkflowRunUpdateOne) ClearCasBackends() *WorkflowRunUpdateOne { - wruo.mutation.ClearCasBackends() - return wruo +func (_u *WorkflowRunUpdateOne) ClearCasBackends() *WorkflowRunUpdateOne { + _u.mutation.ClearCasBackends() + return _u } // RemoveCasBackendIDs removes the "cas_backends" edge to CASBackend entities by IDs. -func (wruo *WorkflowRunUpdateOne) RemoveCasBackendIDs(ids ...uuid.UUID) *WorkflowRunUpdateOne { - wruo.mutation.RemoveCasBackendIDs(ids...) - return wruo +func (_u *WorkflowRunUpdateOne) RemoveCasBackendIDs(ids ...uuid.UUID) *WorkflowRunUpdateOne { + _u.mutation.RemoveCasBackendIDs(ids...) + return _u } // RemoveCasBackends removes "cas_backends" edges to CASBackend entities. -func (wruo *WorkflowRunUpdateOne) RemoveCasBackends(c ...*CASBackend) *WorkflowRunUpdateOne { - ids := make([]uuid.UUID, len(c)) - for i := range c { - ids[i] = c[i].ID +func (_u *WorkflowRunUpdateOne) RemoveCasBackends(v ...*CASBackend) *WorkflowRunUpdateOne { + ids := make([]uuid.UUID, len(v)) + for i := range v { + ids[i] = v[i].ID } - return wruo.RemoveCasBackendIDs(ids...) + return _u.RemoveCasBackendIDs(ids...) } // ClearVersion clears the "version" edge to the ProjectVersion entity. -func (wruo *WorkflowRunUpdateOne) ClearVersion() *WorkflowRunUpdateOne { - wruo.mutation.ClearVersion() - return wruo +func (_u *WorkflowRunUpdateOne) ClearVersion() *WorkflowRunUpdateOne { + _u.mutation.ClearVersion() + return _u } // ClearAttestationBundle clears the "attestation_bundle" edge to the Attestation entity. -func (wruo *WorkflowRunUpdateOne) ClearAttestationBundle() *WorkflowRunUpdateOne { - wruo.mutation.ClearAttestationBundle() - return wruo +func (_u *WorkflowRunUpdateOne) ClearAttestationBundle() *WorkflowRunUpdateOne { + _u.mutation.ClearAttestationBundle() + return _u } // Where appends a list predicates to the WorkflowRunUpdate builder. -func (wruo *WorkflowRunUpdateOne) Where(ps ...predicate.WorkflowRun) *WorkflowRunUpdateOne { - wruo.mutation.Where(ps...) - return wruo +func (_u *WorkflowRunUpdateOne) Where(ps ...predicate.WorkflowRun) *WorkflowRunUpdateOne { + _u.mutation.Where(ps...) + return _u } // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. -func (wruo *WorkflowRunUpdateOne) Select(field string, fields ...string) *WorkflowRunUpdateOne { - wruo.fields = append([]string{field}, fields...) - return wruo +func (_u *WorkflowRunUpdateOne) Select(field string, fields ...string) *WorkflowRunUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u } // Save executes the query and returns the updated WorkflowRun entity. -func (wruo *WorkflowRunUpdateOne) Save(ctx context.Context) (*WorkflowRun, error) { - return withHooks(ctx, wruo.sqlSave, wruo.mutation, wruo.hooks) +func (_u *WorkflowRunUpdateOne) Save(ctx context.Context) (*WorkflowRun, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) } // SaveX is like Save, but panics if an error occurs. 
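For reference, a minimal usage sketch of the WorkflowRunUpdateOne builder whose generated setters and Save method appear above. It is illustrative only: the ent client accessor (client.WorkflowRun.UpdateOneID), the import paths for the generated ent and biz packages, and the chosen field values are assumptions for the example, not part of this diff.

package example

import (
	"context"
	"time"

	"github.com/google/uuid"

	// Assumed paths for the generated ent package and the biz package; adjust to the repository layout.
	"github.com/chainloop-dev/chainloop/app/controlplane/pkg/biz"
	"github.com/chainloop-dev/chainloop/app/controlplane/pkg/data/ent"
)

// markRunFinished updates a single WorkflowRun by ID, chaining the exported
// builder methods shown in the generated code above.
func markRunFinished(ctx context.Context, client *ent.Client, runID uuid.UUID, state biz.WorkflowRunStatus) (*ent.WorkflowRun, error) {
	return client.WorkflowRun.
		UpdateOneID(runID).
		SetState(state).
		SetFinishedAt(time.Now()).
		Save(ctx)
}
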
-func (wruo *WorkflowRunUpdateOne) SaveX(ctx context.Context) *WorkflowRun { - node, err := wruo.Save(ctx) +func (_u *WorkflowRunUpdateOne) SaveX(ctx context.Context) *WorkflowRun { + node, err := _u.Save(ctx) if err != nil { panic(err) } @@ -974,51 +974,51 @@ func (wruo *WorkflowRunUpdateOne) SaveX(ctx context.Context) *WorkflowRun { } // Exec executes the query on the entity. -func (wruo *WorkflowRunUpdateOne) Exec(ctx context.Context) error { - _, err := wruo.Save(ctx) +func (_u *WorkflowRunUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. -func (wruo *WorkflowRunUpdateOne) ExecX(ctx context.Context) { - if err := wruo.Exec(ctx); err != nil { +func (_u *WorkflowRunUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { panic(err) } } // check runs all checks and user-defined validators on the builder. -func (wruo *WorkflowRunUpdateOne) check() error { - if v, ok := wruo.mutation.State(); ok { +func (_u *WorkflowRunUpdateOne) check() error { + if v, ok := _u.mutation.State(); ok { if err := workflowrun.StateValidator(v); err != nil { return &ValidationError{Name: "state", err: fmt.Errorf(`ent: validator failed for field "WorkflowRun.state": %w`, err)} } } - if wruo.mutation.WorkflowCleared() && len(wruo.mutation.WorkflowIDs()) > 0 { + if _u.mutation.WorkflowCleared() && len(_u.mutation.WorkflowIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "WorkflowRun.workflow"`) } - if wruo.mutation.VersionCleared() && len(wruo.mutation.VersionIDs()) > 0 { + if _u.mutation.VersionCleared() && len(_u.mutation.VersionIDs()) > 0 { return errors.New(`ent: clearing a required unique edge "WorkflowRun.version"`) } return nil } // Modify adds a statement modifier for attaching custom logic to the UPDATE statement. -func (wruo *WorkflowRunUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowRunUpdateOne { - wruo.modifiers = append(wruo.modifiers, modifiers...) - return wruo +func (_u *WorkflowRunUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *WorkflowRunUpdateOne { + _u.modifiers = append(_u.modifiers, modifiers...) 
+ return _u } -func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowRun, err error) { - if err := wruo.check(); err != nil { +func (_u *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowRun, err error) { + if err := _u.check(); err != nil { return _node, err } _spec := sqlgraph.NewUpdateSpec(workflowrun.Table, workflowrun.Columns, sqlgraph.NewFieldSpec(workflowrun.FieldID, field.TypeUUID)) - id, ok := wruo.mutation.ID() + id, ok := _u.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "WorkflowRun.id" for update`)} } _spec.Node.ID.Value = id - if fields := wruo.fields; len(fields) > 0 { + if fields := _u.fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, workflowrun.FieldID) for _, f := range fields { @@ -1030,77 +1030,77 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } } } - if ps := wruo.mutation.predicates; len(ps) > 0 { + if ps := _u.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } - if value, ok := wruo.mutation.FinishedAt(); ok { + if value, ok := _u.mutation.FinishedAt(); ok { _spec.SetField(workflowrun.FieldFinishedAt, field.TypeTime, value) } - if wruo.mutation.FinishedAtCleared() { + if _u.mutation.FinishedAtCleared() { _spec.ClearField(workflowrun.FieldFinishedAt, field.TypeTime) } - if value, ok := wruo.mutation.State(); ok { + if value, ok := _u.mutation.State(); ok { _spec.SetField(workflowrun.FieldState, field.TypeEnum, value) } - if value, ok := wruo.mutation.Reason(); ok { + if value, ok := _u.mutation.Reason(); ok { _spec.SetField(workflowrun.FieldReason, field.TypeString, value) } - if wruo.mutation.ReasonCleared() { + if _u.mutation.ReasonCleared() { _spec.ClearField(workflowrun.FieldReason, field.TypeString) } - if value, ok := wruo.mutation.RunURL(); ok { + if value, ok := _u.mutation.RunURL(); ok { _spec.SetField(workflowrun.FieldRunURL, field.TypeString, value) } - if wruo.mutation.RunURLCleared() { + if _u.mutation.RunURLCleared() { _spec.ClearField(workflowrun.FieldRunURL, field.TypeString) } - if value, ok := wruo.mutation.RunnerType(); ok { + if value, ok := _u.mutation.RunnerType(); ok { _spec.SetField(workflowrun.FieldRunnerType, field.TypeString, value) } - if wruo.mutation.RunnerTypeCleared() { + if _u.mutation.RunnerTypeCleared() { _spec.ClearField(workflowrun.FieldRunnerType, field.TypeString) } - if value, ok := wruo.mutation.Attestation(); ok { + if value, ok := _u.mutation.Attestation(); ok { _spec.SetField(workflowrun.FieldAttestation, field.TypeJSON, value) } - if wruo.mutation.AttestationCleared() { + if _u.mutation.AttestationCleared() { _spec.ClearField(workflowrun.FieldAttestation, field.TypeJSON) } - if value, ok := wruo.mutation.AttestationDigest(); ok { + if value, ok := _u.mutation.AttestationDigest(); ok { _spec.SetField(workflowrun.FieldAttestationDigest, field.TypeString, value) } - if wruo.mutation.AttestationDigestCleared() { + if _u.mutation.AttestationDigestCleared() { _spec.ClearField(workflowrun.FieldAttestationDigest, field.TypeString) } - if value, ok := wruo.mutation.AttestationState(); ok { + if value, ok := _u.mutation.AttestationState(); ok { _spec.SetField(workflowrun.FieldAttestationState, field.TypeBytes, value) } - if wruo.mutation.AttestationStateCleared() { + if _u.mutation.AttestationStateCleared() { _spec.ClearField(workflowrun.FieldAttestationState, 
field.TypeBytes) } - if value, ok := wruo.mutation.ContractRevisionUsed(); ok { + if value, ok := _u.mutation.ContractRevisionUsed(); ok { _spec.SetField(workflowrun.FieldContractRevisionUsed, field.TypeInt, value) } - if value, ok := wruo.mutation.AddedContractRevisionUsed(); ok { + if value, ok := _u.mutation.AddedContractRevisionUsed(); ok { _spec.AddField(workflowrun.FieldContractRevisionUsed, field.TypeInt, value) } - if value, ok := wruo.mutation.ContractRevisionLatest(); ok { + if value, ok := _u.mutation.ContractRevisionLatest(); ok { _spec.SetField(workflowrun.FieldContractRevisionLatest, field.TypeInt, value) } - if value, ok := wruo.mutation.AddedContractRevisionLatest(); ok { + if value, ok := _u.mutation.AddedContractRevisionLatest(); ok { _spec.AddField(workflowrun.FieldContractRevisionLatest, field.TypeInt, value) } - if value, ok := wruo.mutation.HasPolicyViolations(); ok { + if value, ok := _u.mutation.HasPolicyViolations(); ok { _spec.SetField(workflowrun.FieldHasPolicyViolations, field.TypeBool, value) } - if wruo.mutation.HasPolicyViolationsCleared() { + if _u.mutation.HasPolicyViolationsCleared() { _spec.ClearField(workflowrun.FieldHasPolicyViolations, field.TypeBool) } - if wruo.mutation.ContractVersionCleared() { + if _u.mutation.ContractVersionCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -1113,7 +1113,7 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wruo.mutation.ContractVersionIDs(); len(nodes) > 0 { + if nodes := _u.mutation.ContractVersionIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: false, @@ -1129,7 +1129,7 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wruo.mutation.CasBackendsCleared() { + if _u.mutation.CasBackendsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -1142,7 +1142,7 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wruo.mutation.RemovedCasBackendsIDs(); len(nodes) > 0 && !wruo.mutation.CasBackendsCleared() { + if nodes := _u.mutation.RemovedCasBackendsIDs(); len(nodes) > 0 && !_u.mutation.CasBackendsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -1158,7 +1158,7 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wruo.mutation.CasBackendsIDs(); len(nodes) > 0 { + if nodes := _u.mutation.CasBackendsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, Inverse: false, @@ -1174,7 +1174,7 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - if wruo.mutation.VersionCleared() { + if _u.mutation.VersionCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -1187,7 +1187,7 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wruo.mutation.VersionIDs(); len(nodes) > 0 { + if nodes := _u.mutation.VersionIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, @@ -1203,7 +1203,7 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Add = 
append(_spec.Edges.Add, edge) } - if wruo.mutation.AttestationBundleCleared() { + if _u.mutation.AttestationBundleCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2O, Inverse: false, @@ -1216,7 +1216,7 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := wruo.mutation.AttestationBundleIDs(); len(nodes) > 0 { + if nodes := _u.mutation.AttestationBundleIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2O, Inverse: false, @@ -1232,11 +1232,11 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } _spec.Edges.Add = append(_spec.Edges.Add, edge) } - _spec.AddModifiers(wruo.modifiers...) - _node = &WorkflowRun{config: wruo.config} + _spec.AddModifiers(_u.modifiers...) + _node = &WorkflowRun{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, wruo.driver, _spec); err != nil { + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{workflowrun.Label} } else if sqlgraph.IsConstraintError(err) { @@ -1244,6 +1244,6 @@ func (wruo *WorkflowRunUpdateOne) sqlSave(ctx context.Context) (_node *WorkflowR } return nil, err } - wruo.mutation.done = true + _u.mutation.done = true return _node, nil } diff --git a/app/controlplane/pkg/unmarshal/unmarshal.go b/app/controlplane/pkg/unmarshal/unmarshal.go index bca04a0cd..258d1a499 100644 --- a/app/controlplane/pkg/unmarshal/unmarshal.go +++ b/app/controlplane/pkg/unmarshal/unmarshal.go @@ -1,5 +1,5 @@ // -// Copyright 2024 The Chainloop Authors. +// Copyright 2024-2025 The Chainloop Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,9 +20,9 @@ import ( "errors" "fmt" + "buf.build/go/protovalidate" + "buf.build/go/protoyaml" "cuelang.org/go/cue/cuecontext" - "github.com/bufbuild/protovalidate-go" - "github.com/bufbuild/protoyaml-go" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" "gopkg.in/yaml.v2" @@ -45,8 +45,19 @@ func (RawFormat) Values() (kinds []string) { return } +// validatorAdapter adapts protovalidate.Validator to work with protoyaml.Validator. +// protovalidate v1.1.0 changed the Validate signature to accept variadic options, +// but protoyaml v0.6.0 expects the old signature without options. 
+type validatorAdapter struct { + validator protovalidate.Validator +} + +func (v *validatorAdapter) Validate(msg proto.Message) error { + return v.validator.Validate(msg) +} + func FromRaw(body []byte, format RawFormat, out proto.Message, doValidate bool) error { - var validator *protovalidate.Validator + var validator protovalidate.Validator var err error if doValidate { @@ -65,7 +76,7 @@ func FromRaw(body []byte, format RawFormat, out proto.Message, doValidate bool) // protoyaml allows validating the contract while unmarshalling yamlOpts := protoyaml.UnmarshalOptions{} if doValidate { - yamlOpts.Validator = validator + yamlOpts.Validator = &validatorAdapter{validator: validator} } if err := yamlOpts.Unmarshal(body, out); err != nil { diff --git a/app/controlplane/plugins/sdk/v1/plugin/api/buf.gen.yaml b/app/controlplane/plugins/sdk/v1/plugin/api/buf.gen.yaml index 65f798252..c5ca49b88 100644 --- a/app/controlplane/plugins/sdk/v1/plugin/api/buf.gen.yaml +++ b/app/controlplane/plugins/sdk/v1/plugin/api/buf.gen.yaml @@ -1,9 +1,8 @@ -version: v1 +version: v2 plugins: - - name: go + - local: protoc-gen-go out: . opt: paths=source_relative - - name: go-grpc + - local: protoc-gen-go-grpc out: . - opt: - - paths=source_relative \ No newline at end of file + opt: paths=source_relative diff --git a/app/controlplane/plugins/sdk/v1/plugin/api/buf.yaml b/app/controlplane/plugins/sdk/v1/plugin/api/buf.yaml deleted file mode 100644 index 85540300d..000000000 --- a/app/controlplane/plugins/sdk/v1/plugin/api/buf.yaml +++ /dev/null @@ -1,12 +0,0 @@ -version: v1 -breaking: - use: - - FILE -lint: - use: - - DEFAULT - ignore_only: - PACKAGE_VERSION_SUFFIX: - - ./fanout.proto - PACKAGE_DIRECTORY_MATCH: - - ./fanout.proto \ No newline at end of file diff --git a/buf.lock b/buf.lock new file mode 100644 index 000000000..ba59ed738 --- /dev/null +++ b/buf.lock @@ -0,0 +1,15 @@ +# Generated by buf. DO NOT EDIT. 
+version: v2 +deps: + - name: buf.build/bufbuild/protovalidate + commit: 2a1774d888024a9b93ce7eb4b59f6a83 + digest: b5:6b7f9bc919b65e5b79d7b726ffc03d6f815a412d6b792970fa6f065cae162107bd0a9d47272c8ab1a2c9514e87b13d3fbf71df614374d62d2183afb64be2d30a + - name: buf.build/googleapis/googleapis + commit: 4ed3bc159a8b4ac68fe253218760d035 + digest: b5:74a7798987b123218c004cf28543a2835e432ca04a69de99cd394a29dbad24d9ed38344f0b7c97ad6476039506c4eb38c2f4a8eef9cec3da2e38e4216a22d495 + - name: buf.build/grpc-ecosystem/grpc-gateway + commit: 4c5ba75caaf84e928b7137ae5c18c26a + digest: b5:c113e62fb3b29289af785866cae062b55ec8ae19ab3f08f3004098928fbca657730a06810b2012951294326b95669547194fa84476b9e9b688d4f8bf77a0691d + - name: buf.build/kratos-go/kratos + commit: e1d52e944e3845c6862a566db322432d + digest: b5:4f4070912e66285385e6eb8f667edb8ca6fc0fc083bb8cb02ace7f5125aeacdcad055e44b0f5dad3da12079d330934b21d180c27d55fb3c3a18918565d8725ba diff --git a/buf.work.yaml b/buf.work.yaml deleted file mode 100644 index 9abd339bc..000000000 --- a/buf.work.yaml +++ /dev/null @@ -1,9 +0,0 @@ -version: v1 -directories: - - app/controlplane/api - - app/controlplane/internal/conf - - app/controlplane/pkg/conf - - app/artifact-cas/internal/conf - - app/artifact-cas/api - - pkg/credentials/api - - pkg/attestation/crafter/api diff --git a/buf.yaml b/buf.yaml new file mode 100644 index 000000000..766d1a0e9 --- /dev/null +++ b/buf.yaml @@ -0,0 +1,134 @@ +version: v2 +modules: + - path: app/artifact-cas/api + lint: + use: + - STANDARD + except: + - FIELD_NOT_REQUIRED + - PACKAGE_NO_IMPORT_CYCLE + disallow_comment_ignores: true + breaking: + use: + - FILE + except: + - EXTENSION_NO_DELETE + - FIELD_SAME_DEFAULT + - path: app/artifact-cas/internal/conf + lint: + use: + - STANDARD + except: + - FIELD_NOT_REQUIRED + - PACKAGE_NO_IMPORT_CYCLE + ignore_only: + PACKAGE_DEFINED: + - app/artifact-cas/internal/conf/conf.proto + disallow_comment_ignores: true + breaking: + use: + - FILE + except: + - EXTENSION_NO_DELETE + - FIELD_SAME_DEFAULT + - path: app/controlplane/api + lint: + use: + - STANDARD + except: + - FIELD_NOT_REQUIRED + - PACKAGE_NO_IMPORT_CYCLE + ignore_only: + ENUM_VALUE_PREFIX: + - app/controlplane/api/workflowcontract/v1/crafting_schema.proto + ENUM_ZERO_VALUE_SUFFIX: + - app/controlplane/api/controlplane/v1/pagination.proto + breaking: + use: + - FILE + except: + - EXTENSION_NO_DELETE + - FIELD_SAME_DEFAULT + - path: app/controlplane/internal/conf + lint: + use: + - STANDARD + except: + - FIELD_NOT_REQUIRED + - PACKAGE_NO_IMPORT_CYCLE + ignore_only: + PACKAGE_DEFINED: + - app/controlplane/internal/conf/conf.proto + disallow_comment_ignores: true + breaking: + use: + - FILE + except: + - EXTENSION_NO_DELETE + - FIELD_SAME_DEFAULT + - path: app/controlplane/pkg/conf + lint: + use: + - STANDARD + except: + - FIELD_NOT_REQUIRED + - PACKAGE_NO_IMPORT_CYCLE + disallow_comment_ignores: true + breaking: + use: + - FILE + except: + - EXTENSION_NO_DELETE + - FIELD_SAME_DEFAULT + - path: app/controlplane/plugins/sdk/v1/plugin/api + lint: + use: + - DEFAULT + except: + - FIELD_NOT_REQUIRED + - PACKAGE_NO_IMPORT_CYCLE + ignore_only: + PACKAGE_DIRECTORY_MATCH: + - app/controlplane/plugins/sdk/v1/plugin/api/fanout.proto + PACKAGE_VERSION_SUFFIX: + - app/controlplane/plugins/sdk/v1/plugin/api/fanout.proto + disallow_comment_ignores: true + breaking: + use: + - FILE + except: + - EXTENSION_NO_DELETE + - FIELD_SAME_DEFAULT + - path: pkg/attestation/crafter/api + lint: + use: + - DEFAULT + except: + - FIELD_NOT_REQUIRED + - 
PACKAGE_NO_IMPORT_CYCLE + disallow_comment_ignores: true + breaking: + use: + - FILE + except: + - EXTENSION_NO_DELETE + - FIELD_SAME_DEFAULT + - path: pkg/credentials/api + lint: + use: + - DEFAULT + except: + - FIELD_NOT_REQUIRED + - PACKAGE_NO_IMPORT_CYCLE + disallow_comment_ignores: true + breaking: + use: + - FILE + except: + - EXTENSION_NO_DELETE + - FIELD_SAME_DEFAULT +deps: + - buf.build/bufbuild/protovalidate:v1.1.0 + - buf.build/googleapis/googleapis:4ed3bc159a8b4ac68fe253218760d035 + - buf.build/grpc-ecosystem/grpc-gateway:v2.26.3 + - buf.build/kratos-go/kratos:e1d52e944e3845c6862a566db322432d diff --git a/go.mod b/go.mod index 1ef055b28..dab7a571d 100644 --- a/go.mod +++ b/go.mod @@ -3,33 +3,33 @@ module github.com/chainloop-dev/chainloop go 1.25.5 require ( - cloud.google.com/go/secretmanager v1.14.5 + cloud.google.com/go/secretmanager v1.15.0 code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4 - cuelang.org/go v0.9.2 - entgo.io/ent v0.14.4 + cuelang.org/go v0.15.1 + entgo.io/ent v0.14.6-0.20251003170342-01063ef6395c github.com/adrg/xdg v0.4.0 - github.com/aws/aws-sdk-go-v2 v1.39.4 - github.com/aws/aws-sdk-go-v2/config v1.31.15 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.18.19 + github.com/aws/aws-sdk-go-v2 v1.40.0 + github.com/aws/aws-sdk-go-v2/config v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.2 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 - github.com/aws/aws-sdk-go-v2/service/sso v1.29.8 - github.com/aws/smithy-go v1.23.1 + github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 + github.com/aws/smithy-go v1.24.0 github.com/cenkalti/backoff/v4 v4.3.0 - github.com/coreos/go-oidc/v3 v3.11.0 + github.com/coreos/go-oidc/v3 v3.17.0 github.com/docker/distribution v2.8.3+incompatible - github.com/docker/go-connections v0.5.0 + github.com/docker/go-connections v0.6.0 github.com/getsentry/sentry-go v0.23.0 github.com/go-kratos/kratos/contrib/log/zap/v2 v2.0.0-20230823024326-a09f4d8ebba9 github.com/go-kratos/kratos/v2 v2.7.0 github.com/golang-jwt/jwt/v4 v4.5.2 - github.com/google/go-containerregistry v0.20.3 + github.com/google/go-containerregistry v0.20.7 github.com/google/subcommands v1.2.0 github.com/google/uuid v1.6.0 github.com/google/wire v0.6.0 - github.com/googleapis/gax-go/v2 v2.14.1 + github.com/googleapis/gax-go/v2 v2.15.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 - github.com/hashicorp/vault/api v1.16.0 + github.com/hashicorp/vault/api v1.22.0 github.com/improbable-eng/grpc-web v0.15.0 github.com/in-toto/in-toto-golang v0.9.0 github.com/jedib0t/go-pretty/v6 v6.4.7 @@ -37,157 +37,184 @@ require ( github.com/lib/pq v1.10.9 github.com/moby/moby v26.1.0+incompatible github.com/opencontainers/image-spec v1.1.1 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.2 github.com/rs/zerolog v1.32.0 - github.com/secure-systems-lab/go-securesystemslib v0.9.0 - github.com/sigstore/cosign/v2 v2.4.1 - github.com/sigstore/sigstore v1.8.9 + github.com/secure-systems-lab/go-securesystemslib v0.9.1 + github.com/sigstore/sigstore v1.10.3 github.com/spdx/tools-golang v0.5.3 - github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.6 - github.com/spf13/viper v1.20.1 - github.com/stretchr/testify v1.10.0 - github.com/testcontainers/testcontainers-go v0.35.0 + github.com/spf13/cobra v1.10.2 + github.com/spf13/pflag v1.0.10 + github.com/spf13/viper v1.21.0 + github.com/stretchr/testify v1.11.1 + 
github.com/testcontainers/testcontainers-go v0.40.0 go.uber.org/automaxprocs v1.6.0 - go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f - golang.org/x/oauth2 v0.30.0 + go.uber.org/zap v1.27.1 + golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 + golang.org/x/oauth2 v0.33.0 golang.org/x/term v0.37.0 - google.golang.org/api v0.233.0 - google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/grpc v1.72.2 - google.golang.org/protobuf v1.36.6 - sigs.k8s.io/yaml v1.4.0 + google.golang.org/api v0.256.0 + google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 // indirect + google.golang.org/grpc v1.77.0 + google.golang.org/protobuf v1.36.10 + sigs.k8s.io/yaml v1.6.0 ) require ( - buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1 - cloud.google.com/go/storage v1.50.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.10-20251209175733-2a1774d88802.1 + buf.build/go/protovalidate v1.1.0 + buf.build/go/protoyaml v0.6.0 + cloud.google.com/go/storage v1.57.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.1 - github.com/aws/aws-sdk-go-v2/service/s3 v1.89.0 - github.com/bufbuild/protovalidate-go v0.6.1 - github.com/bufbuild/protoyaml-go v0.1.11 + github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1 github.com/casbin/casbin/v2 v2.103.0 github.com/denisbrodbeck/machineid v1.0.1 github.com/extism/go-sdk v1.7.1 github.com/google/go-github/v66 v66.0.0 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 github.com/hashicorp/go-hclog v1.6.3 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/in-toto/attestation v1.1.0 + github.com/in-toto/attestation v1.1.2 github.com/invopop/jsonschema v0.13.0 - github.com/jackc/pgx/v5 v5.7.2 + github.com/jackc/pgx/v5 v5.7.5 github.com/muesli/reflow v0.3.0 github.com/nats-io/nats.go v1.34.0 - github.com/open-policy-agent/opa v1.6.0 + github.com/open-policy-agent/opa v1.10.1 github.com/openvex/go-vex v0.2.5 github.com/posthog/posthog-go v0.0.0-20240327112532-87b23fe11103 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 - github.com/sigstore/fulcio v1.6.3 - github.com/sigstore/protobuf-specs v0.4.1 - github.com/sigstore/sigstore-go v0.6.1 - github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5 - github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5 - github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5 - github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5 + github.com/sigstore/cosign/v3 v3.0.3 + github.com/sigstore/fulcio v1.8.3 + github.com/sigstore/protobuf-specs v0.5.0 + github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 + github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 + github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 + github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 + github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 github.com/styrainc/regal v0.35.1 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 gitlab.com/gitlab-org/security-products/analyzers/report/v5 v5.3.0 - 
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 - google.golang.org/genproto/googleapis/bytestream v0.0.0-20250505200425-f936aa4a68b2 + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 + google.golang.org/genproto/googleapis/bytestream v0.0.0-20251103181224-f26f9409b101 ) require ( - cel.dev/expr v0.20.0 // indirect - cloud.google.com/go/auth v0.16.1 // indirect + cel.dev/expr v0.25.1 // indirect + cloud.google.com/go/auth v0.17.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/kms v1.21.2 // indirect - cloud.google.com/go/longrunning v0.6.6 // indirect - cloud.google.com/go/monitoring v1.24.0 // indirect - cloud.google.com/go/pubsub v1.47.0 // indirect + cloud.google.com/go/kms v1.23.2 // indirect + cloud.google.com/go/longrunning v0.6.7 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + cloud.google.com/go/pubsub v1.50.1 // indirect + cloud.google.com/go/pubsub/v2 v2.3.0 // indirect dario.cat/mergo v1.0.2 // indirect filippo.io/edwards25519 v1.1.0 // indirect - github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect github.com/agnivade/levenshtein v1.2.1 // indirect github.com/anchore/go-struct-converter v0.0.0-20230627203149-c72ef8859ca9 // indirect - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.11 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12 // indirect + 
github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/bmatcuk/doublestar v1.3.4 // indirect github.com/bmatcuk/doublestar/v4 v4.8.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/casbin/govaluate v1.3.0 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect github.com/cockroachdb/apd/v3 v3.2.1 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v1.0.0-rc.1 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/cpuguy83/dockercfg v0.3.2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/creack/pty v1.1.21 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a // indirect + github.com/ebitengine/purego v0.8.4 // indirect + github.com/emicklei/proto v1.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect github.com/go-playground/assert/v2 v2.2.0 // indirect - github.com/go-sql-driver/mysql v1.9.0 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/goadesign/goa v2.2.5+incompatible // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/golang-jwt/jwt/v5 v5.2.2 // indirect - github.com/google/cel-go v0.20.1 // indirect - github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect - github.com/google/go-github/v55 v55.0.0 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/google/cel-go v0.26.1 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + 
github.com/google/go-github/v73 v73.0.0 // indirect github.com/google/renameio/v2 v2.0.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/yamux v0.1.2 // indirect github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jellydator/ttlcache/v3 v3.3.0 // indirect + github.com/jellydator/ttlcache/v3 v3.4.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lestrrat-go/blackmagic v1.0.4 // indirect + github.com/lestrrat-go/dsig v1.0.0 // indirect + github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect + github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect + github.com/lestrrat-go/jwx/v3 v3.0.11 // indirect + github.com/lestrrat-go/option v1.0.1 // indirect + github.com/lestrrat-go/option/v2 v2.0.0 // indirect github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/go-archive v0.1.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect + github.com/natefinch/atomic v1.0.1 // indirect github.com/nats-io/nkeys v0.4.7 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect github.com/oklog/run v1.1.0 // indirect - github.com/onsi/ginkgo/v2 v2.19.0 // indirect github.com/otiai10/copy v1.11.0 // indirect github.com/package-url/packageurl-go v0.1.1 // indirect github.com/pelletier/go-toml v1.9.5 // indirect @@ -196,56 +223,64 @@ require ( github.com/pkg/xattr v0.4.9 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91 // indirect github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.7.0 // indirect - github.com/segmentio/ksuid v1.0.4 // indirect - github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect - github.com/shirou/gopsutil/v3 v3.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/shoenig/test v0.6.6 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/segmentio/asm v1.2.0 // indirect + github.com/sergi/go-diff v1.4.0 // indirect + github.com/shirou/gopsutil/v4 v4.25.6 // indirect + github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect + github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect github.com/skeema/knownhosts v1.3.1 // indirect - github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect - github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + 
github.com/stoewer/go-strcase v1.3.1 // indirect github.com/styrainc/roast v0.15.0 // indirect - github.com/tchap/go-patricia/v2 v2.3.2 // indirect + github.com/tchap/go-patricia/v2 v2.3.3 // indirect github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 // indirect github.com/tetratelabs/wazero v1.9.0 // indirect - github.com/theupdateframework/go-tuf/v2 v2.0.1 // indirect + github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.9.0 // indirect - github.com/vektah/gqlparser/v2 v2.5.28 // indirect + github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect + github.com/valyala/fastjson v1.6.4 // indirect + github.com/vektah/gqlparser/v2 v2.5.30 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yashtewari/glob-intersection v0.2.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zclconf/go-cty-yaml v1.1.0 // indirect - github.com/zeebo/errs v1.4.0 // indirect + gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect gitlab.com/gitlab-org/security-products/analyzers/common/v3 v3.2.1 // indirect gitlab.com/gitlab-org/security-products/analyzers/ruleset/v3 v3.0.0 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel/metric v1.36.0 // indirect - go.opentelemetry.io/otel/sdk v1.36.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect - go.opentelemetry.io/proto/otlp v1.6.0 // indirect - go.step.sm/crypto v0.51.2 // indirect - goa.design/goa v2.2.5+incompatible // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect + go.step.sm/crypto v0.74.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + goa.design/goa/v3 v3.22.6 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) require ( - ariga.io/atlas v0.36.1 // indirect - cloud.google.com/go v0.120.0 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect - cloud.google.com/go/iam v1.5.0 // indirect + ariga.io/atlas v0.36.2-0.20250730182955-2c6300d0a3e1 // indirect + cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/iam v1.5.3 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Microsoft/go-winio 
v0.6.2 // indirect @@ -253,116 +288,111 @@ require ( github.com/ThalesIgnite/crypto11 v1.2.5 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.38.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 - github.com/docker/cli v27.5.0+incompatible // indirect - github.com/docker/docker v28.0.0+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/docker v28.5.2+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.4 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fsouza/fake-gcs-server v1.47.6 - github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/go-git/go-git/v5 v5.16.0 - github.com/go-jose/go-jose/v3 v3.0.4 // indirect github.com/go-kratos/aegis v0.2.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.23.0 // indirect - github.com/go-openapi/errors v0.22.0 // indirect + github.com/go-openapi/analysis v0.24.1 // indirect + github.com/go-openapi/errors v0.22.4 // indirect github.com/go-openapi/inflect v0.21.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/loads v0.22.0 // indirect - github.com/go-openapi/runtime v0.28.0 // indirect - github.com/go-openapi/spec v0.21.0 // indirect - github.com/go-openapi/strfmt v0.23.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect - github.com/go-openapi/validate v0.24.0 // indirect + 
github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.3 // indirect + github.com/go-openapi/loads v0.23.2 // indirect + github.com/go-openapi/runtime v0.29.2 // indirect + github.com/go-openapi/spec v0.22.1 // indirect + github.com/go-openapi/strfmt v0.25.0 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/validate v0.25.1 // indirect github.com/go-playground/form/v4 v4.2.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/certificate-transparency-go v1.2.1 // indirect + github.com/google/certificate-transparency-go v1.3.2 // indirect github.com/google/go-cmp v0.7.0 github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/gorilla/mux v1.8.1 github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-plugin v1.6.3 - github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 // indirect github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect - github.com/hashicorp/go-sockaddr v1.0.5 // indirect - github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/hashicorp/go-sockaddr v1.0.7 // indirect + github.com/hashicorp/hcl v1.0.1-vault-7 // indirect github.com/hashicorp/hcl/v2 v2.23.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.0 // indirect - github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect - github.com/magiconair/properties v1.8.9 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/letsencrypt/boulder v0.20251110.0 // indirect + github.com/magiconair/properties v1.8.10 // indirect github.com/mailru/easyjson v0.9.1 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-runewidth v0.0.17 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/minio/minio-go/v7 v7.0.63 github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/owenrumney/go-sarif v1.1.1 github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.63.0 - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/common v0.67.4 + github.com/prometheus/procfs v0.17.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rs/cors v1.11.0 // indirect + github.com/rs/cors v1.11.1 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/sassoftware/relic v7.2.1+incompatible // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/sigstore/rekor v1.3.6 // indirect - github.com/sigstore/timestamp-authority v1.2.2 - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/cast v1.7.1 // indirect + github.com/sigstore/rekor v1.4.3 // indirect + github.com/sigstore/timestamp-authority v1.2.9 + github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect @@ -370,34 +400,32 @@ require ( github.com/theupdateframework/go-tuf v0.7.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/vbatts/tar-split v0.11.6 // indirect - github.com/xanzy/go-gitlab v0.109.0 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect github.com/zclconf/go-cty v1.16.2 // indirect - go.mongodb.org/mongo-driver v1.14.0 // indirect + go.mongodb.org/mongo-driver v1.17.6 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.45.0 - golang.org/x/mod v0.29.0 // indirect + golang.org/x/mod v0.30.0 // indirect golang.org/x/net v0.47.0 // indirect golang.org/x/sync v0.18.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/text v0.31.0 // indirect - golang.org/x/time v0.11.0 // indirect - golang.org/x/tools v0.38.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.39.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.28.6 // indirect - k8s.io/apimachinery v0.28.6 - k8s.io/client-go v0.28.6 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // 
indirect - k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect + k8s.io/api v0.34.2 // indirect + k8s.io/apimachinery v0.34.2 + k8s.io/client-go v0.34.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect nhooyr.io/websocket v1.8.10 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect ) diff --git a/go.sum b/go.sum index 6756dc905..bd39c6e7e 100644 --- a/go.sum +++ b/go.sum @@ -1,113 +1,102 @@ -ariga.io/atlas v0.36.1 h1:w0BGAHPkzxpx0n9QWUVbtu7vUUihs7cDCTPsnnw9nck= -ariga.io/atlas v0.36.1/go.mod h1:9ZAIr/V85596AVxmN8edyVHYKKpnNsDMdnHLsEliW7k= -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1 h1:2IGhRovxlsOIQgx2ekZWo4wTPAYpck41+18ICxs37is= -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1/go.mod h1:Tgn5bgL220vkFOI0KPStlcClPeOJzAv4uT+V8JXGUnw= -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= -cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +ariga.io/atlas v0.36.2-0.20250730182955-2c6300d0a3e1 h1:NPPfBaVZgz4LKBCIc0FbMogCjvXN+yGf7CZwotOwJo8= +ariga.io/atlas v0.36.2-0.20250730182955-2c6300d0a3e1/go.mod h1:Ex5l1xHsnWQUc3wYnrJ9gD7RUEzG76P7ZRQp8wNr0wc= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.10-20251209175733-2a1774d88802.1 h1:ZnX3qpF/pDiYrf+Q3p+/zCzZ5ELSpszy5hdVarDMSV4= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.10-20251209175733-2a1774d88802.1/go.mod h1:fUl8CEN/6ZAMk6bP8ahBJPUJw7rbp+j4x+wCcYi2IG4= +buf.build/go/protovalidate v1.1.0 h1:pQqEQRpOo4SqS60qkvmhLTTQU9JwzEvdyiqAtXa5SeY= +buf.build/go/protovalidate v1.1.0/go.mod h1:bGZcPiAQDC3ErCHK3t74jSoJDFOs2JH3d7LWuTEIdss= +buf.build/go/protoyaml v0.6.0 h1:Nzz1lvcXF8YgNZXk+voPPwdU8FjDPTUV4ndNTXN0n2w= +buf.build/go/protoyaml v0.6.0/go.mod h1:RgUOsBu/GYKLDSIRgQXniXbNgFlGEZnQpRAUdLAFV2Q= +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= -cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= -cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= -cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/compute/metadata v0.6.0 
h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.5.0 h1:QlLcVMhbLGOjRcGe6VTGGTyQib8dRLK2B/kYNV0+2xs= -cloud.google.com/go/iam v1.5.0/go.mod h1:U+DOtKQltF/LxPEtcDLoobcsZMilSRwR7mgNL7knOpo= -cloud.google.com/go/kms v1.21.2 h1:c/PRUSMNQ8zXrc1sdAUnsenWWaNXN+PzTXfXOcSFdoE= -cloud.google.com/go/kms v1.21.2/go.mod h1:8wkMtHV/9Z8mLXEXr1GK7xPSBdi6knuLXIhqjuWcI6w= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.6 h1:XJNDo5MUfMM05xK3ewpbSdmt7R2Zw+aQEMbdQR65Rbw= -cloud.google.com/go/longrunning v0.6.6/go.mod h1:hyeGJUrPHcx0u2Uu1UFSoYZLn4lkMrccJig0t4FI7yw= -cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= -cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= -cloud.google.com/go/pubsub v1.47.0 h1:Ou2Qu4INnf7ykrFjGv2ntFOjVo8Nloh/+OffF4mUu9w= -cloud.google.com/go/pubsub v1.47.0/go.mod h1:LaENesmga+2u0nDtLkIOILskxsfvn/BXX9Ak1NFxOs8= -cloud.google.com/go/secretmanager v1.14.5 h1:W++V0EL9iL6T2+ec24Dm++bIti0tI6Gx6sCosDBters= -cloud.google.com/go/secretmanager v1.14.5/go.mod h1:GXznZF3qqPZDGZQqETZwZqHw4R6KCaYVvcGiRBA+aqY= -cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= -cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= -cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE= -cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/pubsub v1.50.1 h1:fzbXpPyJnSGvWXF1jabhQeXyxdbCIkXTpjXHy7xviBM= +cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= +cloud.google.com/go/pubsub/v2 v2.3.0 h1:DgAN907x+sP0nScYfBzneRiIhWoXcpCD8ZAut8WX9vs= +cloud.google.com/go/pubsub/v2 v2.3.0/go.mod h1:O5f0KHG9zDheZAd3z5rlCRhxt2JQtB+t/IYLKK3Bpvw= +cloud.google.com/go/secretmanager v1.15.0 h1:RtkCMgTpaBMbzozcRUGfZe46jb9a3qh5EdEtVRUATF8= +cloud.google.com/go/secretmanager v1.15.0/go.mod h1:1hQSAhKK7FldiYw//wbR/XPfPc08eQ81oBsnRUHEvUc= +cloud.google.com/go/storage v1.57.1 h1:gzao6odNJ7dR3XXYvAgPK+Iw4fVPPznEPPyNjbaVkq8= +cloud.google.com/go/storage v1.57.1/go.mod h1:329cwlpzALLgJuu8beyJ/uvQznDHpa2U5lGjWednkzg= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= 
code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4 h1:9G5F8zgma5v0GdDvNz6iZwwJp3RS/z0SY/aHGfVwvTo= code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4/go.mod h1:wYHCXH/gI19ujoFVuMkY48qPpPCoHLKBKXPkn67h/Yc= -cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 h1:BnG6pr9TTr6CYlrJznYUDj6V7xldD1W+1iXPum0wT/w= -cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2/go.mod h1:pK23AUVXuNzzTpfMCA06sxZGeVQ/75FdVtW249de9Uo= -cuelang.org/go v0.9.2 h1:pfNiry2PdRBr02G/aKm5k2vhzmqbAOoaB4WurmEbWvs= -cuelang.org/go v0.9.2/go.mod h1:qpAYsLOf7gTM1YdEg6cxh553uZ4q9ZDWlPbtZr9q1Wk= +cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084 h1:4k1yAtPvZJZQTu8DRY8muBo0LHv6TqtrE0AO5n6IPYs= +cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084/go.mod h1:4WWeZNxUO1vRoZWAHIG0KZOd6dA25ypyWuwD3ti0Tdc= +cuelang.org/go v0.15.1 h1:MRnjc/KJE+K42rnJ3a+425f1jqXeOOgq9SK4tYRTtWw= +cuelang.org/go v0.15.1/go.mod h1:NYw6n4akZcTjA7QQwJ1/gqWrrhsN4aZwhcAL0jv9rZE= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -entgo.io/ent v0.14.4 h1:/DhDraSLXIkBhyiVoJeSshr4ZYi7femzhj6/TckzZuI= -entgo.io/ent v0.14.4/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM= +entgo.io/ent v0.14.6-0.20251003170342-01063ef6395c h1:74zQRklceH5iran5LRWX4KyIHHlwRadGVpEn3JieC4A= +entgo.io/ent v0.14.6-0.20251003170342-01063ef6395c/go.mod h1:lzIEU+g4/iK78Txz6EgLmA/kkkK/k73MwaRH7xMJacs= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 h1:kcnfY4vljxXliXDBrA9K9lwF8IoEZ4Up6Eg9kWTIm28= -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 h1:j8BorDEigD8UFOSZQiSqAMOOleyQOOQPnUAwV+Ls1gA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 
h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0/go.mod h1:XD3DIOOVgBCO03OleB1fHjgktVRFxlT++KwKgIOewdM= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1 h1:Wgf5rZba3YZqeTNJPtvqZoBu1sBN/L4sry+u2U3Y75w= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1/go.mod h1:xxCBG/f/4Vbmh2XQJBsOmNdxWUY5j/s27jujKPbQf14= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 h1:bFWuoEKg+gImo7pvkiQEFAc8ocibADgXeiLAxWhWmkI= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1/go.mod h1:Vih/3yc6yac2JzU4hzpaDupBJP0Flaia9rXXrU8xyww= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod 
h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 h1:lhhYARPUu3LmHysQ/igznQphfzynnqI3D75oUyw1HXk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0/go.mod h1:l9rva3ApbBpEJxSNYnwT9N4CDLrWgtq3u8736C5hyJw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0 h1:xfK3bbi6F2RDtaZFtUdKO3osOBIhNb+xTs8lFW6yx9o= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 h1:s0WlVbf9qpvkh1c/uDAPElam0WrL7fHRIidgZJ7UqZI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= @@ -133,30 +122,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= -github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= -github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= -github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= -github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts= -github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= -github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c= -github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc= -github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+NXa6g5ZWPFEzaK/ewwY= -github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY= -github.com/alibabacloud-go/debug v1.0.0 h1:3eIEQWfay1fB24PQIEzXAswlVJtdQok8f3EVN5VrBnA= -github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= -github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8= -github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= -github.com/alibabacloud-go/openapi-util v0.1.0 h1:0z75cIULkDrdEhkLWgi9tnLe+KhAFE/r5Pb3312/eAY= -github.com/alibabacloud-go/openapi-util v0.1.0/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= -github.com/alibabacloud-go/tea v1.2.1 h1:rFF1LnrAdhaiPmKwH5xwYOKlMh66CqRwPUTzIK74ask= -github.com/alibabacloud-go/tea v1.2.1/go.mod h1:qbzof29bM/IFhLMtJPrgTGK3eauV5J2wSyEUo4OEmnA= -github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA= -github.com/alibabacloud-go/tea-utils v1.4.5/go.mod 
h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= -github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0= -github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= -github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g= -github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c= github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= github.com/anchore/go-struct-converter v0.0.0-20230627203149-c72ef8859ca9 h1:6COpXWpHbhWM1wgcQN95TdsmrLTba8KQfPgImBXzkjA= github.com/anchore/go-struct-converter v0.0.0-20230627203149-c72ef8859ca9/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= @@ -164,8 +129,8 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= -github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= @@ -183,57 +148,53 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= -github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.39.4 h1:qTsQKcdQPHnfGYBBs+Btl8QwxJeoWcOcPcixK90mRhg= -github.com/aws/aws-sdk-go-v2 v1.39.4/go.mod h1:yWSxrnioGUZ4WVv9TgMrNUeLV3PFESn/v+6T/Su8gnM= +github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= +github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko= -github.com/aws/aws-sdk-go-v2/config v1.31.15 h1:gE3M4xuNXfC/9bG4hyowGm/35uQTi7bUKeYs5e/6uvU= -github.com/aws/aws-sdk-go-v2/config v1.31.15/go.mod h1:HvnvGJoE2I95KAIW8kkWVPJ4XhdrlvwJpV6pEzFQa8o= 
-github.com/aws/aws-sdk-go-v2/credentials v1.18.19 h1:Jc1zzwkSY1QbkEcLujwqRTXOdvW8ppND3jRBb/VhBQc= -github.com/aws/aws-sdk-go-v2/credentials v1.18.19/go.mod h1:DIfQ9fAk5H0pGtnqfqkbSIzky82qYnGvh06ASQXXg6A= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11 h1:X7X4YKb+c0rkI6d4uJ5tEMxXgCZ+jZ/D6mvkno8c8Uw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11/go.mod h1:EqM6vPZQsZHYvC4Cai35UDg/f5NCEU+vp0WfbVqVcZc= +github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= +github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.1 h1:EfS+tBgFwzrR/skkhKdyClU0pCx/VgSKSo8OIzMEiQM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.1/go.mod h1:U/PKebSFFMhuRPG10ot6Xfc2LKyCf3+sQfesRHZnzVU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11 h1:7AANQZkF3ihM8fbdftpjhken0TP9sBzFbV/Ze/Y4HXA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11/go.mod h1:NTF4QCGkm6fzVwncpkFQqoquQyOolcyXfbpC98urj+c= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11 h1:ShdtWUZT37LCAA4Mw2kJAJtzaszfSHFb5n25sdcv4YE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11/go.mod h1:7bUb2sSr2MZ3M/N+VyETLTQtInemHXb/Fl3s8CLzm0Y= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.11 h1:bKgSxk1TW//00PGQqYmrq83c+2myGidEclp+t9pPqVI= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.11/go.mod h1:vrPYCQ6rFHL8jzQA8ppu3gWX18zxjLIDGTeqDxkBmSI= -github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 h1:y6LX9GUoEA3mO0qpFl1ZQHj1rFyPWVphlzebiSt2tKE= -github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2/go.mod h1:Q0LcmaN/Qr8+4aSBrdrXXePqoX0eOuYpJLbYpilmWnA= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4/jyq3Z8gNzmoJupHAoBp0= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 h1:xtuxji5CS0JknaXoACOunXOYOQzgfTvGAc9s2QdCJA4= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2/go.mod h1:zxwi0DIR0rcRcgdbl7E2MSOvxDyyXGBlScvBkARFaLQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.2 h1:DGFpGybmutVsCuF6vSuLZ25Vh55E3VmsnJmFfjeBx4M= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.2/go.mod h1:hm/wU1HDvXCFEDzOLorQnZZ/CVvPXvWEmHMSmqgQRuA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11 
h1:GpMf3z2KJa4RnJ0ew3Hac+hRFYLZ9DDjfgXjuW+pB54= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11/go.mod h1:6MZP3ZI4QQsgUCFTwMZA2V0sEriNQ8k2hmoHF3qjimQ= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.11 h1:weapBOuuFIBEQ9OX/NVW3tFQCvSutyjZYk/ga5jDLPo= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.11/go.mod h1:3C1gN4FmIVLwYSh8etngUS+f1viY6nLCDVtZmrFbDy0= -github.com/aws/aws-sdk-go-v2/service/kms v1.38.3 h1:RivOtUH3eEu6SWnUMFHKAW4MqDOzWn1vGQ3S38Y5QMg= -github.com/aws/aws-sdk-go-v2/service/kms v1.38.3/go.mod h1:cQn6tAF77Di6m4huxovNM7NVAozWTZLsDRp9t8Z/WYk= -github.com/aws/aws-sdk-go-v2/service/s3 v1.89.0 h1:JbCUlVDEjmhpvpIgXP9QN+/jW61WWWj99cGmxMC49hM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.89.0/go.mod h1:UHKgcRSx8PVtvsc1Poxb/Co3PD3wL7P+f49P0+cWtuY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12 h1:itu4KHu8JK/N6NcLIISlf3LL1LccMqruLUXZ9y7yBZw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12/go.mod h1:i+6vTU3xziikTY3vcox23X8pPGW5X3wVgd1VZ7ha+x8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3 h1:NEe7FaViguRQEm8zl8Ay/kC/QRsMtWUiCGZajQIsLdc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3/go.mod h1:JLuCKu5VfiLBBBl/5IzZILU7rxS0koQpHzMOCzycOJU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12 h1:R3uW0iKl8rgNEXNjVGliW/oMEh9fO/LlUEV8RvIFr1I= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12/go.mod h1:XEttbEr5yqsw8ebi7vlDoGJJjMXRez4/s9pibpJyL5s= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1 h1:Dq82AV+Qxpno/fG162eAhnD8d48t9S+GZCfz7yv1VeA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1/go.mod h1:MbKLznDKpf7PnSonNRUVYZzfP0CeLkRIUexeblgKcU4= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 h1:TIOEjw0i2yyhmhRry3Oeu9YtiiHWISZ6j/irS1W3gX4= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6/go.mod h1:3Ba++UwWd154xtP4FRX5pUK3Gt4up5sDHCve6kVfE+g= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.8 h1:M5nimZmugcZUO9wG7iVtROxPhiqyZX6ejS1lxlDPbTU= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.8/go.mod h1:mbef/pgKhtKRwrigPPs7SSSKZgytzP8PQ6P6JAAdqyM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3 h1:S5GuJZpYxE0lKeMHKn+BRTz6PTFpgThyJ+5mYfux7BM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3/go.mod h1:X4OF+BTd7HIb3L+tc4UlWHVrpgwZZIVENU15pRDVTI0= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.9 h1:Ekml5vGg6sHSZLZJQJagefnVe6PmqC2oiRkBq4F7fU0= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.9/go.mod h1:/e15V+o1zFHWdH3u7lpI3rVBcxszktIKuHKCY2/py+k= -github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M= -github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M= 
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -249,72 +210,59 @@ github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9 github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= -github.com/bufbuild/protocompile v0.10.0 h1:+jW/wnLMLxaCEG8AX9lD0bQ5v9h1RUiMKOBOT5ll9dM= -github.com/bufbuild/protocompile v0.10.0/go.mod h1:G9qQIQo0xZ6Uyj6CMNz0saGmx2so+KONo8/KrELABiY= -github.com/bufbuild/protovalidate-go v0.6.1 h1:uzW8r0CDvqApUChNj87VzZVoQSKhcVdw5UWOE605UIw= -github.com/bufbuild/protovalidate-go v0.6.1/go.mod h1:4BR3rKEJiUiTy+sqsusFn2ladOf0kYmA2Reo6BHSBgQ= -github.com/bufbuild/protoyaml-go v0.1.11 h1:Iyixd6Y5dx6ws6Uh8APgC1lMyvXt710NayoY8cY0Vj8= -github.com/bufbuild/protoyaml-go v0.1.11/go.mod h1:KCBItkvZOK/zwGueLdH1Wx1RLyFn5rCH7YjQrdty2Wc= +github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= +github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/buildkite/agent/v3 v3.81.0 h1:JVfkng2XnsXesFXwiFwLJFkuzVu4zvoJCvedfoIXD6E= -github.com/buildkite/agent/v3 v3.81.0/go.mod h1:edJeyycODRxaFvpT22rDGwaQ5oa4eB8GjtbjgX5VpFw= -github.com/buildkite/go-pipeline v0.13.1 h1:Y9p8pQIwPtauVwNrcmTDH6+XK7jE1nLuvWVaK8oymA8= -github.com/buildkite/go-pipeline v0.13.1/go.mod h1:2HHqlSFTYgHFhzedJu0LhLs9n5c9XkYnHiQFVN5HE4U= -github.com/buildkite/interpolate v0.1.3 h1:OFEhqji1rNTRg0u9DsSodg63sjJQEb1uWbENq9fUOBM= -github.com/buildkite/interpolate v0.1.3/go.mod h1:UNVe6A+UfiBNKbhAySrBbZFZFxQ+DXr9nWen6WVt/A8= -github.com/buildkite/roko v1.2.0 h1:hbNURz//dQqNl6Eo9awjQOVOZwSDJ8VEbBDxSfT9rGQ= 
-github.com/buildkite/roko v1.2.0/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= -github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= +github.com/bytecodealliance/wasmtime-go/v37 v37.0.0 h1:DPjdn2V3JhXHMoZ2ymRqGK+y1bDyr9wgpyYCvhjMky8= +github.com/bytecodealliance/wasmtime-go/v37 v37.0.0/go.mod h1:Pf1l2JCTUFMnOqDIwkjzx1qfVJ09xbaXETKgRVE4jZ0= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/casbin/casbin/v2 v2.103.0 h1:dHElatNXNrr8XcseUov0ZSiWjauwmZZE6YMV3eU1yic= github.com/casbin/casbin/v2 v2.103.0/go.mod h1:Ee33aqGrmES+GNL17L0h9X28wXuo829wnNUnS0edAco= github.com/casbin/govaluate v1.3.0 h1:VA0eSY0M2lA86dYd5kPPuNZMUD9QkWnOCnavGrw9myc= github.com/casbin/govaluate v1.3.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= -github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= -github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= -github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= -github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= -github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= -github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= -github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= +github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -325,34 +273,35 @@ github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GK github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= -github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= +github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ= github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= -github.com/dgraph-io/badger/v4 v4.7.0 h1:Q+J8HApYAY7UMpL8d9owqiB+odzEc0zn/aqOD9jhc6Y= -github.com/dgraph-io/badger/v4 v4.7.0/go.mod h1:He7TzG3YBy3j4f5baj5B7Zl2XyfNe5bl4Udl0aPemVA= +github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs= +github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w= github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= @@ -360,20 +309,18 @@ github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQM github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= -github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= -github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.5.0+incompatible h1:aMphQkcGtpHixwwhAXJT1rrK/detk2JIvDaFkLctbGM= -github.com/docker/cli v27.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v28.0.0+incompatible h1:Olh0KS820sJ7nPsBKChVhk5pzqcwDR15fumfAd/p9hM= -github.com/docker/docker v28.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= +github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -384,25 +331,27 @@ 
github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a/go.mod h1:C github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/proto v1.12.1 h1:6n/Z2pZAnBwuhU66Gs8160B8rrrYKo7h2F2sCOnNceE= -github.com/emicklei/proto v1.12.1/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/proto v1.14.2 h1:wJPxPy2Xifja9cEMrcA/g08art5+7CGJNFNk35iXC1I= +github.com/emicklei/proto v1.14.2/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= -github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= -github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= -github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -432,6 +381,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fsouza/fake-gcs-server v1.47.6 h1:/d/879q/Os9Zc5gyV3QVLfZoajN1KcWucf2zYCFeFxs= github.com/fsouza/fake-gcs-server v1.47.6/go.mod h1:ApSXKexpG1BUXJ4f2tNCxvhTKwCPFqFLBDW2UNQDODE= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -440,8 +391,8 @@ github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwv github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= -github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= -github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -458,10 +409,8 @@ github.com/go-git/go-git/v5 v5.16.0/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lo github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= -github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -485,30 +434,56 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= -github.com/go-openapi/analysis v0.23.0/go.mod 
h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= -github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= -github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= +github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= +github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= +github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= +github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk= github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= -github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= -github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= -github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= -github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= -github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= -github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= -github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= -github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= -github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg= -github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= +github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= +github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= +github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= +github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= +github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= +github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= +github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= +github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= +github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= +github.com/go-openapi/swag v0.25.4 
h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= +github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -522,8 +497,8 @@ github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZs github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= github.com/go-rod/rod 
v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.9.0 h1:Y0zIbQXhQKmQgTp44Y1dp3wTXcn804QoTptLZT1vtvo= -github.com/go-sql-driver/mysql v1.9.0/go.mod h1:pDetrLJeA3oMujJuvXc8RJoasr589B6A9fwzD3QMrqw= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= @@ -534,13 +509,13 @@ github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/goadesign/goa v2.2.5+incompatible h1:SLgzk0V+QfFs7MVz9sbDHelbTDI9B/d4W7Hl5udTynY= -github.com/goadesign/goa v2.2.5+incompatible/go.mod h1:d/9lpuZBK7HFi/7O0oXfwvdoIl+nx2bwKqctZe/lQao= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -552,8 +527,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= -github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -563,8 +538,8 @@ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8J github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod 
h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= +github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -589,14 +564,14 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= -github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= -github.com/google/certificate-transparency-go v1.2.1 h1:4iW/NwzqOqYEEoCBEFP+jPbBXbLqMpq3CifMyOnDUME= -github.com/google/certificate-transparency-go v1.2.1/go.mod h1:bvn/ytAccv+I6+DGkqpvSsEdiVGramgaSC6RD3tEmeE= +github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= +github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -610,12 +585,12 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= -github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= -github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg= -github.com/google/go-github/v55 
v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= github.com/google/go-github/v66 v66.0.0/go.mod h1:+4SO9Zkuyf8ytMj0csN1NR/5OTR+MfqPp8P8dVlcvY4= +github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24= +github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -624,8 +599,8 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250422154841-e1f9c1950416 h1:1/qwHx8P72glDXdyCKesJ+/c40x71SY4q2avOxJ2iYQ= -github.com/google/pprof v0.0.0-20250422154841-e1f9c1950416/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE= +github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= @@ -633,20 +608,18 @@ github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= -github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= -github.com/google/trillian v1.6.0 h1:jMBeDBIkINFvS2n6oV5maDqfRlxREAc6CW9QYWQ0qT4= -github.com/google/trillian v1.6.0/go.mod h1:Yu3nIMITzNhhMJEHjAtp6xKiu+H/iHu2Oq5FjV2mCWI= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= -github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= -github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod 
h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= -github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= @@ -661,14 +634,14 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -686,20 +659,18 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= -github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= -github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-retryablehttp 
v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU= -github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -707,20 +678,18 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= -github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= +github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod 
h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/vault/api v1.16.0 h1:nbEYGJiAPGzT9U4oWgaaB0g+Rj8E59QuHKyA5LhwQN4= -github.com/hashicorp/vault/api v1.16.0/go.mod h1:KhuUhzOD8lDSk29AtzNjgAu2kxRA9jL9NAbkFlqvkBA= +github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= +github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= @@ -734,8 +703,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= -github.com/in-toto/attestation v1.1.0 h1:oRWzfmZPDSctChD0VaQV7MJrywKOzyNrtpENQFq//2Q= -github.com/in-toto/attestation v1.1.0/go.mod h1:DB59ytd3z7cIHgXxwpSX2SABrU6WJUKg/grpdgHVgVs= +github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= +github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -748,8 +717,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= -github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -758,18 +727,16 @@ github.com/jedib0t/go-pretty/v6 v6.4.7 h1:lwiTJr1DEkAgzljsUsORmWsVn5MQjt1BPJdPCt github.com/jedib0t/go-pretty/v6 v6.4.7/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= -github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= -github.com/jhump/protoreflect v1.16.0 
h1:54fZg+49widqXYQ0b+usAFHbMkBGR4PpXrsHc8+TBDg= -github.com/jhump/protoreflect v1.16.0/go.mod h1:oYPd7nPvcBw/5wlDfm/AVmU9zH9BgqGCI469pGxfj/8= +github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= +github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= +github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -792,8 +759,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -811,8 +778,24 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= -github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= +github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA= +github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= 
+github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38= +github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc/v3 v3.0.1 h1:3n7Es68YYGZb2Jf+k//llA4FTZMl3yCwIjFIk4ubevI= +github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk= +github.com/lestrrat-go/jwx/v3 v3.0.11 h1:yEeUGNUuNjcez/Voxvr7XPTYNraSQTENJgtVTfwvG/w= +github.com/lestrrat-go/jwx/v3 v3.0.11/go.mod h1:XSOAh2SiXm0QgRe3DulLZLyt+wUuEdFo81zuKTLcvgQ= +github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss= +github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg= +github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= +github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= @@ -820,15 +803,16 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= -github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8= github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -840,14 +824,14 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ= +github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= +github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -862,27 +846,29 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure 
v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= github.com/moby/moby v26.1.0+incompatible h1:mjepCwMH0KpCgPvrXjqqyCeTCHgzO7p9TwZ2nQMI2qU= github.com/moby/moby v26.1.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= @@ -892,12 +878,11 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozillazg/docker-credential-acr-helper v0.4.0 h1:Uoh3Z9CcpEDnLiozDx+D7oDgRq7X+R296vAqAumnOcw= -github.com/mozillazg/docker-credential-acr-helper v0.4.0/go.mod h1:2kiicb3OlPytmlNC9XGkLvVC+f0qTiJw3f/mhmeeQBg= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -906,6 +891,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f 
h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= +github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -931,8 +918,6 @@ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo= -github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -954,8 +939,8 @@ github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxe github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= -github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -974,11 +959,11 @@ github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+q github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-policy-agent/opa v1.6.0 h1:/S/cnNQJ2MUMNzizHPbisTWBHowmLkPrugY5jjkPlRQ= -github.com/open-policy-agent/opa v1.6.0/go.mod h1:zFmw4P+W62+CWGYRDDswfVYSCnPo6oYaktQnfIaRFC4= +github.com/open-policy-agent/opa v1.10.1 
h1:haIvxZSPky8HLjRrvQwWAjCPLg8JDFSZMbbG4yyUHgY= +github.com/open-policy-agent/opa v1.10.1/go.mod h1:7uPI3iRpOalJ0BhK6s1JALWPU9HvaV1XeBSSMZnr/PM= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -987,8 +972,6 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openvex/go-vex v0.2.5 h1:41utdp2rHgAGCsG+UbjmfMG5CWQxs15nGqir1eRgSrQ= github.com/openvex/go-vex v0.2.5/go.mod h1:j+oadBxSUELkrKh4NfNb+BPo77U3q7gdKME88IO/0Wo= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= @@ -1006,8 +989,6 @@ github.com/package-url/packageurl-go v0.1.1/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeG github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= -github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= @@ -1049,8 +1030,8 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1065,8 +1046,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod 
h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= -github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1074,19 +1055,19 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62zIzQwvoD7Ekj3ePDF5bv9Xxy0w6AZk0qYbjUk= -github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91 h1:s1LvMaU6mVwoFtbxv/rCZKE7/fwDmDY684FfUe4c1Io= +github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91/go.mod h1:JSbkp0BviKovYYt9XunS95M3mLPibE9bGg+Y95DsEEY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= -github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rodaine/protogofakeit v0.1.1 h1:ZKouljuRM3A+TArppfBqnH8tGZHOwM/pjvtXe9DaXH8= +github.com/rodaine/protogofakeit v0.1.1/go.mod h1:pXn/AstBYMaSfc1/RqH3N82pBuxtWgejz1AlYpY1mI0= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1095,8 +1076,8 @@ github.com/rogpeppe/go-internal v1.11.0/go.mod 
h1:ddIwULY96R17DhadqLgMfk9H9tvdUz github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= @@ -1105,11 +1086,10 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= -github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= @@ -1118,85 +1098,83 @@ github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= -github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= -github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= -github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= +github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0/go.mod 
h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= -github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= -github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= -github.com/shoenig/test v0.6.6/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= +github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/cosign/v2 v2.4.1 h1:b8UXEfJFks3hmTwyxrRNrn6racpmccUycBHxDMkEPvU= -github.com/sigstore/cosign/v2 v2.4.1/go.mod h1:GvzjBeUKigI+XYnsoVQDmMAsMMc6engxztRSuxE+x9I= -github.com/sigstore/fulcio v1.6.3 h1:Mvm/bP6ELHgazqZehL8TANS1maAkRoM23CRAdkM4xQI= -github.com/sigstore/fulcio v1.6.3/go.mod h1:5SDgLn7BOUVLKe1DwOEX3wkWFu5qEmhUlWm+SFf0GH8= -github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= -github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= -github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= -github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= -github.com/sigstore/sigstore-go v0.6.1 h1:tGkkv1oDIER+QYU5MrjqlttQOVDWfSkmYwMqkJhB/cg= -github.com/sigstore/sigstore-go v0.6.1/go.mod h1:Xe5GHmUeACRFbomUWzVkf/xYCn8xVifb9DgqJrV2dIw= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5 h1:qp2VFyKuFQvTGmZwk5Q7m5nE4NwnF9tHwkyz0gtWAck= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5/go.mod h1:DKlQjjr+GsWljEYPycI0Sf8URLCk4EbGA9qYjF47j4g= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5 h1:CRZcdYn5AOptStsLRAAACudAVmb1qUbhMlzrvm7ju3o= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5/go.mod h1:b9rFfITq2fp1M3oJmq6lFFhSrAz5vOEJH1qzbMsZWN4= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5 h1:7U0GsO0UGG1PdtgS6wBkRC0sMgq7BRVaFlPRwN4m1Qg= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5/go.mod h1:/2qrI0nnCy/DTIPOMFaZlFnNPWEn5UeS70P37XEM88o= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5 h1:S2ukEfN1orLKw2wEQIUHDDlzk0YcylhcheeZ5TGk8LI= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5/go.mod h1:m7sQxVJmDa+rsmS1m6biQxaLX83pzNS7ThUEyjOqkCU= -github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= -github.com/sigstore/timestamp-authority 
v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= +github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= +github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= +github.com/sigstore/fulcio v1.8.3 h1:zkuAkRHbD53hhYGlBHHeAW4NRDrrTiDHumAbcfSyyFw= +github.com/sigstore/fulcio v1.8.3/go.mod h1:YxP7TTdn9H5Gg+dXOsu61X36LLYxT2ZuvODhWelMNwA= +github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= +github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= +github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= +github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= +github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= +github.com/sigstore/sigstore v1.10.3 h1:s7fBYYOzW/2Vd0nND2ZdpWySb5vRF2u9eix/NZMHJm0= +github.com/sigstore/sigstore v1.10.3/go.mod h1:T26vXIkpnGEg391v3TaZ8EERcXbnjtZb/1erh5jbIQk= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= +github.com/sigstore/timestamp-authority v1.2.9 h1:L9Fj070/EbMC8qUk8BchkrYCS1BT5i93Bl6McwydkFs= +github.com/sigstore/timestamp-authority v1.2.9/go.mod h1:QyRnZchz4o+xdHyK5rvCWacCHxWmpX+mgvJwB1OXcLY= +github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= +github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY= github.com/spdx/tools-golang v0.5.3/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/uFZm2NTMhI= -github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= -github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= -github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= -github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= -github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod 
h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= -github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= -github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= +github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -1219,8 +1197,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/styrainc/regal v0.35.1 h1:3WZqPtKEbfxx1IWLkQ1OHhTpP4Nx3v/Zd+cuoBecd7I= github.com/styrainc/regal v0.35.1/go.mod h1:1hCkmFTWMPqzqgYsbWbaRDlXIMupI6qkcSg4LYSd16E= github.com/styrainc/roast v0.15.0 h1:cEjm6AfIPp0Z6fTVHK+kW5pWevekmZ69H4XlEcXrTRk= @@ -1229,10 +1207,10 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= -github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo= -github.com/testcontainers/testcontainers-go v0.35.0/go.mod h1:oEVBj5zrfJTrgjwONs1SsRbnBtH9OKl+IGl3UMcr2B4= +github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc= +github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU= +github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY= 
github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 h1:ZF+QBjOI+tILZjBaFj3HgFonKXUcwgJ4djLb6i42S3Q= github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834/go.mod h1:m9ymHTgNSEjuxvw8E7WWe4Pl4hZQHXONY8wE6dMLaRk= github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= @@ -1241,17 +1219,25 @@ github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gt github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.0.1 h1:11p9tXpq10KQEujxjcIjDSivMKCMLguls7erXHZnxJQ= -github.com/theupdateframework/go-tuf/v2 v2.0.1/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= +github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= +github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= +github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= -github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= @@ -1259,16 +1245,18 @@ github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLY github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= -github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= -github.com/vektah/gqlparser/v2 v2.5.28 h1:bIulcl3LF69ba6EiZVGD88y4MkM+Jxrf3P2MX8xLRkY= -github.com/vektah/gqlparser/v2 v2.5.28/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= +github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= +github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE= +github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/xanzy/go-gitlab v0.109.0 h1:RcRme5w8VpLXTSTTMZdVoQWY37qTJWg+gwdQl4aAttE= -github.com/xanzy/go-gitlab v0.109.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1298,8 +1286,8 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= -github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= +github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= +github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= @@ -1307,55 +1295,55 @@ github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6 github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod 
h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw= +gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk= gitlab.com/gitlab-org/security-products/analyzers/common/v3 v3.2.1 h1:XX5DKq473xrw/TnJB069cVRDhsN/AFX6ZJi0LQWmK74= gitlab.com/gitlab-org/security-products/analyzers/common/v3 v3.2.1/go.mod h1:MaLrVtJgCo/X9jxNkZUkZPCa7sc5lFAMqJVjsKFMUpc= gitlab.com/gitlab-org/security-products/analyzers/report/v5 v5.3.0 h1:raTSihUYSva9irNTmY2exbLSlqdsHX/TZjIDxqySfxM= gitlab.com/gitlab-org/security-products/analyzers/report/v5 v5.3.0/go.mod h1:eKb4+X3zesTGdFkT3ypYsV9YyW+uP0XCN1aTzDEY1vI= gitlab.com/gitlab-org/security-products/analyzers/ruleset/v3 v3.0.0 h1:RE5vKPiyJ1QUJgcTG/d9b5tHfYuDWEJyHN7pnUo0P4I= gitlab.com/gitlab-org/security-products/analyzers/ruleset/v3 v3.0.0/go.mod h1:U4aks9AhboVLdFywym1hAMEW0x/xgucm4pH5/xvX7n4= -go.einride.tech/aip v0.68.1 h1:16/AfSxcQISGN5z9C5lM+0mLYXihrHbQ1onvYTr93aQ= -go.einride.tech/aip v0.68.1/go.mod h1:XaFtaj4HuA3Zwk9xoBtTWgNubZ0ZZXv9BZJCkuKuWbg= +go.einride.tech/aip v0.73.0 h1:bPo4oqBo2ZQeBKo4ZzLb1kxYXTY1ysJhpvQyfuGzvps= +go.einride.tech/aip v0.73.0/go.mod h1:Mj7rFbmXEgw0dq1dqJ7JGMvYCZZVxmGOR3S4ZcV5LvQ= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= -go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= -go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI= -go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= -go.step.sm/crypto v0.51.2 h1:5EiCGIMg7IvQTGmJrwRosbXeprtT80OhoS/PJarg60o= -go.step.sm/crypto v0.51.2/go.mod h1:QK7czLjN2k+uqVp5CHXxJbhc70kVRSP+0CQF3zsR5M0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod 
h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= +go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1374,10 +1362,14 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -goa.design/goa v2.2.5+incompatible h1:mjAtiy7ZdZIkj974hpFxCR6bL69qprfV00Veu3Vybts= -goa.design/goa v2.2.5+incompatible/go.mod h1:NnzBwdNktihbNek+pPiFMQP9PPFsUt8MMPPyo9opDSo= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +goa.design/goa/v3 v3.22.6 h1:D2qDkAvdpf6ePr2iXKT+Ple5WDrjyes3iOfYD2yCpw0= +goa.design/goa/v3 v3.22.6/go.mod h1:rhssEXxox3+sKnYp18hPNFCz65I4hLWHEtJKewoNJWk= golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1396,14 +1388,13 @@ golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIi golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= 
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= -golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= +golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE= +golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1426,8 +1417,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1477,8 +1468,8 @@ golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1563,7 +1554,6 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1580,7 +1570,6 @@ golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1602,8 +1591,8 @@ golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1634,16 +1623,18 @@ golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= 
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.233.0 h1:iGZfjXAJiUFSSaekVB7LzXl6tRfEKhUN7FkZN++07tI= -google.golang.org/api v0.233.0/go.mod h1:TCIVLLlcwunlMpZIhIp7Ltk77W+vUSdUKAAIlbxY44c= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1656,14 +1647,14 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= -google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0= -google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20250505200425-f936aa4a68b2 h1:DbpkGFGRkd4GORg+IWQW2EhxUaa/My/PM8d1CGyTDMY= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20251103181224-f26f9409b101 h1:yPJt1QyhbMgVYk1uHU1fzFDusVK69zmYfO7uupO0/QE= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20251103181224-f26f9409b101/go.mod h1:ejCb7yLmK6GCVHp5qpeKbm4KZew/ldg+9b8kq5MONgk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1679,8 +1670,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= -google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1694,9 +1685,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1706,6 +1696,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1732,38 +1724,38 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools 
v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.28.6 h1:yy6u9CuIhmg55YvF/BavPBBXB+5QicB64njJXxVnzLo= -k8s.io/api v0.28.6/go.mod h1:AM6Ys6g9MY3dl/XNaNfg/GePI0FT7WBGu8efU/lirAo= -k8s.io/apimachinery v0.28.6 h1:RsTeR4z6S07srPg6XYrwXpTJVMXsjPXn0ODakMytSW0= -k8s.io/apimachinery v0.28.6/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= -k8s.io/client-go v0.28.6 h1:Gge6ziyIdafRchfoBKcpaARuz7jfrK1R1azuwORIsQI= -k8s.io/client-go v0.28.6/go.mod h1:+nu0Yp21Oeo/cBCsprNVXB2BfJTV51lFfe5tXl2rUL8= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/release-utils v0.8.4 h1:4QVr3UgbyY/d9p74LBhg0njSVQofUsAZqYOzVZBhdBw= -sigs.k8s.io/release-utils v0.8.4/go.mod h1:m1bHfscTemQp+z+pLCZnkXih9n0+WukIUU70n6nFnU0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= 
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/pkg/attestation/crafter/api/attestation/v1/crafting_state.pb.go b/pkg/attestation/crafter/api/attestation/v1/crafting_state.pb.go index 881295be0..1e01ac71f 100644 --- a/pkg/attestation/crafter/api/attestation/v1/crafting_state.pb.go +++ b/pkg/attestation/crafter/api/attestation/v1/crafting_state.pb.go @@ -2001,7 +2001,7 @@ var file_attestation_v1_crafting_state_proto_rawDesc = []byte{ 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x1a, 0x0a, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x1a, 0x0a, 0x0b, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, @@ -2190,240 +2190,240 @@ var file_attestation_v1_crafting_state_proto_rawDesc = []byte{ 0x56, 0x61, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xd6, 0x01, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x48, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xd5, 0x01, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x47, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x41, - 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x42, 0x09, 0xba, 0x48, 0x06, 0x82, 0x01, 0x03, 0x22, - 0x01, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, - 0x64, 0x22, 0x6b, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 
0x0a, - 0x15, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x55, 0x54, 0x48, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, - 0x41, 0x55, 0x54, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x54, 0x4f, - 0x4b, 0x45, 0x4e, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x46, 0x45, 0x44, 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x1a, 0x67, - 0x0a, 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x36, 0x0a, 0x17, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x61, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x15, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x55, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, - 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x61, 0x22, 0xe6, 0x01, 0x0a, 0x11, 0x52, 0x75, 0x6e, 0x6e, - 0x65, 0x72, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2c, 0x0a, - 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x65, - 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, - 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x35, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x75, - 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, - 0x22, 0xa1, 0x0c, 0x0a, 0x10, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x97, 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x82, 0x01, 0xba, 0x48, 0x7f, 0xba, 0x01, 0x7c, 0x0a, 0x0d, 0x6e, - 0x61, 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, 0x31, 0x31, 0x32, 0x33, 0x12, 0x3a, 0x6d, 0x75, - 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, - 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, - 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, - 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, - 0x5d, 0x28, 0x5b, 
0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, - 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x0e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, - 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x74, - 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x69, - 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x3e, 0x0a, 0x04, 0x77, 0x69, 0x74, 0x68, 0x18, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x77, - 0x69, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x39, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, - 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 
0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x0e, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, - 0x55, 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x74, 0x74, 0x65, - 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x53, 0x0a, 0x0f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2a, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0e, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x72, - 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, - 0x4b, 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x12, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, - 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x77, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x52, 0x0a, 0x72, 0x61, 0x77, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, - 0x67, 0x61, 0x74, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x67, 0x61, 0x74, 0x65, - 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0x37, 0x0a, 0x09, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4f, 0x0a, 0x09, 0x56, 0x69, 0x6f, - 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, - 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0xfc, 0x01, 0x0a, 0x09, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x82, 0x01, 0xba, 0x48, 0x7f, 0xba, 0x01, 0x7c, - 0x0a, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, 
0x31, 0x31, 0x32, 0x33, 0x12, - 0x3a, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x20, 0x6f, 0x6e, - 0x6c, 0x79, 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6c, 0x65, 0x74, - 0x74, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x61, - 0x6e, 0x64, 0x20, 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, - 0x30, 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, - 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x64, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x19, - 0x0a, 0x08, 0x6f, 0x72, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6f, 0x72, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x09, 0x52, 0x61, 0x77, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x22, 0xde, 0x02, 0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, - 0x1b, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, - 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, - 0x28, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x64, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x07, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x07, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x1a, 0x40, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x1b, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 
0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, - 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x0d, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, - 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x0c, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x44, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x32, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, - 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, 0x48, 0x00, 0x52, 0x08, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, 0x12, 0x3d, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, - 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, - 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x42, - 0x08, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xa3, 0x03, 0x0a, 0x10, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, - 0x74, 0x65, 0x61, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, - 0x12, 0x28, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, - 0x49, 0x64, 0x12, 
0x30, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x72, 0x65, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, - 0x01, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x7d, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x21, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x72, 0x65, 0x6c, 0x65, 0x61, - 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x72, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x72, 0x6b, 0x5f, 0x61, 0x73, 0x5f, - 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x6d, 0x61, 0x72, 0x6b, 0x41, 0x73, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x22, 0xde, - 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x46, 0x0a, 0x06, 0x64, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x74, - 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, - 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x64, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x2b, 0x0a, - 0x11, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x45, 0x6e, + 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 
0x42, 0x08, 0xba, 0x48, 0x05, 0x82, 0x01, 0x02, 0x20, + 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, + 0x22, 0x6b, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, + 0x41, 0x55, 0x54, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x55, 0x54, 0x48, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, + 0x55, 0x54, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x54, 0x4f, 0x4b, + 0x45, 0x4e, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x45, 0x44, 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x1a, 0x67, 0x0a, + 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x36, 0x0a, 0x17, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x15, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x55, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, + 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x61, 0x22, 0xe6, 0x01, 0x0a, 0x11, 0x52, 0x75, 0x6e, 0x6e, 0x65, + 0x72, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x6e, + 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, + 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x35, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, + 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x03, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, + 0xa1, 0x0c, 0x0a, 0x10, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x97, 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x82, 0x01, 0xba, 0x48, 0x7f, 0xba, 0x01, 0x7c, 0x0a, 0x0d, 0x6e, 0x61, + 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, 0x31, 0x31, 0x32, 0x33, 0x12, 0x3a, 0x6d, 0x75, 0x73, + 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6c, + 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6c, 0x65, 
0x74, 0x74, 0x65, 0x72, 0x73, + 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, + 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, + 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, 0x2d, 0x7a, 0x30, + 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x0e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, + 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x74, 0x74, + 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x69, 0x6f, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x3e, 0x0a, 0x04, 0x77, 0x69, 0x74, 0x68, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x77, 0x69, + 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x39, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 
0x4d, + 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, 0x55, + 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x53, 0x0a, 0x0f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0e, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x4b, + 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x12, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x45, 0x76, 0x61, 0x6c, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x77, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x0a, 0x72, 0x61, 0x77, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, + 0x61, 0x74, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x67, 0x61, 0x74, 0x65, 0x1a, + 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, - 0x4f, 0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x72, 0x61, 0x66, 0x74, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x37, 0x0a, 0x09, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 
0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4f, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, + 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0xfc, 0x01, 0x0a, 0x09, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x82, 0x01, 0xba, 0x48, 0x7f, 0xba, 0x01, 0x7c, 0x0a, + 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x64, 0x6e, 0x73, 0x2d, 0x31, 0x31, 0x32, 0x33, 0x12, 0x3a, + 0x6d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x6c, + 0x79, 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6c, 0x65, 0x74, 0x74, + 0x65, 0x72, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x68, 0x79, 0x70, 0x68, 0x65, 0x6e, 0x73, 0x2e, 0x1a, 0x2f, 0x74, 0x68, 0x69, 0x73, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x28, 0x27, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x30, + 0x2d, 0x39, 0x5d, 0x28, 0x5b, 0x2d, 0x61, 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x5b, 0x61, + 0x2d, 0x7a, 0x30, 0x2d, 0x39, 0x5d, 0x29, 0x3f, 0x24, 0x27, 0x29, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1f, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x19, 0x0a, + 0x08, 0x6f, 0x72, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6f, 0x72, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x09, 0x52, 0x61, 0x77, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x22, 0xde, 0x02, 0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1b, + 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x28, + 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x61, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x04, 
0x64, + 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x64, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x07, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x1a, 0x40, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x6c, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, + 0x03, 0x75, 0x72, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x0d, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, + 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x0c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x12, 0x44, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x32, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x61, 0x66, 0x74, 0x69, + 0x6e, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, 0x48, 0x00, 0x52, 0x08, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x56, 0x32, 0x12, 0x3d, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x74, + 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, + 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x42, 0x08, + 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xa3, 0x03, 0x0a, 0x10, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x38, 
0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x65, 0x61, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x12, + 0x28, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, + 0x64, 0x12, 0x30, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x72, 0x65, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x2b, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7d, + 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x21, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x72, 0x65, 0x6c, 0x65, + 0x61, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x72, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x72, + 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6d, + 0x61, 0x72, 0x6b, 0x41, 0x73, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x22, 0xde, 0x02, + 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x46, 0x0a, 0x06, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x74, 0x74, + 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x11, + 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x4f, + 0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x6c, 0x6f, 0x6f, 0x70, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x6c, 0x6f, 0x6f, 0x70, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x72, 0x61, 0x66, 0x74, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/attestation/crafter/api/attestation/v1/crafting_state_validations.go b/pkg/attestation/crafter/api/attestation/v1/crafting_state_validations.go index 1a5f32a17..80ecbc5b8 100644 --- a/pkg/attestation/crafter/api/attestation/v1/crafting_state_validations.go +++ b/pkg/attestation/crafter/api/attestation/v1/crafting_state_validations.go @@ -19,7 +19,7 @@ import ( "fmt" "strings" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" ) // Custom validations diff --git a/pkg/attestation/crafter/api/buf.gen.yaml b/pkg/attestation/crafter/api/buf.gen.yaml index efba7ab33..7b15ad0f9 100644 --- a/pkg/attestation/crafter/api/buf.gen.yaml +++ b/pkg/attestation/crafter/api/buf.gen.yaml @@ -1,5 +1,5 @@ -version: v1 +version: v2 plugins: - - name: go + - local: protoc-gen-go out: . - opt: paths=source_relative \ No newline at end of file + opt: paths=source_relative diff --git a/pkg/attestation/crafter/api/buf.lock b/pkg/attestation/crafter/api/buf.lock deleted file mode 100644 index 25a4677ba..000000000 --- a/pkg/attestation/crafter/api/buf.lock +++ /dev/null @@ -1,13 +0,0 @@ -# Generated by buf. DO NOT EDIT. 
-version: v1 -deps: - - remote: buf.build - owner: bufbuild - repository: protovalidate - commit: b983156c5e994cc9892e0ce3e64e17e0 - digest: shake256:fb47a62989d38c2529bcc5cd86ded43d800eb84cee82b42b9e8a9e815d4ee8134a0fb9d0ce8299b27c2d2bbb7d6ade0c4ad5a8a4d467e1e2c7ca619ae9f634e2 - - remote: buf.build - owner: googleapis - repository: googleapis - commit: 7a6bc1e3207144b38e9066861e1de0ff - digest: shake256:de26a277fc28b8b411ecf58729d78d32fcf15090ffd998a4469225b17889bfb51442eaab04bb7a8d88d203ecdf0a9febd4ffd52c18ed1c2229160c7bd353ca95 diff --git a/pkg/attestation/crafter/api/buf.yaml b/pkg/attestation/crafter/api/buf.yaml deleted file mode 100644 index fe7089ac2..000000000 --- a/pkg/attestation/crafter/api/buf.yaml +++ /dev/null @@ -1,10 +0,0 @@ -version: v1 -breaking: - use: - - FILE -deps: - - buf.build/googleapis/googleapis:4ed3bc159a8b4ac68fe253218760d035 - - buf.build/bufbuild/protovalidate:b983156c5e994cc9892e0ce3e64e17e0 -lint: - use: - - DEFAULT diff --git a/pkg/attestation/crafter/crafter.go b/pkg/attestation/crafter/crafter.go index 2e1fbb9b1..ab31913f9 100644 --- a/pkg/attestation/crafter/crafter.go +++ b/pkg/attestation/crafter/crafter.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" v1 "github.com/chainloop-dev/chainloop/app/controlplane/api/controlplane/v1" schemaapi "github.com/chainloop-dev/chainloop/app/controlplane/api/workflowcontract/v1" "github.com/chainloop-dev/chainloop/internal/ociauth" @@ -68,7 +68,7 @@ type Crafter struct { stateManager StateManager // Authn is used to authenticate with the OCI registry ociRegistryAuth authn.Keychain - validator *protovalidate.Validator + validator protovalidate.Validator // attestation client is used to load chainloop policies attClient v1.AttestationServiceClient diff --git a/pkg/attestation/crafter/materials/materials.go b/pkg/attestation/crafter/materials/materials.go index d88b0040e..3ea891c4a 100644 --- a/pkg/attestation/crafter/materials/materials.go +++ b/pkg/attestation/crafter/materials/materials.go @@ -24,8 +24,8 @@ import ( "os" "time" + "buf.build/go/protovalidate" "code.cloudfoundry.org/bytefmt" - "github.com/bufbuild/protovalidate-go" schemaapi "github.com/chainloop-dev/chainloop/app/controlplane/api/workflowcontract/v1" api "github.com/chainloop-dev/chainloop/pkg/attestation/crafter/api/attestation/v1" "github.com/chainloop-dev/chainloop/pkg/casclient" diff --git a/pkg/attestation/crafter/materials/oci_image.go b/pkg/attestation/crafter/materials/oci_image.go index 8b3e71df9..dc61f9e75 100644 --- a/pkg/attestation/crafter/materials/oci_image.go +++ b/pkg/attestation/crafter/materials/oci_image.go @@ -35,7 +35,7 @@ import ( "github.com/google/go-containerregistry/pkg/v1/layout" "github.com/google/go-containerregistry/pkg/v1/remote" "github.com/rs/zerolog" - cosigntypes "github.com/sigstore/cosign/v2/pkg/types" + cosigntypes "github.com/sigstore/cosign/v3/pkg/types" ) const ( diff --git a/pkg/attestation/signer/cosign/cosign.go b/pkg/attestation/signer/cosign/cosign.go index 2c745e13d..11724716b 100644 --- a/pkg/attestation/signer/cosign/cosign.go +++ b/pkg/attestation/signer/cosign/cosign.go @@ -25,7 +25,7 @@ import ( "syscall" "github.com/rs/zerolog" - "github.com/sigstore/cosign/v2/pkg/signature" + "github.com/sigstore/cosign/v3/pkg/signature" sigstoresigner "github.com/sigstore/sigstore/pkg/signature" "golang.org/x/term" ) diff --git a/pkg/attestation/verifier/timestamp.go b/pkg/attestation/verifier/timestamp.go index ba42f6740..f601e2848 100644 
--- a/pkg/attestation/verifier/timestamp.go +++ b/pkg/attestation/verifier/timestamp.go @@ -87,7 +87,7 @@ func VerifyTimestamps(sb *bundle.Bundle, tr *TrustedRoot) error { return fmt.Errorf("could not get verification material: %w", err) } // verify signing certificate issuing time - if vc != nil && vc.GetCertificate() != nil && !vc.ValidAtTime(ts.Time, nil) { + if vc != nil && vc.Certificate() != nil && !vc.ValidAtTime(ts.Time, nil) { continue } verifiedTimestamps = append(verifiedTimestamps, ts) diff --git a/pkg/attestation/verifier/verifier.go b/pkg/attestation/verifier/verifier.go index 4db79b0c6..e0dacb2f8 100644 --- a/pkg/attestation/verifier/verifier.go +++ b/pkg/attestation/verifier/verifier.go @@ -24,7 +24,7 @@ import ( "github.com/chainloop-dev/chainloop/pkg/attestation" "github.com/secure-systems-lab/go-securesystemslib/dsse" - "github.com/sigstore/cosign/v2/pkg/cosign" + "github.com/sigstore/cosign/v3/pkg/cosign" protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1" sigstorebundle "github.com/sigstore/sigstore-go/pkg/bundle" sigdsee "github.com/sigstore/sigstore/pkg/signature/dsse" @@ -63,9 +63,9 @@ func VerifyBundle(ctx context.Context, bundleBytes []byte, tr *TrustedRoot) erro } } - if vc != nil && vc.GetCertificate() != nil { + if vc != nil && vc.Certificate() != nil { hasVerificationMaterial = true - signingCert := vc.GetCertificate() + signingCert := vc.Certificate() aki := fmt.Sprintf("%x", sha256.Sum256(signingCert.AuthorityKeyId)) chain, ok := tr.Keys[aki] diff --git a/pkg/credentials/api/buf.gen.yaml b/pkg/credentials/api/buf.gen.yaml index efba7ab33..7b15ad0f9 100644 --- a/pkg/credentials/api/buf.gen.yaml +++ b/pkg/credentials/api/buf.gen.yaml @@ -1,5 +1,5 @@ -version: v1 +version: v2 plugins: - - name: go + - local: protoc-gen-go out: . - opt: paths=source_relative \ No newline at end of file + opt: paths=source_relative diff --git a/pkg/credentials/api/buf.lock b/pkg/credentials/api/buf.lock deleted file mode 100644 index 0620a5988..000000000 --- a/pkg/credentials/api/buf.lock +++ /dev/null @@ -1,13 +0,0 @@ -# Generated by buf. DO NOT EDIT. 
-version: v1 -deps: - - remote: buf.build - owner: bufbuild - repository: protovalidate - commit: b983156c5e994cc9892e0ce3e64e17e0 - digest: shake256:fb47a62989d38c2529bcc5cd86ded43d800eb84cee82b42b9e8a9e815d4ee8134a0fb9d0ce8299b27c2d2bbb7d6ade0c4ad5a8a4d467e1e2c7ca619ae9f634e2 - - remote: buf.build - owner: googleapis - repository: googleapis - commit: 4ed3bc159a8b4ac68fe253218760d035 - digest: shake256:7149cf5e9955c692d381e557830555d4e93f205a0f1b8e2dfdae46d029369aa3fc1980e35df0d310f7cc3b622f93e19ad276769a283a967dd3065ddfd3a40e13 diff --git a/pkg/credentials/api/buf.yaml b/pkg/credentials/api/buf.yaml deleted file mode 100644 index 2953bde9b..000000000 --- a/pkg/credentials/api/buf.yaml +++ /dev/null @@ -1,10 +0,0 @@ -version: v1 -breaking: - use: - - FILE -deps: - - buf.build/googleapis/googleapis:4ed3bc159a8b4ac68fe253218760d035 - - buf.build/bufbuild/protovalidate:b983156c5e994cc9892e0ce3e64e17e0 -lint: - use: - - DEFAULT \ No newline at end of file diff --git a/pkg/credentials/manager/manager.go b/pkg/credentials/manager/manager.go index af531f084..580f19080 100644 --- a/pkg/credentials/manager/manager.go +++ b/pkg/credentials/manager/manager.go @@ -20,7 +20,7 @@ import ( "fmt" "io" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" "github.com/chainloop-dev/chainloop/pkg/credentials" api "github.com/chainloop-dev/chainloop/pkg/credentials/api/credentials/v1" "github.com/chainloop-dev/chainloop/pkg/credentials/aws" diff --git a/pkg/policies/policies.go b/pkg/policies/policies.go index d82170c2b..5e8c67964 100644 --- a/pkg/policies/policies.go +++ b/pkg/policies/policies.go @@ -24,12 +24,12 @@ import ( "slices" "strings" - "github.com/bufbuild/protovalidate-go" + "buf.build/go/protovalidate" v13 "github.com/chainloop-dev/chainloop/app/controlplane/api/controlplane/v1" "github.com/chainloop-dev/chainloop/pkg/templates" intoto "github.com/in-toto/attestation/go/v1" "github.com/rs/zerolog" - "github.com/sigstore/cosign/v2/pkg/blob" + "github.com/sigstore/cosign/v3/pkg/blob" "google.golang.org/grpc" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto"
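
Note on the recurring protovalidate migration above (crafter.go, materials.go, manager.go, policies.go): the module moved from github.com/bufbuild/protovalidate-go to buf.build/go/protovalidate, and protovalidate.New() now returns the protovalidate.Validator interface rather than a *protovalidate.Validator pointer, which is why struct fields and function signatures in this diff drop the pointer. A minimal sketch of what a call site looks like after the migration; the CraftingState message is used purely for illustration and any protovalidate-annotated message would do:

```go
package main

import (
	"fmt"
	"log"

	"buf.build/go/protovalidate"
	v1 "github.com/chainloop-dev/chainloop/pkg/attestation/crafter/api/attestation/v1"
)

func main() {
	// New() now returns the Validator interface instead of *Validator.
	validator, err := protovalidate.New()
	if err != nil {
		log.Fatalf("creating validator: %v", err)
	}

	// Validate works the same as before; only the receiver type changed.
	state := &v1.CraftingState{}
	if err := validator.Validate(state); err != nil {
		fmt.Printf("validation error: %v\n", err)
		return
	}
	fmt.Println("message is valid")
}
```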