From d2a0ecea33471389f8683dd562fd1553a1140afc Mon Sep 17 00:00:00 2001 From: Antonin Stefanutti Date: Mon, 21 Aug 2023 11:24:00 +0200 Subject: [PATCH 1/3] Move APIs to codeflare.dev groups --- Makefile | 12 +- ...=> quota.codeflare.dev_quotasubtrees.yaml} | 4 +- .../workload.codeflare.dev_appwrappers.yaml | 149 +++++----- ...orkload.codeflare.dev_schedulingspecs.yaml | 4 +- config/crd/kustomization.yaml | 6 +- ...=> quota.codeflare.dev_quotasubtrees.yaml} | 4 +- .../workload.codeflare.dev_appwrappers.yaml | 149 +++++----- ...orkload.codeflare.dev_schedulingspecs.yaml | 4 +- .../mcad-controller/templates/deployment.yaml | 4 +- doc/usage/aw-01.yaml | 2 +- doc/usage/aw-02.yaml | 2 +- doc/usage/examples/aw-1-k8s-job1.yaml | 2 +- doc/usage/examples/aw-1-k8s-job2.yaml | 2 +- doc/usage/examples/aw-1-k8s-job3.yaml | 2 +- .../kuberay/config/aw-kuberay-glue.yaml | 2 +- .../kuberay/config/aw-raycluster.yaml | 2 +- .../kuberay/config/xqueuejob-controller.yaml | 2 +- doc/usage/quota_management/quickstart.md | 6 +- doc/usage/tutorial.md | 6 +- pkg/apis/controller/v1beta1/doc.go | 2 +- pkg/apis/controller/v1beta1/register.go | 2 +- pkg/apis/quotaplugins/quotasubtree/v1/doc.go | 2 +- .../quotaplugins/quotasubtree/v1/register.go | 2 +- pkg/client/clientset/versioned/clientset.go | 36 +-- .../versioned/fake/clientset_generated.go | 20 +- .../clientset/versioned/fake/register.go | 8 +- .../clientset/versioned/scheme/register.go | 8 +- .../typed/controller/v1beta1/appwrapper.go | 2 +- .../controller/v1beta1/controller_client.go | 26 +- .../v1beta1/fake/fake_appwrapper.go | 6 +- .../v1beta1/fake/fake_controller_client.go | 6 +- .../quotasubtree/v1/fake/fake_quotasubtree.go | 6 +- .../v1/fake/fake_quotasubtree_client.go | 6 +- .../typed/quotasubtree/v1/quotasubtree.go | 2 +- .../quotasubtree/v1/quotasubtree_client.go | 26 +- .../controller/v1beta1/appwrapper.go | 4 +- .../informers/externalversions/factory.go | 8 +- .../informers/externalversions/generic.go | 8 +- .../quotasubtree/v1/quotasubtree.go | 4 +- .../queuejob/queuejob_controller_ex.go | 100 ++++--- .../queuejobdispatch/queuejobagent.go | 6 +- .../genericresource/genericresource.go | 27 +- .../quotasubtmgr/quota_subtree_manager.go | 4 +- .../steps/00-assert.yaml | 2 +- .../steps/01-assert.yaml | 2 +- .../steps/01-install.yaml | 2 +- .../steps/02-assert.yaml | 2 +- .../steps/02-install.yaml | 2 +- .../steps/03-assert.yaml | 2 +- .../steps/03-install.yaml | 2 +- .../steps/04-assert.yaml | 4 +- .../steps/04-install.yaml | 2 +- .../steps/05-assert.yaml | 4 +- .../steps/05-install-single-quota-tree.yaml | 4 +- .../steps/06-assert.yaml | 2 +- .../steps/06-install.yaml | 2 +- .../steps/07-assert.yaml | 2 +- .../steps/08-assert.yaml | 2 +- .../steps/08-install.yaml | 2 +- .../steps/00-assert.yaml | 8 +- .../steps/01-assert.yaml | 2 +- .../steps/01-install.yaml | 2 +- .../steps/02-assert.yaml | 2 +- .../steps/02-install.yaml | 2 +- .../steps/04-assert.yaml | 2 +- .../steps/04-install.yaml | 2 +- .../steps/00-assert.yaml | 8 +- .../steps/01-assert.yaml | 2 +- .../steps/01-install.yaml | 2 +- .../steps/02-assert.yaml | 2 +- .../steps/02-install.yaml | 2 +- .../steps/03-assert.yaml | 10 +- .../steps/03-install-new-quota-node.yaml | 4 +- .../steps/04-assert.yaml | 2 +- .../steps/04-install.yaml | 2 +- .../steps/05-assert.yaml | 2 +- .../steps/05-install.yaml | 2 +- test/e2e-kuttl/install-quota-subtree.yaml | 8 +- test/e2e-kuttl/quota-errors/00-assert.yaml | 4 +- test/e2e-kuttl/quota-errors/01-assert.yaml | 6 +- test/e2e-kuttl/quota-errors/03-assert.yaml | 2 +- 
test/e2e-kuttl/quota-errors/03-install.yaml | 2 +- test/e2e-kuttl/quota-forest/00-assert.yaml | 4 +- test/e2e-kuttl/quota-forest/01-assert.yaml | 8 +- test/e2e-kuttl/quota-forest/04-assert.yaml | 2 +- test/e2e-kuttl/quota-forest/04-install.yaml | 2 +- test/e2e-kuttl/quota-forest/05-assert.yaml | 2 +- test/e2e-kuttl/quota-forest/05-install.yaml | 2 +- test/e2e-kuttl/quota-forest/06-assert.yaml | 6 +- test/e2e-kuttl/quota-forest/06-install.yaml | 2 +- test/e2e-kuttl/quota-forest/07-assert.yaml | 8 +- test/e2e-kuttl/quota-forest/07-install.yaml | 2 +- test/e2e-kuttl/quota-forest/08-assert.yaml | 10 +- test/e2e-kuttl/quota-forest/08-install.yaml | 2 +- test/e2e-kuttl/quota-forest/09-assert.yaml | 4 +- test/e2e-kuttl/quota-forest/09-install.yaml | 4 +- test/e2e-kuttl/quota-forest/10-assert.yaml | 2 +- test/e2e-kuttl/quota-forest/10-install.yaml | 2 +- test/e2e-kuttl/quota-forest/11-assert.yaml | 2 +- test/e2e-kuttl/quota-forest/11-install.yaml | 2 +- test/e2e/e2e_test.go | 16 +- test/e2e/job.go | 264 ------------------ test/e2e/predicates.go | 204 -------------- test/e2e/queue.go | 37 +-- test/e2e/quota.go | 22 +- test/e2e/util.go | 71 ++--- test/kuttl-test-deployment-02.yaml | 2 +- test/kuttl-test-deployment-03.yaml | 2 +- test/kuttl-test.yaml | 2 +- test/perf-test/preempt-exp-kwok.yaml | 2 +- test/perf-test/preempt-exp.yaml | 2 +- test/yaml/0001-aw-generic-deployment-3.yaml | 2 +- test/yaml/0002-aw-job-quota.yaml | 2 +- test/yaml/0003-aw-job-no-quota.yaml | 2 +- test/yaml/0004-aw-large-job-no-quota.yaml | 2 +- test/yaml/0005-aw-two-quota-jobs.yaml | 4 +- test/yaml/0006-aw-init-containers.yaml | 2 +- test/yaml/0008-aw-default.yaml | 2 +- 118 files changed, 515 insertions(+), 994 deletions(-) rename config/crd/bases/{ibm.com_quotasubtrees.yaml => quota.codeflare.dev_quotasubtrees.yaml} (98%) rename deployment/mcad-controller/crds/mcad.ibm.com_appwrappers.yaml => config/crd/bases/workload.codeflare.dev_appwrappers.yaml (86%) rename deployment/mcad-controller/crds/mcad.ibm.com_schedulingspecs.yaml => config/crd/bases/workload.codeflare.dev_schedulingspecs.yaml (98%) rename deployment/mcad-controller/crds/{ibm.com_quotasubtrees.yaml => quota.codeflare.dev_quotasubtrees.yaml} (98%) rename config/crd/bases/mcad.ibm.com_appwrappers.yaml => deployment/mcad-controller/crds/workload.codeflare.dev_appwrappers.yaml (86%) rename config/crd/bases/mcad.ibm.com_schedulingspecs.yaml => deployment/mcad-controller/crds/workload.codeflare.dev_schedulingspecs.yaml (98%) delete mode 100644 test/e2e/job.go delete mode 100644 test/e2e/predicates.go diff --git a/Makefile b/Makefile index b1de384cb..c1e8ec7c9 100644 --- a/Makefile +++ b/Makefile @@ -217,15 +217,15 @@ clean: #CRD file maintenance rules DEPLOYMENT_CRD_DIR=deployment/mcad-controller/crds CRD_BASE_DIR=config/crd/bases -MCAD_CRDS= ${DEPLOYMENT_CRD_DIR}/ibm.com_quotasubtrees.yaml \ - ${DEPLOYMENT_CRD_DIR}/mcad.ibm.com_appwrappers.yaml \ - ${DEPLOYMENT_CRD_DIR}/mcad.ibm.com_schedulingspecs.yaml +MCAD_CRDS= ${DEPLOYMENT_CRD_DIR}/quota.codeflare.dev_quotasubtrees.yaml \ + ${DEPLOYMENT_CRD_DIR}/workload.codeflare.dev_appwrappers.yaml \ + ${DEPLOYMENT_CRD_DIR}/workload.codeflare.dev_schedulingspecs.yaml update-deployment-crds: ${MCAD_CRDS} -${DEPLOYMENT_CRD_DIR}/ibm.com_quotasubtrees.yaml : ${CRD_BASE_DIR}/ibm.com_quotasubtrees.yaml -${DEPLOYMENT_CRD_DIR}/mcad.ibm.com_appwrappers.yaml : ${CRD_BASE_DIR}/mcad.ibm.com_appwrappers.yaml -${DEPLOYMENT_CRD_DIR}/mcad.ibm.com_schedulingspecs.yaml : ${CRD_BASE_DIR}/mcad.ibm.com_schedulingspecs.yaml 
+${DEPLOYMENT_CRD_DIR}/quota.codeflare.dev_quotasubtrees.yaml : ${CRD_BASE_DIR}/quota.codeflare.dev_quotasubtrees.yaml +${DEPLOYMENT_CRD_DIR}/workload.codeflare.dev_appwrappers.yaml : ${CRD_BASE_DIR}/workload.codeflare.dev_appwrappers.yaml +${DEPLOYMENT_CRD_DIR}/workload.codeflare.dev_schedulingspecs.yaml : ${CRD_BASE_DIR}/workload.codeflare.dev_schedulingspecs.yaml $(DEPLOYMENT_CRD_DIR)/%: ${CRD_BASE_DIR}/% cp $< $@ diff --git a/config/crd/bases/ibm.com_quotasubtrees.yaml b/config/crd/bases/quota.codeflare.dev_quotasubtrees.yaml similarity index 98% rename from config/crd/bases/ibm.com_quotasubtrees.yaml rename to config/crd/bases/quota.codeflare.dev_quotasubtrees.yaml index a410ab505..b66294494 100644 --- a/config/crd/bases/ibm.com_quotasubtrees.yaml +++ b/config/crd/bases/quota.codeflare.dev_quotasubtrees.yaml @@ -5,9 +5,9 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null - name: quotasubtrees.ibm.com + name: quotasubtrees.quota.codeflare.dev spec: - group: ibm.com + group: quota.codeflare.dev names: kind: QuotaSubtree listKind: QuotaSubtreeList diff --git a/deployment/mcad-controller/crds/mcad.ibm.com_appwrappers.yaml b/config/crd/bases/workload.codeflare.dev_appwrappers.yaml similarity index 86% rename from deployment/mcad-controller/crds/mcad.ibm.com_appwrappers.yaml rename to config/crd/bases/workload.codeflare.dev_appwrappers.yaml index cf3032a5e..f5c640e8d 100644 --- a/deployment/mcad-controller/crds/mcad.ibm.com_appwrappers.yaml +++ b/config/crd/bases/workload.codeflare.dev_appwrappers.yaml @@ -5,9 +5,9 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null - name: appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev spec: - group: mcad.ibm.com + group: workload.codeflare.dev names: kind: AppWrapper listKind: AppWrapperList @@ -259,11 +259,11 @@ spec: description: allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is "true". It may be set to "false" if the cluster load-balancer - does not rely on NodePorts. allocateLoadBalancerNodePorts - may only be set for services with type LoadBalancer and - will be cleared if the type is changed to any other type. - This field is alpha-level and is only honored by servers - that enable the ServiceLBNodePortControl feature. + does not rely on NodePorts. If the caller requests specific + NodePorts (by specifying a value), those requests will be + respected, regardless of this field. This field may only + be set for services with type LoadBalancer and will be cleared + if the type is changed to any other type. type: boolean clusterIP: description: 'clusterIP is the IP address of the service and @@ -306,14 +306,11 @@ spec: a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] - and clusterIP have the same value. \n Unless the \"IPv6DualStack\" - feature gate is enabled, this field is limited to one value, - which must be the same as the clusterIP field. If the feature - gate is enabled, this field may hold a maximum of two entries - (dual-stack IPs, in either order). These IPs must correspond - to the values of the ipFamilies field. Both clusterIPs and - ipFamilies are governed by the ipFamilyPolicy field. 
More - info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + and clusterIP have the same value. \n This field may hold + a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies + field. Both clusterIPs and ipFamilies are governed by the + ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" items: type: string type: array @@ -333,16 +330,25 @@ spec: mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - and requires Type to be + and requires `type` to be "ExternalName". type: string externalTrafficPolicy: - description: externalTrafficPolicy denotes if this Service - desires to route external traffic to node-local or cluster-wide - endpoints. "Local" preserves the client source IP and avoids - a second hop for LoadBalancer and Nodeport type services, - but risks potentially imbalanced traffic spreading. "Cluster" - obscures the client source IP and may cause a second hop - to another node, but should have good overall load-spreading. + description: externalTrafficPolicy describes how nodes distribute + service traffic they receive on one of the Service's "externally-facing" + addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). + If set to "Local", the proxy will configure the service + in a way that assumes that external load balancers will + take care of balancing the service traffic between nodes, + and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client + source IP. (Traffic mistakenly sent to a node with no endpoints + will be dropped.) The default value, "Cluster", uses the + standard behavior of routing to all endpoints evenly (possibly + modified by topology and other features). Note that traffic + sent to an External IP or LoadBalancer IP from within the + cluster will always get "Cluster" semantics, but clients + sending to a NodePort from within the cluster may need to + take traffic policy into account when picking a node. type: string healthCheckNodePort: description: healthCheckNodePort specifies the healthcheck @@ -355,23 +361,33 @@ spec: for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to - no longer need it (e.g. changing type). + no longer need it (e.g. changing type). This field cannot + be updated once set. format: int32 type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes distribute + service traffic they receive on the ClusterIP. If set to + "Local", the proxy will assume that pods only want to talk + to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The + default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology + and other features). + type: string ipFamilies: description: "IPFamilies is a list of IP families (e.g. IPv4, - IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" - feature gate. This field is usually assigned automatically - based on cluster configuration and the ipFamilyPolicy field. 
- If this field is specified manually, the requested family - is available in the cluster, and ipFamilyPolicy allows it, - it will be used; otherwise creation of the service will - fail. This field is conditionally mutable: it allows for + IPv6) assigned to this service. This field is usually assigned + automatically based on cluster configuration and the ipFamilyPolicy + field. If this field is specified manually, the requested + family is available in the cluster, and ipFamilyPolicy allows + it, it will be used; otherwise creation of the service will + fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not - allow changing the primary IP family of the Service. Valid + allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, - and does apply to \"headless\" services. This field will + and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName. \n This field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond @@ -387,31 +403,52 @@ spec: x-kubernetes-list-type: atomic ipFamilyPolicy: description: IPFamilyPolicy represents the dual-stack-ness - requested or required by this Service, and is gated by the - "IPv6DualStack" feature gate. If there is no value provided, - then this field will be set to SingleStack. Services can - be "SingleStack" (a single IP family), "PreferDualStack" + requested or required by this Service. If there is no value + provided, then this field will be set to SingleStack. Services + can be "SingleStack" (a single IP family), "PreferDualStack" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or "RequireDualStack" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the - value of this field. This field will be wiped when updating + value of this field. This field will be wiped when updating a service to type ExternalName. type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load balancer + implementation this Service belongs to. If specified, the + value of this field must be a label-style identifier, with + an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". + Unprefixed names are reserved for end-users. This field + can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is + used, today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any default + load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only + be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped + when a service is updated to a non 'LoadBalancer' type. + type: string loadBalancerIP: - description: 'Only applies to Service Type: LoadBalancer LoadBalancer - will get created with the IP specified in this field. This - feature depends on whether the underlying cloud-provider + description: 'Only applies to Service Type: LoadBalancer. 
+ This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider - does not support the feature.' + does not support the feature. Deprecated: This field was + under-specified and its meaning varies across implementations, + and it cannot support dual-stack. As of Kubernetes v1.24, + users are encouraged to use implementation-specific annotations + when available. This field may be removed in a future API + version.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the - feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/' + feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' items: type: string type: array @@ -426,11 +463,9 @@ spec: description: The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service - names (as per RFC-6335 and http://www.iana.org/assignments/service-names). + names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such - as mycompany.com/my-custom-protocol. This is a beta - field that is guarded by the ServiceAppProtocol feature - gate and enabled by default. + as mycompany.com/my-custom-protocol. type: string name: description: The name of this port within the service. @@ -507,6 +542,7 @@ spec: to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/' type: object + x-kubernetes-map-type: atomic sessionAffinity: description: 'Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. @@ -529,25 +565,6 @@ spec: type: integer type: object type: object - topologyKeys: - description: topologyKeys is a preference-order list of topology - keys which implementations of services should use to preferentially - sort endpoints when accessing this Service, it can not be - used at the same time as externalTrafficPolicy=Local. Topology - keys must be valid label keys and at most 16 keys may be - specified. Endpoints are chosen based on the first topology - key with available backends. If this field is specified - and all entries have no backends that match the topology - of the client, the service has no backends for that client - and connections should fail. The special value "*" may be - used to mean "any topology". This catch-all value, if used, - only makes sense as the last value in the list. If this - is not specified or empty, no topology constraints will - be applied. This field is alpha-level and is only honored - by servers that enable the ServiceTopology feature. - items: - type: string - type: array type: description: 'type determines how the Service is exposed. Defaults to ClusterIP. 
Valid options are ExternalName, ClusterIP, diff --git a/deployment/mcad-controller/crds/mcad.ibm.com_schedulingspecs.yaml b/config/crd/bases/workload.codeflare.dev_schedulingspecs.yaml similarity index 98% rename from deployment/mcad-controller/crds/mcad.ibm.com_schedulingspecs.yaml rename to config/crd/bases/workload.codeflare.dev_schedulingspecs.yaml index e91a18ffd..6330ddb0f 100644 --- a/deployment/mcad-controller/crds/mcad.ibm.com_schedulingspecs.yaml +++ b/config/crd/bases/workload.codeflare.dev_schedulingspecs.yaml @@ -5,9 +5,9 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null - name: schedulingspecs.mcad.ibm.com + name: schedulingspecs.workload.codeflare.dev spec: - group: mcad.ibm.com + group: workload.codeflare.dev names: kind: SchedulingSpec listKind: SchedulingSpecList diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 6baa0d4cc..157b04401 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,6 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - bases/ibm.com_quotasubtrees.yaml - - bases/mcad.ibm.com_appwrappers.yaml - - bases/mcad.ibm.com_schedulingspecs.yaml + - bases/quota.codeflare.dev_quotasubtrees.yaml + - bases/workload.codeflare.dev_appwrappers.yaml + - bases/workload.codeflare.dev_schedulingspecs.yaml diff --git a/deployment/mcad-controller/crds/ibm.com_quotasubtrees.yaml b/deployment/mcad-controller/crds/quota.codeflare.dev_quotasubtrees.yaml similarity index 98% rename from deployment/mcad-controller/crds/ibm.com_quotasubtrees.yaml rename to deployment/mcad-controller/crds/quota.codeflare.dev_quotasubtrees.yaml index a410ab505..b66294494 100644 --- a/deployment/mcad-controller/crds/ibm.com_quotasubtrees.yaml +++ b/deployment/mcad-controller/crds/quota.codeflare.dev_quotasubtrees.yaml @@ -5,9 +5,9 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null - name: quotasubtrees.ibm.com + name: quotasubtrees.quota.codeflare.dev spec: - group: ibm.com + group: quota.codeflare.dev names: kind: QuotaSubtree listKind: QuotaSubtreeList diff --git a/config/crd/bases/mcad.ibm.com_appwrappers.yaml b/deployment/mcad-controller/crds/workload.codeflare.dev_appwrappers.yaml similarity index 86% rename from config/crd/bases/mcad.ibm.com_appwrappers.yaml rename to deployment/mcad-controller/crds/workload.codeflare.dev_appwrappers.yaml index cf3032a5e..f5c640e8d 100644 --- a/config/crd/bases/mcad.ibm.com_appwrappers.yaml +++ b/deployment/mcad-controller/crds/workload.codeflare.dev_appwrappers.yaml @@ -5,9 +5,9 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null - name: appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev spec: - group: mcad.ibm.com + group: workload.codeflare.dev names: kind: AppWrapper listKind: AppWrapperList @@ -259,11 +259,11 @@ spec: description: allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is "true". It may be set to "false" if the cluster load-balancer - does not rely on NodePorts. allocateLoadBalancerNodePorts - may only be set for services with type LoadBalancer and - will be cleared if the type is changed to any other type. - This field is alpha-level and is only honored by servers - that enable the ServiceLBNodePortControl feature. + does not rely on NodePorts. 
If the caller requests specific + NodePorts (by specifying a value), those requests will be + respected, regardless of this field. This field may only + be set for services with type LoadBalancer and will be cleared + if the type is changed to any other type. type: boolean clusterIP: description: 'clusterIP is the IP address of the service and @@ -306,14 +306,11 @@ spec: a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] - and clusterIP have the same value. \n Unless the \"IPv6DualStack\" - feature gate is enabled, this field is limited to one value, - which must be the same as the clusterIP field. If the feature - gate is enabled, this field may hold a maximum of two entries - (dual-stack IPs, in either order). These IPs must correspond - to the values of the ipFamilies field. Both clusterIPs and - ipFamilies are governed by the ipFamilyPolicy field. More - info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + and clusterIP have the same value. \n This field may hold + a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies + field. Both clusterIPs and ipFamilies are governed by the + ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" items: type: string type: array @@ -333,16 +330,25 @@ spec: mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - and requires Type to be + and requires `type` to be "ExternalName". type: string externalTrafficPolicy: - description: externalTrafficPolicy denotes if this Service - desires to route external traffic to node-local or cluster-wide - endpoints. "Local" preserves the client source IP and avoids - a second hop for LoadBalancer and Nodeport type services, - but risks potentially imbalanced traffic spreading. "Cluster" - obscures the client source IP and may cause a second hop - to another node, but should have good overall load-spreading. + description: externalTrafficPolicy describes how nodes distribute + service traffic they receive on one of the Service's "externally-facing" + addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). + If set to "Local", the proxy will configure the service + in a way that assumes that external load balancers will + take care of balancing the service traffic between nodes, + and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client + source IP. (Traffic mistakenly sent to a node with no endpoints + will be dropped.) The default value, "Cluster", uses the + standard behavior of routing to all endpoints evenly (possibly + modified by topology and other features). Note that traffic + sent to an External IP or LoadBalancer IP from within the + cluster will always get "Cluster" semantics, but clients + sending to a NodePort from within the cluster may need to + take traffic policy into account when picking a node. type: string healthCheckNodePort: description: healthCheckNodePort specifies the healthcheck @@ -355,23 +361,33 @@ spec: for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. 
This field will be wiped when updating a Service to - no longer need it (e.g. changing type). + no longer need it (e.g. changing type). This field cannot + be updated once set. format: int32 type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes distribute + service traffic they receive on the ClusterIP. If set to + "Local", the proxy will assume that pods only want to talk + to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The + default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology + and other features). + type: string ipFamilies: description: "IPFamilies is a list of IP families (e.g. IPv4, - IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" - feature gate. This field is usually assigned automatically - based on cluster configuration and the ipFamilyPolicy field. - If this field is specified manually, the requested family - is available in the cluster, and ipFamilyPolicy allows it, - it will be used; otherwise creation of the service will - fail. This field is conditionally mutable: it allows for + IPv6) assigned to this service. This field is usually assigned + automatically based on cluster configuration and the ipFamilyPolicy + field. If this field is specified manually, the requested + family is available in the cluster, and ipFamilyPolicy allows + it, it will be used; otherwise creation of the service will + fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not - allow changing the primary IP family of the Service. Valid + allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, - and does apply to \"headless\" services. This field will + and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName. \n This field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond @@ -387,31 +403,52 @@ spec: x-kubernetes-list-type: atomic ipFamilyPolicy: description: IPFamilyPolicy represents the dual-stack-ness - requested or required by this Service, and is gated by the - "IPv6DualStack" feature gate. If there is no value provided, - then this field will be set to SingleStack. Services can - be "SingleStack" (a single IP family), "PreferDualStack" + requested or required by this Service. If there is no value + provided, then this field will be set to SingleStack. Services + can be "SingleStack" (a single IP family), "PreferDualStack" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or "RequireDualStack" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the - value of this field. This field will be wiped when updating + value of this field. This field will be wiped when updating a service to type ExternalName. type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load balancer + implementation this Service belongs to. If specified, the + value of this field must be a label-style identifier, with + an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". + Unprefixed names are reserved for end-users. This field + can only be set when the Service type is 'LoadBalancer'. 
+ If not set, the default load balancer implementation is + used, today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any default + load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only + be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped + when a service is updated to a non 'LoadBalancer' type. + type: string loadBalancerIP: - description: 'Only applies to Service Type: LoadBalancer LoadBalancer - will get created with the IP specified in this field. This - feature depends on whether the underlying cloud-provider + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider - does not support the feature.' + does not support the feature. Deprecated: This field was + under-specified and its meaning varies across implementations, + and it cannot support dual-stack. As of Kubernetes v1.24, + users are encouraged to use implementation-specific annotations + when available. This field may be removed in a future API + version.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the - feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/' + feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' items: type: string type: array @@ -426,11 +463,9 @@ spec: description: The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service - names (as per RFC-6335 and http://www.iana.org/assignments/service-names). + names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such - as mycompany.com/my-custom-protocol. This is a beta - field that is guarded by the ServiceAppProtocol feature - gate and enabled by default. + as mycompany.com/my-custom-protocol. type: string name: description: The name of this port within the service. @@ -507,6 +542,7 @@ spec: to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/' type: object + x-kubernetes-map-type: atomic sessionAffinity: description: 'Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. @@ -529,25 +565,6 @@ spec: type: integer type: object type: object - topologyKeys: - description: topologyKeys is a preference-order list of topology - keys which implementations of services should use to preferentially - sort endpoints when accessing this Service, it can not be - used at the same time as externalTrafficPolicy=Local. Topology - keys must be valid label keys and at most 16 keys may be - specified. Endpoints are chosen based on the first topology - key with available backends. 
If this field is specified - and all entries have no backends that match the topology - of the client, the service has no backends for that client - and connections should fail. The special value "*" may be - used to mean "any topology". This catch-all value, if used, - only makes sense as the last value in the list. If this - is not specified or empty, no topology constraints will - be applied. This field is alpha-level and is only honored - by servers that enable the ServiceTopology feature. - items: - type: string - type: array type: description: 'type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, diff --git a/config/crd/bases/mcad.ibm.com_schedulingspecs.yaml b/deployment/mcad-controller/crds/workload.codeflare.dev_schedulingspecs.yaml similarity index 98% rename from config/crd/bases/mcad.ibm.com_schedulingspecs.yaml rename to deployment/mcad-controller/crds/workload.codeflare.dev_schedulingspecs.yaml index e91a18ffd..6330ddb0f 100644 --- a/config/crd/bases/mcad.ibm.com_schedulingspecs.yaml +++ b/deployment/mcad-controller/crds/workload.codeflare.dev_schedulingspecs.yaml @@ -5,9 +5,9 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null - name: schedulingspecs.mcad.ibm.com + name: schedulingspecs.workload.codeflare.dev spec: - group: mcad.ibm.com + group: workload.codeflare.dev names: kind: SchedulingSpec listKind: SchedulingSpecList diff --git a/deployment/mcad-controller/templates/deployment.yaml b/deployment/mcad-controller/templates/deployment.yaml index cfaa0a887..1ef381460 100644 --- a/deployment/mcad-controller/templates/deployment.yaml +++ b/deployment/mcad-controller/templates/deployment.yaml @@ -110,8 +110,8 @@ metadata: kubernetes.io/bootstrapping: rbac-defaults rules: - apiGroups: - - mcad.ibm.com - - ibm.com + - quota.codeflare.dev + - workload.codeflare.dev resources: - xqueuejobs - queuejobs diff --git a/doc/usage/aw-01.yaml b/doc/usage/aw-01.yaml index 1f7239119..c5ccf22f6 100644 --- a/doc/usage/aw-01.yaml +++ b/doc/usage/aw-01.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: 0001-aw-generic-deployment-1 diff --git a/doc/usage/aw-02.yaml b/doc/usage/aw-02.yaml index f25e5b60d..5f7c94c9b 100644 --- a/doc/usage/aw-02.yaml +++ b/doc/usage/aw-02.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: 0002-aw-generic-deployment-2 diff --git a/doc/usage/examples/aw-1-k8s-job1.yaml b/doc/usage/examples/aw-1-k8s-job1.yaml index 35e3b372e..387e0dab4 100644 --- a/doc/usage/examples/aw-1-k8s-job1.yaml +++ b/doc/usage/examples/aw-1-k8s-job1.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: aw-generic-statefulset-2 diff --git a/doc/usage/examples/aw-1-k8s-job2.yaml b/doc/usage/examples/aw-1-k8s-job2.yaml index 675ac9ef8..e2ec9bc50 100644 --- a/doc/usage/examples/aw-1-k8s-job2.yaml +++ b/doc/usage/examples/aw-1-k8s-job2.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: aw-generic-statefulset-2 diff --git a/doc/usage/examples/aw-1-k8s-job3.yaml b/doc/usage/examples/aw-1-k8s-job3.yaml index 56fabb609..652ca419e 100644 --- a/doc/usage/examples/aw-1-k8s-job3.yaml +++ b/doc/usage/examples/aw-1-k8s-job3.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: 
workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: aw-generic-statefulset-2 diff --git a/doc/usage/examples/kuberay/config/aw-kuberay-glue.yaml b/doc/usage/examples/kuberay/config/aw-kuberay-glue.yaml index 6b5089ee9..47ee17d6d 100644 --- a/doc/usage/examples/kuberay/config/aw-kuberay-glue.yaml +++ b/doc/usage/examples/kuberay/config/aw-kuberay-glue.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: raycluster-glue diff --git a/doc/usage/examples/kuberay/config/aw-raycluster.yaml b/doc/usage/examples/kuberay/config/aw-raycluster.yaml index bbef20d0d..b68af3c95 100644 --- a/doc/usage/examples/kuberay/config/aw-raycluster.yaml +++ b/doc/usage/examples/kuberay/config/aw-raycluster.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: raycluster-autoscaler diff --git a/doc/usage/examples/kuberay/config/xqueuejob-controller.yaml b/doc/usage/examples/kuberay/config/xqueuejob-controller.yaml index b754cd613..c914a83ea 100644 --- a/doc/usage/examples/kuberay/config/xqueuejob-controller.yaml +++ b/doc/usage/examples/kuberay/config/xqueuejob-controller.yaml @@ -11,7 +11,7 @@ metadata: name: system:controller:xqueuejob-controller rules: - apiGroups: - - mcad.ibm.com + - workload.codeflare.dev resources: - xqueuejobs - queuejobs diff --git a/doc/usage/quota_management/quickstart.md b/doc/usage/quota_management/quickstart.md index 406e349a7..a52a36c8e 100644 --- a/doc/usage/quota_management/quickstart.md +++ b/doc/usage/quota_management/quickstart.md @@ -21,7 +21,7 @@ optimize resource utilization, avoid resource starvation, and ensure quality of ## Example QuotaSubtree ```yaml -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -37,7 +37,7 @@ spec: cpu: 2000m memory: 8000Mi --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children @@ -64,7 +64,7 @@ spec: ## Using quota trees in your AppWrappers ```yaml -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: myGangScheduledApp diff --git a/doc/usage/tutorial.md b/doc/usage/tutorial.md index 67881ffbf..c77340879 100644 --- a/doc/usage/tutorial.md +++ b/doc/usage/tutorial.md @@ -19,7 +19,7 @@ Refer to [deployment instructions](../deploy/deployment.md) on how to deploy the After successfully deploying the __Multi-Cluster Application Dispatcher__ Controller, create an AppWrapper custom resource in a file named `aw-01.yaml` with the following content: ```yaml -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: 0001-aw-generic-deployment-1 @@ -202,7 +202,7 @@ The next step is to create a second `AppWrapper` job that has resource demands t Create an `AppWrapper` job in a file named `aw-02.yaml` with the following content: ```yaml -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: 0002-aw-generic-deployment-2 @@ -312,7 +312,7 @@ Delete the first `AppWrapper` job. ```bash $ kubectl delete -f aw-01.yaml -appwrapper.mcad.ibm.com "0001-aw-generic-deployment-1" deleted +appwrapper.workload.codeflare.dev "0001-aw-generic-deployment-1" deleted ``` Check the pods status of the `AppWrapper` jobs. 
The new pods from the second `AppWrapper` job: `0002-aw-generic-deployment-2` job should now be deployed and running. diff --git a/pkg/apis/controller/v1beta1/doc.go b/pkg/apis/controller/v1beta1/doc.go index e064d70e7..5a0545238 100644 --- a/pkg/apis/controller/v1beta1/doc.go +++ b/pkg/apis/controller/v1beta1/doc.go @@ -16,6 +16,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +kubebuilder:object:generate=true -// +groupName=mcad.ibm.com +// +groupName=workload.codeflare.dev package v1beta1 diff --git a/pkg/apis/controller/v1beta1/register.go b/pkg/apis/controller/v1beta1/register.go index 50f0e8607..59e050aaa 100644 --- a/pkg/apis/controller/v1beta1/register.go +++ b/pkg/apis/controller/v1beta1/register.go @@ -28,7 +28,7 @@ var ( ) // GroupName is the group name used in this package. -const GroupName = "mcad.ibm.com" +const GroupName = "workload.codeflare.dev" // SchemeGroupVersion is the group version used to register these objects. var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} diff --git a/pkg/apis/quotaplugins/quotasubtree/v1/doc.go b/pkg/apis/quotaplugins/quotasubtree/v1/doc.go index a5f1a2300..6c01b2b54 100755 --- a/pkg/apis/quotaplugins/quotasubtree/v1/doc.go +++ b/pkg/apis/quotaplugins/quotasubtree/v1/doc.go @@ -16,6 +16,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +kubebuilder:object:generate=true -// +groupName=ibm.com +// +groupName=quota.codeflare.dev package v1 diff --git a/pkg/apis/quotaplugins/quotasubtree/v1/register.go b/pkg/apis/quotaplugins/quotasubtree/v1/register.go index 09e66191d..a83b6e73d 100755 --- a/pkg/apis/quotaplugins/quotasubtree/v1/register.go +++ b/pkg/apis/quotaplugins/quotasubtree/v1/register.go @@ -13,7 +13,7 @@ var ( const ( // GroupName is the group name used in this package. - GroupName = "ibm.com" + GroupName = "quota.codeflare.dev" // GroupVersion is the version of scheduling group GroupVersion = "v1" diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index e79521648..768fcdf94 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -21,8 +21,8 @@ package versioned import ( "fmt" - mcadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/controller/v1beta1" - ibmv1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/quotasubtree/v1" + workloadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/controller/v1beta1" + quotav1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/quotasubtree/v1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -30,26 +30,26 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface - McadV1beta1() mcadv1beta1.McadV1beta1Interface - IbmV1() ibmv1.IbmV1Interface + WorkloadV1beta1() workloadv1beta1.WorkloadV1beta1Interface + QuotaV1() quotav1.QuotaV1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. 
type Clientset struct { *discovery.DiscoveryClient - mcadV1beta1 *mcadv1beta1.McadV1beta1Client - ibmV1 *ibmv1.IbmV1Client + workloadV1beta1 *workloadv1beta1.WorkloadV1beta1Client + quotaV1 *quotav1.QuotaV1Client } -// McadV1beta1 retrieves the McadV1beta1Client -func (c *Clientset) McadV1beta1() mcadv1beta1.McadV1beta1Interface { - return c.mcadV1beta1 +// WorkloadV1beta1 retrieves the WorkloadV1beta1Client +func (c *Clientset) WorkloadV1beta1() workloadv1beta1.WorkloadV1beta1Interface { + return c.workloadV1beta1 } -// IbmV1 retrieves the IbmV1Client -func (c *Clientset) IbmV1() ibmv1.IbmV1Interface { - return c.ibmV1 +// QuotaV1 retrieves the QuotaV1Client +func (c *Clientset) QuotaV1() quotav1.QuotaV1Interface { + return c.quotaV1 } // Discovery retrieves the DiscoveryClient @@ -73,11 +73,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error - cs.mcadV1beta1, err = mcadv1beta1.NewForConfig(&configShallowCopy) + cs.workloadV1beta1, err = workloadv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.ibmV1, err = ibmv1.NewForConfig(&configShallowCopy) + cs.quotaV1, err = quotav1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -93,8 +93,8 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset - cs.mcadV1beta1 = mcadv1beta1.NewForConfigOrDie(c) - cs.ibmV1 = ibmv1.NewForConfigOrDie(c) + cs.workloadV1beta1 = workloadv1beta1.NewForConfigOrDie(c) + cs.quotaV1 = quotav1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -103,8 +103,8 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset - cs.mcadV1beta1 = mcadv1beta1.New(c) - cs.ibmV1 = ibmv1.New(c) + cs.workloadV1beta1 = workloadv1beta1.New(c) + cs.quotaV1 = quotav1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 53b34208e..5fc7acd1d 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -20,10 +20,10 @@ package fake import ( clientset "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned" - mcadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/controller/v1beta1" - fakemcadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/controller/v1beta1/fake" - ibmv1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/quotasubtree/v1" - fakeibmv1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake" + workloadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/controller/v1beta1" + fakeworkloadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/controller/v1beta1/fake" + quotav1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/quotasubtree/v1" + fakequotav1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -78,12 +78,12 @@ func (c *Clientset) Tracker() testing.ObjectTracker { var _ clientset.Interface = &Clientset{} -// McadV1beta1 retrieves the McadV1beta1Client -func (c *Clientset) McadV1beta1() mcadv1beta1.McadV1beta1Interface { - return &fakemcadv1beta1.FakeMcadV1beta1{Fake: &c.Fake} +// WorkloadV1beta1 retrieves the WorkloadV1beta1Client +func (c *Clientset) WorkloadV1beta1() workloadv1beta1.WorkloadV1beta1Interface { + return &fakeworkloadv1beta1.FakeWorkloadV1beta1{Fake: &c.Fake} } -// IbmV1 retrieves the IbmV1Client -func (c *Clientset) IbmV1() ibmv1.IbmV1Interface { - return &fakeibmv1.FakeIbmV1{Fake: &c.Fake} +// QuotaV1 retrieves the QuotaV1Client +func (c *Clientset) QuotaV1() quotav1.QuotaV1Interface { + return &fakequotav1.FakeQuotaV1{Fake: &c.Fake} } diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index 1fc6e7e86..6ffb0c808 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -19,8 +19,8 @@ limitations under the License. 
package fake import ( - mcadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1" - ibmv1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/quotaplugins/quotasubtree/v1" + workloadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1" + quotav1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/quotaplugins/quotasubtree/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,8 +32,8 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - mcadv1beta1.AddToScheme, - ibmv1.AddToScheme, + workloadv1beta1.AddToScheme, + quotav1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index f7fd7e88d..6cdecec3a 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -19,8 +19,8 @@ limitations under the License. package scheme import ( - mcadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1" - ibmv1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/quotaplugins/quotasubtree/v1" + workloadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1" + quotav1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/quotaplugins/quotasubtree/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,8 +32,8 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - mcadv1beta1.AddToScheme, - ibmv1.AddToScheme, + workloadv1beta1.AddToScheme, + quotav1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/pkg/client/clientset/versioned/typed/controller/v1beta1/appwrapper.go b/pkg/client/clientset/versioned/typed/controller/v1beta1/appwrapper.go index ba3453d98..487619647 100644 --- a/pkg/client/clientset/versioned/typed/controller/v1beta1/appwrapper.go +++ b/pkg/client/clientset/versioned/typed/controller/v1beta1/appwrapper.go @@ -57,7 +57,7 @@ type appWrappers struct { } // newAppWrappers returns a AppWrappers -func newAppWrappers(c *McadV1beta1Client, namespace string) *appWrappers { +func newAppWrappers(c *WorkloadV1beta1Client, namespace string) *appWrappers { return &appWrappers{ client: c.RESTClient(), ns: namespace, diff --git a/pkg/client/clientset/versioned/typed/controller/v1beta1/controller_client.go b/pkg/client/clientset/versioned/typed/controller/v1beta1/controller_client.go index 3309fdf2c..d1f6e18f8 100644 --- a/pkg/client/clientset/versioned/typed/controller/v1beta1/controller_client.go +++ b/pkg/client/clientset/versioned/typed/controller/v1beta1/controller_client.go @@ -24,22 +24,22 @@ import ( rest "k8s.io/client-go/rest" ) -type McadV1beta1Interface interface { +type WorkloadV1beta1Interface interface { RESTClient() rest.Interface AppWrappersGetter } -// McadV1beta1Client is used to interact with features provided by the mcad.ibm.com group. 
-type McadV1beta1Client struct { +// WorkloadV1beta1Client is used to interact with features provided by the workload.codeflare.dev group. +type WorkloadV1beta1Client struct { restClient rest.Interface } -func (c *McadV1beta1Client) AppWrappers(namespace string) AppWrapperInterface { +func (c *WorkloadV1beta1Client) AppWrappers(namespace string) AppWrapperInterface { return newAppWrappers(c, namespace) } -// NewForConfig creates a new McadV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*McadV1beta1Client, error) { +// NewForConfig creates a new WorkloadV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*WorkloadV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +48,12 @@ func NewForConfig(c *rest.Config) (*McadV1beta1Client, error) { if err != nil { return nil, err } - return &McadV1beta1Client{client}, nil + return &WorkloadV1beta1Client{client}, nil } -// NewForConfigOrDie creates a new McadV1beta1Client for the given config and +// NewForConfigOrDie creates a new WorkloadV1beta1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *McadV1beta1Client { +func NewForConfigOrDie(c *rest.Config) *WorkloadV1beta1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,9 +61,9 @@ func NewForConfigOrDie(c *rest.Config) *McadV1beta1Client { return client } -// New creates a new McadV1beta1Client for the given RESTClient. -func New(c rest.Interface) *McadV1beta1Client { - return &McadV1beta1Client{c} +// New creates a new WorkloadV1beta1Client for the given RESTClient. +func New(c rest.Interface) *WorkloadV1beta1Client { + return &WorkloadV1beta1Client{c} } func setConfigDefaults(config *rest.Config) error { @@ -81,7 +81,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *McadV1beta1Client) RESTClient() rest.Interface { +func (c *WorkloadV1beta1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/pkg/client/clientset/versioned/typed/controller/v1beta1/fake/fake_appwrapper.go b/pkg/client/clientset/versioned/typed/controller/v1beta1/fake/fake_appwrapper.go index 6d998f201..b62e4ce2e 100644 --- a/pkg/client/clientset/versioned/typed/controller/v1beta1/fake/fake_appwrapper.go +++ b/pkg/client/clientset/versioned/typed/controller/v1beta1/fake/fake_appwrapper.go @@ -32,13 +32,13 @@ import ( // FakeAppWrappers implements AppWrapperInterface type FakeAppWrappers struct { - Fake *FakeMcadV1beta1 + Fake *FakeWorkloadV1beta1 ns string } -var appwrappersResource = schema.GroupVersionResource{Group: "mcad.ibm.com", Version: "v1beta1", Resource: "appwrappers"} +var appwrappersResource = schema.GroupVersionResource{Group: "workload.codeflare.dev", Version: "v1beta1", Resource: "appwrappers"} -var appwrappersKind = schema.GroupVersionKind{Group: "mcad.ibm.com", Version: "v1beta1", Kind: "AppWrapper"} +var appwrappersKind = schema.GroupVersionKind{Group: "workload.codeflare.dev", Version: "v1beta1", Kind: "AppWrapper"} // Get takes name of the appWrapper, and returns the corresponding appWrapper object, and an error if there is any. 
func (c *FakeAppWrappers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.AppWrapper, err error) { diff --git a/pkg/client/clientset/versioned/typed/controller/v1beta1/fake/fake_controller_client.go b/pkg/client/clientset/versioned/typed/controller/v1beta1/fake/fake_controller_client.go index 606bd0de7..82059b4fc 100644 --- a/pkg/client/clientset/versioned/typed/controller/v1beta1/fake/fake_controller_client.go +++ b/pkg/client/clientset/versioned/typed/controller/v1beta1/fake/fake_controller_client.go @@ -24,17 +24,17 @@ import ( testing "k8s.io/client-go/testing" ) -type FakeMcadV1beta1 struct { +type FakeWorkloadV1beta1 struct { *testing.Fake } -func (c *FakeMcadV1beta1) AppWrappers(namespace string) v1beta1.AppWrapperInterface { +func (c *FakeWorkloadV1beta1) AppWrappers(namespace string) v1beta1.AppWrapperInterface { return &FakeAppWrappers{c, namespace} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *FakeMcadV1beta1) RESTClient() rest.Interface { +func (c *FakeWorkloadV1beta1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake/fake_quotasubtree.go b/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake/fake_quotasubtree.go index 0b5341ce9..734cda544 100644 --- a/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake/fake_quotasubtree.go +++ b/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake/fake_quotasubtree.go @@ -32,13 +32,13 @@ import ( // FakeQuotaSubtrees implements QuotaSubtreeInterface type FakeQuotaSubtrees struct { - Fake *FakeIbmV1 + Fake *FakeQuotaV1 ns string } -var quotasubtreesResource = schema.GroupVersionResource{Group: "ibm.com", Version: "v1", Resource: "quotasubtrees"} +var quotasubtreesResource = schema.GroupVersionResource{Group: "quota.codeflare.dev", Version: "v1", Resource: "quotasubtrees"} -var quotasubtreesKind = schema.GroupVersionKind{Group: "ibm.com", Version: "v1", Kind: "QuotaSubtree"} +var quotasubtreesKind = schema.GroupVersionKind{Group: "quota.codeflare.dev", Version: "v1", Kind: "QuotaSubtree"} // Get takes name of the quotaSubtree, and returns the corresponding quotaSubtree object, and an error if there is any. func (c *FakeQuotaSubtrees) Get(ctx context.Context, name string, options v1.GetOptions) (result *quotasubtreev1.QuotaSubtree, err error) { diff --git a/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake/fake_quotasubtree_client.go b/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake/fake_quotasubtree_client.go index 9046c1b1d..a4a68ac32 100644 --- a/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake/fake_quotasubtree_client.go +++ b/pkg/client/clientset/versioned/typed/quotasubtree/v1/fake/fake_quotasubtree_client.go @@ -24,17 +24,17 @@ import ( testing "k8s.io/client-go/testing" ) -type FakeIbmV1 struct { +type FakeQuotaV1 struct { *testing.Fake } -func (c *FakeIbmV1) QuotaSubtrees(namespace string) v1.QuotaSubtreeInterface { +func (c *FakeQuotaV1) QuotaSubtrees(namespace string) v1.QuotaSubtreeInterface { return &FakeQuotaSubtrees{c, namespace} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FakeIbmV1) RESTClient() rest.Interface { +func (c *FakeQuotaV1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/pkg/client/clientset/versioned/typed/quotasubtree/v1/quotasubtree.go b/pkg/client/clientset/versioned/typed/quotasubtree/v1/quotasubtree.go index 8c194d63f..b8b5c0eba 100644 --- a/pkg/client/clientset/versioned/typed/quotasubtree/v1/quotasubtree.go +++ b/pkg/client/clientset/versioned/typed/quotasubtree/v1/quotasubtree.go @@ -57,7 +57,7 @@ type quotaSubtrees struct { } // newQuotaSubtrees returns a QuotaSubtrees -func newQuotaSubtrees(c *IbmV1Client, namespace string) *quotaSubtrees { +func newQuotaSubtrees(c *QuotaV1Client, namespace string) *quotaSubtrees { return &quotaSubtrees{ client: c.RESTClient(), ns: namespace, diff --git a/pkg/client/clientset/versioned/typed/quotasubtree/v1/quotasubtree_client.go b/pkg/client/clientset/versioned/typed/quotasubtree/v1/quotasubtree_client.go index 91e98a3d6..6db1d71ee 100644 --- a/pkg/client/clientset/versioned/typed/quotasubtree/v1/quotasubtree_client.go +++ b/pkg/client/clientset/versioned/typed/quotasubtree/v1/quotasubtree_client.go @@ -24,22 +24,22 @@ import ( rest "k8s.io/client-go/rest" ) -type IbmV1Interface interface { +type QuotaV1Interface interface { RESTClient() rest.Interface QuotaSubtreesGetter } -// IbmV1Client is used to interact with features provided by the ibm.com group. -type IbmV1Client struct { +// QuotaV1Client is used to interact with features provided by the quota.codeflare.dev group. +type QuotaV1Client struct { restClient rest.Interface } -func (c *IbmV1Client) QuotaSubtrees(namespace string) QuotaSubtreeInterface { +func (c *QuotaV1Client) QuotaSubtrees(namespace string) QuotaSubtreeInterface { return newQuotaSubtrees(c, namespace) } -// NewForConfig creates a new IbmV1Client for the given config. -func NewForConfig(c *rest.Config) (*IbmV1Client, error) { +// NewForConfig creates a new QuotaV1Client for the given config. +func NewForConfig(c *rest.Config) (*QuotaV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +48,12 @@ func NewForConfig(c *rest.Config) (*IbmV1Client, error) { if err != nil { return nil, err } - return &IbmV1Client{client}, nil + return &QuotaV1Client{client}, nil } -// NewForConfigOrDie creates a new IbmV1Client for the given config and +// NewForConfigOrDie creates a new QuotaV1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *IbmV1Client { +func NewForConfigOrDie(c *rest.Config) *QuotaV1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,9 +61,9 @@ func NewForConfigOrDie(c *rest.Config) *IbmV1Client { return client } -// New creates a new IbmV1Client for the given RESTClient. -func New(c rest.Interface) *IbmV1Client { - return &IbmV1Client{c} +// New creates a new QuotaV1Client for the given RESTClient. +func New(c rest.Interface) *QuotaV1Client { + return &QuotaV1Client{c} } func setConfigDefaults(config *rest.Config) error { @@ -81,7 +81,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation.
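The typed QuotaV1Client covers compiled callers, but anything that names the group as a string has to move in lockstep, for example RBAC rules (the helm flag quotaManagement.rbac.apiGroup=quota.codeflare.dev later in this patch) or dynamic-client access. A sketch, not project code, of reaching QuotaSubtrees through the dynamic client under the new group:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dyn := dynamic.NewForConfigOrDie(config)

	// Group was "ibm.com" before this patch.
	gvr := schema.GroupVersionResource{Group: "quota.codeflare.dev", Version: "v1", Resource: "quotasubtrees"}
	subtrees, err := dyn.Resource(gvr).Namespace("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, st := range subtrees.Items {
		fmt.Println(st.GetName())
	}
}
```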
-func (c *IbmV1Client) RESTClient() rest.Interface { +func (c *QuotaV1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/pkg/client/informers/externalversions/controller/v1beta1/appwrapper.go b/pkg/client/informers/externalversions/controller/v1beta1/appwrapper.go index 0fe752c88..a1b8470f8 100644 --- a/pkg/client/informers/externalversions/controller/v1beta1/appwrapper.go +++ b/pkg/client/informers/externalversions/controller/v1beta1/appwrapper.go @@ -62,13 +62,13 @@ func NewFilteredAppWrapperInformer(client versioned.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.McadV1beta1().AppWrappers(namespace).List(context.TODO(), options) + return client.WorkloadV1beta1().AppWrappers(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.McadV1beta1().AppWrappers(namespace).Watch(context.TODO(), options) + return client.WorkloadV1beta1().AppWrappers(namespace).Watch(context.TODO(), options) }, }, &controllerv1beta1.AppWrapper{}, diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index 63513c4a5..2cc4c0e47 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -173,14 +173,14 @@ type SharedInformerFactory interface { ForResource(resource schema.GroupVersionResource) (GenericInformer, error) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - Mcad() controller.Interface - Ibm() quotasubtree.Interface + Workload() controller.Interface + Quota() quotasubtree.Interface } -func (f *sharedInformerFactory) Mcad() controller.Interface { +func (f *sharedInformerFactory) Workload() controller.Interface { return controller.New(f, f.namespace, f.tweakListOptions) } -func (f *sharedInformerFactory) Ibm() quotasubtree.Interface { +func (f *sharedInformerFactory) Quota() quotasubtree.Interface { return quotasubtree.New(f, f.namespace, f.tweakListOptions) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 2c3eef75f..b9b46f1e9 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -53,13 +53,13 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=ibm.com, Version=v1 + // Group=quota.codeflare.dev, Version=v1 case v1.SchemeGroupVersion.WithResource("quotasubtrees"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ibm().V1().QuotaSubtrees().Informer()}, nil + return &genericInformer{resource: resource.GroupResource(), informer: f.Quota().V1().QuotaSubtrees().Informer()}, nil - // Group=mcad.ibm.com, Version=v1beta1 + // Group=workload.codeflare.dev, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("appwrappers"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Mcad().V1beta1().AppWrappers().Informer()}, nil + return &genericInformer{resource: resource.GroupResource(), informer: f.Workload().V1beta1().AppWrappers().Informer()}, nil } diff --git a/pkg/client/informers/externalversions/quotasubtree/v1/quotasubtree.go 
b/pkg/client/informers/externalversions/quotasubtree/v1/quotasubtree.go index 0b07f1130..9ed08847f 100644 --- a/pkg/client/informers/externalversions/quotasubtree/v1/quotasubtree.go +++ b/pkg/client/informers/externalversions/quotasubtree/v1/quotasubtree.go @@ -62,13 +62,13 @@ func NewFilteredQuotaSubtreeInformer(client versioned.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.IbmV1().QuotaSubtrees(namespace).List(context.TODO(), options) + return client.QuotaV1().QuotaSubtrees(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.IbmV1().QuotaSubtrees(namespace).Watch(context.TODO(), options) + return client.QuotaV1().QuotaSubtrees(namespace).Watch(context.TODO(), options) }, }, &quotasubtreev1.QuotaSubtree{}, diff --git a/pkg/controller/queuejob/queuejob_controller_ex.go b/pkg/controller/queuejob/queuejob_controller_ex.go index 04e0aa278..1823e18a8 100644 --- a/pkg/controller/queuejob/queuejob_controller_ex.go +++ b/pkg/controller/queuejob/queuejob_controller_ex.go @@ -77,9 +77,9 @@ type XController struct { appwrapperInformer arbinformers.AppWrapperInformer // resources registered for the AppWrapper - //qjobRegisteredResources queuejobresources.RegisteredResources + // qjobRegisteredResources queuejobresources.RegisteredResources // controllers for these resources - //qjobResControls map[arbv1.ResourceType]queuejobresources.Interface + // qjobResControls map[arbv1.ResourceType]queuejobresources.Interface // Captures all available resources in the cluster genericresources *genericresource.GenericResources @@ -93,10 +93,10 @@ type XController struct { // QueueJobs that need to be initialized // Add labels and selectors to AppWrapper - //initQueue *cache.FIFO + // initQueue *cache.FIFO // QueueJobs that need to sync up after initialization - //updateQueue *cache.FIFO + // updateQueue *cache.FIFO // eventQueue that need to sync up eventQueue *cache.FIFO @@ -104,9 +104,9 @@ type XController struct { // QJ queue that needs to be allocated qjqueue SchedulingQueue - //TODO: Do we need this local cache? + // TODO: Do we need this local cache? // our own local cache, used for computing total amount of resources - //cache clusterstatecache.Cache + // cache clusterstatecache.Cache // is dispatcher or deployer? isDispatcher bool @@ -214,8 +214,8 @@ func (qjm *XController) allocatableCapacity() *clusterstateapi.Resource { var specNodeName = "spec.nodeName" labelSelector := fmt.Sprintf("%s=%s", specNodeName, node.Name) podList, err := qjm.clients.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{FieldSelector: labelSelector}) - //TODO: when no pods are listed, do we send entire node capacity as available - //this will cause false positive dispatch. + // TODO: when no pods are listed, do we send entire node capacity as available + // this will cause false positive dispatch.
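Taken together, the informer factory renames read as follows for a consumer; a sketch assuming the repository's externalversions package path, with startInformers as a hypothetical helper (the accessor names themselves appear verbatim in the factory hunks above):

```go
package controller

import (
	"k8s.io/client-go/tools/cache"

	versioned "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned"
	"github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/informers/externalversions"
)

// startInformers wires up both renamed groups and blocks until their caches sync.
func startInformers(clients versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactory(clients, 0)

	// Formerly factory.Mcad().V1beta1() and factory.Ibm().V1().
	awInformer := factory.Workload().V1beta1().AppWrappers().Informer()
	qstInformer := factory.Quota().V1().QuotaSubtrees().Informer()

	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, awInformer.HasSynced, qstInformer.HasSynced)
}
```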
if err != nil { klog.Errorf("[allocatableCapacity] Error listing pods %v", err) } @@ -241,23 +241,23 @@ func NewJobController(config *rest.Config, serverOption *options.ServerOption) * arbclients: clientset.NewForConfigOrDie(config), eventQueue: cache.NewFIFO(GetQueueJobKey), agentEventQueue: cache.NewFIFO(GetQueueJobKey), - //initQueue: cache.NewFIFO(GetQueueJobKey), - //updateQueue: cache.NewFIFO(GetQueueJobKey), + // initQueue: cache.NewFIFO(GetQueueJobKey), + // updateQueue: cache.NewFIFO(GetQueueJobKey), qjqueue: NewSchedulingQueue(), - //cache is turned-off, issue: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/588 - //cache: clusterstatecache.New(config), + // cache is turned-off, issue: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/588 + // cache: clusterstatecache.New(config), schedulingAW: nil, } - //TODO: work on enabling metrics adapter for correct MCAD mode - //metrics adapter is implemented through dynamic client which looks at all the - //resources installed in the cluster to construct cache. May be this is need in - //multi-cluster mode, so for now it is turned-off: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/585 - //cc.metricsAdapter = adapter.New(serverOption, config, cc.cache) + // TODO: work on enabling metrics adapter for correct MCAD mode + // metrics adapter is implemented through dynamic client which looks at all the + // resources installed in the cluster to construct cache. May be this is need in + // multi-cluster mode, so for now it is turned-off: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/585 + // cc.metricsAdapter = adapter.New(serverOption, config, cc.cache) cc.genericresources = genericresource.NewAppWrapperGenericResource(config) - //cc.qjobResControls = map[arbv1.ResourceType]queuejobresources.Interface{} - //RegisterAllQueueJobResourceTypes(&cc.qjobRegisteredResources) + // cc.qjobResControls = map[arbv1.ResourceType]queuejobresources.Interface{} + // RegisterAllQueueJobResourceTypes(&cc.qjobRegisteredResources) // initialize pod sub-resource control // resControlPod, found, err := cc.qjobRegisteredResources.InitQueueJobResource(arbv1.ResourceTypePod, config) @@ -275,7 +275,7 @@ func NewJobController(config *rest.Config, serverOption *options.ServerOption) * if err != nil { klog.Fatalf("Could not instantiate k8s client, err=%v", err) } - cc.appwrapperInformer = informerFactory.NewSharedInformerFactory(appWrapperClient, 0).Mcad().V1beta1().AppWrappers() + cc.appwrapperInformer = informerFactory.NewSharedInformerFactory(appWrapperClient, 0).Workload().V1beta1().AppWrappers() cc.appwrapperInformer.Informer().AddEventHandler( cache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { @@ -745,7 +745,7 @@ func (qjm *XController) getDispatchedAppWrappers() (map[string]*clusterstateapi. 
klog.Errorf("[getDispatchedAppWrappers] Failure creating client for initialization informer err=%#v", err) return awrRetVal, awsRetVal } - queueJobInformer := informerFactory.NewSharedInformerFactory(appWrapperClient, 0).Mcad().V1beta1().AppWrappers() + queueJobInformer := informerFactory.NewSharedInformerFactory(appWrapperClient, 0).Workload().V1beta1().AppWrappers() queueJobInformer.Informer().AddEventHandler( cache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { @@ -806,7 +806,7 @@ func (qjm *XController) addTotalSnapshotResourcesConsumedByAw(totalgpu int32, to func (qjm *XController) getAggregatedAvailableResourcesPriority(unallocatedClusterResources *clusterstateapi. Resource, targetpr float64, requestingJob *arbv1.AppWrapper, agentId string) (*clusterstateapi.Resource, []*arbv1.AppWrapper) { - //get available free resources in the cluster. + // get available free resources in the cluster. r := unallocatedClusterResources.Clone() // Track preemption resources preemptable := clusterstateapi.EmptyResource() @@ -821,9 +821,9 @@ func (qjm *XController) getAggregatedAvailableResourcesPriority(unallocatedClust klog.Errorf("[getAggAvaiResPri] Unable to obtain the list of queueJobs %+v", err) return r, nil } - //for all AWs that have canRun status are true - //in non-preemption mode, we reserve resources for AWs - //reserving is done by subtracting total AW resources from pods owned by AW that are running or completed. + // for all AWs that have canRun status are true + // in non-preemption mode, we reserve resources for AWs + // reserving is done by subtracting total AW resources from pods owned by AW that are running or completed. // AW can be running but items owned by it can be completed or there might be new set of pods yet to be spawned for _, value := range queueJobs { klog.V(10).Infof("[getAggAvaiResPri] %s: Evaluating job: %s to calculate aggregated resources.", time.Now().String(), value.Name) @@ -1005,14 +1005,14 @@ func (qjm *XController) nodeChecks(histograms map[string]*dto.Metric, aw *arbv1. func (qjm *XController) ScheduleNext(qj *arbv1.AppWrapper) { ctx := context.Background() var err error = nil - //TODO: do we really need locking now since we have a single thread processing an AW ? + // TODO: do we really need locking now since we have a single thread processing an AW ? qjm.schedulingMutex.Lock() qjm.schedulingAW = qj qjm.schedulingMutex.Unlock() // ensure that current active appwrapper is reset at the end of this function, to prevent // the appwrapper from being added in syncjob defer qjm.schedulingAWAtomicSet(nil) - //TODO: Retry value is set to 1, do we really need retries? + // TODO: Retry value is set to 1, do we really need retries? scheduleNextRetrier := retrier.New(retrier.ExponentialBackoff(1, 100*time.Millisecond), &EtcdErrorClassifier{}) scheduleNextRetrier.SetJitter(0.05) // Retry the execution @@ -1177,8 +1177,8 @@ func (qjm *XController) ScheduleNext(qj *arbv1.AppWrapper) { if qjm.serverOption.DynamicPriority { priorityindex = -math.MaxFloat64 } - //cache now is a method inside the controller. - //The reimplementation should fix issue : https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/550 + // cache now is a method inside the controller. 
+ // The reimplementation should fix issue : https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/550 var unallocatedResources = clusterstateapi.EmptyResource() unallocatedResources = qjm.allocatableCapacity() for unallocatedResources.IsEmpty() { @@ -1320,7 +1320,7 @@ func (qjm *XController) ScheduleNext(qj *arbv1.AppWrapper) { forwarded = true } - //TODO: Remove schedulingTimeExpired flag: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/586 + // TODO: Remove schedulingTimeExpired flag: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/586 schedulingTimeExpired := false if forwarded { break @@ -1369,7 +1369,7 @@ func (cc *XController) updateEtcd(ctx context.Context, currentAppwrapper *arbv1. klog.V(4).Infof("[updateEtcd] trying to update '%s/%s' called by '%s'", currentAppwrapper.Namespace, currentAppwrapper.Name, caller) currentAppwrapper.Status.Sender = "before " + caller // set Sender string to indicate code location currentAppwrapper.Status.Local = false // for Informer FilterFunc to pickup - updatedAppwrapper, err := cc.arbclients.McadV1beta1().AppWrappers(currentAppwrapper.Namespace).Update(ctx, currentAppwrapper, metav1.UpdateOptions{}) + updatedAppwrapper, err := cc.arbclients.WorkloadV1beta1().AppWrappers(currentAppwrapper.Namespace).Update(ctx, currentAppwrapper, metav1.UpdateOptions{}) if err != nil { return nil, err } @@ -1385,7 +1385,7 @@ func (cc *XController) updateEtcd(ctx context.Context, currentAppwrapper *arbv1. func (cc *XController) updateStatusInEtcd(ctx context.Context, currentAppwrapper *arbv1.AppWrapper, caller string) error { klog.V(4).Infof("[updateStatusInEtcd] trying to update '%s/%s' called by '%s'", currentAppwrapper.Namespace, currentAppwrapper.Name, caller) currentAppwrapper.Status.Sender = "before " + caller // set Sender string to indicate code location - updatedAppwrapper, err := cc.arbclients.McadV1beta1().AppWrappers(currentAppwrapper.Namespace).UpdateStatus(ctx, currentAppwrapper, metav1.UpdateOptions{}) + updatedAppwrapper, err := cc.arbclients.WorkloadV1beta1().AppWrappers(currentAppwrapper.Namespace).UpdateStatus(ctx, currentAppwrapper, metav1.UpdateOptions{}) if err != nil { return err } @@ -1406,7 +1406,7 @@ func (cc *XController) updateStatusInEtcdWithRetry(ctx context.Context, source * updatedAW := source.DeepCopy() err := updateStatusRetrierRetrier.RunCtx(ctx, func(localContext context.Context) error { var retryErr error - updatedAW, retryErr = cc.arbclients.McadV1beta1().AppWrappers(updatedAW.Namespace).UpdateStatus(localContext, updatedAW, metav1.UpdateOptions{}) + updatedAW, retryErr = cc.arbclients.WorkloadV1beta1().AppWrappers(updatedAW.Namespace).UpdateStatus(localContext, updatedAW, metav1.UpdateOptions{}) if retryErr != nil && apierrors.IsConflict(retryErr) { dest, retryErr := cc.getAppWrapper(source.Namespace, source.Name, caller) if retryErr != nil && !apierrors.IsNotFound(retryErr) { @@ -1494,13 +1494,13 @@ func (qjm *XController) backoff(ctx context.Context, q *arbv1.AppWrapper, reason func (cc *XController) Run(stopCh <-chan struct{}) { go cc.appwrapperInformer.Informer().Run(stopCh) - //go cc.qjobResControls[arbv1.ResourceTypePod].Run(stopCh) + // go cc.qjobResControls[arbv1.ResourceTypePod].Run(stopCh) cache.WaitForCacheSync(stopCh, cc.appWrapperSynced) - //cache is turned off, issue: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/588 + // cache is turned off, issue: 
https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/588 // update snapshot of ClientStateCache every second - //cc.cache.Run(stopCh) + // cc.cache.Run(stopCh) // start preempt thread is used to preempt AWs that have partial pods or have reached dispatch duration go wait.Until(cc.PreemptQueueJobs, 60*time.Second, stopCh) @@ -1575,7 +1575,7 @@ func (qjm *XController) UpdateQueueJobs() { } err := qjm.updateStatusInEtcdWithRetry(context.Background(), updateQj, "[UpdateQueueJobs] setRunningHoldCompletion") if err != nil { - //TODO: implement retry + // TODO: implement retry klog.Errorf("[UpdateQueueJobs] Error updating status 'setRunningHoldCompletion' for AppWrapper: '%s/%s',Status=%+v, err=%+v.", newjob.Namespace, newjob.Name, newjob.Status, err) } } @@ -1601,13 +1601,13 @@ func (qjm *XController) UpdateQueueJobs() { if qjm.quotaManager != nil { qjm.quotaManager.Release(updateQj) } - //TODO: Implement retry + // TODO: Implement retry klog.Errorf("[UpdateQueueJobs] Error updating status 'setCompleted' AppWrapper: '%s/%s',Status=%+v, err=%+v.", newjob.Namespace, newjob.Name, newjob.Status, err) } if qjm.quotaManager != nil { qjm.quotaManager.Release(updateQj) } - //Delete AW from both queue's + // Delete AW from both queue's qjm.eventQueue.Delete(updateQj) qjm.qjqueue.Delete(updateQj) } @@ -1672,7 +1672,7 @@ func (cc *XController) updateQueueJob(oldObj, newObj interface{}) { } klog.V(6).Infof("[Informer-updateQJ] '%s/%s' *Delay=%.6f seconds normal enqueue Version=%s Status=%v", newQJ.Namespace, newQJ.Name, time.Now().Sub(newQJ.Status.ControllerFirstTimestamp.Time).Seconds(), newQJ.ResourceVersion, newQJ.Status) - //cc.eventQueue.Delete(oldObj) + // cc.eventQueue.Delete(oldObj) cc.enqueue(newQJ) } @@ -1784,7 +1784,7 @@ func (cc *XController) updateQueueJobStatus(ctx context.Context, queueJobFromAge } new_flag := queueJobFromAgent.Status.State queueJobInEtcd.Status.State = new_flag - _, err = cc.arbclients.McadV1beta1().AppWrappers(queueJobInEtcd.Namespace).Update(ctx, queueJobInEtcd, metav1.UpdateOptions{}) + _, err = cc.arbclients.WorkloadV1beta1().AppWrappers(queueJobInEtcd.Namespace).Update(ctx, queueJobInEtcd, metav1.UpdateOptions{}) if err != nil { return err } @@ -1817,8 +1817,8 @@ func (cc *XController) worker() { return nil } - //asmalvan - starts - //TODO: Should this be part of ScheduleNext() method? + // asmalvan - starts + // TODO: Should this be part of ScheduleNext() method? 
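The status-update helpers above retry etcd conflicts by re-reading the AppWrapper and reapplying the desired status on the fresh copy. The same idiom, recast onto client-go's stock retry helper purely for illustration (updateStatusWithRetry is a hypothetical stand-in, not the controller's own retrier-based code):

```go
package controller

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"

	arbv1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1"
	versioned "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/client/clientset/versioned"
)

// updateStatusWithRetry re-reads the AppWrapper on each Conflict so that
// UpdateStatus always carries a current resourceVersion.
func updateStatusWithRetry(ctx context.Context, clients versioned.Interface, aw *arbv1.AppWrapper) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		current, err := clients.WorkloadV1beta1().AppWrappers(aw.Namespace).Get(ctx, aw.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		current.Status = aw.Status // reapply the desired status on the fresh copy
		_, err = clients.WorkloadV1beta1().AppWrappers(aw.Namespace).UpdateStatus(ctx, current, metav1.UpdateOptions{})
		return err
	})
}
```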
if queuejob.Status.State == arbv1.AppWrapperStateCompleted { return nil } @@ -1861,13 +1861,13 @@ func (cc *XController) worker() { return nil } - //scheduleNext method takes a dispatched AW which has not been evaluated, extract resources requested by AW - //compares it with available unallocated cluster resources, performs quota check - //if everything passes then CanRun is set to true and AW is ready for dispatch + // scheduleNext method takes a dispatched AW which has not been evaluated, extract resources requested by AW + // compares it with available unallocated cluster resources, performs quota check + // if everything passes then CanRun is set to true and AW is ready for dispatch if !queuejob.Status.CanRun && (queuejob.Status.State != arbv1.AppWrapperStateActive) { cc.ScheduleNext(queuejob) - //When an AW passes ScheduleNext gate then we want to progress AW to Running to begin with - //Sync queuejob will not unwrap an AW to spawn genericItems + // When an AW passes ScheduleNext gate then we want to progress AW to Running to begin with + // Sync queuejob will not unwrap an AW to spawn genericItems if queuejob.Status.CanRun { // errs := make(chan error, 1) @@ -1883,11 +1883,9 @@ func (cc *XController) worker() { // If any error, requeue it. return err } - } - } - //asmalvan- ends + // asmalvan- ends klog.V(10).Infof("[worker] Ending %s Delay=%.6f seconds &newQJ=%p Version=%s Status=%+v", queuejob.Name, time.Now().Sub(queuejob.Status.ControllerFirstTimestamp.Time).Seconds(), queuejob, queuejob.ResourceVersion, queuejob.Status) diff --git a/pkg/controller/queuejobdispatch/queuejobagent.go b/pkg/controller/queuejobdispatch/queuejobagent.go index a53067619..067b626d5 100644 --- a/pkg/controller/queuejobdispatch/queuejobagent.go +++ b/pkg/controller/queuejobdispatch/queuejobagent.go @@ -95,7 +95,7 @@ func NewJobClusterAgent(config string, agentEventQueue *cache.FIFO) *JobClusterA func(opt *metav1.ListOptions) { opt.LabelSelector = "IsDispatched=true" }, - ).Mcad().V1beta1().AppWrappers() + ).Workload().V1beta1().AppWrappers() qa.jobInformer.Informer().AddEventHandler( cache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { @@ -161,7 +161,7 @@ func (qa *JobClusterAgent) Run(stopCh <-chan struct{}) { func (qa *JobClusterAgent) DeleteJob(ctx context.Context, cqj *arbv1.AppWrapper) { qj_temp := cqj.DeepCopy() klog.V(2).Infof("[Dispatcher: Agent] Request deletion of XQJ %s to Agent %s\n", qj_temp.Name, qa.AgentId) - qa.queuejobclients.McadV1beta1().AppWrappers(qj_temp.Namespace).Delete(ctx, qj_temp.Name, metav1.DeleteOptions{}) + qa.queuejobclients.WorkloadV1beta1().AppWrappers(qj_temp.Namespace).Delete(ctx, qj_temp.Name, metav1.DeleteOptions{}) } func (qa *JobClusterAgent) CreateJob(ctx context.Context, cqj *arbv1.AppWrapper) { @@ -183,7 +183,7 @@ func (qa *JobClusterAgent) CreateJob(ctx context.Context, cqj *arbv1.AppWrapper) agent_qj.Labels["IsDispatched"] = "true" klog.V(2).Infof("[Dispatcher: Agent] Create XQJ: %s (Status: %+v) in Agent %s\n", agent_qj.Name, agent_qj.Status, qa.AgentId) - qa.queuejobclients.McadV1beta1().AppWrappers(agent_qj.Namespace).Create(ctx, agent_qj, metav1.CreateOptions{}) + qa.queuejobclients.WorkloadV1beta1().AppWrappers(agent_qj.Namespace).Create(ctx, agent_qj, metav1.CreateOptions{}) } type ClusterMetricsList struct { diff --git a/pkg/controller/queuejobresources/genericresource/genericresource.go b/pkg/controller/queuejobresources/genericresource/genericresource.go index 250b6272f..3905b0d09 100644 --- 
a/pkg/controller/queuejobresources/genericresource/genericresource.go +++ b/pkg/controller/queuejobresources/genericresource/genericresource.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package genericresource import ( @@ -85,7 +86,7 @@ func (gr *GenericResources) Cleanup(aw *arbv1.AppWrapper, awr *arbv1.AppWrapperG name := "" namespaced := true - //todo:DELETEME dd := common.KubeClient.Discovery() + // todo:DELETEME dd := common.KubeClient.Discovery() dd := gr.clients.Discovery() apigroups, err := restmapper.GetAPIGroupResources(dd) if err != nil { @@ -106,7 +107,7 @@ func (gr *GenericResources) Cleanup(aw *arbv1.AppWrapper, awr *arbv1.AppWrapperG return name, gvk, err } - //todo:DELETEME restconfig := common.KubeConfig + // todo:DELETEME restconfig := common.KubeConfig restconfig := gr.kubeClientConfig restconfig.GroupVersion = &schema.GroupVersion{ Group: mapping.GroupVersionKind.Group, @@ -148,7 +149,7 @@ func (gr *GenericResources) Cleanup(aw *arbv1.AppWrapper, awr *arbv1.AppWrapperG return name, gvk, err } - unstruct.Object = blob.(map[string]interface{}) //set object to the content of the blob after Unmarshalling + unstruct.Object = blob.(map[string]interface{}) // set object to the content of the blob after Unmarshalling namespace := "" if md, ok := unstruct.Object["metadata"]; ok { @@ -202,7 +203,7 @@ func (gr *GenericResources) SyncQueueJob(aw *arbv1.AppWrapper, awr *arbv1.AppWra }() namespaced := true - //todo:DELETEME dd := common.KubeClient.Discovery() + // todo:DELETEME dd := common.KubeClient.Discovery() dd := gr.clients.Discovery() apigroups, err := restmapper.GetAPIGroupResources(dd) if err != nil { @@ -211,8 +212,8 @@ func (gr *GenericResources) SyncQueueJob(aw *arbv1.AppWrapper, awr *arbv1.AppWra } ext := awr.GenericTemplate restmapper := restmapper.NewDiscoveryRESTMapper(apigroups) - //versions := &unstructured.Unstructured{} - //_, gvk, err := unstructured.UnstructuredJSONScheme.Decode(ext.Raw, nil, versions) + // versions := &unstructured.Unstructured{} + // _, gvk, err := unstructured.UnstructuredJSONScheme.Decode(ext.Raw, nil, versions) _, gvk, err := unstructured.UnstructuredJSONScheme.Decode(ext.Raw, nil, nil) if err != nil { klog.Errorf("Decoding error, please check your CR! 
Aborting handling the resource creation, err: `%v`", err) @@ -224,7 +225,7 @@ func (gr *GenericResources) SyncQueueJob(aw *arbv1.AppWrapper, awr *arbv1.AppWra return []*v1.Pod{}, err } - //todo:DELETEME restconfig := common.KubeConfig + // todo:DELETEME restconfig := common.KubeConfig restconfig := gr.kubeClientConfig restconfig.GroupVersion = &schema.GroupVersion{ Group: mapping.GroupVersionKind.Group, @@ -270,7 +271,7 @@ func (gr *GenericResources) SyncQueueJob(aw *arbv1.AppWrapper, awr *arbv1.AppWra return []*v1.Pod{}, err } ownerRef := metav1.NewControllerRef(aw, appWrapperKind) - unstruct.Object = blob.(map[string]interface{}) //set object to the content of the blob after Unmarshalling + unstruct.Object = blob.(map[string]interface{}) // set object to the content of the blob after Unmarshalling unstruct.SetOwnerReferences(append(unstruct.GetOwnerReferences(), *ownerRef)) namespace := "default" name := "" @@ -655,7 +656,7 @@ func (gr *GenericResources) IsItemCompleted(awgr *arbv1.AppWrapperGenericResourc } for _, job := range inEtcd.Items { - //job.UnstructuredContent() has status information + // job.UnstructuredContent() has status information unstructuredObjectName := job.GetName() if unstructuredObjectName != genericItemName { continue @@ -672,8 +673,8 @@ func (gr *GenericResources) IsItemCompleted(awgr *arbv1.AppWrapperGenericResourc continue } - //check with a false status field - //check also conditions object + // check with a false status field + // check also conditions object jobMap := job.UnstructuredContent() if jobMap == nil { continue @@ -683,14 +684,14 @@ func (gr *GenericResources) IsItemCompleted(awgr *arbv1.AppWrapperGenericResourc status := job.Object["status"].(map[string]interface{}) if status["conditions"] != nil { conditions, ok := job.Object["status"].(map[string]interface{})["conditions"].([]interface{}) - //if condition not found skip for this interation + // if condition not found skip for this interation if !ok { klog.Errorf("[IsItemCompleted] Error processing of unstructured object %v in namespace %v with labels %v, err: %v", job.GetName(), job.GetNamespace(), job.GetLabels(), err) continue } for _, item := range conditions { completionType := fmt.Sprint(item.(map[string]interface{})["type"]) - //Move this to utils package? + // Move this to utils package? 
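The completion check in IsItemCompleted boils down to a case-insensitive substring match between the user's comma-separated CompletionStatus values and the condition types found on the unstructured item. Distilled into a hypothetical helper, separate from the controller's loop:

```go
package main

import (
	"fmt"
	"strings"
)

// isCompleted reports whether any observed condition type matches one of the
// user-specified completion conditions, case-insensitively.
func isCompleted(completionStatus string, conditionTypes []string) bool {
	for _, ct := range conditionTypes {
		for _, want := range strings.Split(completionStatus, ",") {
			if strings.Contains(strings.ToLower(ct), strings.ToLower(want)) {
				return true
			}
		}
	}
	return false
}

func main() {
	// A Job whose status carries a "JobComplete" condition matches "Complete".
	fmt.Println(isCompleted("Complete,Succeeded", []string{"JobComplete"})) // true
	fmt.Println(isCompleted("Complete,Succeeded", []string{"Running"}))     // false
}
```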
userSpecfiedCompletionConditions := strings.Split(awgr.CompletionStatus, ",") for _, condition := range userSpecfiedCompletionConditions { if strings.Contains(strings.ToLower(completionType), strings.ToLower(condition)) { diff --git a/pkg/controller/quota/quotaforestmanager/qm_lib_backend_with_quotasubt_mgr/quotasubtmgr/quota_subtree_manager.go b/pkg/controller/quota/quotaforestmanager/qm_lib_backend_with_quotasubt_mgr/quotasubtmgr/quota_subtree_manager.go index fb859ff2e..bb3dd5da2 100644 --- a/pkg/controller/quota/quotaforestmanager/qm_lib_backend_with_quotasubt_mgr/quotasubtmgr/quota_subtree_manager.go +++ b/pkg/controller/quota/quotaforestmanager/qm_lib_backend_with_quotasubt_mgr/quotasubtmgr/quota_subtree_manager.go @@ -73,7 +73,7 @@ func newQuotaSubtreeManager(config *rest.Config, quotaManagerBackend *qmlib.Mana qstinformers.WithTweakListOptions(func(opt *metav1.ListOptions) { opt.LabelSelector = util.URMTreeLabel })) - qstm.quotaSubtreeInformer = qstInformerFactory.Ibm().V1().QuotaSubtrees() + qstm.quotaSubtreeInformer = qstInformerFactory.Quota().V1().QuotaSubtrees() // Add event handle for resource plans qstm.quotaSubtreeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -182,7 +182,7 @@ func (qstm *QuotaSubtreeManager) createTreeNodesFromQST(qst *qstv1.QuotaSubtree) klog.V(4).Infof("[createTreeNodesFromQST] Created node: %s=%#v for QuotaSubtree %s completed.", child_key, *node, qst.Name) - //Add to the list of nodes from this quotasubtree + // Add to the list of nodes from this quotasubtree nodeSpecs[child_key] = node } diff --git a/test/e2e-kuttl-deployment-01/steps/00-assert.yaml b/test/e2e-kuttl-deployment-01/steps/00-assert.yaml index e413d9441..9439274bc 100644 --- a/test/e2e-kuttl-deployment-01/steps/00-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/00-assert.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev status: acceptedNames: kind: AppWrapper diff --git a/test/e2e-kuttl-deployment-01/steps/01-assert.yaml b/test/e2e-kuttl-deployment-01/steps/01-assert.yaml index ddb71cbb5..cbf358496 100644 --- a/test/e2e-kuttl-deployment-01/steps/01-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/01-assert.yaml @@ -1,5 +1,5 @@ # Verify AppWrapper was dispatched and pod was created -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-deployment-01 diff --git a/test/e2e-kuttl-deployment-01/steps/01-install.yaml b/test/e2e-kuttl-deployment-01/steps/01-install.yaml index b66520159..401e07b32 100644 --- a/test/e2e-kuttl-deployment-01/steps/01-install.yaml +++ b/test/e2e-kuttl-deployment-01/steps/01-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-deployment-01 diff --git a/test/e2e-kuttl-deployment-01/steps/02-assert.yaml b/test/e2e-kuttl-deployment-01/steps/02-assert.yaml index dadd02589..f8bc8fdcd 100644 --- a/test/e2e-kuttl-deployment-01/steps/02-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/02-assert.yaml @@ -1,5 +1,5 @@ --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-job-02 diff --git a/test/e2e-kuttl-deployment-01/steps/02-install.yaml b/test/e2e-kuttl-deployment-01/steps/02-install.yaml index 75df53b35..556ad04a1 100644 --- a/test/e2e-kuttl-deployment-01/steps/02-install.yaml +++ 
b/test/e2e-kuttl-deployment-01/steps/02-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-job-02 diff --git a/test/e2e-kuttl-deployment-01/steps/03-assert.yaml b/test/e2e-kuttl-deployment-01/steps/03-assert.yaml index 525ef6c2e..fb5d6ba6e 100644 --- a/test/e2e-kuttl-deployment-01/steps/03-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/03-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: hold-completion-job-03 diff --git a/test/e2e-kuttl-deployment-01/steps/03-install.yaml b/test/e2e-kuttl-deployment-01/steps/03-install.yaml index 63ef37e2f..78c9566ac 100644 --- a/test/e2e-kuttl-deployment-01/steps/03-install.yaml +++ b/test/e2e-kuttl-deployment-01/steps/03-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: hold-completion-job-03 diff --git a/test/e2e-kuttl-deployment-01/steps/04-assert.yaml b/test/e2e-kuttl-deployment-01/steps/04-assert.yaml index 0473127bf..89ff22ddb 100644 --- a/test/e2e-kuttl-deployment-01/steps/04-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/04-assert.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev status: acceptedNames: kind: AppWrapper @@ -15,7 +15,7 @@ status: apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: quotasubtrees.ibm.com + name: quotasubtrees.quota.codeflare.dev status: acceptedNames: kind: QuotaSubtree diff --git a/test/e2e-kuttl-deployment-01/steps/04-install.yaml b/test/e2e-kuttl-deployment-01/steps/04-install.yaml index de6e967f1..9659a1aae 100644 --- a/test/e2e-kuttl-deployment-01/steps/04-install.yaml +++ b/test/e2e-kuttl-deployment-01/steps/04-install.yaml @@ -7,5 +7,5 @@ unitTest: false delete: [] commands: - script: helm delete -n kube-system mcad-controller - - script: helm upgrade --install mcad-controller ${ROOT_DIR}/deployment/mcad-controller --namespace kube-system --wait --set loglevel=${LOG_LEVEL} --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY --set configMap.quotaEnabled='"true"' --set quotaManagement.rbac.apiGroup=ibm.com --set quotaManagement.rbac.resource=quotasubtrees --set configMap.name=mcad-controller-configmap --set configMap.preemptionEnabled='"true"' + - script: helm upgrade --install mcad-controller ${ROOT_DIR}/deployment/mcad-controller --namespace kube-system --wait --set loglevel=${LOG_LEVEL} --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY --set configMap.quotaEnabled='"true"' --set quotaManagement.rbac.apiGroup=quota.codeflare.dev --set quotaManagement.rbac.resource=quotasubtrees --set configMap.name=mcad-controller-configmap --set configMap.preemptionEnabled='"true"' diff --git a/test/e2e-kuttl-deployment-01/steps/05-assert.yaml b/test/e2e-kuttl-deployment-01/steps/05-assert.yaml index 43c634b20..e0816a7f3 100644 --- 
a/test/e2e-kuttl-deployment-01/steps/05-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/05-assert.yaml @@ -1,5 +1,5 @@ --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -10,7 +10,7 @@ spec: children: - name: context-root --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children diff --git a/test/e2e-kuttl-deployment-01/steps/05-install-single-quota-tree.yaml b/test/e2e-kuttl-deployment-01/steps/05-install-single-quota-tree.yaml index 2d207e5a4..6b963f86e 100644 --- a/test/e2e-kuttl-deployment-01/steps/05-install-single-quota-tree.yaml +++ b/test/e2e-kuttl-deployment-01/steps/05-install-single-quota-tree.yaml @@ -1,5 +1,5 @@ --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -14,7 +14,7 @@ spec: cpu: 1950m memory: 1500Mi --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children diff --git a/test/e2e-kuttl-deployment-01/steps/06-assert.yaml b/test/e2e-kuttl-deployment-01/steps/06-assert.yaml index 9da54bf8f..56f8e5c05 100644 --- a/test/e2e-kuttl-deployment-01/steps/06-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/06-assert.yaml @@ -1,5 +1,5 @@ #Verify AppWrappers finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-job-06 diff --git a/test/e2e-kuttl-deployment-01/steps/06-install.yaml b/test/e2e-kuttl-deployment-01/steps/06-install.yaml index 2f9268645..d623c272e 100644 --- a/test/e2e-kuttl-deployment-01/steps/06-install.yaml +++ b/test/e2e-kuttl-deployment-01/steps/06-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-job-06 diff --git a/test/e2e-kuttl-deployment-01/steps/07-assert.yaml b/test/e2e-kuttl-deployment-01/steps/07-assert.yaml index d411aec2c..dc0ade421 100644 --- a/test/e2e-kuttl-deployment-01/steps/07-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/07-assert.yaml @@ -1,5 +1,5 @@ #Verify AppWrappers finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-job-06 diff --git a/test/e2e-kuttl-deployment-01/steps/08-assert.yaml b/test/e2e-kuttl-deployment-01/steps/08-assert.yaml index 665133238..f736ef0d4 100644 --- a/test/e2e-kuttl-deployment-01/steps/08-assert.yaml +++ b/test/e2e-kuttl-deployment-01/steps/08-assert.yaml @@ -1,5 +1,5 @@ #Verify AppWrappers finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronze-quota-job-08 diff --git a/test/e2e-kuttl-deployment-01/steps/08-install.yaml b/test/e2e-kuttl-deployment-01/steps/08-install.yaml index 043d3e0b6..530a1e83b 100644 --- a/test/e2e-kuttl-deployment-01/steps/08-install.yaml +++ b/test/e2e-kuttl-deployment-01/steps/08-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronze-quota-job-08 diff --git a/test/e2e-kuttl-deployment-02/steps/00-assert.yaml b/test/e2e-kuttl-deployment-02/steps/00-assert.yaml index e3e806177..b6664f723 100644 --- a/test/e2e-kuttl-deployment-02/steps/00-assert.yaml +++ b/test/e2e-kuttl-deployment-02/steps/00-assert.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition 
metadata: - name: appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev status: acceptedNames: kind: AppWrapper @@ -19,7 +19,7 @@ metadata: name: start-up-02 --- # Verify subtree creations -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -27,7 +27,7 @@ metadata: labels: tree: quota_context --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root @@ -35,7 +35,7 @@ metadata: labels: tree: quota_service --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children diff --git a/test/e2e-kuttl-deployment-02/steps/01-assert.yaml b/test/e2e-kuttl-deployment-02/steps/01-assert.yaml index a5894385a..69102d040 100644 --- a/test/e2e-kuttl-deployment-02/steps/01-assert.yaml +++ b/test/e2e-kuttl-deployment-02/steps/01-assert.yaml @@ -1,5 +1,5 @@ #Verify AppWrappers finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronze-quota-job-01 diff --git a/test/e2e-kuttl-deployment-02/steps/01-install.yaml b/test/e2e-kuttl-deployment-02/steps/01-install.yaml index 16bb0c321..c7b6a1c40 100644 --- a/test/e2e-kuttl-deployment-02/steps/01-install.yaml +++ b/test/e2e-kuttl-deployment-02/steps/01-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronze-quota-job-01 diff --git a/test/e2e-kuttl-deployment-02/steps/02-assert.yaml b/test/e2e-kuttl-deployment-02/steps/02-assert.yaml index 999724877..aa9a6f7e8 100644 --- a/test/e2e-kuttl-deployment-02/steps/02-assert.yaml +++ b/test/e2e-kuttl-deployment-02/steps/02-assert.yaml @@ -1,5 +1,5 @@ # Verify AppWrapper was dispatched and pod was created -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-deployment-02 diff --git a/test/e2e-kuttl-deployment-02/steps/02-install.yaml b/test/e2e-kuttl-deployment-02/steps/02-install.yaml index be7d6f95e..a8800f4c2 100644 --- a/test/e2e-kuttl-deployment-02/steps/02-install.yaml +++ b/test/e2e-kuttl-deployment-02/steps/02-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-deployment-02 diff --git a/test/e2e-kuttl-deployment-02/steps/04-assert.yaml b/test/e2e-kuttl-deployment-02/steps/04-assert.yaml index d7ba4063a..8005394a1 100644 --- a/test/e2e-kuttl-deployment-02/steps/04-assert.yaml +++ b/test/e2e-kuttl-deployment-02/steps/04-assert.yaml @@ -1,5 +1,5 @@ #Verify AppWrappers finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: gold-quota-job-04 diff --git a/test/e2e-kuttl-deployment-02/steps/04-install.yaml b/test/e2e-kuttl-deployment-02/steps/04-install.yaml index b1b10b321..f6f464178 100644 --- a/test/e2e-kuttl-deployment-02/steps/04-install.yaml +++ b/test/e2e-kuttl-deployment-02/steps/04-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: gold-quota-job-04 diff --git a/test/e2e-kuttl-deployment-03/steps/00-assert.yaml b/test/e2e-kuttl-deployment-03/steps/00-assert.yaml index 22ceab87c..136b00361 100644 --- a/test/e2e-kuttl-deployment-03/steps/00-assert.yaml +++ b/test/e2e-kuttl-deployment-03/steps/00-assert.yaml @@ -2,7 +2,7 @@ apiVersion: 
apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev status: acceptedNames: kind: AppWrapper @@ -19,7 +19,7 @@ metadata: name: start-up-03 --- # Verify subtree creations -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -27,7 +27,7 @@ metadata: labels: tree: quota_context --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root @@ -35,7 +35,7 @@ metadata: labels: tree: quota_service --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children diff --git a/test/e2e-kuttl-deployment-03/steps/01-assert.yaml b/test/e2e-kuttl-deployment-03/steps/01-assert.yaml index 30eb2fee6..64710687e 100644 --- a/test/e2e-kuttl-deployment-03/steps/01-assert.yaml +++ b/test/e2e-kuttl-deployment-03/steps/01-assert.yaml @@ -1,5 +1,5 @@ #Verify AppWrappers finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronze-quota-job-01 diff --git a/test/e2e-kuttl-deployment-03/steps/01-install.yaml b/test/e2e-kuttl-deployment-03/steps/01-install.yaml index 96fda72bd..faee9e33f 100644 --- a/test/e2e-kuttl-deployment-03/steps/01-install.yaml +++ b/test/e2e-kuttl-deployment-03/steps/01-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronze-quota-job-01 diff --git a/test/e2e-kuttl-deployment-03/steps/02-assert.yaml b/test/e2e-kuttl-deployment-03/steps/02-assert.yaml index 063b2c32a..8551bdf61 100644 --- a/test/e2e-kuttl-deployment-03/steps/02-assert.yaml +++ b/test/e2e-kuttl-deployment-03/steps/02-assert.yaml @@ -1,5 +1,5 @@ # Verify AppWrapper was dispatched and pod was created -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-deployment-02 diff --git a/test/e2e-kuttl-deployment-03/steps/02-install.yaml b/test/e2e-kuttl-deployment-03/steps/02-install.yaml index ce187860c..d987b1cc9 100644 --- a/test/e2e-kuttl-deployment-03/steps/02-install.yaml +++ b/test/e2e-kuttl-deployment-03/steps/02-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: no-quota-deployment-02 diff --git a/test/e2e-kuttl-deployment-03/steps/03-assert.yaml b/test/e2e-kuttl-deployment-03/steps/03-assert.yaml index 33ab6ebb2..47f7e8f6b 100644 --- a/test/e2e-kuttl-deployment-03/steps/03-assert.yaml +++ b/test/e2e-kuttl-deployment-03/steps/03-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: actinides-root @@ -9,7 +9,7 @@ spec: children: - name: actinides-root --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: actinides-children @@ -22,7 +22,7 @@ spec: - name: plutonium - name: lawrencium --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -30,7 +30,7 @@ metadata: labels: tree: quota_context --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root @@ -38,7 +38,7 @@ metadata: labels: tree: quota_service --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children diff --git 
a/test/e2e-kuttl-deployment-03/steps/03-install-new-quota-node.yaml b/test/e2e-kuttl-deployment-03/steps/03-install-new-quota-node.yaml index 8848c856d..eac904314 100644 --- a/test/e2e-kuttl-deployment-03/steps/03-install-new-quota-node.yaml +++ b/test/e2e-kuttl-deployment-03/steps/03-install-new-quota-node.yaml @@ -1,4 +1,4 @@ -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: actinides-root @@ -13,7 +13,7 @@ spec: cpu: 1075m memory: 1045Mi --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: actinides-children diff --git a/test/e2e-kuttl-deployment-03/steps/04-assert.yaml b/test/e2e-kuttl-deployment-03/steps/04-assert.yaml index 231b67ab2..13259f3a0 100644 --- a/test/e2e-kuttl-deployment-03/steps/04-assert.yaml +++ b/test/e2e-kuttl-deployment-03/steps/04-assert.yaml @@ -1,5 +1,5 @@ #Verify AppWrappers finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: gold-quota-job-04 diff --git a/test/e2e-kuttl-deployment-03/steps/04-install.yaml b/test/e2e-kuttl-deployment-03/steps/04-install.yaml index d48fcdfe8..aa99097ea 100644 --- a/test/e2e-kuttl-deployment-03/steps/04-install.yaml +++ b/test/e2e-kuttl-deployment-03/steps/04-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: gold-quota-job-04 diff --git a/test/e2e-kuttl-deployment-03/steps/05-assert.yaml b/test/e2e-kuttl-deployment-03/steps/05-assert.yaml index aed7d1eb6..18393a449 100644 --- a/test/e2e-kuttl-deployment-03/steps/05-assert.yaml +++ b/test/e2e-kuttl-deployment-03/steps/05-assert.yaml @@ -1,5 +1,5 @@ #Verify AppWrappers finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: plutonium-quota-job-05 diff --git a/test/e2e-kuttl-deployment-03/steps/05-install.yaml b/test/e2e-kuttl-deployment-03/steps/05-install.yaml index 90d5b8737..a6c83b01e 100644 --- a/test/e2e-kuttl-deployment-03/steps/05-install.yaml +++ b/test/e2e-kuttl-deployment-03/steps/05-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: plutonium-quota-job-05 diff --git a/test/e2e-kuttl/install-quota-subtree.yaml b/test/e2e-kuttl/install-quota-subtree.yaml index 236f6d009..1fc73cd8c 100644 --- a/test/e2e-kuttl/install-quota-subtree.yaml +++ b/test/e2e-kuttl/install-quota-subtree.yaml @@ -1,5 +1,5 @@ --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -15,7 +15,7 @@ spec: memory: 1045Mi nvidia.com/gpu: 16 --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root @@ -31,7 +31,7 @@ spec: memory: 1045Mi nvidia.com/gpu: 16 --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children @@ -70,7 +70,7 @@ spec: memory: 0Mi nvidia.com/gpu: 0 --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root-children diff --git a/test/e2e-kuttl/quota-errors/00-assert.yaml b/test/e2e-kuttl/quota-errors/00-assert.yaml index 853505926..596b14e54 100644 --- a/test/e2e-kuttl/quota-errors/00-assert.yaml +++ b/test/e2e-kuttl/quota-errors/00-assert.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: 
appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev status: acceptedNames: kind: AppWrapper @@ -15,7 +15,7 @@ status: apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: quotasubtrees.ibm.com + name: quotasubtrees.quota.codeflare.dev status: acceptedNames: kind: QuotaSubtree diff --git a/test/e2e-kuttl/quota-errors/01-assert.yaml b/test/e2e-kuttl/quota-errors/01-assert.yaml index 13ef4b424..2fea2b95c 100644 --- a/test/e2e-kuttl/quota-errors/01-assert.yaml +++ b/test/e2e-kuttl/quota-errors/01-assert.yaml @@ -1,5 +1,5 @@ # Verify subtree creations -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -7,7 +7,7 @@ metadata: labels: tree: quota_context --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root @@ -15,7 +15,7 @@ metadata: labels: tree: quota_service --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children diff --git a/test/e2e-kuttl/quota-errors/03-assert.yaml b/test/e2e-kuttl/quota-errors/03-assert.yaml index c01c7801a..629127c4b 100644 --- a/test/e2e-kuttl/quota-errors/03-assert.yaml +++ b/test/e2e-kuttl/quota-errors/03-assert.yaml @@ -1,5 +1,5 @@ # Verify AppWrapper was dispatched and pod was created -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: deployment-silver-lo-pri-1replica diff --git a/test/e2e-kuttl/quota-errors/03-install.yaml b/test/e2e-kuttl/quota-errors/03-install.yaml index 867aa1d12..bd10b3414 100644 --- a/test/e2e-kuttl/quota-errors/03-install.yaml +++ b/test/e2e-kuttl/quota-errors/03-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: deployment-silver-lo-pri-1replica diff --git a/test/e2e-kuttl/quota-forest/00-assert.yaml b/test/e2e-kuttl/quota-forest/00-assert.yaml index 853505926..596b14e54 100644 --- a/test/e2e-kuttl/quota-forest/00-assert.yaml +++ b/test/e2e-kuttl/quota-forest/00-assert.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev status: acceptedNames: kind: AppWrapper @@ -15,7 +15,7 @@ status: apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: quotasubtrees.ibm.com + name: quotasubtrees.quota.codeflare.dev status: acceptedNames: kind: QuotaSubtree diff --git a/test/e2e-kuttl/quota-forest/01-assert.yaml b/test/e2e-kuttl/quota-forest/01-assert.yaml index 175462a90..8af089b8c 100644 --- a/test/e2e-kuttl/quota-forest/01-assert.yaml +++ b/test/e2e-kuttl/quota-forest/01-assert.yaml @@ -1,5 +1,5 @@ # Verify subtree creations -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -7,7 +7,7 @@ metadata: labels: tree: quota_context --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root @@ -15,7 +15,7 @@ metadata: labels: tree: quota_service --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children @@ -23,7 +23,7 @@ metadata: labels: tree: quota_context --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root-children diff --git a/test/e2e-kuttl/quota-forest/04-assert.yaml 
b/test/e2e-kuttl/quota-forest/04-assert.yaml index 9827e9c03..139ad28b3 100644 --- a/test/e2e-kuttl/quota-forest/04-assert.yaml +++ b/test/e2e-kuttl/quota-forest/04-assert.yaml @@ -1,5 +1,5 @@ # Verify AppWrapper was dispatched and pod was created -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-gold-lo-pri-1replica diff --git a/test/e2e-kuttl/quota-forest/04-install.yaml b/test/e2e-kuttl/quota-forest/04-install.yaml index 882e7c4db..0869d8381 100644 --- a/test/e2e-kuttl/quota-forest/04-install.yaml +++ b/test/e2e-kuttl/quota-forest/04-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-gold-lo-pri-1replica diff --git a/test/e2e-kuttl/quota-forest/05-assert.yaml b/test/e2e-kuttl/quota-forest/05-assert.yaml index df07213d2..49e92a207 100644 --- a/test/e2e-kuttl/quota-forest/05-assert.yaml +++ b/test/e2e-kuttl/quota-forest/05-assert.yaml @@ -1,5 +1,5 @@ # Verify that quota management is enabled by checking the queuing is happening (e.g. no available quota for quota ids provided in AW) -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-bad-quota-id-10replica diff --git a/test/e2e-kuttl/quota-forest/05-install.yaml b/test/e2e-kuttl/quota-forest/05-install.yaml index 6cc9cbba9..39ca0dbfa 100644 --- a/test/e2e-kuttl/quota-forest/05-install.yaml +++ b/test/e2e-kuttl/quota-forest/05-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-bad-quota-id-10replica diff --git a/test/e2e-kuttl/quota-forest/06-assert.yaml b/test/e2e-kuttl/quota-forest/06-assert.yaml index 091c2b24b..056eca716 100644 --- a/test/e2e-kuttl/quota-forest/06-assert.yaml +++ b/test/e2e-kuttl/quota-forest/06-assert.yaml @@ -1,5 +1,5 @@ # Verify that quota management preempted lower priority job -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-gold-high-pri-1replica @@ -16,7 +16,7 @@ metadata: name: job-gold-high-pri-1replica-0 namespace: test --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-bad-quota-id-10replica @@ -27,7 +27,7 @@ metadata: status: state: Pending --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-gold-lo-pri-1replica diff --git a/test/e2e-kuttl/quota-forest/06-install.yaml b/test/e2e-kuttl/quota-forest/06-install.yaml index c42f6bf9c..000771d0c 100644 --- a/test/e2e-kuttl/quota-forest/06-install.yaml +++ b/test/e2e-kuttl/quota-forest/06-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-gold-high-pri-1replica diff --git a/test/e2e-kuttl/quota-forest/07-assert.yaml b/test/e2e-kuttl/quota-forest/07-assert.yaml index 3f91e0b11..72d47c0ba 100644 --- a/test/e2e-kuttl/quota-forest/07-assert.yaml +++ b/test/e2e-kuttl/quota-forest/07-assert.yaml @@ -1,5 +1,5 @@ # Verify that quota management is enabled using different quota ids -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-1-bronze-1replica @@ -16,7 +16,7 @@ metadata: name: job-1-bronze-1replica-0 namespace: test --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: 
AppWrapper metadata: name: job-gold-high-pri-1replica @@ -33,7 +33,7 @@ metadata: name: job-gold-high-pri-1replica-0 namespace: test --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-bad-quota-id-10replica @@ -44,7 +44,7 @@ metadata: status: state: Pending --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-gold-lo-pri-1replica diff --git a/test/e2e-kuttl/quota-forest/07-install.yaml b/test/e2e-kuttl/quota-forest/07-install.yaml index 702177491..275069ddd 100644 --- a/test/e2e-kuttl/quota-forest/07-install.yaml +++ b/test/e2e-kuttl/quota-forest/07-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-1-bronze-1replica diff --git a/test/e2e-kuttl/quota-forest/08-assert.yaml b/test/e2e-kuttl/quota-forest/08-assert.yaml index 4a3dff17a..c4399f3b9 100644 --- a/test/e2e-kuttl/quota-forest/08-assert.yaml +++ b/test/e2e-kuttl/quota-forest/08-assert.yaml @@ -1,5 +1,5 @@ # Verify that quota management hard limit -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-2-bronze-1replica @@ -10,7 +10,7 @@ metadata: status: state: Pending --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-1-bronze-1replica @@ -27,7 +27,7 @@ metadata: name: job-1-bronze-1replica-0 namespace: test --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-gold-high-pri-1replica @@ -44,7 +44,7 @@ metadata: name: job-gold-high-pri-1replica-0 namespace: test --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-bad-quota-id-10replica @@ -55,7 +55,7 @@ metadata: status: state: Pending --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-gold-lo-pri-1replica diff --git a/test/e2e-kuttl/quota-forest/08-install.yaml b/test/e2e-kuttl/quota-forest/08-install.yaml index 6eb041280..2747d8c5f 100644 --- a/test/e2e-kuttl/quota-forest/08-install.yaml +++ b/test/e2e-kuttl/quota-forest/08-install.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-2-bronze-1replica diff --git a/test/e2e-kuttl/quota-forest/09-assert.yaml b/test/e2e-kuttl/quota-forest/09-assert.yaml index d7b5983db..84a9a1134 100644 --- a/test/e2e-kuttl/quota-forest/09-assert.yaml +++ b/test/e2e-kuttl/quota-forest/09-assert.yaml @@ -1,6 +1,6 @@ # Verify AppWrappers finished successfully --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-1 @@ -8,7 +8,7 @@ metadata: status: state: Completed --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-2 diff --git a/test/e2e-kuttl/quota-forest/09-install.yaml b/test/e2e-kuttl/quota-forest/09-install.yaml index b2ad30a57..a7579ef34 100644 --- a/test/e2e-kuttl/quota-forest/09-install.yaml +++ b/test/e2e-kuttl/quota-forest/09-install.yaml @@ -4,7 +4,7 @@ kind: TestStep commands: - command: kubectl -n test delete appwrappers,jobs --all --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-1 @@ -68,7 +68,7 @@ spec: 
nvidia.com/gpu: 0 memory: 300Mi --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-2 diff --git a/test/e2e-kuttl/quota-forest/10-assert.yaml b/test/e2e-kuttl/quota-forest/10-assert.yaml index df4e36ecd..d18be6c21 100644 --- a/test/e2e-kuttl/quota-forest/10-assert.yaml +++ b/test/e2e-kuttl/quota-forest/10-assert.yaml @@ -1,5 +1,5 @@ # Verify AppWrapper without quota finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-without-labels diff --git a/test/e2e-kuttl/quota-forest/10-install.yaml b/test/e2e-kuttl/quota-forest/10-install.yaml index 39b07eb00..17d8e7264 100644 --- a/test/e2e-kuttl/quota-forest/10-install.yaml +++ b/test/e2e-kuttl/quota-forest/10-install.yaml @@ -1,5 +1,5 @@ --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-without-labels diff --git a/test/e2e-kuttl/quota-forest/11-assert.yaml b/test/e2e-kuttl/quota-forest/11-assert.yaml index cba3bc8df..b8d04171a 100644 --- a/test/e2e-kuttl/quota-forest/11-assert.yaml +++ b/test/e2e-kuttl/quota-forest/11-assert.yaml @@ -1,5 +1,5 @@ # Verify AppWrapper without quota finished successfully -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-with-labels-no-quota diff --git a/test/e2e-kuttl/quota-forest/11-install.yaml b/test/e2e-kuttl/quota-forest/11-install.yaml index a58052760..8e7b04fdd 100644 --- a/test/e2e-kuttl/quota-forest/11-install.yaml +++ b/test/e2e-kuttl/quota-forest/11-install.yaml @@ -1,5 +1,5 @@ --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: job-with-labels-no-quota diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index e23a7e321..c6a65dc92 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -1,19 +1,4 @@ /* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package e2e import ( diff --git a/test/e2e/job.go b/test/e2e/job.go deleted file mode 100644 index f07ec6116..000000000 --- a/test/e2e/job.go +++ /dev/null @@ -1,264 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ -/* -Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package e2e - -import ( - . "github.com/onsi/ginkgo" -) - -var _ = Describe("AppWrapper E2E Test", func() { -/* - It("Create AppWrapper - Pod Only", func() { - context := initTestContext() - defer cleanupTestContext(context) - rep := clusterSize(context, oneCPU) - - _, aw := createJobEx(context, &jobSpec{ - name: "qj-1", - tasks: []taskSpec{ - { - img: "busybox", - req: oneCPU, - min: 2, - rep: rep, - }, - }, - }) - - err := waitAWPodsReady(context, aw) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Schedule Multiple Jobs", func() { - context := initTestContext() - defer cleanupTestContext(context) - - rep := clusterSize(context, oneCPU) - - job := &jobSpec{ - tasks: []taskSpec{ - { - img: "busybox", - req: oneCPU, - min: 2, - rep: rep, - }, - }, - } - - job.name = "mqj-1" - _, pg1 := createJobEx(context, job) - job.name = "mqj-2" - _, pg2 := createJobEx(context, job) - job.name = "mqj-3" - _, pg3 := createJobEx(context, job) - - err := waitPodGroupReady(context, pg1) - Expect(err).NotTo(HaveOccurred()) - - err = waitPodGroupReady(context, pg2) - Expect(err).NotTo(HaveOccurred()) - - err = waitPodGroupReady(context, pg3) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Gang scheduling", func() { - context := initTestContext() - defer cleanupTestContext(context) - rep := clusterSize(context, oneCPU)/2 + 1 - - replicaset := createReplicaSet(context, "rs-1", rep, "nginx", oneCPU) - err := waitReplicaSetReady(context, replicaset.Name) - Expect(err).NotTo(HaveOccurred()) - - job := &jobSpec{ - name: "gang-qj", - namespace: "test", - tasks: []taskSpec{ - { - img: "busybox", - req: oneCPU, - min: rep, - rep: rep, - }, - }, - } - - _, pg := createJobEx(context, job) - err = waitPodGroupPending(context, pg) - Expect(err).NotTo(HaveOccurred()) - - waitPodGroupUnschedulable(context, pg) - Expect(err).NotTo(HaveOccurred()) - - err = deleteReplicaSet(context, replicaset.Name) - Expect(err).NotTo(HaveOccurred()) - - err = waitPodGroupReady(context, pg) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Gang scheduling: Full Occupied", func() { - context := initTestContext() - defer cleanupTestContext(context) - rep := clusterSize(context, oneCPU) - - job := &jobSpec{ - namespace: "test", - tasks: []taskSpec{ - { - img: "nginx", - req: oneCPU, - min: rep, - rep: rep, - }, - }, - } - - job.name = "gang-fq-qj1" - _, pg1 := createJobEx(context, job) - err := waitPodGroupReady(context, pg1) - Expect(err).NotTo(HaveOccurred()) - - job.name = "gang-fq-qj2" - _, pg2 := createJobEx(context, job) - err = waitPodGroupPending(context, pg2) - Expect(err).NotTo(HaveOccurred()) - - err = waitPodGroupReady(context, pg1) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Preemption", func() { - context := initTestContext() - defer cleanupTestContext(context) - - slot := oneCPU - rep := clusterSize(context, slot) - - job := 
&jobSpec{ - tasks: []taskSpec{ - { - img: "nginx", - req: slot, - min: 1, - rep: rep, - }, - }, - } - - job.name = "preemptee-qj" - _, pg1 := createJobEx(context, job) - err := waitTasksReadyEx(context, pg1, int(rep)) - Expect(err).NotTo(HaveOccurred()) - - job.name = "preemptor-qj" - _, pg2 := createJobEx(context, job) - err = waitTasksReadyEx(context, pg1, int(rep)/2) - Expect(err).NotTo(HaveOccurred()) - - err = waitTasksReadyEx(context, pg2, int(rep)/2) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Multiple Preemption", func() { - context := initTestContext() - defer cleanupTestContext(context) - - slot := oneCPU - rep := clusterSize(context, slot) - - job := &jobSpec{ - tasks: []taskSpec{ - { - img: "nginx", - req: slot, - min: 1, - rep: rep, - }, - }, - } - - job.name = "preemptee-qj" - _, pg1 := createJobEx(context, job) - err := waitTasksReadyEx(context, pg1, int(rep)) - Expect(err).NotTo(HaveOccurred()) - - job.name = "preemptor-qj1" - _, pg2 := createJobEx(context, job) - Expect(err).NotTo(HaveOccurred()) - - job.name = "preemptor-qj2" - _, pg3 := createJobEx(context, job) - Expect(err).NotTo(HaveOccurred()) - - err = waitTasksReadyEx(context, pg1, int(rep)/3) - Expect(err).NotTo(HaveOccurred()) - - err = waitTasksReadyEx(context, pg2, int(rep)/3) - Expect(err).NotTo(HaveOccurred()) - - err = waitTasksReadyEx(context, pg3, int(rep)/3) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Schedule BestEffort Job", func() { - context := initTestContext() - defer cleanupTestContext(context) - - slot := oneCPU - rep := clusterSize(context, slot) - - job := &jobSpec{ - name: "test", - tasks: []taskSpec{ - { - img: "nginx", - req: slot, - min: 2, - rep: rep, - }, - { - img: "nginx", - min: 2, - rep: rep / 2, - }, - }, - } - - _, pg := createJobEx(context, job) - - err := waitPodGroupReady(context, pg) - Expect(err).NotTo(HaveOccurred()) - }) -*/ -}) diff --git a/test/e2e/predicates.go b/test/e2e/predicates.go deleted file mode 100644 index 4e26813e3..000000000 --- a/test/e2e/predicates.go +++ /dev/null @@ -1,204 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* -Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package e2e - -import ( - . 
"github.com/onsi/ginkgo" - // "k8s.io/kubernetes/pkg/scheduler/algorithm" -) - -var _ = Describe("Predicates E2E Test", func() { -/* - It("NodeAffinity", func() { - context := initTestContext() - defer cleanupTestContext(context) - - slot := oneCPU - nodeName, rep := computeNode(context, oneCPU) - Expect(rep).NotTo(Equal(0)) - - affinity := &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchFields: []v1.NodeSelectorRequirement{ - { - Key: algorithm.NodeFieldSelectorKeyNodeName, - Operator: v1.NodeSelectorOpIn, - Values: []string{nodeName}, - }, - }, - }, - }, - }, - }, - } - - job := &jobSpec{ - name: "na-job", - tasks: []taskSpec{ - { - img: "nginx", - req: slot, - min: 1, - rep: 1, - affinity: affinity, - }, - }, - } - - _, pg := createJobEx(context, job) - err := waitPodGroupReady(context, pg) - Expect(err).NotTo(HaveOccurred()) - - pods := getPodOfPodGroup(context, pg) - for _, pod := range pods { - Expect(pod.Spec.NodeName).To(Equal(nodeName)) - } - }) - - It("Hostport", func() { - context := initTestContext() - defer cleanupTestContext(context) - - nn := clusterNodeNumber(context) - - job := &jobSpec{ - name: "hp-job", - tasks: []taskSpec{ - { - img: "nginx", - min: int32(nn), - req: oneCPU, - rep: int32(nn * 2), - hostport: 28080, - }, - }, - } - - _, pg := createJobEx(context, job) - - err := waitTasksReadyEx(context, pg, nn) - Expect(err).NotTo(HaveOccurred()) - - err = waitTasksPendingEx(context, pg, nn) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Pod Affinity", func() { - context := initTestContext() - defer cleanupTestContext(context) - - slot := oneCPU - _, rep := computeNode(context, oneCPU) - Expect(rep).NotTo(Equal(0)) - - labels := map[string]string{"foo": "bar"} - - affinity := &v1.Affinity{ - PodAffinity: &v1.PodAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ - { - LabelSelector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - TopologyKey: "kubernetes.io/hostname", - }, - }, - }, - } - - job := &jobSpec{ - name: "pa-job", - tasks: []taskSpec{ - { - img: "nginx", - req: slot, - min: rep, - rep: rep, - affinity: affinity, - labels: labels, - }, - }, - } - - _, aw := createJobEx(context, job) - err := waitAWPodsReady(context, aw) - Expect(err).NotTo(HaveOccurred()) - - pods := getPodOfAppWrapper(context, aw) - // All pods should be scheduled to the same node. 
- nodeName := pods[0].Spec.NodeName - for _, pod := range pods { - Expect(pod.Spec.NodeName).To(Equal(nodeName)) - } - }) - - It("Taints/Tolerations", func() { - context := initTestContext() - defer cleanupTestContext(context) - - taints := []v1.Taint{ - { - Key: "test-taint-key", - Value: "test-taint-val", - Effect: v1.TaintEffectNoSchedule, - }, - } - - err := taintAllNodes(context, taints) - Expect(err).NotTo(HaveOccurred()) - - job := &jobSpec{ - name: "tt-job", - tasks: []taskSpec{ - { - img: "nginx", - req: oneCPU, - min: 1, - rep: 1, - }, - }, - } - - _, aw := createJobEx(context, job) - err = waitAWPending(context, aw) - Expect(err).NotTo(HaveOccurred()) - - err = removeTaintsFromAllNodes(context, taints) - Expect(err).NotTo(HaveOccurred()) - - err = waitAWPodsReady(context, aw) - Expect(err).NotTo(HaveOccurred()) - }) -*/ -}) diff --git a/test/e2e/queue.go b/test/e2e/queue.go index 182471883..2809f7219 100644 --- a/test/e2e/queue.go +++ b/test/e2e/queue.go @@ -1,21 +1,5 @@ //go:build !private -// +build !private -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ /* Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. @@ -31,6 +15,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package e2e import ( @@ -127,7 +112,7 @@ var _ = Describe("AppWrapper E2E Test", func() { aw2 := createDeploymentAWwith426CPU(context, appendRandomString("aw-deployment-2-426cpu")) appwrappers = append(appwrappers, aw2) err = waitAWAnyPodsExists(context, aw2) - //With improved accounting, no pods will be spawned + // With improved accounting, no pods will be spawned Expect(err).To(HaveOccurred()) // This should fit on cluster, initially queued because of aw2 above but should eventually @@ -237,7 +222,7 @@ var _ = Describe("AppWrapper E2E Test", func() { Expect(err).NotTo(HaveOccurred()) }) - //NOTE: Recommend this test not to be the last test in the test suite it may pass + // NOTE: it is recommended that this test not be run as the last test in the suite; it may pass // the local test but may cause controller to fail which is not // part of this test's validation. @@ -297,7 +282,7 @@ var _ = Describe("AppWrapper E2E Test", func() { Expect(err).NotTo(HaveOccurred()) pass := false for true { - aw1, err := context.karclient.McadV1beta1().AppWrappers(aw.Namespace).Get(context.ctx, aw.Name, metav1.GetOptions{}) + aw1, err := context.karclient.WorkloadV1beta1().AppWrappers(aw.Namespace).Get(context.ctx, aw.Name, metav1.GetOptions{}) if err != nil { fmt.Fprint(GinkgoWriter, "Error getting status") } @@ -373,7 +358,7 @@ var _ = Describe("AppWrapper E2E Test", func() { // This test is flawed, the namespace created by this appwrapper is not cleaned up.
// FIXME https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/471 // Leaving it here so that the builds no longer fail - //TODO: Below two tests are turned off, please refer to github issue here: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/598 + // TODO: The two tests below are turned off; see the GitHub issue: https://github.com/project-codeflare/multi-cluster-app-dispatcher/issues/598 // It("Create AppWrapper - Namespace Only - 0 Pods", func() { // fmt.Fprintf(os.Stdout, "[e2e] Create AppWrapper - Namespace Only - 0 Pods - Started.\n") // context := initTestContext() @@ -471,7 +456,7 @@ var _ = Describe("AppWrapper E2E Test", func() { // Make sure pods from AW aw-deployment-1-850-cpu have preempted var pass = false for true { - aw2Update, err := context.karclient.McadV1beta1().AppWrappers(aw2.Namespace).Get(context.ctx, aw2.Name, metav1.GetOptions{}) + aw2Update, err := context.karclient.WorkloadV1beta1().AppWrappers(aw2.Namespace).Get(context.ctx, aw2.Name, metav1.GetOptions{}) if err != nil { fmt.Fprintf(GinkgoWriter, "[e2e] MCAD Scheduling Fail Fast Preemption Test - Error getting AW update %v", err) } @@ -535,8 +520,8 @@ var _ = Describe("AppWrapper E2E Test", func() { Expect(err).NotTo(HaveOccurred(), "Waiting for pods to be ready for app wrapper: aw-deployment-2-550cpu") // This should fit on cluster but customPodResources is incorrect so AW pods are not created - //NOTE: with deployment controlled removed this test case is invalid. - //Users should keep custompodresources equal to container resources. + // NOTE: with the deployment controller removed, this test case is invalid. + // Users should keep customPodResources equal to the container resources. aw2 := createGenericDeploymentCustomPodResourcesWithCPUAW( context, appendRandomString("aw-deployment-2-427-vs-425-cpu"), "4270m", "425m", 2, 60) @@ -559,11 +544,11 @@ var _ = Describe("AppWrapper E2E Test", func() { Expect(err1).NotTo(HaveOccurred(), "Expecting pods to be ready for app wrapper: aw-test-jobtimeout-with-comp-1") var aw1 *arbv1.AppWrapper var err error - aw1, err = context.karclient.McadV1beta1().AppWrappers(aw.Namespace).Get(context.ctx, aw.Name, metav1.GetOptions{}) + aw1, err = context.karclient.WorkloadV1beta1().AppWrappers(aw.Namespace).Get(context.ctx, aw.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred(), "Expecting no error when getting app wrapper status") fmt.Fprintf(GinkgoWriter, "[e2e] status of app wrapper: %v.\n", aw1.Status) for aw1.Status.State != arbv1.AppWrapperStateFailed { - aw1, err = context.karclient.McadV1beta1().AppWrappers(aw.Namespace).Get(context.ctx, aw.Name, metav1.GetOptions{}) + aw1, err = context.karclient.WorkloadV1beta1().AppWrappers(aw.Namespace).Get(context.ctx, aw.Name, metav1.GetOptions{}) if aw.Status.State == arbv1.AppWrapperStateFailed { break } @@ -654,7 +639,7 @@ var _ = Describe("AppWrapper E2E Test", func() { Eventually(AppWrapper(context, aw.Namespace, aw.Name), 2*time.Minute).Should(WithTransform(AppWrapperState, Equal(arbv1.AppWrapperStateEnqueued))) appwrappers = append(appwrappers, aw) fmt.Fprintf(os.Stdout, "[e2e] MCAD Job Large Compute Requirement Test - Completed.\n") - }) It("MCAD CPU Accounting Queuing Test", func() { @@ -707,7 +691,7 @@ var _ = Describe("AppWrapper E2E Test", func() { Eventually(AppWrapper(context, aw.Namespace, aw.Name), 2*time.Minute).Should(WithTransform(AppWrapperState, Equal(arbv1.AppWrapperStateActive))) appwrappers = append(appwrappers, aw) fmt.Fprintf(os.Stdout, "[e2e] MCAD
Service no RuningHoldCompletion or Complete Test - Completed.\n") - }) It("Create AppWrapper - Generic 50 Deployment Only - 2 pods each", func() { diff --git a/test/e2e/quota.go b/test/e2e/quota.go index c9d585be0..a0678549d 100644 --- a/test/e2e/quota.go +++ b/test/e2e/quota.go @@ -1,20 +1,6 @@ +//go:build private // +build private -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ /* Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. @@ -30,12 +16,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package e2e import ( - arbv1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + + arbv1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1" ) var _ = Describe("Quota E2E Test", func() { @@ -53,7 +41,6 @@ var _ = Describe("Quota E2E Test", func() { Expect(err).NotTo(HaveOccurred()) }) - It("Create AppWrapper - Generic Pod Only - Insufficient Quota 1 Tree", func() { context := initTestContext() var appwrappers []*arbv1.AppWrapper @@ -67,5 +54,4 @@ var _ = Describe("Quota E2E Test", func() { Expect(err).To(HaveOccurred()) }) - }) diff --git a/test/e2e/util.go b/test/e2e/util.go index 8d15ada9f..363a208e8 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ + package e2e import ( @@ -248,7 +249,7 @@ func createGenericAWTimeoutWithStatus(context *context, name string) *arbv1.AppW }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -390,7 +391,7 @@ func awPodPhase(ctx *context, aw *arbv1.AppWrapper, phase []v1.PodPhase, taskNum return func() (bool, error) { defer GinkgoRecover() - aw, err := ctx.karclient.McadV1beta1().AppWrappers(aw.Namespace).Get(ctx.ctx, aw.Name, metav1.GetOptions{}) + aw, err := ctx.karclient.WorkloadV1beta1().AppWrappers(aw.Namespace).Get(ctx.ctx, aw.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) podList, err := ctx.kubeclient.CoreV1().Pods(aw.Namespace).List(gcontext.Background(), metav1.ListOptions{}) @@ -459,7 +460,7 @@ func waitAWNamespaceActive(ctx *context, aw *arbv1.AppWrapper) error { func awNamespacePhase(ctx *context, aw *arbv1.AppWrapper, phase []v1.NamespacePhase) wait.ConditionFunc { return func() (bool, error) { - aw, err := ctx.karclient.McadV1beta1().AppWrappers(aw.Namespace).Get(ctx.ctx, aw.Name, metav1.GetOptions{}) + aw, err := ctx.karclient.WorkloadV1beta1().AppWrappers(aw.Namespace).Get(ctx.ctx, aw.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) namespaces, err := ctx.kubeclient.CoreV1().Namespaces().List(gcontext.Background(), metav1.ListOptions{}) @@ -639,7 +640,7 @@ func createJobAWWithInitContainer(context *context, name string, requeuingTimeIn }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -710,7 +711,7 @@ func createDeploymentAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -786,7 +787,7 @@ func createDeploymentAWwith550CPU(context *context, name string) *arbv1.AppWrapp }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -862,7 +863,7 @@ func createDeploymentAWwith350CPU(context *context, name string) *arbv1.AppWrapp }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -938,7 +939,7 @@ func createDeploymentAWwith426CPU(context *context, name string) *arbv1.AppWrapp }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := 
context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1014,7 +1015,7 @@ func createDeploymentAWwith425CPU(context *context, name string) *arbv1.AppWrapp }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1086,7 +1087,7 @@ func createGenericDeploymentAW(context *context, name string) *arbv1.AppWrapper }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1165,7 +1166,7 @@ func createGenericJobAWWithStatus(context *context, name string) *arbv1.AppWrapp }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1295,7 +1296,7 @@ func createGenericJobAWWithMultipleStatus(context *context, name string) *arbv1. }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1339,7 +1340,7 @@ func createAWGenericItemWithoutStatus(context *context, name string) *arbv1.AppW }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1417,7 +1418,7 @@ func createGenericJobAWWithScheduleSpec(context *context, name string) *arbv1.Ap }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1498,7 +1499,7 @@ func createGenericJobAWtWithLargeCompute(context *context, name string) *arbv1.A }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1569,7 +1570,7 @@ func createGenericServiceAWWithNoStatus(context *context, name string) *arbv1.Ap }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) 
Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1701,7 +1702,7 @@ func createGenericDeploymentAWWithMultipleItems(context *context, name string) * }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1779,7 +1780,7 @@ func createGenericDeploymentWithCPUAW(context *context, name string, cpuDemand s }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1866,7 +1867,7 @@ func createGenericDeploymentCustomPodResourcesWithCPUAW(context *context, name s }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1904,7 +1905,7 @@ func createNamespaceAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -1938,7 +1939,7 @@ func createGenericNamespaceAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2010,7 +2011,7 @@ func createStatefulSetAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2081,7 +2082,7 @@ func createGenericStatefulSetAW(context *context, name string) *arbv1.AppWrapper }, }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2141,7 +2142,7 @@ func createBadPodTemplateAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2223,7 +2224,7 @@ func createPodTemplateAW(context *context, name string) 
*arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2287,7 +2288,7 @@ func createPodCheckFailedStatusAW(context *context, name string) *arbv1.AppWrapp }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2358,7 +2359,7 @@ func createGenericPodAWCustomDemand(context *context, name string, cpuDemand str }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2428,7 +2429,7 @@ func createGenericPodAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2500,7 +2501,7 @@ func createGenericPodTooBigAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2555,7 +2556,7 @@ func createBadGenericPodAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2586,7 +2587,7 @@ func createBadGenericItemAW(context *context, name string) *arbv1.AppWrapper { }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) return appwrapper @@ -2648,20 +2649,20 @@ func createBadGenericPodTemplateAW(context *context, name string) (*arbv1.AppWra }, } - appwrapper, err := context.karclient.McadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) + appwrapper, err := context.karclient.WorkloadV1beta1().AppWrappers(context.namespace).Create(context.ctx, aw, metav1.CreateOptions{}) Expect(err).To(HaveOccurred()) return appwrapper, err } func deleteAppWrapper(ctx *context, name string) error { foreground := metav1.DeletePropagationForeground - return ctx.karclient.McadV1beta1().AppWrappers(ctx.namespace).Delete(ctx.ctx, name, metav1.DeleteOptions{ + 
return ctx.karclient.WorkloadV1beta1().AppWrappers(ctx.namespace).Delete(ctx.ctx, name, metav1.DeleteOptions{ PropagationPolicy: &foreground, }) } func getPodsOfAppWrapper(ctx *context, aw *arbv1.AppWrapper) []*v1.Pod { - aw, err := ctx.karclient.McadV1beta1().AppWrappers(aw.Namespace).Get(ctx.ctx, aw.Name, metav1.GetOptions{}) + aw, err := ctx.karclient.WorkloadV1beta1().AppWrappers(aw.Namespace).Get(ctx.ctx, aw.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) pods, err := ctx.kubeclient.CoreV1().Pods(aw.Namespace).List(gcontext.Background(), metav1.ListOptions{}) @@ -2695,7 +2696,7 @@ func appendRandomString(value string) string { func AppWrapper(context *context, namespace string, name string) func(g gomega.Gomega) *arbv1.AppWrapper { return func(g gomega.Gomega) *arbv1.AppWrapper { - aw, err := context.karclient.McadV1beta1().AppWrappers(namespace).Get(context.ctx, name, metav1.GetOptions{}) + aw, err := context.karclient.WorkloadV1beta1().AppWrappers(namespace).Get(context.ctx, name, metav1.GetOptions{}) g.Expect(err).NotTo(gomega.HaveOccurred()) return aw } diff --git a/test/kuttl-test-deployment-02.yaml b/test/kuttl-test-deployment-02.yaml index 88ed90b07..1e55e68da 100644 --- a/test/kuttl-test-deployment-02.yaml +++ b/test/kuttl-test-deployment-02.yaml @@ -5,6 +5,6 @@ testDirs: timeout: 300 artifactsDir: _output/logs commands: - - script: helm upgrade --install mcad-controller ${ROOT_DIR}/deployment/mcad-controller --namespace kube-system --wait --timeout 2m0s --set loglevel=${LOG_LEVEL} --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY --set configMap.quotaEnabled='"true"' --set quotaManagement.rbac.apiGroup=ibm.com --set quotaManagement.rbac.resource=quotasubtrees --set configMap.name=mcad-controller-configmap --set configMap.preemptionEnabled='"true"' + - script: helm upgrade --install mcad-controller ${ROOT_DIR}/deployment/mcad-controller --namespace kube-system --wait --timeout 2m0s --set loglevel=${LOG_LEVEL} --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY --set configMap.quotaEnabled='"true"' --set quotaManagement.rbac.apiGroup=quota.codeflare.dev --set quotaManagement.rbac.resource=quotasubtrees --set configMap.name=mcad-controller-configmap --set configMap.preemptionEnabled='"true"' - script: kubectl apply -f ${ROOT_DIR}/test/e2e-kuttl/install-quota-subtree.yaml \ No newline at end of file diff --git a/test/kuttl-test-deployment-03.yaml b/test/kuttl-test-deployment-03.yaml index ff9485ec6..3d8a658f8 100644 --- a/test/kuttl-test-deployment-03.yaml +++ b/test/kuttl-test-deployment-03.yaml @@ -7,4 +7,4 @@ crdDir: config/crd/bases artifactsDir: _output/logs commands: - script: kubectl apply -f ${ROOT_DIR}/test/e2e-kuttl/install-quota-subtree.yaml - - script: helm upgrade --install mcad-controller deployment/mcad-controller --skip-crds --namespace kube-system --wait --set loglevel=${LOG_LEVEL} --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set 
image.pullPolicy=$MCAD_IMAGE_PULL_POLICY --set configMap.quotaEnabled='"true"' --set quotaManagement.rbac.apiGroup=ibm.com --set quotaManagement.rbac.resource=quotasubtrees --set configMap.name=mcad-controller-configmap --set configMap.preemptionEnabled='"true"' + - script: helm upgrade --install mcad-controller deployment/mcad-controller --skip-crds --namespace kube-system --wait --set loglevel=${LOG_LEVEL} --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY --set configMap.quotaEnabled='"true"' --set quotaManagement.rbac.apiGroup=quota.codeflare.dev --set quotaManagement.rbac.resource=quotasubtrees --set configMap.name=mcad-controller-configmap --set configMap.preemptionEnabled='"true"' diff --git a/test/kuttl-test.yaml b/test/kuttl-test.yaml index 98ce0fdf3..48cf28d63 100644 --- a/test/kuttl-test.yaml +++ b/test/kuttl-test.yaml @@ -5,6 +5,6 @@ testDirs: timeout: 420 artifactsDir: _output/logs commands: - - script: helm upgrade --install mcad-controller deployment/mcad-controller --namespace kube-system --wait --set loglevel=${LOG_LEVEL} --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY --set configMap.quotaEnabled='"true"' --set quotaManagement.rbac.apiGroup=ibm.com --set quotaManagement.rbac.resource=quotasubtrees --set configMap.name=mcad-controller-configmap --set configMap.preemptionEnabled='"true"' + - script: helm upgrade --install mcad-controller deployment/mcad-controller --namespace kube-system --wait --set loglevel=${LOG_LEVEL} --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY --set configMap.quotaEnabled='"true"' --set quotaManagement.rbac.apiGroup=quota.codeflare.dev --set quotaManagement.rbac.resource=quotasubtrees --set configMap.name=mcad-controller-configmap --set configMap.preemptionEnabled='"true"' - script: kubectl apply -f ${ROOT_DIR}/test/e2e-kuttl/install-quota-subtree.yaml \ No newline at end of file diff --git a/test/perf-test/preempt-exp-kwok.yaml b/test/perf-test/preempt-exp-kwok.yaml index 8f7729fc4..0bab8c857 100644 --- a/test/perf-test/preempt-exp-kwok.yaml +++ b/test/perf-test/preempt-exp-kwok.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: fake-defaultaw-schd-spec-with-timeout-1 diff --git a/test/perf-test/preempt-exp.yaml b/test/perf-test/preempt-exp.yaml index d74c00c51..35063c5db 100644 --- a/test/perf-test/preempt-exp.yaml +++ b/test/perf-test/preempt-exp.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: defaultaw-schd-spec-with-timeout-1 diff --git a/test/yaml/0001-aw-generic-deployment-3.yaml b/test/yaml/0001-aw-generic-deployment-3.yaml index 5b4038841..fd913dd93 100644 --- a/test/yaml/0001-aw-generic-deployment-3.yaml +++ b/test/yaml/0001-aw-generic-deployment-3.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: 
workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: 0001-aw-generic-deployment-3 diff --git a/test/yaml/0002-aw-job-quota.yaml b/test/yaml/0002-aw-job-quota.yaml index 36cfbb31c..e454fac28 100644 --- a/test/yaml/0002-aw-job-quota.yaml +++ b/test/yaml/0002-aw-job-quota.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-1 diff --git a/test/yaml/0003-aw-job-no-quota.yaml b/test/yaml/0003-aw-job-no-quota.yaml index a0f8452c3..8aca96902 100644 --- a/test/yaml/0003-aw-job-no-quota.yaml +++ b/test/yaml/0003-aw-job-no-quota.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-no-quota-job-0003 diff --git a/test/yaml/0004-aw-large-job-no-quota.yaml b/test/yaml/0004-aw-large-job-no-quota.yaml index 30cf44efd..e5013d9cf 100644 --- a/test/yaml/0004-aw-large-job-no-quota.yaml +++ b/test/yaml/0004-aw-large-job-no-quota.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: large-job-no-quota diff --git a/test/yaml/0005-aw-two-quota-jobs.yaml b/test/yaml/0005-aw-two-quota-jobs.yaml index 36d35a6b1..447afee7f 100644 --- a/test/yaml/0005-aw-two-quota-jobs.yaml +++ b/test/yaml/0005-aw-two-quota-jobs.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronze-job-0005-01 @@ -62,7 +62,7 @@ spec: nvidia.com/gpu: 0 memory: 300Mi --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronze-job-0005-02 diff --git a/test/yaml/0006-aw-init-containers.yaml b/test/yaml/0006-aw-init-containers.yaml index 4b6c7a690..75ed22dad 100644 --- a/test/yaml/0006-aw-init-containers.yaml +++ b/test/yaml/0006-aw-init-containers.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: bronnze-init-job-0006 diff --git a/test/yaml/0008-aw-default.yaml b/test/yaml/0008-aw-default.yaml index 94653d0c0..4bbf0c5ce 100644 --- a/test/yaml/0008-aw-default.yaml +++ b/test/yaml/0008-aw-default.yaml @@ -1,4 +1,4 @@ -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: defaultaw-schd-spec-with-timeout-1 From 6f371076627e622387711aabd17b6886d3284525 Mon Sep 17 00:00:00 2001 From: Antonin Stefanutti Date: Mon, 21 Aug 2023 14:41:58 +0200 Subject: [PATCH 2/3] Remove unused code --- .../clusterstate/api/cluster_info.go | 16 +-- pkg/controller/clusterstate/api/helpers.go | 82 +---------- .../clusterstate/api/histogram_info.go | 30 ++-- pkg/controller/clusterstate/api/node_info.go | 16 +-- .../clusterstate/api/node_info_test.go | 45 ------ pkg/controller/clusterstate/api/test_utils.go | 132 ------------------ pkg/controller/clusterstate/api/types.go | 16 +-- pkg/controller/clusterstate/cache/cache.go | 18 +-- .../clusterstate/cache/cache_test.go | 120 ---------------- .../quota/quotaforestmanager/util/utils.go | 61 -------- 10 files changed, 21 insertions(+), 515 deletions(-) delete mode 100644 pkg/controller/clusterstate/api/node_info_test.go delete mode 100644 pkg/controller/clusterstate/api/test_utils.go delete mode 100644 pkg/controller/clusterstate/cache/cache_test.go delete mode 100644 pkg/controller/quota/quotaforestmanager/util/utils.go diff --git a/pkg/controller/clusterstate/api/cluster_info.go 
b/pkg/controller/clusterstate/api/cluster_info.go index 2bd382893..f366d6c13 100644 --- a/pkg/controller/clusterstate/api/cluster_info.go +++ b/pkg/controller/clusterstate/api/cluster_info.go @@ -1,19 +1,4 @@ /* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package api // ClusterInfo is a snapshot of cluster by cache. diff --git a/pkg/controller/clusterstate/api/helpers.go b/pkg/controller/clusterstate/api/helpers.go index 00a43f5b7..069e0925b 100644 --- a/pkg/controller/clusterstate/api/helpers.go +++ b/pkg/controller/clusterstate/api/helpers.go @@ -1,19 +1,4 @@ /* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,78 +13,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ + package api import ( - "fmt" - v1 "k8s.io/api/core/v1" ) -func getTaskStatus(pod *v1.Pod) TaskStatus { - switch pod.Status.Phase { - case v1.PodRunning: - if pod.DeletionTimestamp != nil { - return Releasing - } - - return Running - case v1.PodPending: - if pod.DeletionTimestamp != nil { - return Releasing - } - - if len(pod.Spec.NodeName) == 0 { - return Pending - } - return Bound - case v1.PodUnknown: - return Unknown - case v1.PodSucceeded: - return Succeeded - case v1.PodFailed: - return Failed - } - - return Unknown -} - -func AllocatedStatus(status TaskStatus) bool { - switch status { - case Bound, Binding, Running, Allocated: - return true - default: - return false - } -} - -func MergeErrors(errs ...error) error { - msg := "errors: " - - foundErr := false - i := 1 - - for _, e := range errs { - if e != nil { - if foundErr { - msg = fmt.Sprintf("%s, %d: ", msg, i) - } else { - msg = fmt.Sprintf("%s %d: ", msg, i) - } - - msg = fmt.Sprintf("%s%v", msg, e) - foundErr = true - i++ - } - } - - if foundErr { - return fmt.Errorf("%s", msg) - } - - return nil -} - func NewStringsMap(source map[string]string) map[string]string { target := make(map[string]string) diff --git a/pkg/controller/clusterstate/api/histogram_info.go b/pkg/controller/clusterstate/api/histogram_info.go index 31d3994a7..62703f746 100644 --- a/pkg/controller/clusterstate/api/histogram_info.go +++ b/pkg/controller/clusterstate/api/histogram_info.go @@ -13,19 +13,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package api import ( - "github.com/prometheus/client_golang/prometheus" "math" + "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog/v2" ) const ( - BucketCount = 20 //Must be > 0 - tolerance = 0.1 + BucketCount = 20 // Must be > 0 + tolerance = 0.1 ) + type ResourceHistogram struct { MilliCPU *prometheus.Histogram Memory *prometheus.Histogram @@ -33,21 +36,20 @@ type ResourceHistogram struct { } func NewResourceHistogram(min *Resource, max *Resource) *ResourceHistogram { - start := max.MilliCPU width := 1.0 count := 2 diff := math.Abs(min.MilliCPU - max.MilliCPU) if diff >= tolerance { start = min.MilliCPU - width = (diff/(BucketCount - 1)) + width = (diff / (BucketCount - 1)) count = BucketCount + 1 } klog.V(10).Infof("[NewResourceHistogram] Start histogram numbers for CPU: start=%f, width=%f, count=%d", start, width, count) millicpuHist := prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "millicpu", - Buckets: prometheus.LinearBuckets(start, width, count),}) + Name: "millicpu", + Buckets: prometheus.LinearBuckets(start, width, count)}) start = max.Memory width = 1.0 @@ -55,14 +57,14 @@ func NewResourceHistogram(min *Resource, max *Resource) *ResourceHistogram { diff = math.Abs(min.Memory - max.Memory) if diff >= tolerance { start = min.Memory - width = (diff/(BucketCount - 1)) + width = (diff / (BucketCount - 1)) count = BucketCount + 1 } klog.V(10).Infof("[NewResourceHistogram] Start histogram numbers for Memory: start=%f, width=%f, count=%d", start, width, count) memoryHist := prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "memory", - Buckets: prometheus.LinearBuckets(start, width, count),}) + Name: "memory", + Buckets: prometheus.LinearBuckets(start, width, count)}) start = float64(max.GPU) width = 1.0 @@ -70,14 +72,14 @@ func NewResourceHistogram(min *Resource, max *Resource) *ResourceHistogram { diff = math.Abs(float64(min.GPU - max.GPU)) 
if diff >= tolerance { start = float64(min.GPU) - width = (diff/(BucketCount - 1)) + width = (diff / (BucketCount - 1)) count = BucketCount + 1 } klog.V(10).Infof("[NewResourceHistogram] Start histogram numbers for GPU: start=%f, width=%f, count=%d", start, width, count) gpuHist := prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "gpu", - Buckets: prometheus.LinearBuckets(start, width, count),}) + Name: "gpu", + Buckets: prometheus.LinearBuckets(start, width, count)}) rh := &ResourceHistogram{ MilliCPU: &millicpuHist, @@ -92,5 +94,3 @@ func (rh *ResourceHistogram) Observer(r *Resource) { (*rh.Memory).Observe(r.Memory) (*rh.GPU).Observe(float64(r.GPU)) } - - diff --git a/pkg/controller/clusterstate/api/node_info.go b/pkg/controller/clusterstate/api/node_info.go index a82dca606..19181b7ab 100644 --- a/pkg/controller/clusterstate/api/node_info.go +++ b/pkg/controller/clusterstate/api/node_info.go @@ -1,19 +1,4 @@ /* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package api import ( diff --git a/pkg/controller/clusterstate/api/node_info_test.go b/pkg/controller/clusterstate/api/node_info_test.go deleted file mode 100644 index 4294ef786..000000000 --- a/pkg/controller/clusterstate/api/node_info_test.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* -Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package api - -import ( - "reflect" -) - -//MCAD schedules AWs and not pods. 
-//legacy code to schedule pods is removed -func nodeInfoEqual(l, r *NodeInfo) bool { - if !reflect.DeepEqual(l, r) { - return false - } - - return true -} diff --git a/pkg/controller/clusterstate/api/test_utils.go b/pkg/controller/clusterstate/api/test_utils.go deleted file mode 100644 index f116ea2fc..000000000 --- a/pkg/controller/clusterstate/api/test_utils.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* -Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package api - -import ( - "fmt" - "reflect" - - v1 "k8s.io/api/core/v1" - "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func nodesEqual(l, r map[string]*NodeInfo) bool { - if len(l) != len(r) { - return false - } - - for k, n := range l { - if !reflect.DeepEqual(n, r[k]) { - return false - } - } - - return true -} - -func buildNode(name string, alloc v1.ResourceList) *v1.Node { - return &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Status: v1.NodeStatus{ - Capacity: alloc, - Allocatable: alloc, - }, - } -} - -func buildPod(ns, n, nn string, p v1.PodPhase, req v1.ResourceList, owner []metav1.OwnerReference, labels map[string]string) *v1.Pod { - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprintf("%v-%v", ns, n)), - Name: n, - Namespace: ns, - OwnerReferences: owner, - Labels: labels, - }, - Status: v1.PodStatus{ - Phase: p, - }, - Spec: v1.PodSpec{ - NodeName: nn, - Containers: []v1.Container{ - { - Resources: v1.ResourceRequirements{ - Requests: req, - }, - }, - }, - }, - } -} - -func buildPdb(n string, min int, selectorMap map[string]string) *v1beta1.PodDisruptionBudget { - selector := &metav1.LabelSelector{ - MatchLabels: selectorMap, - } - minAvailable := intstr.FromInt(min) - return &v1beta1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: n, - }, - Spec: v1beta1.PodDisruptionBudgetSpec{ - Selector: selector, - MinAvailable: &minAvailable, - }, - } -} - -func buildResourceList(cpu string, memory string) v1.ResourceList { - return v1.ResourceList{ - v1.ResourceCPU: resource.MustParse(cpu), - v1.ResourceMemory: resource.MustParse(memory), - } -} - -func buildResource(cpu string, memory string) *Resource { - return NewResource(v1.ResourceList{ - v1.ResourceCPU: resource.MustParse(cpu), - v1.ResourceMemory: 
resource.MustParse(memory), - }) -} - -func buildOwnerReference(owner string) metav1.OwnerReference { - controller := true - return metav1.OwnerReference{ - Controller: &controller, - UID: types.UID(owner), - } -} diff --git a/pkg/controller/clusterstate/api/types.go b/pkg/controller/clusterstate/api/types.go index e3704e819..36e49251e 100644 --- a/pkg/controller/clusterstate/api/types.go +++ b/pkg/controller/clusterstate/api/types.go @@ -1,19 +1,4 @@ /* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package api // TaskStatus defines the status of a task/pod. diff --git a/pkg/controller/clusterstate/cache/cache.go b/pkg/controller/clusterstate/cache/cache.go index ab1752e75..7ebbe13b0 100644 --- a/pkg/controller/clusterstate/cache/cache.go +++ b/pkg/controller/clusterstate/cache/cache.go @@ -1,19 +1,4 @@ /* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package cache import ( @@ -49,7 +35,7 @@ import ( "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/controller/clusterstate/api" ) -//New returns a Cache implementation. +// New returns a Cache implementation. func New(config *rest.Config) Cache { return newClusterStateCache(config) } diff --git a/pkg/controller/clusterstate/cache/cache_test.go b/pkg/controller/clusterstate/cache/cache_test.go deleted file mode 100644 index 1067a4cdb..000000000 --- a/pkg/controller/clusterstate/cache/cache_test.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -/* -Copyright 2019, 2021 The Multi-Cluster App Dispatcher Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package cache - -import ( - "fmt" - "reflect" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/controller/clusterstate/api" -) - -func nodesEqual(l, r map[string]*api.NodeInfo) bool { - if len(l) != len(r) { - return false - } - - for k, n := range l { - if !reflect.DeepEqual(n, r[k]) { - return false - } - } - - return true -} - -func buildNode(name string, alloc v1.ResourceList) *v1.Node { - return &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(name), - Name: name, - }, - Status: v1.NodeStatus{ - Capacity: alloc, - Allocatable: alloc, - }, - } -} - -func buildPod(ns, n, nn string, - p v1.PodPhase, req v1.ResourceList, - owner []metav1.OwnerReference, labels map[string]string) *v1.Pod { - - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprintf("%v-%v", ns, n)), - Name: n, - Namespace: ns, - OwnerReferences: owner, - Labels: labels, - }, - Status: v1.PodStatus{ - Phase: p, - }, - Spec: v1.PodSpec{ - NodeName: nn, - Containers: []v1.Container{ - { - Resources: v1.ResourceRequirements{ - Requests: req, - }, - }, - }, - }, - } -} - -func buildResourceList(cpu string, memory string) v1.ResourceList { - return v1.ResourceList{ - v1.ResourceCPU: resource.MustParse(cpu), - v1.ResourceMemory: resource.MustParse(memory), - } -} - -func buildResource(cpu string, memory string) *api.Resource { - return api.NewResource(v1.ResourceList{ - v1.ResourceCPU: resource.MustParse(cpu), - v1.ResourceMemory: resource.MustParse(memory), - }) -} - -func buildOwnerReference(owner string) metav1.OwnerReference { - controller := true - return metav1.OwnerReference{ - Controller: &controller, - UID: types.UID(owner), - } -} diff --git a/pkg/controller/quota/quotaforestmanager/util/utils.go b/pkg/controller/quota/quotaforestmanager/util/utils.go deleted file mode 100644 index a41163235..000000000 --- a/pkg/controller/quota/quotaforestmanager/util/utils.go +++ /dev/null @@ -1,61 +0,0 @@ -// ------------------------------------------------------ {COPYRIGHT-TOP} --- -// Copyright 2022 The Multi-Cluster App Dispatcher Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ------------------------------------------------------ {COPYRIGHT-END} --- -package util - -import ( - "fmt" - "strings" -) - -const ( - // AW Namespace used for building unique name for AW job - NamespacePrefix string = "NAMESPACE_" - - // AW Name used for building unique name for AW job - AppWrapperNamePrefix string = "_AWNAME_" -) - -func ParseId(id string) (string, string) { - ns := "" - n := "" - - // Extract the namespace seperator - nspSplit := strings.Split(id, NamespacePrefix) - if len(nspSplit) == 2 { - // Extract the appwrapper seperator - awnpSplit := strings.Split(nspSplit[1], AppWrapperNamePrefix) - if len(awnpSplit) == 2 { - // What is left if the namespace value in the first slice - if len(awnpSplit[0]) > 0 { - ns = awnpSplit[0] - } - // And the names value in the second slice - if len(awnpSplit[1]) > 0 { - n = awnpSplit[1] - } - } - } - return ns, n -} - -func CreateId(ns string, n string) string { - id := "" - if len(ns) > 0 && len(n) > 0 { - id = fmt.Sprintf("%s%s%s%s", NamespacePrefix, ns, AppWrapperNamePrefix, n) - } - return id -} - From ce39df605d276d72c8841594fe880b28e1953589 Mon Sep 17 00:00:00 2001 From: Antonin Stefanutti Date: Thu, 31 Aug 2023 09:32:42 +0200 Subject: [PATCH 3/3] Use new API groups in quota borrowing tests --- test/e2e-kuttl-borrowing/install-quota-subtree.yaml | 8 ++++---- test/e2e-kuttl-borrowing/steps/00-assert.yaml | 4 ++-- test/e2e-kuttl-borrowing/steps/01-assert.yaml | 8 ++++---- test/e2e-kuttl-borrowing/steps/03-assert.yaml | 2 +- test/e2e-kuttl-borrowing/steps/03-install.yaml | 2 +- test/e2e-kuttl-borrowing/steps/04-assert.yaml | 2 +- test/e2e-kuttl-borrowing/steps/04-install.yaml | 2 +- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/test/e2e-kuttl-borrowing/install-quota-subtree.yaml b/test/e2e-kuttl-borrowing/install-quota-subtree.yaml index 236f6d009..1fc73cd8c 100644 --- a/test/e2e-kuttl-borrowing/install-quota-subtree.yaml +++ b/test/e2e-kuttl-borrowing/install-quota-subtree.yaml @@ -1,5 +1,5 @@ --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -15,7 +15,7 @@ spec: memory: 1045Mi nvidia.com/gpu: 16 --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root @@ -31,7 +31,7 @@ spec: memory: 1045Mi nvidia.com/gpu: 16 --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children @@ -70,7 +70,7 @@ spec: memory: 0Mi nvidia.com/gpu: 0 --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root-children diff --git a/test/e2e-kuttl-borrowing/steps/00-assert.yaml b/test/e2e-kuttl-borrowing/steps/00-assert.yaml index 853505926..596b14e54 100644 --- a/test/e2e-kuttl-borrowing/steps/00-assert.yaml +++ b/test/e2e-kuttl-borrowing/steps/00-assert.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: appwrappers.mcad.ibm.com + name: appwrappers.workload.codeflare.dev status: acceptedNames: kind: AppWrapper @@ -15,7 +15,7 @@ status: 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: quotasubtrees.ibm.com + name: quotasubtrees.quota.codeflare.dev status: acceptedNames: kind: QuotaSubtree diff --git a/test/e2e-kuttl-borrowing/steps/01-assert.yaml b/test/e2e-kuttl-borrowing/steps/01-assert.yaml index 175462a90..8af089b8c 100644 --- a/test/e2e-kuttl-borrowing/steps/01-assert.yaml +++ b/test/e2e-kuttl-borrowing/steps/01-assert.yaml @@ -1,5 +1,5 @@ # Verify subtree creations -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root @@ -7,7 +7,7 @@ metadata: labels: tree: quota_context --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root @@ -15,7 +15,7 @@ metadata: labels: tree: quota_service --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: context-root-children @@ -23,7 +23,7 @@ metadata: labels: tree: quota_context --- -apiVersion: ibm.com/v1 +apiVersion: quota.codeflare.dev/v1 kind: QuotaSubtree metadata: name: service-root-children diff --git a/test/e2e-kuttl-borrowing/steps/03-assert.yaml b/test/e2e-kuttl-borrowing/steps/03-assert.yaml index 9245c48e0..652eb2212 100644 --- a/test/e2e-kuttl-borrowing/steps/03-assert.yaml +++ b/test/e2e-kuttl-borrowing/steps/03-assert.yaml @@ -1,6 +1,6 @@ # Verify job is running --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-1 diff --git a/test/e2e-kuttl-borrowing/steps/03-install.yaml b/test/e2e-kuttl-borrowing/steps/03-install.yaml index c44823c80..88b9923f5 100644 --- a/test/e2e-kuttl-borrowing/steps/03-install.yaml +++ b/test/e2e-kuttl-borrowing/steps/03-install.yaml @@ -1,5 +1,5 @@ --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-1 diff --git a/test/e2e-kuttl-borrowing/steps/04-assert.yaml b/test/e2e-kuttl-borrowing/steps/04-assert.yaml index 10649417b..ec1628723 100644 --- a/test/e2e-kuttl-borrowing/steps/04-assert.yaml +++ b/test/e2e-kuttl-borrowing/steps/04-assert.yaml @@ -1,6 +1,6 @@ # Verify job is running --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-2 diff --git a/test/e2e-kuttl-borrowing/steps/04-install.yaml b/test/e2e-kuttl-borrowing/steps/04-install.yaml index 804b06505..2b4fe84bc 100644 --- a/test/e2e-kuttl-borrowing/steps/04-install.yaml +++ b/test/e2e-kuttl-borrowing/steps/04-install.yaml @@ -1,5 +1,5 @@ --- -apiVersion: mcad.ibm.com/v1beta1 +apiVersion: workload.codeflare.dev/v1beta1 kind: AppWrapper metadata: name: my-job-2
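With both patches applied, the AppWrapper CRD is served from workload.codeflare.dev/v1beta1 and the QuotaSubtree CRD from quota.codeflare.dev/v1, as the kuttl assertions above verify. A hypothetical smoke-test sketch along the same lines, using client-go's dynamic client to list resources through the migrated group/version/resource triples (the kubeconfig handling and error panics are illustrative, not code from the test suite):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The migrated group/version/resource triples from these patches.
	gvrs := []schema.GroupVersionResource{
		{Group: "workload.codeflare.dev", Version: "v1beta1", Resource: "appwrappers"},
		{Group: "quota.codeflare.dev", Version: "v1", Resource: "quotasubtrees"},
	}
	for _, gvr := range gvrs {
		// Listing across all namespaces through the fully qualified GVR
		// fails fast if the CRD is still registered under the old group.
		list, err := client.Resource(gvr).Namespace(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{})
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d items\n", gvr, len(list.Items))
	}
}

Querying through the fully qualified group/version/resource, rather than short resource names, guarantees the old mcad.ibm.com and ibm.com groups can no longer be reached by accident once the migration is complete.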