Open pankajskku opened 1 year ago
Logs at the syncer side when the API bindings are created
E0607 20:21:50.997797 1 spec_controller.go:348] kcp-workload-syncer-spec failed to sync {{"" "v1" "secrets"} "1x5b9q93chbfwcsd|edge-2/default-token-8j4sw"}, err: secrets "default-token-8j4sw" not found I0607 20:21:50.997901 1 spec_controller.go:341] "processing key" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/default-token-8j4sw" gvr="/v1, Resource=secrets" I0607 20:21:50.998065 1 spec_process.go:169] "Deleting downstream object for upstream object" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/default-token-8j4sw" gvr="/v1, Resource=secrets" workspace="1x5b9q93chbfwcsd" namespace="edge-2" name="default-token-8j4sw" downstream.namespace="kcp-21kr0xth2dir" I0607 20:21:51.003348 1 spec_controller.go:341] "processing key" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/default-token-8j4sw" gvr="/v1, Resource=secrets" I0607 20:21:51.003599 1 spec_process.go:169] "Deleting downstream object for upstream object" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/default-token-8j4sw" gvr="/v1, Resource=secrets" workspace="1x5b9q93chbfwcsd" namespace="edge-2" name="default-token-8j4sw" downstream.namespace="kcp-21kr0xth2dir" I0607 20:21:51.019833 1 finalizer.go:101] "Updated upstream resource to remove the syncer finalizer" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-status" key="kcp-21kr0xth2dir/kcp-root-ca.crt" 
gvr="/v1, Resource=configmaps" downstream.namespace="kcp-21kr0xth2dir" downstream.name="kcp-root-ca.crt" workspace="1x5b9q93chbfwcsd" namespace="edge-2" name="kube-root-ca.crt" I0607 20:21:51.020866 1 spec_controller.go:299] "queueing GVR" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/kube-root-ca.crt" gvr="/v1, Resource=configmaps" E0607 20:21:51.022823 1 finalizer.go:98] "Failed updating upstream resource after removing the syncer finalizer" err="Operation cannot be fulfilled on configmaps \"kube-root-ca.crt\": the resource has been modified in the meantime" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/kube-root-ca.crt" gvr="/v1, Resource=configmaps" downstream.name="kcp-root-ca.crt" E0607 20:21:51.022858 1 spec_controller.go:348] kcp-workload-syncer-spec failed to sync {{"" "v1" "configmaps"} "1x5b9q93chbfwcsd|edge-2/kube-root-ca.crt"}, err: Operation cannot be fulfilled on configmaps "kube-root-ca.crt": the resource has been modified in the meantime I0607 20:21:51.022917 1 spec_controller.go:341] "processing key" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/kube-root-ca.crt" gvr="/v1, Resource=configmaps" I0607 20:21:51.023006 1 spec_process.go:169] "Deleting downstream object for upstream object" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/kube-root-ca.crt" gvr="/v1, Resource=configmaps" workspace="1x5b9q93chbfwcsd" namespace="edge-2" name="kube-root-ca.crt" downstream.namespace="kcp-21kr0xth2dir" 
I0607 20:21:51.028532 1 spec_controller.go:341] "processing key" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/kube-root-ca.crt" gvr="/v1, Resource=configmaps" I0607 20:21:51.028596 1 spec_process.go:169] "Deleting downstream object for upstream object" syncTarget.workspace="1x5b9q93chbfwcsd" syncTarget.name="edge-2" syncTarget.key="3LHYDNcQnFvNlZKjC3HiOejxLMB2Qc6LI8e2FZ" reconciler="kcp-workload-syncer-spec" key="1x5b9q93chbfwcsd|edge-2/kube-root-ca.crt" gvr="/v1, Resource=configmaps" workspace="1x5b9q93chbfwcsd" namespace="edge-2" name="kube-root-ca.crt" downstream.namespace="kcp-21kr0xth2dir"
The shadow namespace in the cluster reappears after deleting the created APIBinding, with the following logs:
I0608 15:35:27.414759 1 spec_controller.go:299] "queueing GVR" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/default-token-7mcwl" gvr="/v1, Resource=secrets" I0608 15:35:27.414892 1 spec_controller.go:341] "processing key" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/default-token-7mcwl" gvr="/v1, Resource=secrets" I0608 15:35:27.535876 1 spec_controller.go:299] "queueing GVR" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/kube-root-ca.crt" gvr="/v1, Resource=configmaps" I0608 15:35:27.536032 1 spec_controller.go:341] "processing key" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/kube-root-ca.crt" gvr="/v1, Resource=configmaps" I0608 15:35:27.573017 1 spec_process.go:389] "Updated upstream resource with syncer finalizer" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/default-token-7mcwl" gvr="/v1, Resource=secrets" I0608 15:35:27.575431 1 spec_controller.go:299] "queueing GVR" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/default-token-7mcwl" gvr="/v1, Resource=secrets" I0608 15:35:27.575655 1 spec_controller.go:341] "processing key" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" 
syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/default-token-7mcwl" gvr="/v1, Resource=secrets" I0608 15:35:27.595273 1 spec_process.go:545] "Upserted upstream resource to downstream" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/default-token-7mcwl" gvr="/v1, Resource=secrets" downstream.name="kcp-default-token-7mcwl" I0608 15:35:27.597044 1 status_controller.go:179] "queueing GVR" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-status" key="kcp-34ck1kde2fep/kcp-default-token-7mcwl" gvr="/v1, Resource=secrets" I0608 15:35:27.597197 1 status_controller.go:221] "processing key" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-status" key="kcp-34ck1kde2fep/kcp-default-token-7mcwl" gvr="/v1, Resource=secrets" I0608 15:35:27.709138 1 spec_process.go:389] "Updated upstream resource with syncer finalizer" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/kube-root-ca.crt" gvr="/v1, Resource=configmaps" I0608 15:35:27.709472 1 spec_controller.go:299] "queueing GVR" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/kube-root-ca.crt" gvr="/v1, Resource=configmaps" I0608 15:35:27.709594 1 spec_controller.go:341] "processing key" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" 
key="2rdefjxpbx1yb9rx|edge-1/kube-root-ca.crt" gvr="/v1, Resource=configmaps" I0608 15:35:27.722308 1 spec_process.go:545] "Upserted upstream resource to downstream" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-spec" key="2rdefjxpbx1yb9rx|edge-1/kube-root-ca.crt" gvr="/v1, Resource=configmaps" downstream.name="kcp-root-ca.crt" I0608 15:35:27.722565 1 status_controller.go:179] "queueing GVR" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-status" key="kcp-34ck1kde2fep/kcp-root-ca.crt" gvr="/v1, Resource=configmaps" I0608 15:35:27.722748 1 status_controller.go:221] "processing key" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" reconciler="kcp-workload-syncer-status" key="kcp-34ck1kde2fep/kcp-root-ca.crt" gvr="/v1, Resource=configmaps" I0608 15:35:28.912290 1 apiimporter.go:227] "Importing APIs" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" resourcesToImport=[atomgraphlets.edge.operator.com deployments.apps ingresses.networking.k8s.io pods. services.] 
I0608 15:35:30.813694 1 request.go:601] Waited for 1.045322397s due to client-side throttling, not priority and fairness, request: GET:https://10.96.0.1:443/apis/batch/v1beta1?timeout=32s I0608 15:35:31.863048 1 request.go:601] Waited for 2.094545052s due to client-side throttling, not priority and fairness, request: GET:https://10.96.0.1:443/apis/coordination.k8s.io/v1beta1?timeout=32s I0608 15:35:31.917582 1 discovery.go:186] "processing discovery" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" groupVersion="v1" resource="pods" kind="Pod" crd="pods.core" I0608 15:35:31.930012 1 discovery.go:186] "processing discovery" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" groupVersion="v1" resource="services" kind="Service" crd="services.core" I0608 15:35:31.935404 1 discovery.go:186] "processing discovery" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" groupVersion="apps/v1" resource="deployments" kind="Deployment" crd="deployments.apps" I0608 15:35:31.946623 1 discovery.go:186] "processing discovery" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" groupVersion="networking.k8s.io/v1" resource="ingresses" kind="Ingress" crd="ingresses.networking.k8s.io" I0608 15:35:31.953403 1 discovery.go:186] "processing discovery" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" 
groupVersion="edge.operator.com/v1alpha1" resource="atomgraphlets" kind="Atomgraphlet" crd="atomgraphlets.edge.operator.com" I0608 15:35:31.963342 1 apiimporter.go:271] "updating APIResourceImport" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" group="" version="v1" resource="services" apiResourceImport="services.edge-1.v1.core" I0608 15:35:32.409469 1 apiimporter.go:271] "updating APIResourceImport" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" group="apps" version="v1" resource="deployments" apiResourceImport="deployments.edge-1.v1.apps" I0608 15:35:33.079942 1 apiimporter.go:271] "updating APIResourceImport" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" group="networking.k8s.io" version="v1" resource="ingresses" apiResourceImport="ingresses.edge-1.v1.networking.k8s.io" I0608 15:35:33.236986 1 apiimporter.go:271] "updating APIResourceImport" syncTarget.workspace="2rdefjxpbx1yb9rx" syncTarget.name="edge-1" syncTarget.key="6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq" resources=[atomgraphlets.edge.operator.com] reconciler="api-importer" group="edge.operator.com" version="v1alpha1" resource="atomgraphlets" apiResourceImport="atomgraphlets.edge-1.v1alpha1.edge.operator.com"
@pankajskku as mentioned in the Slack chat, any chance to get a smaller reproducible example for this? I'm just being realistic, as this would require somebody to recreate your setup on two different architectures with code which is already removed from the main branch.
Or is there any chance you could try looking at the code yourself and finding which part is responsible for deleting the resources, and why?
/transfer-issue contrib-tmc
/transfer-issue contrib-tmc
Describe the bug
To facilitate the synchronization of Kubernetes objects from the KCP namespace to its corresponding synctarget (in turn in a namespace in the pcluster), I have created placement policies mapping each sync target/location to a KCP namespace. However, I have encountered an inconsistency in KCP's behavior when creating the API binding on a Macbook (Darwin ARM64, M1 Pro) compared to a Linux VM (AMD64). On the Linux VM, creating an API binding for the Kubernetes API export results in the termination of the namespace in the provisioned cluster that corresponds to the KCP namespace. Yet, this does not occur on the Macbook (Darwin ARM64) - creating an API binding for Kubernetes resources does not affect the namespace in the provisioned cluster. Here is the sequence of steps I follow to establish KCP wiring:
env_variables.sh: It has the environment variables for the cluster names, workspace, etc. The inconsistency appears in KCP's behavior during step 4 on the Linux VM (AMD64). If anyone has encountered a similar issue or has insights to share, your input would be greatly appreciated.
Steps To Reproduce
Expected Behaviour
With KCP v0.11.0 for AMD64 arch. : The namespace (downstream object) in the pcluster that is getting terminated is corresponding to the KCP namespace (upstream object).
With KCP v0.11.0 for Darwin ARM64 arch. : The namespace (downstream object) in the pcluster is not terminated.
Additional Context
The namespace state after creating the API binding
irl@hub:~/pankaj/octopus$ k get ns edge-1 -o yaml apiVersion: v1 kind: Namespace metadata: annotations: kcp.io/cluster: 2rdefjxpbx1yb9rx kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"v1","kind":"Namespace","metadata":{"annotations":{},"labels":{"name":"edge-1"},"name":"edge-1"}} scheduling.kcp.io/placement: "" creationTimestamp: "2023-06-08T15:10:58Z" labels: kubernetes.io/metadata.name: edge-1 name: edge-1 name: edge-1 resourceVersion: "1972" uid: 92f0dd0b-b1fa-45ef-8d20-5bafd1d11e28 spec: finalizers:
The namespace state before creating the API binding
irl@hub:~/pankaj/octopus$ k get ns edge-1 -o yaml apiVersion: v1 kind: Namespace metadata: annotations: kcp.io/cluster: 2rdefjxpbx1yb9rx kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"v1","kind":"Namespace","metadata":{"annotations":{},"labels":{"name":"edge-1"},"name":"edge-1"}} scheduling.kcp.io/placement: "" creationTimestamp: "2023-06-08T15:10:58Z" labels: kubernetes.io/metadata.name: edge-1 name: edge-1 state.workload.kcp.io/6YKBsgxBSSW1XYFm3wkHf97QZEkLyTHV4Pqguq: Sync name: edge-1 resourceVersion: "2033" uid: 92f0dd0b-b1fa-45ef-8d20-5bafd1d11e28 spec: finalizers:
pankajthorat@Pankajs-MacBook-Pro octopus % cat env_variables.sh
#!/bin/bash
# env_variables.sh — shared cluster/workspace names, sourced by the other scripts.
# NOTE: bash cannot export arrays to child processes; CLUSTER_NAMES is only
# visible to scripts that `source` this file, which is how it is used here.
export HUB_CLUSTER_NAME="hub-operator-system"
export CORE_CLUSTER_NAME="core-1"
export EDGE1_CLUSTER_NAME="edge-1"
export EDGE2_CLUSTER_NAME="edge-2"
# The original had a shorter CLUSTER_NAMES assignment here that was immediately
# overwritten by the full list below (dead code) — removed.
export CLUSTER_NAMES=("$HUB_CLUSTER_NAME" "$CORE_CLUSTER_NAME" "$EDGE1_CLUSTER_NAME" "$EDGE2_CLUSTER_NAME")
export WORKSPACE_NAME="octopus"
pankajthorat@Pankajs-MacBook-Pro octopus % cat 4-ws-sync.sh
#!/bin/bash
# 4-ws-sync.sh — create the kcp workspace and register each kind cluster.
source env_variables.sh
export KUBECONFIG=.kcp/admin.kubeconfig
kubectl workspace create "$WORKSPACE_NAME" --enter
kubectl workspace create-context
# NOTE(review): this loop body was lost in the paste (an empty do/done is a
# bash syntax error) — presumably it generated the per-cluster syncer
# manifests, e.g. `kubectl kcp workload sync ...`. Restore the original
# commands; `:` is a placeholder so the script at least parses.
for cluster_name in "${CLUSTER_NAMES[@]}"; do
  : # TODO(pankajskku): restore syncer generation for "$cluster_name"
done
# Apply the generated syncer manifest on each kind cluster, using the host
# kubeconfig (not the kcp one) just for these two commands.
for cluster_name in "${CLUSTER_NAMES[@]}"; do
  KUBECONFIG=~/.kube/config kubectl config use-context "kind-$cluster_name"
  KUBECONFIG=~/.kube/config kubectl apply -f "$cluster_name.yaml"
done
echo "Sleeping for 30 seconds..."
sleep 30
pankajthorat@Pankajs-MacBook-Pro octopus % cat 5-labelsyncer.sh
#!/bin/bash
# 5-labelsyncer.sh — label each SyncTarget so Locations can select it via
# their instanceSelector (name=st-<cluster>).
source env_variables.sh
export KUBECONFIG=.kcp/admin.kubeconfig
for cluster_name in "${CLUSTER_NAMES[@]}"; do
  kubectl label "synctarget/$cluster_name" "name=st-$cluster_name" --overwrite
done
pankajthorat@Pankajs-MacBook-Pro octopus % cat 6-ns-loc-pp.sh
#!/bin/bash
# 6-ns-loc-pp.sh — create per-cluster namespaces, Locations, and Placements
# in the kcp workspace, then bind compute.
source env_variables.sh
export KUBECONFIG=.kcp/admin.kubeconfig

# Create one kcp namespace per cluster.
for cluster_name in "${CLUSTER_NAMES[@]}"; do
kubectl apply -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: ${cluster_name}
  labels:
    name: ${cluster_name}
EOF
done

# Create one Location per SyncTarget, selecting it by the st-<cluster> label.
for cluster_name in "${CLUSTER_NAMES[@]}"; do
kubectl apply -f - <<EOF
apiVersion: scheduling.kcp.io/v1alpha1
kind: Location
metadata:
  name: location-$cluster_name
  labels:
    name: location-$cluster_name
spec:
  instanceSelector:
    matchLabels:
      name: st-$cluster_name
  resource:
    group: workload.kcp.io
    resource: synctargets
    version: v1alpha1
EOF
done

# Delete the default location so only the per-cluster Locations remain.
kubectl delete location default

# Create one Placement per cluster.
for cluster_name in "${CLUSTER_NAMES[@]}"; do
kubectl apply -f - <<EOF
apiVersion: scheduling.kcp.io/v1alpha1
kind: Placement
metadata:
  name: pp-$cluster_name
spec:
  locationResource:
    group: workload.kcp.io
    resource: synctargets
    version: v1alpha1
  locationSelectors:
  # NOTE(review): the selector list items were lost in the paste; presumably
  # they matched the location-<cluster> label — verify against the original.
  - matchLabels:
      name: location-$cluster_name
  locationWorkspace: root
EOF
done

# NOTE(review): the next two bind commands look like alternates from the
# original paste — the second (workspace-scoped apiexport) is presumably the
# one actually intended; confirm which was run.
kubectl kcp bind compute root
kubectl kcp bind compute "root:$WORKSPACE_NAME" --apiexports="root:$WORKSPACE_NAME:kubernetes"
# Remove the auto-generated default placement (name hard-coded from one run;
# it will differ on a fresh setup).
kubectl delete placements placement-1cgav5jo
pankajthorat@Pankajs-MacBook-Pro octopus % cat 7a-APIBINDING.sh
#!/bin/bash
# 7a-APIBINDING.sh — bind the kubernetes APIExport into the current workspace.
# NOTE(review): the first `bind compute` runs before KUBECONFIG is set, so it
# targets whatever kube context is current — likely an unintended ordering in
# the original script; verify which invocation was meant to take effect.
kubectl ws .
kubectl kcp bind compute root:octopus --apiexports=root:octopus:kubernetes
export KUBECONFIG=.kcp/admin.kubeconfig
kubectl ws .
# Create the APIBinding against the root:compute kubernetes APIExport.
kubectl apply -f - <<EOF
apiVersion: apis.kcp.io/v1alpha1
kind: APIBinding
metadata:
  name: bind-kube
spec:
  reference:
    export:
      path: "root:compute"
      name: kubernetes
EOF