rancher / rke

Rancher Kubernetes Engine (RKE), an extremely simple, lightning fast Kubernetes distribution that runs entirely within containers.
Apache License 2.0
3.21k stars 581 forks source link

Config Parameter folder in Cloud Provider vSphere #1310

Closed juame closed 5 years ago

juame commented 5 years ago

RKE version: $ rke --version rke version v0.2.2

Docker version: (docker version,docker info preferred)

$ docker version
Client:
 Version:           18.09.5
 API version:       1.39
 Go version:        go1.10.8
 Git commit:        e8ff056
 Built:             Thu Apr 11 04:43:57 2019
 OS/Arch:           linux/amd64
 Experimental:      false

Server: Docker Engine - Community
 Engine:
  Version:          18.09.5
  API version:      1.39 (minimum version 1.12)
  Go version:       go1.10.8
  Git commit:       e8ff056
  Built:            Thu Apr 11 04:10:53 2019
  OS/Arch:          linux/amd64
  Experimental:     false

Operating system and kernel: (cat /etc/os-release, uname -r preferred)

cat /etc/os-release && uname -r
NAME="Ubuntu"
VERSION="18.04.2 LTS (Bionic Beaver)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 18.04.2 LTS"
VERSION_ID="18.04"
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
VERSION_CODENAME=bionic
UBUNTU_CODENAME=bionic
4.15.0-47-generic

Type/provider of hosts: (VirtualBox/Bare-metal/AWS/GCE/DO)

VSphere 6.5

cluster.yml file:

# If you intended to deploy Kubernetes in an air-gapped environment,
# please consult the documentation on how to configure custom RKE images.
nodes:
- address: kubernetes-pocstack-master1.itoslab.example.ch
  port: "22"
  internal_address: ""
  role:
  - controlplane
  - etcd
  hostname_override: ""
  user: itoslab
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/rke/pocstack/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
- address: kubernetes-pocstack-master2.itoslab.example.ch
  port: "22"
  internal_address: ""
  role:
  - controlplane
  - etcd
  hostname_override: "n"
  user: itoslab
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/rke/pocstack/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
- address: kubernetes-pocstack-master3.itoslab.example.ch
  port: "22"
  internal_address: ""
  role:
  - controlplane
  - etcd
  hostname_override: ""
  user: itoslab
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/rke/pocstack/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
- address: kubernetes-pocstack-worker1.itoslab.example.ch
  port: "22"
  internal_address: ""
  role:
  - worker
  hostname_override: ""
  user: itoslab
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/rke/pocstack/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
- address: kubernetes-pocstack-worker2.itoslab.example.ch
  port: "22"
  internal_address: ""
  role:
  - worker
  hostname_override: ""
  user: itoslab
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/rke/pocstack/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
- address: kubernetes-pocstack-worker3.itoslab.example.ch
  port: "22"
  internal_address: ""
  role:
  - worker
  hostname_override: ""
  user: itoslab
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/rke/pocstack/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
services:
  etcd:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    external_urls: []
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    snapshot: null
    retention: ""
    creation: ""
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: ""
    pod_security_policy: false
    always_pull_images: false
  kube-controller:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
  kubelet:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    cluster_domain: cluster.local
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
network:
  plugin: canal
  options: {}
authentication:
  strategy: x509
  sans: []
  webhook: null
addons: ""
addons_include: []
system_images:
  etcd: rancher/coreos-etcd:v3.2.24-rancher1
  alpine: rancher/rke-tools:v0.1.27
  nginx_proxy: rancher/rke-tools:v0.1.27
  cert_downloader: rancher/rke-tools:v0.1.27
  kubernetes_services_sidecar: rancher/rke-tools:v0.1.27
  kubedns: rancher/k8s-dns-kube-dns:1.15.0
  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
  coredns: coredns/coredns:1.2.6
  coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
  kubernetes: rancher/hyperkube:v1.13.5-rancher1
  flannel: rancher/coreos-flannel:v0.10.0-rancher1
  flannel_cni: rancher/flannel-cni:v0.3.0-rancher1
  calico_node: rancher/calico-node:v3.4.0
  calico_cni: rancher/calico-cni:v3.4.0
  calico_controllers: ""
  calico_ctl: rancher/calico-ctl:v2.0.0
  canal_node: rancher/calico-node:v3.4.0
  canal_cni: rancher/calico-cni:v3.4.0
  canal_flannel: rancher/coreos-flannel:v0.10.0
  weave_node: weaveworks/weave-kube:2.5.0
  weave_cni: weaveworks/weave-npc:2.5.0
  pod_infra_container: rancher/pause:3.1
  ingress: rancher/nginx-ingress-controller:0.21.0-rancher3
  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1
  metrics_server: rancher/metrics-server:v0.3.1
ssh_key_path: ~/rke/pocstack/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: false
kubernetes_version: ""
private_registries: []
ingress:
  provider: ""
  options: {}
  node_selector: {}
  extra_args: {}
cluster_name: ""
cloud_provider:
  name: vsphere
  vsphereCloudProvider:
    global:
      insecure-flag: true
    virtual_center:
      vcs06.itoslab.example.ch:
        user: user
        password: "password"
        port: 443
        datacenters: Enterprise Lab
    network:
      public-network: SVCIntra030
    workspace:
      server: vcs06.itoslab.example.ch
      folder: k8s
      default-datastore: DSP2-DOR-001
      datacenter: Enterprise Lab
    disk:
      scsicontrollertype: pvscsi
prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
monitoring:
  provider: ""
  options: {}
restore:
  restore: false
  snapshot_name: ""
dns: null

Steps to Reproduce:

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata: 
  name: vspherevolume
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/vsphere-volume
parameters: 
  diskformat: thin

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvcsc001
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi

Volume will be created in Kubernetes and the vSphere DataStore: DSP2-DOR-001/kubevol/kubernetes-dynamic-pvc-111f2483-66a3-11e9-9dcc-005056931b79.vmdk

From documentation:

Path of folder in which to create dummy VMs used for volume provisioning (relative from the root of the datastore), e.g. “kubernetes”.

Results:

The volume will be in the folder kubevol... If I mount the volume to a pod there are a lot of files in kubevol:

Questions:

Thanks for your help!

galal-hussein commented 5 years ago

@juame Thanks for opening an issue. Unfortunately, this isn't an rke issue: rke doesn't have control over the creation of persistent volumes. You may want to open an issue with https://github.com/kubernetes/kubernetes for this. I am closing this issue; please reopen if you think it's related to rke.