ansible / ansible-container

DEPRECATED -- Ansible Container was a tool to build Docker images and orchestrate containers using only Ansible playbooks.
GNU Lesser General Public License v3.0
2.19k stars 394 forks source link

Unclear why ansible-container run failed #563

Open ehelms opened 7 years ago

ehelms commented 7 years ago
ISSUE TYPE
container.yml
---
# Compose file-format version used by ansible-container (quoted so it stays
# a string rather than the float 2).
version: "2"

settings:
  conductor_base: centos:7
  project_name: foreman
  # Namespace metadata applied when deploying to OpenShift/Kubernetes.
  k8s_namespace:
    name: foreman
    description: Foreman stack
    display_name: Foreman

# Default variable values; referenced below via Jinja templating ({{ ... }}).
defaults:
  POSTGRES_USER: foreman
  POSTGRES_PASSWORD: foreman
  POSTGRES_DB: foreman
  POSTGRES_PORT: 5432
  MONGODB_USER: admin
  MONGODB_PASSWORD: admin
  MONGODB_DATABASE: pulp_database
  MONGODB_PORT: 27017

services:
  # Build-only base layer shared by the foreman/foreman-tasks images.
  # `state: absent` keeps it out of the OpenShift deployment.
  foreman-base:
    from: centos:7
    roles:
      - epel-repositories
      - role: puppet-repositories
        puppet_repositories_version: 4
      - foreman-repositories
      - katello-repositories
      - foreman
    openshift:
      state: absent
  foreman:
    from: foreman-foreman-base:latest
    roles:
      - noop
    command: ['/usr/bin/start-foreman.sh']
    entrypoint: ['/usr/bin/entrypoint.sh']
    environment:
      - POSTGRES_DB={{ POSTGRES_DB }}
      - POSTGRES_USER={{ POSTGRES_USER }}
      - POSTGRES_PASSWORD={{ POSTGRES_PASSWORD }}
      - SEED_ADMIN_USER=admin
      - SEED_ADMIN_PASSWORD=changeme
      - SEED_ORGANIZATION=Default Organization
      - SEED_LOCATION=Raleigh
    ports:
      # Quoted so the host:container mapping is always parsed as a string.
      - "8080:8080"
    expose:
      - 8080
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  foreman-tasks:
    from: foreman-foreman-base:latest
    roles:
      - foreman-tasks
    command: ['/usr/bin/start-foreman-tasks.sh']
    entrypoint: ['/usr/bin/entrypoint.sh']
    environment:
      - POSTGRES_DB={{ POSTGRES_DB }}
      - POSTGRES_USER={{ POSTGRES_USER }}
      - POSTGRES_PASSWORD={{ POSTGRES_PASSWORD }}
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  foreman-proxy:
    from: centos:7
    roles:
      - epel-repositories
      - foreman-repositories
      - foreman-proxy
    command: ['/usr/bin/start-foreman-proxy.sh']
    expose:
      - 8080
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  # One-shot registration job: registers the proxy with the foreman service.
  foreman-proxy-register:
    from: centos:7
    roles:
      - epel-repositories
      - foreman-repositories
      - foreman-proxy
    command: ['/usr/bin/register.sh']
    environment:
      # NOTE(review): host uses an underscore (foreman_proxy) while the
      # service is named foreman-proxy -- confirm this matches the actual
      # container hostname / service-discovery name.
      - FOREMAN_PROXY_SERVICE_HOST=foreman_proxy
      - FOREMAN_PROXY_SERVICE_PORT=8080
      - FOREMAN_SERVICE_HOST=foreman
      - FOREMAN_SERVICE_PORT=8080
    depends_on:
      - foreman
      - foreman-proxy
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  candlepin:
    from: centos:7
    roles:
      - epel-repositories
      - katello-repositories
      - candlepin
    environment:
      - POSTGRES_DB={{ POSTGRES_DB }}
      - POSTGRES_USER={{ POSTGRES_USER }}
      - POSTGRES_PASSWORD={{ POSTGRES_PASSWORD }}
      - POSTGRES_PORT={{ POSTGRES_PORT }}
      - POSTGRES_SERVICE=postgres
      - QPID_SERVICE=qpid
      - QPID_PORT=5672
    expose:
      - 8080
    command: ['/usr/bin/start_candlepin.sh']
    entrypoint: ['/usr/bin/entrypoint.sh']
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
      security_context:
        run_as_user: 1000
  qpid:
    from: centos:7
    roles:
      - epel-repositories
      - qpid
    command: ['/usr/bin/startup.sh']
    expose:
      - 5672
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  # Build-only base layer shared by the pulp-* images (see foreman-base).
  pulp-base:
    from: centos:7
    roles:
      - epel-repositories
      - katello-repositories
      - pulp
    openshift:
      state: absent
  pulp:
    from: foreman-pulp-base:latest
    roles:
      - noop
    command: ['/usr/bin/start_httpd.sh']
    entrypoint: ['/usr/bin/entrypoint.sh']
    expose:
      - 8080
    depends_on:
      - qpid
      - mongodb
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  pulp-worker:
    from: foreman-pulp-base:latest
    roles:
      - pulp-worker
    entrypoint: ['/usr/bin/entrypoint.sh']
    # FIX: the original file declared `depends_on` twice for this service;
    # YAML parsers silently keep only the last duplicate key, so the
    # qpid/pulp dependencies were being dropped. Merged into one list.
    depends_on:
      - qpid
      - pulp
      - mongodb
    volumes:
      - pulp-data:/var/lib/pulp
      - puppet-data:/etc/puppet
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  pulp-resource-manager:
    from: foreman-pulp-base:latest
    roles:
      - pulp-resource-manager
    depends_on:
      - qpid
      - pulp
    entrypoint: ['/usr/bin/entrypoint.sh']
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  pulp-celerybeat:
    from: foreman-pulp-base:latest
    roles:
      - pulp-celerybeat
    entrypoint: ['/usr/bin/entrypoint.sh']
    depends_on:
      - qpid
      - pulp
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  puppet:
    from: puppet/puppetserver
    volumes:
      - puppet-data:/etc/puppet
    expose:
      - 8140
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  postgres:
    from: ansible/postgresql:latest
    environment:
      - POSTGRES_DB={{ POSTGRES_DB }}
      - POSTGRES_USER={{ POSTGRES_USER }}
      - POSTGRES_PASS={{ POSTGRES_PASSWORD }}
      - PGDATA=/var/lib/pgsql/data/userdata
    volumes:
      - postgres-data:/var/lib/pgsql/data
    expose:
      - "{{ POSTGRES_PORT }}"
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1
  mongodb:
    from: centos/mongodb-26-centos7:latest
    environment:
      - MONGODB_USER={{ MONGODB_USER }}
      - MONGODB_PASSWORD={{ MONGODB_PASSWORD }}
      - MONGODB_DATABASE={{ MONGODB_DATABASE }}
      - MONGODB_ADMIN_PASSWORD={{ MONGODB_PASSWORD }}
    volumes:
      - mongodb-data:/var/lib/mongodb/data
    expose:
      - "{{ MONGODB_PORT }}"
    openshift:
      state: present
      deployment:
        force: false
        replicas: 1

# Named persistent volumes; each becomes an OpenShift PVC
# (ReadWriteOnce, 1Gi requested).
volumes:
  postgres-data:
    openshift:
      state: present
      force: false
      access_modes:
        - ReadWriteOnce
      requested_storage: 1Gi
  mongodb-data:
    openshift:
      state: present
      force: false
      access_modes:
        - ReadWriteOnce
      requested_storage: 1Gi
  # Shared between the puppet server and the pulp workers.
  puppet-data:
    openshift:
      state: present
      force: false
      access_modes:
        - ReadWriteOnce
      requested_storage: 1Gi
  pulp-data:
    openshift:
      state: present
      force: false
      access_modes:
        - ReadWriteOnce
      requested_storage: 1Gi

# Push target for `ansible-container push`: the local OpenShift
# (oc cluster up) integrated registry.
registries:
  oc-cluster:
    url: 172.30.1.1:5000
    namespace: foreman
OS / ENVIRONMENT
Ansible Container, version 0.9.2rc0
Linux, war.eagle, 4.6.6-200.fc23.x86_64, #1 SMP Wed Aug 10 23:13:35 UTC 2016, x86_64
2.7.11 (default, Jul  8 2016, 19:45:00) 
[GCC 5.3.1 20160406 (Red Hat 5.3.1-6)] /usr/bin/python
{
  "ContainersPaused": 0, 
  "Labels": null, 
  "DefaultRuntime": "runc", 
  "CgroupDriver": "cgroupfs", 
  "ContainersRunning": 12, 
  "NGoroutines": 99, 
  "Swarm": {
    "Managers": 0, 
    "ControlAvailable": false, 
    "NodeID": "", 
    "Cluster": {
      "Spec": {
        "Raft": {}, 
        "CAConfig": {}, 
        "Dispatcher": {}, 
        "Orchestration": {}, 
        "TaskDefaults": {}
      }, 
      "Version": {}, 
      "ID": "", 
      "CreatedAt": "0001-01-01T00:00:00Z", 
      "UpdatedAt": "0001-01-01T00:00:00Z"
    }, 
    "Nodes": 0, 
    "Error": "", 
    "RemoteManagers": null, 
    "LocalNodeState": "inactive", 
    "NodeAddr": ""
  }, 
  "LoggingDriver": "json-file", 
  "OSType": "linux", 
  "HttpProxy": "", 
  "Runtimes": {
    "runc": {
      "path": "docker-runc"
    }
  }, 
  "DriverStatus": [
    [
      "Pool Name", 
      "docker-253:3-17170446-pool"
    ], 
    [
      "Pool Blocksize", 
      "65.54 kB"
    ], 
    [
      "Base Device Size", 
      "10.74 GB"
    ], 
    [
      "Backing Filesystem", 
      "xfs"
    ], 
    [
      "Data file", 
      "/dev/loop0"
    ], 
    [
      "Metadata file", 
      "/dev/loop1"
    ], 
    [
      "Data Space Used", 
      "13.33 GB"
    ], 
    [
      "Data Space Total", 
      "107.4 GB"
    ], 
    [
      "Data Space Available", 
      "94.04 GB"
    ], 
    [
      "Metadata Space Used", 
      "19.08 MB"
    ], 
    [
      "Metadata Space Total", 
      "2.147 GB"
    ], 
    [
      "Metadata Space Available", 
      "2.128 GB"
    ], 
    [
      "Thin Pool Minimum Free Space", 
      "10.74 GB"
    ], 
    [
      "Udev Sync Supported", 
      "true"
    ], 
    [
      "Deferred Removal Enabled", 
      "false"
    ], 
    [
      "Deferred Deletion Enabled", 
      "false"
    ], 
    [
      "Deferred Deleted Device Count", 
      "0"
    ], 
    [
      "Data loop file", 
      "/home/docker/devicemapper/devicemapper/data"
    ], 
    [
      "Metadata loop file", 
      "/home/docker/devicemapper/devicemapper/metadata"
    ], 
    [
      "Library Version", 
      "1.02.109 (2015-09-22)"
    ]
  ], 
  "OperatingSystem": "Fedora 23 (Workstation Edition)", 
  "Containers": 15, 
  "HttpsProxy": "", 
  "BridgeNfIp6tables": true, 
  "MemTotal": 16700760064, 
  "Driver": "devicemapper", 
  "IndexServerAddress": "https://index.docker.io/v1/", 
  "ClusterStore": "", 
  "ExecutionDriver": "", 
  "SystemStatus": null, 
  "OomKillDisable": true, 
  "ClusterAdvertise": "", 
  "SystemTime": "2017-05-26T21:46:26.658643314-04:00", 
  "Name": "war.eagle", 
  "CPUSet": true, 
  "RegistryConfig": {
    "InsecureRegistryCIDRs": [
      "172.30.0.0/16", 
      "127.0.0.0/8"
    ], 
    "IndexConfigs": {
      "docker.io": {
        "Official": true, 
        "Name": "docker.io", 
        "Secure": true, 
        "Mirrors": null
      }
    }, 
    "Mirrors": null
  }, 
  "SecurityOptions": [
    "seccomp"
  ], 
  "ContainersStopped": 3, 
  "NCPU": 8, 
  "NFd": 114, 
  "Architecture": "x86_64", 
  "KernelMemory": true, 
  "CpuCfsQuota": true, 
  "Debug": false, 
  "ID": "PP7J:YWUF:VJG7:OPQQ:AVCZ:BEIA:XEX2:BB7W:ELOD:I2QM:O472:Y2MB", 
  "IPv4Forwarding": true, 
  "KernelVersion": "4.6.6-200.fc23.x86_64", 
  "BridgeNfIptables": true, 
  "NoProxy": "", 
  "LiveRestoreEnabled": false, 
  "ServerVersion": "1.12.6", 
  "CpuCfsPeriod": true, 
  "ExperimentalBuild": false, 
  "MemoryLimit": true, 
  "SwapLimit": true, 
  "Plugins": {
    "Volume": [
      "local"
    ], 
    "Network": [
      "bridge", 
      "host", 
      "null", 
      "overlay"
    ], 
    "Authorization": null
  }, 
  "Images": 60, 
  "DockerRootDir": "/home/docker", 
  "NEventsListener": 0, 
  "CPUShares": true
}
{
  "KernelVersion": "4.6.6-200.fc23.x86_64", 
  "Os": "linux", 
  "BuildTime": "2017-01-10T20:21:11.017972579+00:00", 
  "ApiVersion": "1.24", 
  "Version": "1.12.6", 
  "GitCommit": "78d1802", 
  "Arch": "amd64", 
  "GoVersion": "go1.6.4"
}
SUMMARY

Attempting to run ansible-container run to spin up the deployment stack locally, and I get an error. However, the error is not clear about what the problem is, so I cannot take steps to resolve the issue. I also tried running the run command with --debug, but the output is so verbose that I was unable to decipher what the issue might be.

ehelms@war openshift (openshift-2.0)$ ansible-container run
Parsing conductor CLI args.
Engine integration loaded. Preparing run.   engine=Docker™ daemon
Verifying service image service=foreman-base
Verifying service image service=foreman
Verifying service image service=foreman-tasks
Verifying service image service=foreman-proxy
Verifying service image service=foreman-proxy-register
Verifying service image service=candlepin
Verifying service image service=qpid
Verifying service image service=pulp-base
Verifying service image service=pulp
Verifying service image service=pulp-worker
Verifying service image service=pulp-resource-manager
Verifying service image service=pulp-celerybeat

PLAY [localhost] ***************************************************************

TASK [docker_service] **********************************************************
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "", "module_stdout": "", "msg": "Error starting project "}

msg: Error starting project

PLAY RECAP *********************************************************************
localhost                  : ok=0    changed=0    unreachable=0    failed=1

All services running.   playbook_rc=2
Conductor terminated. Cleaning up.  command_rc=0 conductor_id=c5be01d7f7f0b603470565719366672ccd9ee8bf4831b10be9bc139a7e0bde35 save_container=False
ehelms commented 7 years ago

I'm not sure if it did fail though? If I look at docker ps some things do appear to be running:

CONTAINER ID        IMAGE                                          COMMAND                  CREATED             STATUS              PORTS                    NAMES
b015f2a4c52c        foreman-pulp-resource-manager:20170527012712   "/usr/bin/entrypoint."   49 minutes ago      Up 49 minutes                                foreman_pulp-resource-manager_1
ac5c34744eca        foreman-pulp-celerybeat:20170527012718         "/usr/bin/entrypoint."   49 minutes ago      Up 49 minutes                                foreman_pulp-celerybeat_1
54300c33a659        foreman-pulp:20170527012700                    "/usr/bin/entrypoint."   49 minutes ago      Up 49 minutes       8080/tcp                 foreman_pulp_1
e8e21b82142e        foreman-pulp-worker:20170527012706             "/usr/bin/entrypoint."   49 minutes ago      Up 49 minutes                                foreman_pulp-worker_1
66627c6d8fee        foreman-foreman-proxy:20170527012239           "/usr/bin/start-forem"   49 minutes ago      Up 49 minutes       8080/tcp                 foreman_foreman-proxy_1
b7c1aebadb4c        centos/mongodb-26-centos7:latest               "container-entrypoint"   49 minutes ago      Up 49 minutes       27017/tcp                foreman_mongodb_1
c77bdac68bb3        ansible/postgresql:latest                      "/usr/bin/entrypoint."   49 minutes ago      Up 49 minutes       5432/tcp                 foreman_postgres_1
66105c853d0d        foreman-foreman:20170527012136                 "/usr/bin/entrypoint."   49 minutes ago      Up 49 minutes       0.0.0.0:8080->8080/tcp   foreman_foreman_1
84d21ee78a9d        foreman-foreman-tasks:20170527012148           "/usr/bin/entrypoint."   49 minutes ago      Up 49 minutes                                foreman_foreman-tasks_1
d53c40f61932        foreman-qpid:20170527012532                    "/usr/bin/startup.sh"    49 minutes ago      Up 49 minutes       5672/tcp                 foreman_qpid_1
35bad9d7f524        foreman-candlepin:20170527012454               "/usr/bin/entrypoint."   49 minutes ago      Up 49 minutes       8080/tcp                 foreman_candlepin_1
a9521850b243        puppet/puppetserver:latest                     "dumb-init /docker-en"   49 minutes ago      Up 49 minutes       8140/tcp                 foreman_puppet_1
ehelms commented 7 years ago

Here is some output from running with --debug since I hit this on two separate projects and I have no idea why they don't run:

TASK [docker_service] **********************************************************
task path: /home/ehelms/workspace/downstream/satellite-build/ansible-deployment/playbook.yml:3
Tuesday 30 May 2017  13:58:32 +0000 (0:00:00.040)       0:00:00.040 ***********
Using module_utils file /usr/lib/python2.7/site-packages/ansible/module_utils/basic.py
Using module_utils file /usr/lib/python2.7/site-packages/ansible/module_utils/docker_common.py
Using module_utils file /usr/lib/python2.7/site-packages/ansible/module_utils/_text.py
Using module_utils file /usr/lib/python2.7/site-packages/ansible/module_utils/pycompat24.py
Using module_utils file /usr/lib/python2.7/site-packages/ansible/module_utils/six/__init__.py
Using module_utils file /usr/lib/python2.7/site-packages/ansible/module_utils/six/_six.py
Using module file /usr/lib/python2.7/site-packages/ansible/modules/cloud/docker/docker_service.py
<127.0.0.1> ESTABLISH LOCAL CONNECTION FOR USER: root
<127.0.0.1> EXEC /bin/sh -c 'echo ~ && sleep 0'
<127.0.0.1> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp/ansible-tmp-1496152712.19-192579723797466 `" && echo ansible-tmp-1496152712.19-192579723797466="` echo /root/.ansible/tmp/ansible-tmp-1496152712.19-192579723797466 `" ) && sleep 0'
<127.0.0.1> PUT /tmp/tmp2HRTZz TO /root/.ansible/tmp/ansible-tmp-1496152712.19-192579723797466/docker_service.py
<127.0.0.1> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1496152712.19-192579723797466/ /root/.ansible/tmp/ansible-tmp-1496152712.19-192579723797466/docker_service.py && sleep 0'
<127.0.0.1> EXEC /bin/sh -c '/usr/bin/python /root/.ansible/tmp/ansible-tmp-1496152712.19-192579723797466/docker_service.py; rm -rf "/root/.ansible/tmp/ansible-tmp-1496152712.19-192579723797466/" > /dev/null 2>&1 && sleep 0'
chouseknecht commented 7 years ago

I think this may be an issue with the docker_service module, and perhaps related to ansible issue #20480. Need to investigate further.