Environment (printf "$(uname -srm)\n$(cat /etc/os-release)\n"):
Linux 4.15.0-43-generic x86_64
NAME="Ubuntu"
VERSION="16.04.5 LTS (Xenial Xerus)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 16.04.5 LTS"
VERSION_ID="16.04"
HOME_URL="http://www.ubuntu.com/"
SUPPORT_URL="http://help.ubuntu.com/"
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
VERSION_CODENAME=xenial
UBUNTU_CODENAME=xenial
Version of Ansible (ansible --version):
root@heku1 ~/kubespray# ansible --version
ansible 2.7.5
config file = /root/kubespray/ansible.cfg
configured module search path = [u'/root/kubespray/library']
ansible python module location = /usr/lib/python2.7/dist-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.12 (default, Nov 12 2018, 14:36:49) [GCC 5.4.0 20160609]
Kubespray version (commit) (git rev-parse --short HEAD):
7c620ad
Network plugin used:
calico
Copy of your inventory file:
root@heku1 ~/kubespray# cat inventory/myCluster/inventory.ip
[all:vars]
[kube-master]
heku1 ansible_host=94.20.20.247 ansible_ssh_user=root ansible_ssh_pass=GJksmlkjsflkj0923kAL ansible_connection=ssh ansible_port=122
heku2 ansible_host=94.20.20.248 ansible_ssh_user=root ansible_ssh_pass=AkfmLkdfj20934LAkmf ansible_connection=ssh ansible_port=122
heku3 ansible_host=94.20.20.249 ansible_ssh_user=root ansible_ssh_pass=Lkmflj23lAkmlal1masd ansible_connection=ssh ansible_port=122
[etcd]
heku1 ansible_host=94.20.20.247 ansible_ssh_user=root ansible_ssh_pass=GJksmlkjsflkj0923kAL ansible_connection=ssh ansible_port=122
heku2 ansible_host=94.20.20.248 ansible_ssh_user=root ansible_ssh_pass=AkfmLkdfj20934LAkmf ansible_connection=ssh ansible_port=122
heku3 ansible_host=94.20.20.249 ansible_ssh_user=root ansible_ssh_pass=Lkmflj23lAkmlal1masdKALf93 ansible_connection=ssh ansible_port=122
[kube-node]
heku1 ansible_host=94.20.20.247 ansible_ssh_user=root ansible_ssh_pass=GJksmlkjsflkj0923kAL ansible_connection=ssh ansible_port=122
heku2 ansible_host=94.20.20.248 ansible_ssh_user=root ansible_ssh_pass=AkfmLkdfj20934LAkmf ansible_connection=ssh ansible_port=122
heku3 ansible_host=94.20.20.249 ansible_ssh_user=root ansible_ssh_pass=Lkmflj23lAkmlal1masd ansible_connection=ssh ansible_port=122
heku4 ansible_host=144.112.133.131 ansible_ssh_user=root ansible_ssh_pass=Llmflakd4mlKAmfasdkf ansible_connection=ssh ansible_port=122
[k8s-cluster:children]
kube-node
kube-master
Command used to invoke ansible:
ansible-playbook cluster.yml -i inventory/myCluster/inventory.ip --become --become-user=root -vvv --limit "heku4"
Output of ansible run:
TASK [kubernetes/kubeadm : Create kubeadm token for joining nodes with 24h expiration (default)] ***************************************************************************************************
task path: /root/kubespray/roles/kubernetes/kubeadm/tasks/main.yml:24
Friday 11 January 2019 22:40:41 +0100 (0:00:00.198) 0:00:53.877 ********
Using module file /usr/lib/python2.7/dist-packages/ansible/modules/commands/command.py
<94.20.20.247> ESTABLISH SSH CONNECTION FOR USER: root
<94.20.20.247> SSH: EXEC sshpass -d6 ssh -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=122 -o User=root -o ConnectTimeout=10 -o ControlPath=/root/.ansible/cp/45d18b338c 94.20.20.247 '/bin/sh -c '"'"'/usr/bin/python && sleep 0'"'"''
<94.20.20.247> (1, '\n{"exception": "WARNING: The below traceback may *not* be related to the actual failure.\\n File \\"/tmp/ansible_command_payload_ilkhck/ansible_command_payload.zip/ansible/module_utils/basic.py\\", line 2848, in run_command\\n cmd = subprocess.Popen(args, **kwargs)\\n File \\"/usr/lib/python2.7/subprocess.py\\", line 711, in __init__\\n errread, errwrite)\\n File \\"/usr/lib/python2.7/subprocess.py\\", line 1343, in _execute_child\\n raise child_exception\\n", "cmd": "/usr/local/bin/kubeadm token create", "failed": true, "rc": 2, "invocation": {"module_args": {"warn": true, "executable": null, "_uses_shell": false, "_raw_params": "/usr/local/bin/kubeadm token create", "removes": null, "argv": null, "creates": null, "chdir": null, "stdin": null}}, "msg": "[Errno 2] No such file or directory"}\n', '')
The full traceback is:
WARNING: The below traceback may *not* be related to the actual failure.
File "/tmp/ansible_command_payload_ilkhck/ansible_command_payload.zip/ansible/module_utils/basic.py", line 2848, in run_command
cmd = subprocess.Popen(args, **kwargs)
File "/usr/lib/python2.7/subprocess.py", line 711, in __init__
errread, errwrite)
File "/usr/lib/python2.7/subprocess.py", line 1343, in _execute_child
raise child_exception
fatal: [heku4 -> 94.20.20.247]: FAILED! => {
"changed": false,
"cmd": "/usr/local/bin/kubeadm token create",
"invocation": {
"module_args": {
"_raw_params": "/usr/local/bin/kubeadm token create",
"_uses_shell": false,
"argv": null,
"chdir": null,
"creates": null,
"executable": null,
"removes": null,
"stdin": null,
"warn": true
}
},
"msg": "[Errno 2] No such file or directory",
"rc": 2
}
NO MORE HOSTS LEFT *********************************************************************************************************************************************************************************
to retry, use: --limit @/root/kubespray/cluster.retry
PLAY RECAP *****************************************************************************************************************************************************************************************
heku4 : ok=223 changed=5 unreachable=0 failed=1
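Note that the failure is reported as [heku4 -> 94.20.20.247]: the task delegates to the first kube-master (heku1), so it is /usr/local/bin/kubeadm on the master that is missing, not anything on the new node heku4. A quick way to confirm where kubeadm actually lives on that host would be an ad-hoc check like the following (hypothetical command, not part of the original run):
root@heku1 ~/kubespray# ansible heku1 -i inventory/myCluster/inventory.ip -m shell -a 'command -v kubeadm || echo "kubeadm not in PATH"; ls -l /usr/local/bin/kubeadm /usr/bin/kubeadm'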
I don't know if this could help you, but I added the real path to the Ansible task in the file roles/kubernetes/kubeadm/tasks/main.yml:
- name: Create kubeadm token for joining nodes with 24h expiration (default)
  command: "/usr/bin/kubeadm token create"
  run_once: true
  register: temp_token
  delegate_to: "{{ groups['kube-master'][0] }}"
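A possible follow-up sketch (hypothetical, assuming the corrected path above is right for this setup): verify the binary on the first master with a read-only command, then re-run the play with the same limit. (If the role builds that path from Kubespray's bin_dir variable, overriding bin_dir in group_vars would be a less invasive alternative than editing the role, but that is not confirmed here.)
root@heku1 ~/kubespray# ssh -p 122 root@94.20.20.247 '/usr/bin/kubeadm token list'
root@heku1 ~/kubespray# ansible-playbook cluster.yml -i inventory/myCluster/inventory.ip --become --become-user=root --limit "heku4"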
Issues go stale after 90d of inactivity.
Mark the issue as fresh with /remove-lifecycle stale.
Stale issues rot after an additional 30d of inactivity and eventually close.
If this issue is safe to close now please do so with /close.
Send feedback to sig-testing, kubernetes/test-infra and/or fejta.
/lifecycle stale
Stale issues rot after 30d of inactivity.
Mark the issue as fresh with /remove-lifecycle rotten.
Rotten issues close after an additional 30d of inactivity.
If this issue is safe to close now please do so with /close.
Send feedback to sig-testing, kubernetes/test-infra and/or fejta.
/lifecycle rotten
Rotten issues close after 30d of inactivity.
Reopen the issue with /reopen.
Mark the issue as fresh with /remove-lifecycle rotten.
Send feedback to sig-testing, kubernetes/test-infra and/or fejta.
/close
@fejta-bot: Closing this issue.
In response to this:
Rotten issues close after 30d of inactivity.
Reopen the issue with /reopen.
Mark the issue as fresh with /remove-lifecycle rotten.
Send feedback to sig-testing, kubernetes/test-infra and/or fejta.
/close
Instructions for interacting with me using PR comments are available here. If you have questions or suggestions related to my behavior, please file an issue against the kubernetes/test-infra repository.
@de1m
Did you solve it? If yes, please tell us how!