Helm on Kubernetes: i/o timeout

I'm trying to use helm on my Kubernetes cluster. helm init went fine, but trying to install anything, or even just running helm list, gives this error:

$ helm list
Error: Get https://10.96.0.1:443/api/v1/namespaces/kube-system/configmaps?labelSelector=OWNER%!D(MISSING)TILLER: dial tcp 10.96.0.1:443: i/o timeout

 $ helm version
Client: &version.Version{SemVer:"v2.10.0", GitCommit:"9ad53aac42165a5fadc6c87be0dea6b115f93090", GitTreeState:"clean"}
Server: &version.Version{SemVer:"v2.10.0", GitCommit:"9ad53aac42165a5fadc6c87be0dea6b115f93090", GitTreeState:"clean"}

$ kubectl logs tiller-deploy-64c9d747bd-wgt8m -n kube-system 
[main] 2018/09/15 14:56:28 Starting Tiller v2.10.0 (tls=false) 
[main] 2018/09/15 14:56:28 GRPC listening on :44134 
[main] 2018/09/15 14:56:28 Probes listening on :44135 
[main] 2018/09/15 14:56:28 Storage driver is ConfigMap 
[main] 2018/09/15 14:56:28 Max history per release is 0 
[storage] 2018/09/15 15:59:03 listing all releases with filter 
[storage/driver] 2018/09/15 15:59:33 list: failed to list: Get https://10.96.0.1:443/api/v1/namespaces/kube-system/configmaps?labelSelector=OWNER%3DTILLER: dial tcp 10.96.0.1:443: i/o timeout
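
The same timeout shows up both from the helm client and from tiller itself. As a point of comparison, the API server can be probed through the service VIP directly from the master node (purely a reachability check with certificate verification skipped; it says nothing about auth):

# from the master node: probe the kubernetes service VIP that tiller is timing out on
$ curl -k --max-time 5 https://10.96.0.1:443/version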

I've been googling for hours and nothing seems to fix it. Here is some info about my cluster:

 $ kubectl cluster-info
Kubernetes master is running at https://192.168.0.68:6443
KubeDNS is running at https://192.168.0.68:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

 $ kubectl get pods --all-namespaces
NAMESPACE     NAME                                        READY     STATUS    RESTARTS   AGE
kube-system   calico-etcd-pljts                           1/1       Running   0          9h
kube-system   calico-node-cmtsv                           2/2       Running   4          9h
kube-system   calico-node-nkmtz                           2/2       Running   0          9h
kube-system   calico-node-nxshj                           2/2       Running   0          9h
kube-system   coredns-99b9bb8bd-c6nvc                     1/1       Running   0          7h
kube-system   coredns-99b9bb8bd-swhmt                     1/1       Running   0          7h
kube-system   etcd-gab-kube-master01                      1/1       Running   1          1d
kube-system   kube-apiserver-gab-kube-master01            1/1       Running   5          1d
kube-system   kube-controller-manager-gab-kube-master01   1/1       Running   1          1d
kube-system   kube-proxy-fwcvf                            1/1       Running   0          9h
kube-system   kube-proxy-msxtb                            1/1       Running   0          9h
kube-system   kube-proxy-v88kj                            1/1       Running   0          9h
kube-system   kube-scheduler-gab-kube-master01            1/1       Running   1          1d
kube-system   tiller-deploy-64c9d747bd-wgt8m              1/1       Running   0          1h


 $ kubectl get ep --all-namespaces
NAMESPACE     NAME                      ENDPOINTS                                                  AGE
default       kubernetes                192.168.0.68:6443                                          1d
default       tiller-deploy             <none>                                                     9h
kube-system   calico-etcd               192.168.0.68:6666                                          1d
kube-system   calico-typha              <none>                                                     9h
kube-system   kube-controller-manager   <none>                                                     1d
kube-system   kube-dns                  192.168.0.2:53,192.168.0.3:53,192.168.0.2:53 + 1 more...   1d
kube-system   kube-scheduler            <none>                                                     1d
kube-system   tiller-deploy             192.168.2.5:44134                                          10h


 $ kubectl exec -it -n kube-system tiller-deploy-64c9d747bd-wgt8m sh
~ $ printenv
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT=443
CALICO_ETCD_PORT=tcp://10.96.232.136:6666
CALICO_ETCD_SERVICE_PORT=6666
CALICO_ETCD_PORT_6666_TCP_ADDR=10.96.232.136
KUBE_DNS_SERVICE_PORT_DNS_TCP=53
TILLER_DEPLOY_SERVICE_HOST=10.96.9.83
HOSTNAME=tiller-deploy-64c9d747bd-wgt8m
SHLVL=1
CALICO_ETCD_PORT_6666_TCP_PORT=6666
HOME=/tmp
CALICO_ETCD_PORT_6666_TCP_PROTO=tcp
KUBE_DNS_SERVICE_HOST=10.96.0.10
TILLER_DEPLOY_SERVICE_PORT=44134
TILLER_DEPLOY_PORT=tcp://10.96.9.83:44134
TILLER_DEPLOY_PORT_44134_TCP_ADDR=10.96.9.83
CALICO_ETCD_PORT_6666_TCP=tcp://10.96.232.136:6666
KUBE_DNS_PORT=udp://10.96.0.10:53
KUBE_DNS_SERVICE_PORT=53
TILLER_DEPLOY_PORT_44134_TCP_PORT=44134
TILLER_DEPLOY_PORT_44134_TCP_PROTO=tcp
TILLER_HISTORY_MAX=0
TILLER_NAMESPACE=kube-system
CALICO_TYPHA_SERVICE_PORT_CALICO_TYPHA=5473
CALICO_TYPHA_SERVICE_HOST=10.106.249.221
TERM=xterm
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
TILLER_DEPLOY_SERVICE_PORT_TILLER=44134
CALICO_TYPHA_PORT_5473_TCP_ADDR=10.106.249.221
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
KUBE_DNS_PORT_53_TCP_ADDR=10.96.0.10
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
CALICO_TYPHA_PORT_5473_TCP_PORT=5473
KUBE_DNS_PORT_53_UDP_ADDR=10.96.0.10
CALICO_TYPHA_PORT_5473_TCP_PROTO=tcp
CALICO_TYPHA_PORT=tcp://10.106.249.221:5473
CALICO_TYPHA_SERVICE_PORT=5473
TILLER_DEPLOY_PORT_44134_TCP=tcp://10.96.9.83:44134
KUBE_DNS_PORT_53_TCP_PORT=53
KUBE_DNS_PORT_53_TCP_PROTO=tcp
KUBE_DNS_PORT_53_UDP_PORT=53
KUBE_DNS_SERVICE_PORT_DNS=53
KUBE_DNS_PORT_53_UDP_PROTO=udp
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT_HTTPS=443
PWD=/
KUBERNETES_SERVICE_HOST=10.96.0.1
CALICO_TYPHA_PORT_5473_TCP=tcp://10.106.249.221:5473
CALICO_ETCD_SERVICE_HOST=10.96.232.136
KUBE_DNS_PORT_53_TCP=tcp://10.96.0.10:53
KUBE_DNS_PORT_53_UDP=udp://10.96.0.10:53
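
The environment shows tiller is pointed at the service VIP 10.96.0.1:443, which matches the error. A quick way to test whether that VIP is reachable from the pod network at all (a sketch; the busybox image and the pod name netprobe are arbitrary) is a throwaway pod:

$ kubectl run -it --rm netprobe --image=busybox --restart=Never -- nc -zv -w 5 10.96.0.1 443

If this times out as well, the problem is networking/routing rather than anything tiller-specific.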
  • What I have tried:
    • Restarting all nodes
    • Removing Calico and reinstalling it
    • Upgrading Calico
    • Installing Helm into the default namespace
    • Banging my head against the desk
asked 15 September 2018 at 21:12

1 answer

I was able to fix this by rebuilding the cluster with a different CIDR (it previously used the same CIDR as the host VMs, 192.168.0.0/16; I switched to 172.16.0.0/16), and it worked right away.
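
For reference, the collision is visible in the question itself: the nodes sit on 192.168.0.x (the master is 192.168.0.68) while the stock Calico manifest defaults its pod IP pool to 192.168.0.0/16, so pod and host addresses overlap. A rough sketch of the rebuild, assuming a kubeadm-provisioned cluster (the etcd-gab-kube-master01 / kube-apiserver-gab-kube-master01 static pods suggest one) and the standard Calico manifest; calico.yaml below stands for whichever manifest you actually install Calico from:

# on every node: tear down the existing cluster state
$ sudo kubeadm reset

# on the master: re-initialize with a pod CIDR that does not overlap the host LAN
$ sudo kubeadm init --pod-network-cidr=172.16.0.0/16

# edit the Calico manifest so CALICO_IPV4POOL_CIDR reads "172.16.0.0/16", then apply it
$ kubectl apply -f calico.yaml

After re-joining the worker nodes with kubeadm join, helm list should reach the API server without the timeout.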

answered 3 December 2019 at 20:12
