I have deployed OpenShift 3.10 on 3 VMs (Master node (etcd on the master), Infra node, Compute node).
I am seeing following issue with my integrated docker registry "docker-registry.default.svc:5000":
[root@master ~]# docker push docker-registry.default.svc:5000/rkproject/rknodejs:v1.0
The push refers to a repository [docker-registry.default.svc:5000/rkproject/rknodejs]
c43034b1a2f0: Preparing
3e62d50a52ae: Preparing
ea018628f99e: Preparing
2793dc0607dd: Preparing
74800c25aa8c: Preparing
ba504a540674: Waiting
81101ce649d5: Waiting
daf45b2cad9a: Waiting
8c466bf4ca6f: Waiting
Error: Status 404 trying to push repository rkproject/rknodejs: "<HTML><HEAD>\n<TITLE>Network Error</TITLE>\n</HEAD>\n<BODY>\n<FONT face=\"Helvetica\">\n<big><strong></strong></big><BR>\n</FONT>\n<blockquote>\n<TABLE border=0 cellPadding=1 width=\"80%\">\n<TR><TD>\n<FONT face=\"Helvetica\">\n<big>Network Error (dns_unresolved_hostname)</big>\n<BR>\n<BR>\n</FONT>\n</TD></TR>\n<TR><TD>\n<FONT face=\"Helvetica\">\nYour requested host \"docker-registry.default.svc\" could not be resolved by DNS.\n</FONT>\n</TD></TR>\n<TR><TD>\n<FONT face=\"Helvetica\">\n\n</FONT>\n</TD></TR>\n<TR><TD>\n<FONT face=\"Helvetica\" SIZE=2>\n<BR>\nFor assistance, contact your network support team.\n</FONT>\n</TD></TR>\n</TABLE>\n</blockquote>\n</FONT>\n</BODY></HTML>\n"
[root@master ~]#
nslookup resolves the hostname without any issues.
[root@master ~]# nslookup docker-registry.default.svc
Server: 15.115.14.21
Address: 15.115.14.21#53
Name: docker-registry.default.svc.cluster.local
Address: 172.30.221.136
[root@master ~]#
[root@master ~]# curl -v https://docker-registry.default.svc:5000 --insecure
* About to connect() to docker-registry.default.svc port 5000 (#0)
* Trying 172.30.221.136...
* Connected to docker-registry.default.svc (172.30.221.136) port 5000 (#0)
* Initializing NSS with certpath: sql:/etc/pki/nssdb
* skipping SSL peer certificate verification
* SSL connection using TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
* Server certificate:
* subject: CN=172.30.221.136
* start date: Sep 24 05:26:16 2018 GMT
* expire date: Sep 23 05:26:17 2020 GMT
* common name: 172.30.221.136
* issuer: CN=openshift-signer@1537759507
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: docker-registry.default.svc:5000
> Accept: */*
>
< HTTP/1.1 200 OK
< Cache-Control: no-cache
< Date: Wed, 03 Oct 2018 09:06:29 GMT
< Content-Length: 0
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host docker-registry.default.svc left intact
[root@master ~]#
[root@master ~]# oc version
oc v3.10.45
kubernetes v1.10.0+b81c8f8
features: Basic-Auth GSSAPI Kerberos SPNEGO
Server https://master.rkdomain.test:8443
openshift v3.10.45
kubernetes v1.10.0+b81c8f8
[root@master ~]#
docker build -t docker-registry.default.svc:5000/rknodejs .
docker push docker-registry.default.svc:5000/rkproject/rknodejs:v1.0
In the 3rd step above it fails with the error "Your requested host "docker-registry.default.svc" could not be resolved by DNS."
docker push command should work without any issue.
Content of /etc/sysconfig/docker
OPTIONS=' --selinux-enabled --insecure-registry docker-registry.default.svc:5000 --signature-verification=False'
if [ -z "${DOCKER_CERT_PATH}" ]; then
DOCKER_CERT_PATH=/etc/docker
fi
ADD_REGISTRY='--add-registry registry.access.redhat.com'
Content of /etc/containers/registries.conf
[registries.search]
registries = ['registry.access.redhat.com']
[registries.insecure]
registries = ['docker-registry.default.svc']
[registries.block]
registries = []
Content of /etc/docker/daemon.json
[root@master test-docker]# cat /etc/docker/daemon.json
{
"dns": ["15.115.14.21"]
}
[root@master test-docker]#
oc whoami
[root@master test-docker]# oc whoami
demo
[root@master test-docker]#
oc project
[root@master test-docker]# oc project
Using project "rkproject" on server "https://master.rkdomain.test:8443".
[root@master test-docker]#
oc get all -o json
{
"apiVersion": "v1",
"items": [
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"annotations": {
"openshift.io/deployment-config.latest-version": "1",
"openshift.io/deployment-config.name": "rkproject",
"openshift.io/deployment.name": "rkproject-1",
"openshift.io/generated-by": "OpenShiftNewApp",
"openshift.io/scc": "restricted"
},
"creationTimestamp": "2018-09-29T07:36:36Z",
"generateName": "rkproject-1-",
"labels": {
"app": "rkproject",
"deployment": "rkproject-1",
"deploymentconfig": "rkproject"
},
"name": "rkproject-1-j86p8",
"namespace": "rkproject",
"ownerReferences": [
{
"apiVersion": "v1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "ReplicationController",
"name": "rkproject-1",
"uid": "639b6a8d-c3ba-11e8-b5a7-005056a57c87"
}
],
"resourceVersion": "881014",
"selfLink": "/api/v1/namespaces/rkproject/pods/rkproject-1-j86p8",
"uid": "6562bf79-c3ba-11e8-b5a7-005056a57c87"
},
"spec": {
"containers": [
{
"image": "quay.io/raj4linux/rkproject:v1.0",
"imagePullPolicy": "IfNotPresent",
"name": "rkproject",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {},
"securityContext": {
"capabilities": {
"drop": [
"KILL",
"MKNOD",
"SETGID",
"SETUID"
]
},
"runAsUser": 1000170000
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-6p7p8",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"imagePullSecrets": [
{
"name": "default-dockercfg-6j7wq"
}
],
"nodeName": "nodeone.rkdomain.test",
"nodeSelector": {
"node-role.kubernetes.io/compute": "true"
},
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {
"fsGroup": 1000170000,
"seLinuxOptions": {
"level": "s0:c13,c7"
}
},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"volumes": [
{
"name": "default-token-6p7p8",
"secret": {
"defaultMode": 420,
"secretName": "default-token-6p7p8"
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2018-09-29T07:36:36Z",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2018-09-29T07:36:38Z",
"status": "True",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2018-09-29T07:36:36Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "docker://7b8a5684f93d63378f4172e2c8af2b522587212aaf1ce43e6901300e81c45c7c",
"image": "quay.io/raj4linux/rkproject:v1.0",
"imageID": "docker-pullable://quay.io/raj4linux/rkproject@sha256:101c220b6cb8977e4912d8bf69e4b5cdb345f285ce79520fdc7931170eee191d",
"lastState": {},
"name": "rkproject",
"ready": true,
"restartCount": 0,
"state": {
"running": {
"startedAt": "2018-09-29T07:36:38Z"
}
}
}
],
"hostIP": "192.168.151.7",
"phase": "Running",
"podIP": "10.130.0.100",
"qosClass": "BestEffort",
"startTime": "2018-09-29T07:36:36Z"
}
},
{
"apiVersion": "v1",
"kind": "ReplicationController",
"metadata": {
"annotations": {
"openshift.io/deployer-pod.completed-at": "2018-09-29 07:36:38 +0000 UTC",
"openshift.io/deployer-pod.created-at": "2018-09-29 07:36:33 +0000 UTC",
"openshift.io/deployer-pod.name": "rkproject-1-deploy",
"openshift.io/deployment-config.latest-version": "1",
"openshift.io/deployment-config.name": "rkproject",
"openshift.io/deployment.phase": "Complete",
"openshift.io/deployment.replicas": "1",
"openshift.io/deployment.status-reason": "config change",
"openshift.io/encoded-deployment-config": "{\"kind\":\"DeploymentConfig\",\"apiVersion\":\"v1\",\"metadata\":{\"name\":\"rkproject\",\"namespace\":\"rkproject\",\"selfLink\":\"/apis/apps.openshift.io/v1/namespaces/rkproject/deploymentconfigs/rkproject\",\"uid\":\"63961ad6-c3ba-11e8-b5a7-005056a57c87\",\"resourceVersion\":\"880971\",\"generation\":1,\"creationTimestamp\":\"2018-09-29T07:36:33Z\",\"labels\":{\"app\":\"rkproject\"},\"annotations\":{\"openshift.io/generated-by\":\"OpenShiftNewApp\"}},\"spec\":{\"strategy\":{\"type\":\"Rolling\",\"rollingParams\":{\"updatePeriodSeconds\":1,\"intervalSeconds\":1,\"timeoutSeconds\":600,\"maxUnavailable\":\"25%\",\"maxSurge\":\"25%\"},\"resources\":{},\"activeDeadlineSeconds\":21600},\"triggers\":[{\"type\":\"ConfigChange\"}],\"replicas\":1,\"revisionHistoryLimit\":10,\"test\":false,\"selector\":{\"app\":\"rkproject\",\"deploymentconfig\":\"rkproject\"},\"template\":{\"metadata\":{\"creationTimestamp\":null,\"labels\":{\"app\":\"rkproject\",\"deploymentconfig\":\"rkproject\"},\"annotations\":{\"openshift.io/generated-by\":\"OpenShiftNewApp\"}},\"spec\":{\"containers\":[{\"name\":\"rkproject\",\"image\":\"quay.io/raj4linux/rkproject:v1.0\",\"ports\":[{\"containerPort\":8080,\"protocol\":\"TCP\"}],\"resources\":{},\"terminationMessagePath\":\"/dev/termination-log\",\"terminationMessagePolicy\":\"File\",\"imagePullPolicy\":\"IfNotPresent\"}],\"restartPolicy\":\"Always\",\"terminationGracePeriodSeconds\":30,\"dnsPolicy\":\"ClusterFirst\",\"securityContext\":{},\"schedulerName\":\"default-scheduler\"}}},\"status\":{\"latestVersion\":1,\"observedGeneration\":0,\"replicas\":0,\"updatedReplicas\":0,\"availableReplicas\":0,\"unavailableReplicas\":0,\"details\":{\"message\":\"config change\",\"causes\":[{\"type\":\"ConfigChange\"}]}}}\n"
},
"creationTimestamp": "2018-09-29T07:36:33Z",
"generation": 2,
"labels": {
"app": "rkproject",
"openshift.io/deployment-config.name": "rkproject"
},
"name": "rkproject-1",
"namespace": "rkproject",
"ownerReferences": [
{
"apiVersion": "apps.openshift.io/v1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "DeploymentConfig",
"name": "rkproject",
"uid": "63961ad6-c3ba-11e8-b5a7-005056a57c87"
}
],
"resourceVersion": "881021",
"selfLink": "/api/v1/namespaces/rkproject/replicationcontrollers/rkproject-1",
"uid": "639b6a8d-c3ba-11e8-b5a7-005056a57c87"
},
"spec": {
"replicas": 1,
"selector": {
"app": "rkproject",
"deployment": "rkproject-1",
"deploymentconfig": "rkproject"
},
"template": {
"metadata": {
"annotations": {
"openshift.io/deployment-config.latest-version": "1",
"openshift.io/deployment-config.name": "rkproject",
"openshift.io/deployment.name": "rkproject-1",
"openshift.io/generated-by": "OpenShiftNewApp"
},
"creationTimestamp": null,
"labels": {
"app": "rkproject",
"deployment": "rkproject-1",
"deploymentconfig": "rkproject"
}
},
"spec": {
"containers": [
{
"image": "quay.io/raj4linux/rkproject:v1.0",
"imagePullPolicy": "IfNotPresent",
"name": "rkproject",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File"
}
],
"dnsPolicy": "ClusterFirst",
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"terminationGracePeriodSeconds": 30
}
}
},
"status": {
"availableReplicas": 1,
"fullyLabeledReplicas": 1,
"observedGeneration": 2,
"readyReplicas": 1,
"replicas": 1
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"annotations": {
"openshift.io/generated-by": "OpenShiftNewApp"
},
"creationTimestamp": "2018-09-29T07:36:33Z",
"labels": {
"app": "rkproject"
},
"name": "rkproject",
"namespace": "rkproject",
"resourceVersion": "880970",
"selfLink": "/api/v1/namespaces/rkproject/services/rkproject",
"uid": "639999b3-c3ba-11e8-b5a7-005056a57c87"
},
"spec": {
"clusterIP": "172.30.136.180",
"ports": [
{
"name": "8080-tcp",
"port": 8080,
"protocol": "TCP",
"targetPort": 8080
}
],
"selector": {
"app": "rkproject",
"deploymentconfig": "rkproject"
},
"sessionAffinity": "None",
"type": "ClusterIP"
},
"status": {
"loadBalancer": {}
}
},
{
"apiVersion": "apps.openshift.io/v1",
"kind": "DeploymentConfig",
"metadata": {
"annotations": {
"openshift.io/generated-by": "OpenShiftNewApp"
},
"creationTimestamp": "2018-09-29T07:36:33Z",
"generation": 1,
"labels": {
"app": "rkproject"
},
"name": "rkproject",
"namespace": "rkproject",
"resourceVersion": "881022",
"selfLink": "/apis/apps.openshift.io/v1/namespaces/rkproject/deploymentconfigs/rkproject",
"uid": "63961ad6-c3ba-11e8-b5a7-005056a57c87"
},
"spec": {
"replicas": 1,
"revisionHistoryLimit": 10,
"selector": {
"app": "rkproject",
"deploymentconfig": "rkproject"
},
"strategy": {
"activeDeadlineSeconds": 21600,
"resources": {},
"rollingParams": {
"intervalSeconds": 1,
"maxSurge": "25%",
"maxUnavailable": "25%",
"timeoutSeconds": 600,
"updatePeriodSeconds": 1
},
"type": "Rolling"
},
"template": {
"metadata": {
"annotations": {
"openshift.io/generated-by": "OpenShiftNewApp"
},
"creationTimestamp": null,
"labels": {
"app": "rkproject",
"deploymentconfig": "rkproject"
}
},
"spec": {
"containers": [
{
"image": "quay.io/raj4linux/rkproject:v1.0",
"imagePullPolicy": "IfNotPresent",
"name": "rkproject",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File"
}
],
"dnsPolicy": "ClusterFirst",
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"terminationGracePeriodSeconds": 30
}
},
"test": false,
"triggers": [
{
"type": "ConfigChange"
}
]
},
"status": {
"availableReplicas": 1,
"conditions": [
{
"lastTransitionTime": "2018-09-29T07:36:39Z",
"lastUpdateTime": "2018-09-29T07:36:39Z",
"message": "Deployment config has minimum availability.",
"status": "True",
"type": "Available"
},
{
"lastTransitionTime": "2018-09-29T07:36:40Z",
"lastUpdateTime": "2018-09-29T07:36:40Z",
"message": "replication controller \"rkproject-1\" successfully rolled out",
"reason": "NewReplicationControllerAvailable",
"status": "True",
"type": "Progressing"
}
],
"details": {
"causes": [
{
"type": "ConfigChange"
}
],
"message": "config change"
},
"latestVersion": 1,
"observedGeneration": 1,
"readyReplicas": 1,
"replicas": 1,
"unavailableReplicas": 0,
"updatedReplicas": 1
}
},
{
"apiVersion": "image.openshift.io/v1",
"kind": "ImageStream",
"metadata": {
"annotations": {
"openshift.io/image.dockerRepositoryCheck": "2018-09-26T15:15:01Z"
},
"creationTimestamp": "2018-09-26T15:15:01Z",
"generation": 2,
"name": "nodejs",
"namespace": "rkproject",
"resourceVersion": "396065",
"selfLink": "/apis/image.openshift.io/v1/namespaces/rkproject/imagestreams/nodejs",
"uid": "f05a2366-c19e-11e8-8cc1-005056a57c87"
},
"spec": {
"lookupPolicy": {
"local": false
},
"tags": [
{
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "docker-registry.default.svc:5000/openshift/nodejs:latest"
},
"generation": 2,
"importPolicy": {},
"name": "latest",
"referencePolicy": {
"type": "Source"
}
}
]
},
"status": {
"dockerImageRepository": "docker-registry.default.svc:5000/rkproject/nodejs",
"tags": [
{
"conditions": [
{
"generation": 2,
"lastTransitionTime": "2018-09-26T15:15:01Z",
"message": "Internal error occurred: Get https://docker-registry.default.svc:5000/v2/: x509: certificate signed by unknown authority",
"reason": "InternalError",
"status": "False",
"type": "ImportSuccess"
}
],
"items": null,
"tag": "latest"
}
]
}
},
{
"apiVersion": "route.openshift.io/v1",
"kind": "Route",
"metadata": {
"creationTimestamp": "2018-10-01T06:58:34Z",
"labels": {
"app": "rkproject"
},
"name": "rkproject",
"namespace": "rkproject",
"resourceVersion": "1126130",
"selfLink": "/apis/route.openshift.io/v1/namespaces/rkproject/routes/rkproject",
"uid": "69b60dd2-c547-11e8-896f-005056a57c87"
},
"spec": {
"host": "rkproject.cloudapps.rkdomain.test",
"path": "/",
"port": {
"targetPort": "8080-tcp"
},
"to": {
"kind": "Service",
"name": "rkproject",
"weight": 100
},
"wildcardPolicy": "None"
},
"status": {
"ingress": [
{
"conditions": [
{
"lastTransitionTime": "2018-10-01T07:04:24Z",
"status": "True",
"type": "Admitted"
}
],
"host": "rkproject.cloudapps.rkdomain.test",
"routerName": "router",
"wildcardPolicy": "None"
}
]
}
}
],
"kind": "List",
"metadata": {
"resourceVersion": "",
"selfLink": ""
}
}
My docker registry is running on infra node, and it looks like this:
Liveness: http-get _https://:5000_/healthz
Is it expected that no hostname appears here?
[root@infranode ~]# oc describe pod docker-registry-1-2grxs
Name: docker-registry-1-2grxs
Namespace: default
Node: infranode.rkdomain.test/192.168.151.8
Start Time: Sat, 29 Sep 2018 02:26:16 -0400
Labels: deployment=docker-registry-1
deploymentconfig=docker-registry
docker-registry=default
Annotations: openshift.io/deployment-config.latest-version=1
openshift.io/deployment-config.name=docker-registry
openshift.io/deployment.name=docker-registry-1
openshift.io/scc=restricted
Status: Running
IP: 10.129.0.122
Controlled By: ReplicationController/docker-registry-1
Containers:
registry:
Container ID: docker://bf33f2b7283f79352a8f7fceb22984016c7e77ed5b64fb3d4ad16de7f9196629
Image: registry.access.redhat.com/openshift3/ose-docker-registry:v3.10.45
Image ID: docker-pullable://registry.access.redhat.com/openshift3/ose-docker-registry@sha256:d8b8c02d6c6d338647f76d9a8093ad6250749b6336759681dd22dc63082b33c3
Port: 5000/TCP
Host Port: 0/TCP
State: Running
Started: Mon, 01 Oct 2018 07:08:40 -0400
Last State: Terminated
Reason: Error
Exit Code: 255
Started: Sat, 29 Sep 2018 02:26:18 -0400
Finished: Mon, 01 Oct 2018 07:07:44 -0400
Ready: True
Restart Count: 1
Requests:
cpu: 100m
memory: 256Mi
Liveness: http-get **https://:5000/healthz** delay=10s timeout=5s period=10s #success=1 #failure=3
Readiness: **http-get https://:5000/healthz** delay=0s timeout=5s period=10s #success=1 #failure=3
Environment:
REGISTRY_HTTP_ADDR: :5000
REGISTRY_HTTP_NET: tcp
REGISTRY_HTTP_SECRET: BYefHhNvBPg1W6CcxwFDrSSkOna6I6q4qJTbx0hTjr0=
REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ENFORCEQUOTA: false
REGISTRY_OPENSHIFT_SERVER_ADDR: docker-registry.default.svc:5000
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
Mounts:
/etc/secrets from registry-certificates (rw)
/registry from registry-storage (rw)
/var/run/secrets/kubernetes.io/serviceaccount from registry-token-vjj5b (ro)
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
Volumes:
registry-storage:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
registry-certificates:
Type: Secret (a volume populated by a Secret)
SecretName: registry-certificates
Optional: false
registry-token-vjj5b:
Type: Secret (a volume populated by a Secret)
SecretName: registry-token-vjj5b
Optional: false
QoS Class: Burstable
Node-Selectors: node-role.kubernetes.io/infra=true
Tolerations: node.kubernetes.io/memory-pressure:NoSchedule
Events: <none>
[root@infranode ~]#
I managed to resolve it myself. I am composing the solution for those who are going to face the same problem in the future (like me):
My whole setup is behind the PROXY. To access internet I rely on this PROXY.
nslookup and curl resolve all host names using _/etc/resolv.conf_ and use the proxy settings from the _/etc/environment_ file to communicate with the target server.
But docker uses the proxy from _/etc/systemd/system/docker.service.d/http-proxy.conf_, and to my surprise docker used the proxy for DNS resolution as well. So docker was not able to resolve my _docker-registry_ hostname that exists only inside the cluster, since it could not be resolved through the proxy. That is why I was always getting this error for docker push docker-registry.default.svc:5000/rkproject/rknodejs:v1.0:
Your requested host \"docker-registry.default.svc\" could not be resolved by DNS.
Here is the link from docker: HTTP/HTTPS proxy
Here is what one's _/etc/systemd/system/docker.service.d/http-proxy.conf_ content should look like:
[root@master ~]# cat /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://<your_proxy_server_ip_or_hostname>:<your_proxy_server_port>" "NO_PROXY=docker-registry.default.svc"
[root@master ~]#
And _/etc/docker/daemon.json_ looks like this (that is, no environment variables are set):
[root@master ~]# cat /etc/docker/daemon.json
{}
[root@master ~]#
Above approach has worked for me to access registry servers from internet and from internal network.
Hope the same work for you.
I had also similar kind of issue.
Thanks @raj4linux. Works for me too.
Just follow that, the only effective approach
As root, make /etc/resolv.conf writable:
chattr -ai /etc/resolv.conf
Edit it
vi /etc/resolv.conf
Add those on search entry
search cluster.local default.svc.cluster.local svc.cluster.local cluster.local default.svc
systemctl restart NetworkManager
Most helpful comment
I managed to resolve it myself. I am composing the solution for those who are going to face the same problem in the future (like me):
My whole setup is behind the PROXY. To access internet I rely on this PROXY.
nslookup and curl resolve all host names using _/etc/resolv.conf_ and use the proxy settings from the _/etc/environment_ file to communicate with the target server.
But docker uses the proxy from _/etc/systemd/system/docker.service.d/http-proxy.conf_, and to my surprise docker used the proxy for DNS resolution as well. So docker was not able to resolve my _docker-registry_ hostname that exists only inside the cluster, since it could not be resolved through the proxy. That is why I was always getting this error for docker push docker-registry.default.svc:5000/rkproject/rknodejs:v1.0:
Your requested host \"docker-registry.default.svc\" could not be resolved by DNS.
Here is the link from docker: HTTP/HTTPS proxy
Here is what one's _/etc/systemd/system/docker.service.d/http-proxy.conf_ content should look like:
And _/etc/docker/daemon.json_ looks like this (that is, no environment variables are set):
Above approach has worked for me to access registry servers from internet and from internal network.
Hope the same work for you.