**Do you want to request a feature or report a bug?**
**What is the current behavior?**
**If the current behavior is a bug, please provide the steps to reproduce.**
**What is the expected behavior?**
**Please mention your node.js, mongoose and MongoDB version.**
Hi,

I keep getting the following error; any help would be greatly appreciated. It does not happen when I run the app outside of a container.

- node v8.9.4
- mongoose@5.1.0
- MongoDB 3.6.4
Code to connect:
```js
const mongoose = require('mongoose');
// `config`, `getURL`, and `mediator` are defined elsewhere in the app.

const options = {
  native_parser: true,
  poolSize: 5,
  user: config.user,
  pass: config.pass,
  promiseLibrary: global.Promise,
  autoIndex: false,        // Don't build indexes
  reconnectTries: 30,      // Retry up to 30 times
  reconnectInterval: 500,  // Reconnect every 500ms
  bufferMaxEntries: 0,
  connectWithNoPrimary: true
};

mongoose.connect(getURL(config), options);

mongoose.connection.on('error', (err) => {
  mediator.emit('db.error', err);
});

mongoose.connection.on('connected', () => {
  mediator.emit('db.ready', mongoose);
});
```
I am running Node in a Docker container. Here is the error:
```
(node:16) UnhandledPromiseRejectionWarning: MongoNetworkError: failed to connect to server [worker1:27017] on first connect [MongoNetworkError: connection 4 to worker1:27017 timed out]
    at Pool.<anonymous> (/home/nupp/app/node_modules/mongoose/node_modules/mongodb-core/lib/topologies/server.js:505:11)
    at Pool.emit (events.js:182:13)
    at Connection.<anonymous> (/home/nupp/app/node_modules/mongoose/node_modules/mongodb-core/lib/connection/pool.js:329:12)
    at Object.onceWrapper (events.js:273:13)
    at Connection.emit (events.js:182:13)
    at Socket.<anonymous> (/home/nupp/app/node_modules/mongoose/node_modules/mongodb-core/lib/connection/connection.js:256:10)
    at Object.onceWrapper (events.js:273:13)
    at Socket.emit (events.js:182:13)
    at Socket._onTimeout (net.js:447:8)
    at ontimeout (timers.js:427:11)
    at tryOnTimeout (timers.js:289:5)
    at listOnTimeout (timers.js:252:5)
    at Timer.processTimers (timers.js:212:10)
(node:16) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). (rejection id: 1)
(node:16) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
```
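(The `UnhandledPromiseRejectionWarning` part, at least, can be handled separately from the network problem: in mongoose 5, `mongoose.connect()` returns a promise, and an initial connection failure rejects that promise rather than firing the connection `'error'` event. A minimal sketch against the snippet above:)

```js
mongoose.connect(getURL(config), options).catch((err) => {
  // Initial connection failures reject the promise returned by connect();
  // the 'error' event covers errors after the connection is established.
  mediator.emit('db.error', err);
});
```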
@RayKahn can you include your connection string with any usernames and passwords redacted? Can you also verify that there is a replica host up and running at the port specified in your connection string?
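(For example, a quick TCP reachability check that could be run from inside the app container; a sketch, with the member hostnames taken from the rs.status() output below:)

```js
// Probe each replica set member on 27017 to confirm it is reachable
// from wherever the app actually runs.
const net = require('net');

['manager1', 'worker1', 'worker2'].forEach((host) => {
  const socket = net.connect({ host, port: 27017 });
  socket.setTimeout(3000);
  socket.on('connect', () => { console.log(`${host}:27017 reachable`); socket.end(); });
  socket.on('timeout', () => { console.log(`${host}:27017 timed out`); socket.destroy(); });
  socket.on('error', (err) => { console.log(`${host}:27017 error: ${err.code}`); });
});
```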
Here's the rs.status() output:
```
rs1:PRIMARY> rs.status()
{
    "set" : "rs1",
    "date" : ISODate("2018-05-12T16:29:24.865Z"),
    "myState" : 1,
    "term" : NumberLong(1),
    "heartbeatIntervalMillis" : NumberLong(2000),
    "optimes" : {
        "lastCommittedOpTime" : {
            "ts" : Timestamp(1526142556, 1),
            "t" : NumberLong(1)
        },
        "readConcernMajorityOpTime" : {
            "ts" : Timestamp(1526142556, 1),
            "t" : NumberLong(1)
        },
        "appliedOpTime" : {
            "ts" : Timestamp(1526142556, 1),
            "t" : NumberLong(1)
        },
        "durableOpTime" : {
            "ts" : Timestamp(1526142556, 1),
            "t" : NumberLong(1)
        }
    },
    "members" : [
        {
            "_id" : 0,
            "name" : "manager1:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 298,
            "optime" : {
                "ts" : Timestamp(1526142556, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2018-05-12T16:29:16Z"),
            "electionTime" : Timestamp(1526142275, 2),
            "electionDate" : ISODate("2018-05-12T16:24:35Z"),
            "configVersion" : 3,
            "self" : true
        },
        {
            "_id" : 1,
            "name" : "worker1:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 262,
            "optime" : {
                "ts" : Timestamp(1526142556, 1),
                "t" : NumberLong(1)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1526142556, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2018-05-12T16:29:16Z"),
            "optimeDurableDate" : ISODate("2018-05-12T16:29:16Z"),
            "lastHeartbeat" : ISODate("2018-05-12T16:29:23.979Z"),
            "lastHeartbeatRecv" : ISODate("2018-05-12T16:29:22.993Z"),
            "pingMs" : NumberLong(0),
            "syncingTo" : "worker2:27017",
            "configVersion" : 3
        },
        {
            "_id" : 2,
            "name" : "worker2:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 257,
            "optime" : {
                "ts" : Timestamp(1526142556, 1),
                "t" : NumberLong(1)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1526142556, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2018-05-12T16:29:16Z"),
            "optimeDurableDate" : ISODate("2018-05-12T16:29:16Z"),
            "lastHeartbeat" : ISODate("2018-05-12T16:29:23.979Z"),
            "lastHeartbeatRecv" : ISODate("2018-05-12T16:29:24.564Z"),
            "pingMs" : NumberLong(0),
            "syncingTo" : "manager1:27017",
            "configVersion" : 3
        }
    ],
    "ok" : 1,
    "operationTime" : Timestamp(1526142556, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1526142556, 1),
        "signature" : {
            "hash" : BinData(0,"u3s15/PhyIDWC8f3gMF4VP6VNz0="),
            "keyId" : NumberLong("6554731164463005697")
        }
    }
}
```
Connection string:
```
mongodb://xxxx:xxxx@192.168.99.100:27017,192.168.99.101:27017,192.168.99.102:27017/medmart_db?replicaSet=rs1&authSource=admin
```
I have also noticed that after running the application and getting the error, the primary is no longer the primary:
```
docker exec -it mongoNode1 bash -c 'mongo -u $MONGO_USER_ADMIN -p $MONGO_PASS_ADMIN --authenticationDatabase "admin"'
MongoDB shell version v3.6.4
connecting to: mongodb://127.0.0.1:27017
MongoDB server version: 3.6.4
rs1:PRIMARY>
rs1:PRIMARY> rs.status()
{
    "operationTime" : Timestamp(1526143426, 1),
    "ok" : 0,
    "errmsg" : "not authorized on admin to execute command { replSetGetStatus: 1.0, $clusterTime: { clusterTime: Timestamp(1526143416, 1), signature: { hash: BinData(0, 18E90C697611E4CA7F15B99829C3F5FA84989B5A), keyId: 6554731164463005697 } }, $db: \"admin\" }",
    "code" : 13,
    "codeName" : "Unauthorized",
    "$clusterTime" : {
        "clusterTime" : Timestamp(1526143426, 1),
        "signature" : {
            "hash" : BinData(0,"GW6Hj6qMxgSVhUPDIhsxDfB62qU="),
            "keyId" : NumberLong("6554731164463005697")
        }
    }
}
rs1:PRIMARY>
```
@RayKahn you might check out this comment on #5819 and see if using the hostnames from your replicaSet instead of the IPs in your connection string helps.
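(For illustration, that would mean using the member names from rs.status() in place of the IPs, along these lines, with the credentials still redacted:)

```
mongodb://xxxx:xxxx@manager1:27017,worker1:27017,worker2:27017/medmart_db?replicaSet=rs1&authSource=admin
```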
I already changed to hostnames, to no avail; it made no difference.
@lineus Any updates? Is there anything else that could resolve my issue? I am stuck and can't move forward.
@RayKahn
> I already changed to hostnames, to no avail; it made no difference.
Can you elaborate on what you changed the connection string to? Did you have worker1, manager1, and worker2 in your /etc/hosts file?
Also, are there any connection errors in the server logs for worker1, or any other logs that would indicate the reason for the primary switch?
@lineus
I changed the connection URL from the IPs to manager1, worker1, and worker2. The error did not go away. About 30 minutes ago I decided to add the following to each docker-machine:

```
docker-machine ssh manager1   # and worker1 and worker2
sudo vi /etc/hosts

# add the following to the hosts file on each VirtualBox VM
192.168.99.100 manager1
192.168.99.101 worker1
192.168.99.102 worker2
```
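(As an aside, if it is the app container that can't resolve those names, the same mappings can be injected when the container starts instead of hand-editing /etc/hosts; a sketch, assuming the app is launched with plain `docker run`, where `my-app-image` is a placeholder:)

```
docker run \
  --add-host manager1:192.168.99.100 \
  --add-host worker1:192.168.99.101 \
  --add-host worker2:192.168.99.102 \
  my-app-image
```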
That seems to get rid of the original error. But now I am getting the following error, which is unrelated to the original one. Any idea why I am getting it?
```
Error: ERROR::providers-service::model::ProviderModel::getProviderById: MongoError: no connection available for operation and number of stored operation > 0
    at cb (/home/nupp/app/src/model/ProviderModel.js:181:13)
    at /home/nupp/app/node_modules/mongoose/lib/model.js:4161:16
    at Immediate.Query.base.findOne.call (/home/nupp/app/node_modules/mongoose/lib/query.js:1529:14)
    at Immediate.<anonymous> (/home/nupp/app/node_modules/mquery/lib/utils.js:119:16)
    at runCallback (timers.js:696:18)
    at tryOnImmediate (timers.js:667:5)
    at processImmediate (timers.js:649:5)
```
Here's my connection code:
```js
mediator.once('boot.ready', () => {
  const options = {
    native_parser: true,
    poolSize: 5,
    user: config.user,
    pass: config.pass,
    promiseLibrary: global.Promise,
    autoIndex: false,        // Don't build indexes
    reconnectTries: 30,      // Retry up to 30 times
    reconnectInterval: 500,  // Reconnect every 500ms
    bufferMaxEntries: 0,
    connectWithNoPrimary: true,
    // readPreference: 'ReadPreference.SECONDARY_PREFERRED',
  };

  mongoose.connect(getURL(config), options);

  mongoose.connection.on('error', (err) => {
    mediator.emit('db.error', err);
  });

  mongoose.connection.on('connected', () => {
    mediator.emit('db.ready', mongoose);
  });
});
```
The Mongoose object printout is:

```
Mongoose {
  connections:
   [ NativeConnection {
       base: [Circular],
       collections: {},
       models: {},
       config: [Object],
       replica: false,
       options: null,
       otherDbs: [],
       relatedDbs: {},
       states: [Object],
       _readyState: 1,
       _closeCalled: false,
       _hasOpened: true,
       _listening: false,
       _connectionOptions: [Object],
       client: [MongoClient],
       name: 'medmart_db',
       '$initialConnection': [Promise],
       _events: [Object],
       _eventsCount: 2,
       db: [Db] } ],
  models: {},
  modelSchemas: {},
  options: { pluralization: true },
  _pluralize: [Function: pluralize],
  plugins:
   [ [ [Function], [Object] ],
     [ [Function], [Object] ],
     [ [Function], [Object] ],
     [ [Function], [Object] ] ] }
```
@lineus I am using mongoose 5.1.0
@RayKahn the `bufferMaxEntries: 0` seems off to me. If I'm understanding the mongodb driver docs for connection settings here correctly, that will tell the driver to give up right away.
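A minimal sketch of what that means in practice (the `Provider` model and the single-host URL here are assumed for illustration): with `bufferMaxEntries: 0`, an operation issued while the driver has no live connection is rejected immediately instead of being buffered until a reconnect.

```js
const mongoose = require('mongoose');

mongoose.connect('mongodb://worker1:27017/medmart_db', {
  bufferMaxEntries: 0, // fail fast: don't queue operations while disconnected
}).catch((err) => console.error('initial connect failed:', err.message));

// Illustrative model; schema details don't matter for this sketch.
const Provider = mongoose.model('Provider',
  new mongoose.Schema({}, { strict: false }));

// If the driver has no connection available, this errors right away with
// something like "no connection available for operation..." instead of
// sitting in a buffer.
Provider.findOne({}, (err, doc) => {
  if (err) return console.error(err.message);
  console.log(doc);
});
```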
@lineus When I comment out that line, I am back to square one, i.e. the original error is back:
```
(node:16) UnhandledPromiseRejectionWarning: MongoNetworkError: failed to connect to server [worker1:27017] on first connect [MongoNetworkError: connect ECONNREFUSED 104.239.207.44:27017]
    at Pool.<anonymous> (/home/nupp/app/node_modules/mongoose/node_modules/mongodb-core/lib/topologies/server.js:505:11)
    at Pool.emit (events.js:182:13)
    at Connection.<anonymous> (/home/nupp/app/node_modules/mongoose/node_modules/mongodb-core/lib/connection/pool.js:329:12)
    at Object.onceWrapper (events.js:273:13)
    at Connection.emit (events.js:182:13)
    at Socket.<anonymous> (/home/nupp/app/node_modules/mongoose/node_modules/mongodb-core/lib/connection/connection.js:245:50)
    at Object.onceWrapper (events.js:273:13)
    at Socket.emit (events.js:182:13)
    at emitErrorNT (internal/streams/destroy.js:82:8)
    at emitErrorAndCloseNT (internal/streams/destroy.js:50:3)
    at process._tickCallback (internal/process/next_tick.js:63:19)
(node:16) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). (rejection id: 1)
(node:16) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
```
The following config, however, doesn't throw any errors, but MongoDB never responds:
```js
const options = {
  native_parser: true,
  poolSize: 5,
  user: config.user,
  pass: config.pass,
  promiseLibrary: global.Promise,
  autoIndex: false,        // Don't build indexes
  reconnectTries: 30,      // Retry up to 30 times
  reconnectInterval: 500,  // Reconnect every 500ms
  // bufferMaxEntries: 0,
  connectWithNoPrimary: true,
  // readPreference: 'ReadPreference.SECONDARY_PREFERRED',
};
```
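(That silent hang would be consistent with buffering: with `bufferMaxEntries` left at the driver default, operations are queued while there is no connection, so a query against an unreachable topology just waits instead of erroring. A sketch of the effect, assuming nothing is listening on the given port:)

```js
const mongoose = require('mongoose');

// Nothing is listening on 27018, so no connection is ever established.
mongoose.connect('mongodb://localhost:27018/test')
  .catch((err) => console.error('connect failed:', err.message));

const Thing = mongoose.model('Thing', new mongoose.Schema({ name: String }));

// With buffering at its defaults, this callback does not fire promptly;
// the operation sits in the buffer waiting for a connection.
Thing.findOne({}, (err, doc) => console.log('callback fired:', err, doc));
```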
The bizarre thing is that if I run the code, including `bufferMaxEntries: 0`, in non-container mode (i.e. `node myApp`), there's no error and I get the result back from the db:
```js
const options = {
  native_parser: true,
  poolSize: 5,
  user: config.user,
  pass: config.pass,
  promiseLibrary: global.Promise,
  autoIndex: false,        // Don't build indexes
  reconnectTries: 30,      // Retry up to 30 times
  reconnectInterval: 500,  // Reconnect every 500ms
  bufferMaxEntries: 0,
  connectWithNoPrimary: true,
  readPreference: 'ReadPreference.SECONDARY_PREFERRED',
};
```
Result from `http://localhost:3000/provider/2711544172`:
```json
{
  "about": {
    "aboutimages": []
  },
  "factoids": [],
  "provider_id": 2711544172,
  "providername": "DD Dental",
  "type_id": 1,
  "city": "Newport Beach",
  "state": "California",
  "country": "USA",
  "showrating": false
}
```
@lineus I noticed that when I run the app in non-container mode it prints `'$initialConnection': [Object]`, vs `'$initialConnection': [Promise]` in container mode. Is this significant?
I was able to resolve the issue finally. The problem was the options I was passing to mongoose.connect. I don't set any options anymore, and the problem went away.
```js
mediator.once('boot.ready', () => {
  const options = {};

  mongoose.connect(getURL(config), options);

  mongoose.connection.on('error', (err) => {
    mediator.emit('db.error', err);
  });

  mongoose.connection.on('connected', () => {
    mediator.emit('db.ready', mongoose);
  });
});
```
Hope this will help someone else.
I was able to solve this issue. Open the hosts file and add the following entries:

```
sudo nano /etc/hosts

##
# Host Database
#
# localhost is used to configure the loopback interface
# when the system is booting. Do not change this entry.
##
127.0.0.1       localhost
255.255.255.255 broadcasthost
::1             localhost
```