Replica set primary node:
rs.conf()
customer-replica-set:PRIMARY> rs.conf()
{
"_id" : "customer-replica-set",
"version" : 2,
"protocolVersion" : NumberLong(1),
"writeConcernMajorityJournalDefault" : true,
"members" : [
{
"_id" : 0,
"host" : "customer-replica-set-0.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"horizons" : {
"customer-prod-db" : "ec2-18-216-32-24.us-east-2.compute.amazonaws.com:31671"
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "customer-replica-set-1.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"horizons" : {
"customer-prod-db" : "ec2-3-17-154-122.us-east-2.compute.amazonaws.com:32595"
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "customer-replica-set-2.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"horizons" : {
"customer-prod-db" : "ec2-18-218-2-179.us-east-2.compute.amazonaws.com:30432"
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"catchUpTimeoutMillis" : -1,
"catchUpTakeoverDelayMillis" : 30000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("5f388cc3900792e0998729e1")
}
}
rs.status()
rs.status()
{
"set" : "customer-replica-set",
"date" : ISODate("2020-08-16T13:51:01.340Z"),
"myState" : 1,
"term" : NumberLong(3),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"lastCommittedWallTime" : ISODate("2020-08-16T13:50:59.046Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"readConcernMajorityWallTime" : ISODate("2020-08-16T13:50:59.046Z"),
"appliedOpTime" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"durableOpTime" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"lastAppliedWallTime" : ISODate("2020-08-16T13:50:59.046Z"),
"lastDurableWallTime" : ISODate("2020-08-16T13:50:59.046Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1597585840, 8),
"lastStableCheckpointTimestamp" : Timestamp(1597585840, 8),
"electionCandidateMetrics" : {
"lastElectionReason" : "stepUpRequestSkipDryRun",
"lastElectionDate" : ISODate("2020-08-16T01:45:37.715Z"),
"termAtElection" : NumberLong(3),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(1597542328, 1),
"t" : NumberLong(2)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1597542328, 1),
"t" : NumberLong(2)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 1,
"electionTimeoutMillis" : NumberLong(10000),
"priorPrimaryMemberId" : 1,
"numCatchUpOps" : NumberLong(27017),
"newTermStartDate" : ISODate("2020-08-16T01:45:37.761Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2020-08-16T01:45:38.278Z")
},
"members" : [
{
"_id" : 0,
"name" : "customer-replica-set-0.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"ip" : "192.168.22.100",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 43532,
"optime" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"optimeDate" : ISODate("2020-08-16T13:50:59Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1597542337, 1),
"electionDate" : ISODate("2020-08-16T01:45:37Z"),
"configVersion" : 2,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "customer-replica-set-1.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"ip" : "192.168.80.99",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 43519,
"optime" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"optimeDurable" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"optimeDate" : ISODate("2020-08-16T13:50:59Z"),
"optimeDurableDate" : ISODate("2020-08-16T13:50:59Z"),
"lastHeartbeat" : ISODate("2020-08-16T13:51:00.605Z"),
"lastHeartbeatRecv" : ISODate("2020-08-16T13:51:00.876Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "customer-replica-set-2.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"syncSourceHost" : "customer-replica-set-2.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"syncSourceId" : 2,
"infoMessage" : "",
"configVersion" : 2
},
{
"_id" : 2,
"name" : "customer-replica-set-2.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"ip" : "192.168.33.103",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 43525,
"optime" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"optimeDurable" : {
"ts" : Timestamp(1597585859, 1),
"t" : NumberLong(3)
},
"optimeDate" : ISODate("2020-08-16T13:50:59Z"),
"optimeDurableDate" : ISODate("2020-08-16T13:50:59Z"),
"lastHeartbeat" : ISODate("2020-08-16T13:51:00.659Z"),
"lastHeartbeatRecv" : ISODate("2020-08-16T13:50:59.778Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "customer-replica-set-0.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"syncSourceHost" : "customer-replica-set-0.customer-replica-set-svc.mongodb.svc.cluster.local:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 2
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1597585859, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1597585859, 1)
}
Connecting to the single host is the same issue:
mongo --host customer-replica-set/ec2-18-216-32-24.us-east-2.compute.amazonaws.com:31671 --tls --tlsAllowInvalidCertificates --verbose
MongoDB shell version v4.2.7
connecting to: mongodb://ec2-18-216-32-24.us-east-2.compute.amazonaws.com:31671/?compressors=disabled&gssapiServiceName=mongodb&replicaSet=customer-replica-set
2020-08-16T09:13:33.132-0500 D1 NETWORK [js] Starting up task executor for monitoring replica sets in response to request to monitor set: customer-replica-set/ec2-18-216-32-24.us-east-2.compute.amazonaws.com:31671
2020-08-16T09:13:33.133-0500 I NETWORK [js] Starting new replica set monitor for customer-replica-set/ec2-18-216-32-24.us-east-2.compute.amazonaws.com:31671
2020-08-16T09:13:33.135-0500 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to ec2-18-216-32-24.us-east-2.compute.amazonaws.com:31671
2020-08-16T09:13:33.218-0500 W NETWORK [ReplicaSetMonitor-TaskExecutor] Unable to reach primary for set customer-replica-set
2020-08-16T09:13:33.219-0500 I NETWORK [ReplicaSetMonitor-TaskExecutor] Cannot reach any nodes for set customer-replica-set. Please check network connectivity and the status of the set. This has happened for 1 checks in a row.
telnet ec2-18-216-32-24.us-east-2.compute.amazonaws.com 31671
Trying 18.216.32.24...
telnet: connect to address 18.216.32.24: Connection refused
telnet: Unable to connect to remote host
Deployed an nginx pod to make sure that it is not SG related:
kubectl get services -w
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
customer-replica-set-0 NodePort 10.100.66.6 27017:31671/TCP 12h
customer-replica-set-1 NodePort 10.100.185.237 27017:32595/TCP 12h
customer-replica-set-2 NodePort 10.100.37.222 27017:30432/TCP 12h
customer-replica-set-svc ClusterIP None 27017/TCP 12h
mynginxsvc NodePort 10.100.121.119 80:30180/TCP 3m28s
operator-webhook ClusterIP 10.100.40.145 443/TCP 13h
ops-manager-db-svc ClusterIP None 27017/TCP 13h
ops-manager-svc ClusterIP None 8080/TCP 13h
ops-manager-svc-ext LoadBalancer 10.100.146.190 a326d1d4fefc844e49d9da6d8ce1f229-105300929.us-east-2.elb.amazonaws.com 8080:30187/TCP 13h
telnet ec2-3-17-154-122.us-east-2.compute.amazonaws.com 30180
Trying 3.17.154.122...
Connected to ec2-3-17-154-122.us-east-2.compute.amazonaws.com.
Escape character is '^]'.
@Pavel_Duchovny It seems this is something specific to the MongoDB Operator and the deployment. I would suggest that you run through the same deployment on an EKS cluster and let me know what you find, since accessing a pod through a NodePort should be straightforward (as the nginx test above shows).