diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 12838bebba8..969227122a3 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1,6 +1,6 @@ stepback: true command_type: system -exec_timeout_secs: 1800 +exec_timeout_secs: 900 timeout: - command: shell.exec params: @@ -1106,13 +1106,6 @@ buildvariants: NODE_LTS_NAME: boron CLIENT_ENCRYPTION: true tasks: *ref_3 - - name: ubuntu-16.04-argon - display_name: Ubuntu 16.04 Node Argon - run_on: ubuntu1604-test - expansions: - NODE_LTS_NAME: argon - CLIENT_ENCRYPTION: true - tasks: *ref_3 - name: ubuntu1604-arm64-small-dubnium display_name: Ubuntu 16.04 (ARM64) Node Dubnium run_on: ubuntu1604-arm64-small diff --git a/.evergreen/config.yml.in b/.evergreen/config.yml.in index 64681b5adb3..e43e3eea6f9 100644 --- a/.evergreen/config.yml.in +++ b/.evergreen/config.yml.in @@ -11,7 +11,7 @@ command_type: system # Protect ourself against rogue test case, or curl gone wild, that runs forever # Good rule of thumb: the averageish length a task takes, times 5 # That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 1800 # 6 minutes is the longest we'll ever run +exec_timeout_secs: 900 # What to do when evergreen hits the timeout (`post:` tasks are run automatically) timeout: diff --git a/.evergreen/generate_evergreen_tasks.js b/.evergreen/generate_evergreen_tasks.js index b3b20012e79..ecff0659dd5 100644 --- a/.evergreen/generate_evergreen_tasks.js +++ b/.evergreen/generate_evergreen_tasks.js @@ -81,7 +81,8 @@ const OPERATING_SYSTEMS = [ display_name: 'Ubuntu 16.04', run_on: 'ubuntu1604-test', mongoVersion: '>=3.2', - clientEncryption: true + clientEncryption: true, + nodeVersions: ['dubnium', 'carbon', 'boron'] }, { name: 'ubuntu1604-arm64-small', diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index e22fab4c966..d031023101a 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -27,7 +27,7 @@ if [[ -z "${CLIENT_ENCRYPTION}" ]]; then unset AWS_ACCESS_KEY_ID; unset AWS_SECRET_ACCESS_KEY; else - npm install mongodb-client-encryption@1.0.0 + npm install mongodb-client-encryption fi MONGODB_UNIFIED_TOPOLOGY=${UNIFIED} MONGODB_URI=${MONGODB_URI} npm test diff --git a/HISTORY.md b/HISTORY.md index 62141795c50..127f8de5618 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -2,6 +2,42 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
+ +# [3.5.0](https://github.com/mongodb/node-mongodb-native/compare/v3.4.1...v3.5.0) (2020-01-14) + + +### Bug Fixes + +* copy `ssl` option to pool connection options ([563ced6](https://github.com/mongodb/node-mongodb-native/commit/563ced6)) +* destroy connections marked as closed on checkIn / checkOut ([2bd17a6](https://github.com/mongodb/node-mongodb-native/commit/2bd17a6)) +* ensure sync errors are thrown, and don't callback twice ([cca5b49](https://github.com/mongodb/node-mongodb-native/commit/cca5b49)) +* ignore connection errors during pool destruction ([b8805dc](https://github.com/mongodb/node-mongodb-native/commit/b8805dc)) +* not all message payloads are arrays of Buffer ([e4df5f4](https://github.com/mongodb/node-mongodb-native/commit/e4df5f4)) +* recover on network error during initial connect ([a13dc68](https://github.com/mongodb/node-mongodb-native/commit/a13dc68)) +* remove servers with me mismatch in `updateRsFromPrimary` ([95a772e](https://github.com/mongodb/node-mongodb-native/commit/95a772e)) +* report the correct platform in client metadata ([35d0274](https://github.com/mongodb/node-mongodb-native/commit/35d0274)) +* reschedule monitoring before emitting heartbeat events ([7fcbeb5](https://github.com/mongodb/node-mongodb-native/commit/7fcbeb5)) +* socket timeout for handshake should be `connectTimeoutMS` ([c83af9a](https://github.com/mongodb/node-mongodb-native/commit/c83af9a)) +* timed out streams should be destroyed on `timeout` event ([5319ff9](https://github.com/mongodb/node-mongodb-native/commit/5319ff9)) +* use remote address for stream identifier ([f13c20b](https://github.com/mongodb/node-mongodb-native/commit/f13c20b)) +* used weighted RTT calculation for server selection ([d446be5](https://github.com/mongodb/node-mongodb-native/commit/d446be5)) +* **execute-operation:** don't swallow synchronous errors ([0a2d4e9](https://github.com/mongodb/node-mongodb-native/commit/0a2d4e9)) +* **gridfs:** make a copy of chunk before writing to server ([b4ec5b8](https://github.com/mongodb/node-mongodb-native/commit/b4ec5b8)) + + +### Features + +* add a `withConnection` helper to the connection pool ([d59dced](https://github.com/mongodb/node-mongodb-native/commit/d59dced)) +* include `connectionId` for APM with new CMAP connection pool ([9bd360c](https://github.com/mongodb/node-mongodb-native/commit/9bd360c)) +* integrate CMAP connection pool into unified topology ([9dd3939](https://github.com/mongodb/node-mongodb-native/commit/9dd3939)) +* introduce `MongoServerSelectionError` ([0cf7ec9](https://github.com/mongodb/node-mongodb-native/commit/0cf7ec9)) +* introduce a class for tracking stream specific attributes ([f6bf82c](https://github.com/mongodb/node-mongodb-native/commit/f6bf82c)) +* introduce a new `Monitor` type for server monitoring 
([2bfe2a1](https://github.com/mongodb/node-mongodb-native/commit/2bfe2a1)) +* relay all CMAP events to MongoClient ([1aea4de](https://github.com/mongodb/node-mongodb-native/commit/1aea4de)) +* support socket timeouts on a per-connection level ([93e8ad0](https://github.com/mongodb/node-mongodb-native/commit/93e8ad0)) + + + ## [3.4.1](https://github.com/mongodb/node-mongodb-native/compare/v3.4.0...v3.4.1) (2019-12-19) diff --git a/docs/reference/content/reference/management/cmap-monitoring.md b/docs/reference/content/reference/management/cmap-monitoring.md new file mode 100644 index 00000000000..9a29344e9eb --- /dev/null +++ b/docs/reference/content/reference/management/cmap-monitoring.md @@ -0,0 +1,146 @@ ++++ +date = "2020-01-14T09:03:26-04:00" +title = "Connection Pool Monitoring" +[menu.main] + parent = "Management" + identifier = "CMAP" + weight = 100 + pre = "" ++++ + +# Connection Pool Monitoring + +The Node.js driver `3.5.0` or higher features Connection Pool Monitoring events, allowing an application or +tool to monitor the internal workings of the driver's connection pool. + +**NOTE:** Connection pool monitoring is only available when the "Unified Topology" is enabled + +## Overview of CMAP events + +| Event | Description | +| :----------| :------------- | +| connectionPoolCreated | Emitted when a connection pool is created | +| connectionPoolClosed | Emitted when a connection pool is closed, prior to server instance destruction | +| connectionCreated | Emitted when a connection is created, but not necessarily when it is used for an operation | +| connectionReady | Emitted after a connection has successfully completed a handshake, and is ready to be used for operations| +| connectionClosed | Emitted when a connection is closed | +| connectionCheckOutStarted | Emitted when an operation attempts to acquire a connection for execution | +| connectionCheckOutFailed | Emitted when an operation fails to acquire a connection for execution | +| connectionCheckedOut | Emitted when an operation successfully acquires a connection for execution | +| connectionCheckedIn | Emitted when a connection is returned to the pool after operation execution | +| connectionPoolCleared | Emitted when the connection pool's generation count is increased | + +## Simple Code Example + +The following example demonstrates connecting to a replica set and printing out all CMAP related events: + +```js +const MongoClient = require('mongodb').MongoClient; +const url = 'mongodb://localhost:31000,localhost:31001/?replicaSet=rs'; +const client = new MongoClient(url); + +client.on('connectionPoolCreated', event => console.dir(event)); +client.on('connectionPoolClosed', event => console.dir(event)); +client.on('connectionCreated', event => console.dir(event)); +client.on('connectionReady', event => console.dir(event)); +client.on('connectionClosed', event => console.dir(event)); +client.on('connectionCheckOutStarted', event => console.dir(event)); +client.on('connectionCheckOutFailed', event => console.dir(event)); +client.on('connectionCheckedOut', event => console.dir(event)); +client.on('connectionCheckedIn', event => console.dir(event)); +client.on('connectionPoolCleared', event => console.dir(event)); + +client.connect((err, client) => { + if (err) throw err; +}); +``` + +## Example Events + +### connectionPoolCreated +```js +ConnectionPoolCreatedEvent { + time: 2020-01-14T13:46:15.536Z, + 
address: 'localhost:31003', + options: { ... } +} +``` + +### connectionPoolClosed +```js +ConnectionPoolClosedEvent { + time: 2020-01-14T13:54:53.570Z, + address: '127.0.0.1:34849' +} +``` + +### connectionCreated +```js +ConnectionCreatedEvent { + time: 2020-01-14T13:54:53.579Z, + address: '127.0.0.1:34849', + connectionId: 1 +} +``` + +### connectionReady +```js +ConnectionReadyEvent { + time: 2020-01-14T13:54:53.579Z, + address: '127.0.0.1:34849', + connectionId: 1 +} +``` + +### connectionClosed +```js +ConnectionClosedEvent { + time: 2020-01-14T13:54:53.564Z, + address: '127.0.0.1:34849', + connectionId: 2, + reason: ... +} +``` + +### connectionCheckOutStarted +```js +ConnectionCheckOutStartedEvent { + time: 2020-01-14T13:49:59.271Z, + address: 'localhost:31000' +} +``` + +### connectionCheckOutFailed +```js +ConnectionCheckOutFailedEvent { + time: 2020-01-14T13:49:59.271Z, + address: 'localhost:31000' + reason: ... +} +``` + +### connectionCheckedOut +```js +ConnectionCheckedOutEvent { + time: 2020-01-14T13:48:42.541Z, + address: 'localhost:31000', + connectionId: 1 +} +``` + +### connectionCheckedIn +```js +ConnectionCheckedInEvent { + time: 2020-01-14T13:48:42.543Z, + address: 'localhost:31000', + connectionId: 1 +} +``` + +### connectionPoolCleared +```js +ConnectionPoolClearedEvent { + time: 2020-01-14T13:58:11.437Z, + address: '127.0.0.1:45005' +} +``` diff --git a/docs/reference/content/reference/unified-topology/index.md b/docs/reference/content/reference/unified-topology/index.md index 22a729b4e84..6ed2b1d73ec 100644 --- a/docs/reference/content/reference/unified-topology/index.md +++ b/docs/reference/content/reference/unified-topology/index.md @@ -35,6 +35,13 @@ The unified topology no longer supports the following events: - `reconnect` - `reconnectFailed` - `attemptReconnect` +- `joined` +- `left` +- `ping` +- `ha` +- `all` +- `fullsetup` +- `open` It also deprecates the following options passed into the `MongoClient`: - `autoReconnect` @@ -69,7 +76,7 @@ We think the ambiguity of what it means to be "connected" can lead to far more p ### Server Selection -The psuedocode for operation execution looks something like this: +The pseudocode for operation execution looks something like this: ```js function executeOperation(topology, operation, callback) { @@ -95,3 +102,9 @@ The three topology types from the "native" layer (in `lib/topologies`) primarily - There is no collaboration with the server to ensure that queued write operations only happen one time. Imagine running an `updateOne` operation which is interrupted by a network error. The operation was successfully sent to the server, but the server response was lost during the interruption, which means the operation is placed in the callback store to be retried. At the same, another microservice allows a user to update the written data. Once the original client is reconnected to the server, it automatically rexecutes the operation and updates the _newer_ data with an _older_ value. The unified topology completely removes the disconnect handler, in favor of the more robust and consistent [Retryable Reads](https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.rst) and [Retryable Writes](https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst) features. 
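
As a usage-level sketch (not part of this change set), opting into this behavior only requires enabling the unified topology and retryable writes; the URI, database, and collection names below are placeholders:

```js
const MongoClient = require('mongodb').MongoClient;

// placeholder connection string; `retryWrites=true` opts the client into retryable writes
const client = new MongoClient('mongodb://localhost:27017/?retryWrites=true', {
  useUnifiedTopology: true,
  serverSelectionTimeoutMS: 30000 // how long the selection loop waits before surfacing an error
});

client.connect(err => {
  if (err) throw err;

  // if a transient network error interrupts this write, the driver retries it once
  // instead of queueing it in a disconnect handler
  const coll = client.db('test').collection('example');
  coll.updateOne({ _id: 1 }, { $inc: { count: 1 } }, err => {
    if (err) throw err;
    client.close();
  });
});
```
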
Operations now will attempt execution in a server selection loop for up to `serverSelectionTimeoutMS` (default: 30s), and will retry the operation one time in the event of a [retryable error](https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms). All errors outside of this loop are returned to the user, since they know best what to do in these scenarios. + +### Deprecated monitoring events + +The `joined`, `left`, `all`, and `fullsetup` events are no longer emitted by the unified topology, primarily +because their behavior is duplicated by the pre-existing SDAM monitoring events: `topologyDescriptionChanged` +and `serverDescriptionChanged`. Please refer to the documentation on [Topology Monitoring]({{}}) diff --git a/index.js b/index.js index 51d06a765e8..4e9e6359e86 100644 --- a/index.js +++ b/index.js @@ -11,6 +11,7 @@ const connect = require('./lib/mongo_client').connect; connect.MongoError = core.MongoError; connect.MongoNetworkError = core.MongoNetworkError; connect.MongoTimeoutError = core.MongoTimeoutError; +connect.MongoServerSelectionError = core.MongoServerSelectionError; connect.MongoParseError = core.MongoParseError; connect.MongoWriteConcernError = core.MongoWriteConcernError; connect.MongoBulkWriteError = require('./lib/bulk/common').BulkWriteError; diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js new file mode 100644 index 00000000000..fbb4a7a5be7 --- /dev/null +++ b/lib/cmap/connection.js @@ -0,0 +1,366 @@ +'use strict'; + +const EventEmitter = require('events'); +const MessageStream = require('./message_stream'); +const MongoError = require('../core/error').MongoError; +const MongoNetworkError = require('../core/error').MongoNetworkError; +const MongoWriteConcernError = require('../core/error').MongoWriteConcernError; +const CommandResult = require('../core/connection/command_result'); +const StreamDescription = require('./stream_description').StreamDescription; +const wp = require('../core/wireprotocol'); +const apm = require('../core/connection/apm'); +const updateSessionFromResponse = require('../core/sessions').updateSessionFromResponse; +const uuidV4 = require('../core/utils').uuidV4; + +const kStream = Symbol('stream'); +const kQueue = Symbol('queue'); +const kMessageStream = Symbol('messageStream'); +const kGeneration = Symbol('generation'); +const kLastUseTime = Symbol('lastUseTime'); +const kClusterTime = Symbol('clusterTime'); +const kDescription = Symbol('description'); +const kIsMaster = Symbol('ismaster'); +const kAutoEncrypter = Symbol('autoEncrypter'); + +class Connection extends EventEmitter { + constructor(stream, options) { + super(options); + + this.id = options.id; + this.address = streamIdentifier(stream); + this.bson = options.bson; + this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; + this.monitorCommands = + typeof options.monitorCommands === 'boolean' ? 
options.monitorCommands : false; + this.closed = false; + this.destroyed = false; + + this[kDescription] = new StreamDescription(this.address, options); + this[kGeneration] = options.generation; + this[kLastUseTime] = Date.now(); + + // retain a reference to an `AutoEncrypter` if present + if (options.autoEncrypter) { + this[kAutoEncrypter] = options.autoEncrypter; + } + + // setup parser stream and message handling + this[kQueue] = new Map(); + this[kMessageStream] = new MessageStream(options); + this[kMessageStream].on('message', messageHandler(this)); + this[kStream] = stream; + stream.on('error', () => { + /* ignore errors, listen to `close` instead */ + }); + + stream.on('close', () => { + if (this.closed) { + return; + } + + this.closed = true; + this[kQueue].forEach(op => + op.cb(new MongoNetworkError(`connection ${this.id} to ${this.address} closed`)) + ); + this[kQueue].clear(); + + this.emit('close'); + }); + + stream.on('timeout', () => { + if (this.closed) { + return; + } + + stream.destroy(); + this.closed = true; + this[kQueue].forEach(op => + op.cb(new MongoNetworkError(`connection ${this.id} to ${this.address} timed out`)) + ); + this[kQueue].clear(); + + this.emit('close'); + }); + + // hook the message stream up to the passed in stream + stream.pipe(this[kMessageStream]); + this[kMessageStream].pipe(stream); + } + + get description() { + return this[kDescription]; + } + + get ismaster() { + return this[kIsMaster]; + } + + // the `connect` method stores the result of the handshake ismaster on the connection + set ismaster(response) { + this[kDescription].receiveResponse(response); + + // TODO: remove this, and only use the `StreamDescription` in the future + this[kIsMaster] = response; + } + + get generation() { + return this[kGeneration] || 0; + } + + get idleTime() { + return Date.now() - this[kLastUseTime]; + } + + get clusterTime() { + return this[kClusterTime]; + } + + get stream() { + return this[kStream]; + } + + markAvailable() { + this[kLastUseTime] = Date.now(); + } + + destroy(options, callback) { + if (typeof options === 'function') { + callback = options; + options = {}; + } + + options = Object.assign({ force: false }, options); + if (this[kStream] == null || this.destroyed) { + this.destroyed = true; + if (typeof callback === 'function') { + callback(); + } + + return; + } + + if (options.force) { + this[kStream].destroy(); + this.destroyed = true; + if (typeof callback === 'function') { + callback(); + } + + return; + } + + this[kStream].end(err => { + this.destroyed = true; + if (typeof callback === 'function') { + callback(err); + } + }); + } + + // Wire protocol methods + command(ns, cmd, options, callback) { + wp.command(makeServerTrampoline(this), ns, cmd, options, callback); + } + + query(ns, cmd, cursorState, options, callback) { + wp.query(makeServerTrampoline(this), ns, cmd, cursorState, options, callback); + } + + getMore(ns, cursorState, batchSize, options, callback) { + wp.getMore(makeServerTrampoline(this), ns, cursorState, batchSize, options, callback); + } + + killCursors(ns, cursorState, callback) { + wp.killCursors(makeServerTrampoline(this), ns, cursorState, callback); + } + + insert(ns, ops, options, callback) { + wp.insert(makeServerTrampoline(this), ns, ops, options, callback); + } + + update(ns, ops, options, callback) { + wp.update(makeServerTrampoline(this), ns, ops, options, callback); + } + + remove(ns, ops, options, callback) { + wp.remove(makeServerTrampoline(this), ns, ops, options, callback); + } +} + +/// This lets us emulate 
a legacy `Server` instance so we can work with the existing wire +/// protocol methods. Eventually, the operation executor will return a `Connection` to execute +/// against. +function makeServerTrampoline(connection) { + const server = { + description: connection.description, + clusterTime: connection[kClusterTime], + s: { + bson: connection.bson, + pool: { write: write.bind(connection), isConnected: () => true } + } + }; + + if (connection[kAutoEncrypter]) { + server.autoEncrypter = connection[kAutoEncrypter]; + } + + return server; +} + +function messageHandler(conn) { + return function messageHandler(message) { + // always emit the message, in case we are streaming + conn.emit('message', message); + if (!conn[kQueue].has(message.responseTo)) { + return; + } + + const operationDescription = conn[kQueue].get(message.responseTo); + conn[kQueue].delete(message.responseTo); + + const callback = operationDescription.cb; + if (operationDescription.socketTimeoutOverride) { + conn[kStream].setTimeout(conn.socketTimeout); + } + + try { + // Pass in the entire description because it has BSON parsing options + message.parse(operationDescription); + } catch (err) { + callback(new MongoError(err)); + return; + } + + if (message.documents[0]) { + const document = message.documents[0]; + const session = operationDescription.session; + if (session) { + updateSessionFromResponse(session, document); + } + + if (document.$clusterTime) { + conn[kClusterTime] = document.$clusterTime; + conn.emit('clusterTimeReceived', document.$clusterTime); + } + + if (document.writeConcernError) { + callback(new MongoWriteConcernError(document.writeConcernError, document)); + return; + } + + if (document.ok === 0 || document.$err || document.errmsg) { + callback(new MongoError(document)); + return; + } + } + + // NODE-2382: reenable in our glorious non-leaky abstraction future + // callback(null, operationDescription.fullResult ? message : message.documents[0]); + + callback( + undefined, + new CommandResult( + operationDescription.fullResult ? message : message.documents[0], + conn, + message + ) + ); + }; +} + +function streamIdentifier(stream) { + if (typeof stream.address === 'function') { + return `${stream.remoteAddress}:${stream.remotePort}`; + } + + return uuidV4().toString('hex'); +} + +// Not meant to be called directly, the wire protocol methods call this assuming it is a `Pool` instance +function write(command, options, callback) { + if (typeof options === 'function') { + callback = options; + } + + options = options || {}; + const operationDescription = { + requestId: command.requestId, + cb: callback, + session: options.session, + fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false, + noResponse: typeof options.noResponse === 'boolean' ? options.noResponse : false, + documentsReturnedIn: options.documentsReturnedIn, + + // for BSON parsing + promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true, + promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true, + promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false, + raw: typeof options.raw === 'boolean' ? 
options.raw : false + }; + + if (this[kDescription] && this[kDescription].compressor) { + operationDescription.agreedCompressor = this[kDescription].compressor; + + if (this[kDescription].zlibCompressionLevel) { + operationDescription.zlibCompressionLevel = this[kDescription].zlibCompressionLevel; + } + } + + if (typeof options.socketTimeout === 'number') { + operationDescription.socketTimeoutOverride = true; + this[kStream].setTimeout(options.socketTimeout); + } + + // if command monitoring is enabled we need to modify the callback here + if (this.monitorCommands) { + this.emit('commandStarted', new apm.CommandStartedEvent(this, command)); + + operationDescription.started = process.hrtime(); + operationDescription.cb = (err, reply) => { + if (err) { + this.emit( + 'commandFailed', + new apm.CommandFailedEvent(this, command, err, operationDescription.started) + ); + } else { + if (reply && reply.result && (reply.result.ok === 0 || reply.result.$err)) { + this.emit( + 'commandFailed', + new apm.CommandFailedEvent(this, command, reply.result, operationDescription.started) + ); + } else { + this.emit( + 'commandSucceeded', + new apm.CommandSucceededEvent(this, command, reply, operationDescription.started) + ); + } + } + + if (typeof callback === 'function') { + callback(err, reply); + } + }; + } + + if (!operationDescription.noResponse) { + this[kQueue].set(operationDescription.requestId, operationDescription); + } + + try { + this[kMessageStream].writeCommand(command, operationDescription); + } catch (e) { + if (!operationDescription.noResponse) { + this[kQueue].delete(operationDescription.requestId); + operationDescription.cb(e); + return; + } + } + + if (operationDescription.noResponse) { + operationDescription.cb(); + } +} + +module.exports = { + Connection +}; diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js new file mode 100644 index 00000000000..829075aca60 --- /dev/null +++ b/lib/cmap/connection_pool.js @@ -0,0 +1,550 @@ +'use strict'; + +const Denque = require('denque'); +const EventEmitter = require('events').EventEmitter; +const makeCounter = require('../utils').makeCounter; +const MongoError = require('../core/error').MongoError; +const Connection = require('./connection').Connection; +const eachAsync = require('../core/utils').eachAsync; +const connect = require('../core/connection/connect'); +const relayEvents = require('../core/utils').relayEvents; + +const errors = require('./errors'); +const PoolClosedError = errors.PoolClosedError; +const WaitQueueTimeoutError = errors.WaitQueueTimeoutError; + +const events = require('./events'); +const ConnectionPoolCreatedEvent = events.ConnectionPoolCreatedEvent; +const ConnectionPoolClosedEvent = events.ConnectionPoolClosedEvent; +const ConnectionCreatedEvent = events.ConnectionCreatedEvent; +const ConnectionReadyEvent = events.ConnectionReadyEvent; +const ConnectionClosedEvent = events.ConnectionClosedEvent; +const ConnectionCheckOutStartedEvent = events.ConnectionCheckOutStartedEvent; +const ConnectionCheckOutFailedEvent = events.ConnectionCheckOutFailedEvent; +const ConnectionCheckedOutEvent = events.ConnectionCheckedOutEvent; +const ConnectionCheckedInEvent = events.ConnectionCheckedInEvent; +const ConnectionPoolClearedEvent = events.ConnectionPoolClearedEvent; + +const kConnections = Symbol('connections'); +const kPermits = Symbol('permits'); +const kMinPoolSizeTimer = Symbol('minPoolSizeTimer'); +const kGeneration = Symbol('generation'); +const kConnectionCounter = Symbol('connectionCounter'); +const 
kCancellationToken = Symbol('cancellationToken'); +const kWaitQueue = Symbol('waitQueue'); +const kCancelled = Symbol('cancelled'); + +const VALID_POOL_OPTIONS = new Set([ + // `connect` options + 'host', + 'port', + 'bson', + 'connectionType', + 'monitorCommands', + 'socketTimeout', + 'credentials', + 'compression', + + // node Net options + 'ssl', + 'localAddress', + 'localPort', + 'family', + 'hints', + 'lookup', + 'checkServerIdentity', + 'rejectUnauthorized', + 'ALPNProtocols', + 'servername', + 'checkServerIdentity', + 'session', + 'minDHSize', + 'secureContext', + + // spec options + 'maxPoolSize', + 'minPoolSize', + 'maxIdleTimeMS', + 'waitQueueTimeoutMS' +]); + +function resolveOptions(options, defaults) { + const newOptions = Array.from(VALID_POOL_OPTIONS).reduce((obj, key) => { + if (options.hasOwnProperty(key)) { + obj[key] = options[key]; + } + + return obj; + }, {}); + + return Object.freeze(Object.assign({}, defaults, newOptions)); +} + +/** + * Configuration options for drivers wrapping the node driver. + * + * @typedef {Object} ConnectionPoolOptions + * @property + * @property {string} [host] The host to connect to + * @property {number} [port] The port to connect to + * @property {bson} [bson] The BSON instance to use for new connections + * @property {number} [maxPoolSize=100] The maximum number of connections that may be associated with a pool at a given time. This includes in use and available connections. + * @property {number} [minPoolSize=0] The minimum number of connections that MUST exist at any moment in a single connection pool. + * @property {number} [maxIdleTimeMS] The maximum amount of time a connection should remain idle in the connection pool before being marked idle. + * @property {number} [waitQueueTimeoutMS=0] The maximum amount of time operation execution should wait for a connection to become available. The default is 0 which means there is no limit. + */ + +/** + * A pool of connections which dynamically resizes, and emit events related to pool activity + * + * @property {number} generation An integer representing the SDAM generation of the pool + * @property {number} totalConnectionCount An integer expressing how many total connections (active + in use) the pool currently has + * @property {number} availableConnectionCount An integer expressing how many connections are currently available in the pool. + * @property {string} address The address of the endpoint the pool is connected to + * + * @emits ConnectionPool#connectionPoolCreated + * @emits ConnectionPool#connectionPoolClosed + * @emits ConnectionPool#connectionCreated + * @emits ConnectionPool#connectionReady + * @emits ConnectionPool#connectionClosed + * @emits ConnectionPool#connectionCheckOutStarted + * @emits ConnectionPool#connectionCheckOutFailed + * @emits ConnectionPool#connectionCheckedOut + * @emits ConnectionPool#connectionCheckedIn + * @emits ConnectionPool#connectionPoolCleared + */ +class ConnectionPool extends EventEmitter { + /** + * Create a new Connection Pool + * + * @param {ConnectionPoolOptions} options + */ + constructor(options) { + super(); + options = options || {}; + + this.closed = false; + this.options = resolveOptions(options, { + connectionType: Connection, + maxPoolSize: typeof options.maxPoolSize === 'number' ? options.maxPoolSize : 100, + minPoolSize: typeof options.minPoolSize === 'number' ? options.minPoolSize : 0, + maxIdleTimeMS: typeof options.maxIdleTimeMS === 'number' ? 
options.maxIdleTimeMS : 0, + waitQueueTimeoutMS: + typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 0, + autoEncrypter: options.autoEncrypter, + metadata: options.metadata + }); + + if (options.minSize > options.maxSize) { + throw new TypeError('Pool minimum size must not be greater than maxiumum pool size'); + } + + this[kConnections] = new Denque(); + this[kPermits] = this.options.maxPoolSize; + this[kMinPoolSizeTimer] = undefined; + this[kGeneration] = 0; + this[kConnectionCounter] = makeCounter(1); + this[kCancellationToken] = new EventEmitter(); + this[kCancellationToken].setMaxListeners(Infinity); + this[kWaitQueue] = new Denque(); + + process.nextTick(() => { + this.emit('connectionPoolCreated', new ConnectionPoolCreatedEvent(this)); + ensureMinPoolSize(this); + }); + } + + /** + * Check a connection out of this pool. The connection will continue to be tracked, but no reference to it + * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or + * explicitly destroyed by the new owner. + * + * @param {ConnectionPool~checkOutCallback} callback + */ + checkOut(callback) { + this.emit('connectionCheckOutStarted', new ConnectionCheckOutStartedEvent(this)); + + if (this.closed) { + this.emit('connectionCheckOutFailed', new ConnectionCheckOutFailedEvent(this, 'poolClosed')); + callback(new PoolClosedError(this)); + return; + } + + // add this request to the wait queue + const waitQueueMember = { callback }; + + const pool = this; + const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; + if (waitQueueTimeoutMS) { + waitQueueMember.timer = setTimeout(() => { + waitQueueMember[kCancelled] = true; + waitQueueMember.timer = undefined; + + pool.emit('connectionCheckOutFailed', new ConnectionCheckOutFailedEvent(pool, 'timeout')); + waitQueueMember.callback(new WaitQueueTimeoutError(pool)); + }, waitQueueTimeoutMS); + } + + // place the member at the end of the wait queue + this[kWaitQueue].push(waitQueueMember); + + // process the wait queue + processWaitQueue(this); + } + + /** + * Check a connection into the pool. + * + * @param {Connection} connection The connection to check in + */ + checkIn(connection) { + const poolClosed = this.closed; + const stale = connectionIsStale(this, connection); + const willDestroy = !!(poolClosed || stale || connection.closed); + + // Properly adjust state of connection + if (!willDestroy) { + connection.markAvailable(); + + this[kConnections].push(connection); + } + + this.emit('connectionCheckedIn', new ConnectionCheckedInEvent(this, connection)); + + if (willDestroy) { + const reason = connection.closed ? 'error' : poolClosed ? 'poolClosed' : 'stale'; + destroyConnection(this, connection, reason); + } + + processWaitQueue(this); + } + + /** + * Clear the pool + * + * Pool reset is handled by incrementing the pool's generation count. Any existing connection of a + * previous generation will eventually be pruned during subsequent checkouts. 
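+ *
+ * @example
+ * // illustrative sketch only (not part of this diff): clearing the pool bumps the generation,
+ * // so a connection checked out before the clear is considered stale when it is checked back in
+ * pool.clear();              // generation: n -> n + 1, emits 'connectionPoolCleared'
+ * pool.checkIn(connection);  // emits 'connectionCheckedIn', then destroys the stale connection
+ *                            // with a 'connectionClosed' event whose reason is 'stale'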
+ */ + clear() { + this[kGeneration] += 1; + this.emit('connectionPoolCleared', new ConnectionPoolClearedEvent(this)); + } + + /** + * Close the pool + * + * @param {object} [options] Optional settings + * @param {boolean} [options.force] Force close connections + * @param {Function} callback + */ + close(options, callback) { + if (typeof options === 'function') { + callback = options; + } + + options = Object.assign({ force: false }, options); + if (this.closed) { + return callback(); + } + + // immediately cancel any in-flight connections + this[kCancellationToken].emit('cancel'); + + // drain the wait queue + while (this[kWaitQueue].length) { + const waitQueueMember = this[kWaitQueue].pop(); + clearTimeout(waitQueueMember.timer); + if (!waitQueueMember[kCancelled]) { + waitQueueMember.callback(new MongoError('connection pool closed')); + } + } + + // clear the min pool size timer + if (this[kMinPoolSizeTimer]) { + clearTimeout(this[kMinPoolSizeTimer]); + } + + // end the connection counter + if (typeof this[kConnectionCounter].return === 'function') { + this[kConnectionCounter].return(); + } + + // mark the pool as closed immediately + this.closed = true; + + eachAsync( + this[kConnections].toArray(), + (conn, cb) => { + this.emit('connectionClosed', new ConnectionClosedEvent(this, conn, 'poolClosed')); + conn.destroy(options, cb); + }, + err => { + this[kConnections].clear(); + this.emit('connectionPoolClosed', new ConnectionPoolClosedEvent(this)); + callback(err); + } + ); + } + + /** + * Runs a lambda with an implicitly checked out connection, checking that connection back in when the lambda + * has completed by calling back. + * + * NOTE: please note the required signature of `fn` + * + * @param {ConnectionPool~withConnectionCallback} fn A function which operates on a managed connection + * @param {Function} callback The original callback + * @return {Promise} + */ + withConnection(fn, callback) { + this.checkOut((err, conn) => { + // don't callback with `err` here, we might want to act upon it inside `fn` + + fn(err, conn, (fnErr, result) => { + if (typeof callback === 'function') { + if (fnErr) { + callback(fnErr); + } else { + callback(undefined, result); + } + } + + if (conn) { + this.checkIn(conn); + } + }); + }); + } + + get generation() { + return this[kGeneration]; + } + + get totalConnectionCount() { + return this[kConnections].length + (this.options.maxPoolSize - this[kPermits]); + } + + get availableConnectionCount() { + return this[kConnections].length; + } + + get address() { + return `${this.options.host}:${this.options.port}`; + } + + // Private Helpers + _propagateError() { + return; + } +} + +function ensureMinPoolSize(pool) { + if (pool.closed) { + return; + } + + const minPoolSize = pool.options.minPoolSize; + for (let i = pool.totalConnectionCount; i < minPoolSize; ++i) { + createConnection(pool); + } + + pool[kMinPoolSizeTimer] = setTimeout(() => ensureMinPoolSize(pool), 10); +} + +function connectionIsStale(pool, connection) { + return connection.generation !== pool[kGeneration]; +} + +function connectionIsIdle(pool, connection) { + return !!(pool.options.maxIdleTimeMS && connection.idleTime > pool.options.maxIdleTimeMS); +} + +function createConnection(pool, callback) { + const connectOptions = Object.assign( + { + id: pool[kConnectionCounter].next().value, + generation: pool[kGeneration] + }, + pool.options + ); + + pool[kPermits]--; + connect(connectOptions, pool[kCancellationToken], (err, connection) => { + if (err) { + pool[kPermits]++; + + // NOTE: 
integrate logger here + pool._propagateError(err); + if (typeof callback === 'function') { + callback(err); + } + + return; + } + + // The pool might have closed since we started trying to create a connection + if (pool.closed) { + connection.destroy({ force: true }); + return; + } + + // forward all events from the connection to the pool + relayEvents(connection, pool, [ + 'commandStarted', + 'commandFailed', + 'commandSucceeded', + 'clusterTimeReceived' + ]); + + pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); + + connection.markAvailable(); + pool.emit('connectionReady', new ConnectionReadyEvent(pool, connection)); + + // if a callback has been provided, check out the connection immediately + if (typeof callback === 'function') { + pool.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(pool, connection)); + callback(undefined, connection); + return; + } + + // otherwise add it to the pool for later acquisition, and try to process the wait queue + pool[kConnections].push(connection); + processWaitQueue(pool); + }); +} + +function destroyConnection(pool, connection, reason) { + pool.emit('connectionClosed', new ConnectionClosedEvent(pool, connection, reason)); + + // allow more connections to be created + pool[kPermits]++; + + // destroy the connection + process.nextTick(() => connection.destroy()); +} + +function processWaitQueue(pool) { + if (pool.closed) { + return; + } + + while (pool[kWaitQueue].length && pool.availableConnectionCount) { + const waitQueueMember = pool[kWaitQueue].peekFront(); + if (waitQueueMember[kCancelled]) { + pool[kWaitQueue].shift(); + continue; + } + + const connection = pool[kConnections].shift(); + const isStale = connectionIsStale(pool, connection); + const isIdle = connectionIsIdle(pool, connection); + if (!isStale && !isIdle && !connection.closed) { + pool.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(pool, connection)); + clearTimeout(waitQueueMember.timer); + pool[kWaitQueue].shift(); + waitQueueMember.callback(undefined, connection); + return; + } + + const reason = connection.closed ? 'error' : isStale ? 'stale' : 'idle'; + destroyConnection(pool, connection, reason); + } + + const maxPoolSize = pool.options.maxPoolSize; + if (pool[kWaitQueue].length && (maxPoolSize <= 0 || pool.totalConnectionCount < maxPoolSize)) { + createConnection(pool); + return; + } +} + +/** + * A callback provided to `withConnection` + * + * @callback ConnectionPool~withConnectionCallback + * @param {MongoError} error An error instance representing the error during the execution. + * @param {Connection} connection The managed connection which was checked out of the pool. 
+ * @param {Function} callback A function to call back after connection management is complete + */ + +/** + * A callback provided to `checkOut` + * + * @callback ConnectionPool~checkOutCallback + * @param {MongoError} error An error instance representing the error during checkout + * @param {Connection} connection A connection from the pool + */ + +/** + * Emitted once when the connection pool is created + * + * @event ConnectionPool#connectionPoolCreated + * @type {PoolCreatedEvent} + */ + +/** + * Emitted once when the connection pool is closed + * + * @event ConnectionPool#connectionPoolClosed + * @type {PoolClosedEvent} + */ + +/** + * Emitted each time a connection is created + * + * @event ConnectionPool#connectionCreated + * @type {ConnectionCreatedEvent} + */ + +/** + * Emitted when a connection becomes established, and is ready to use + * + * @event ConnectionPool#connectionReady + * @type {ConnectionReadyEvent} + */ + +/** + * Emitted when a connection is closed + * + * @event ConnectionPool#connectionClosed + * @type {ConnectionClosedEvent} + */ + +/** + * Emitted when an attempt to check out a connection begins + * + * @event ConnectionPool#connectionCheckOutStarted + * @type {ConnectionCheckOutStartedEvent} + */ + +/** + * Emitted when an attempt to check out a connection fails + * + * @event ConnectionPool#connectionCheckOutFailed + * @type {ConnectionCheckOutFailedEvent} + */ + +/** + * Emitted each time a connection is successfully checked out of the connection pool + * + * @event ConnectionPool#connectionCheckedOut + * @type {ConnectionCheckedOutEvent} + */ + +/** + * Emitted each time a connection is successfully checked into the connection pool + * + * @event ConnectionPool#connectionCheckedIn + * @type {ConnectionCheckedInEvent} + */ + +/** + * Emitted each time the connection pool is cleared and it's generation incremented + * + * @event ConnectionPool#connectionPoolCleared + * @type {PoolClearedEvent} + */ + +module.exports = { + ConnectionPool +}; diff --git a/lib/cmap/errors.js b/lib/cmap/errors.js new file mode 100644 index 00000000000..d9330195e74 --- /dev/null +++ b/lib/cmap/errors.js @@ -0,0 +1,35 @@ +'use strict'; +const MongoError = require('../core/error').MongoError; + +/** + * An error indicating a connection pool is closed + * + * @property {string} address The address of the connection pool + * @extends MongoError + */ +class PoolClosedError extends MongoError { + constructor(pool) { + super('Attempted to check out a connection from closed connection pool'); + this.name = 'MongoPoolClosedError'; + this.address = pool.address; + } +} + +/** + * An error thrown when a request to check out a connection times out + * + * @property {string} address The address of the connection pool + * @extends MongoError + */ +class WaitQueueTimeoutError extends MongoError { + constructor(pool) { + super('Timed out while checking out a connection from connection pool'); + this.name = 'MongoWaitQueueTimeoutError'; + this.address = pool.address; + } +} + +module.exports = { + PoolClosedError, + WaitQueueTimeoutError +}; diff --git a/lib/cmap/events.js b/lib/cmap/events.js new file mode 100644 index 00000000000..dcc8b6752b9 --- /dev/null +++ b/lib/cmap/events.js @@ -0,0 +1,154 @@ +'use strict'; + +/** + * The base class for all monitoring events published from the connection pool + * + * @property {number} time A timestamp when the event was created + * @property {string} address The address (host/port pair) of the pool + */ +class ConnectionPoolMonitoringEvent { + 
constructor(pool) { + this.time = new Date(); + this.address = pool.address; + } +} + +/** + * An event published when a connection pool is created + * + * @property {Object} options The options used to create this connection pool + */ +class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent { + constructor(pool) { + super(pool); + this.options = pool.options; + } +} + +/** + * An event published when a connection pool is closed + */ +class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent { + constructor(pool) { + super(pool); + } +} + +/** + * An event published when a connection pool creates a new connection + * + * @property {number} connectionId A monotonically increasing, per-pool id for the newly created connection + */ +class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent { + constructor(pool, connection) { + super(pool); + this.connectionId = connection.id; + } +} + +/** + * An event published when a connection is ready for use + * + * @property {number} connectionId The id of the connection + */ +class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent { + constructor(pool, connection) { + super(pool); + this.connectionId = connection.id; + } +} + +/** + * An event published when a connection is closed + * + * @property {number} connectionId The id of the connection + * @property {string} reason The reason the connection was closed + */ +class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent { + constructor(pool, connection, reason) { + super(pool); + this.connectionId = connection.id; + this.reason = reason || 'unknown'; + } +} + +/** + * An event published when a request to check a connection out begins + */ +class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent { + constructor(pool) { + super(pool); + } +} + +/** + * An event published when a request to check a connection out fails + * + * @property {string} reason The reason the attempt to check out failed + */ +class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent { + constructor(pool, reason) { + super(pool); + this.reason = reason; + } +} + +/** + * An event published when a connection is checked out of the connection pool + * + * @property {number} connectionId The id of the connection + */ +class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent { + constructor(pool, connection) { + super(pool); + this.connectionId = connection.id; + } +} + +/** + * An event published when a connection is checked into the connection pool + * + * @property {number} connectionId The id of the connection + */ +class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent { + constructor(pool, connection) { + super(pool); + this.connectionId = connection.id; + } +} + +/** + * An event published when a connection pool is cleared + */ +class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent { + constructor(pool) { + super(pool); + } +} + +const CMAP_EVENT_NAMES = [ + 'connectionPoolCreated', + 'connectionPoolClosed', + 'connectionCreated', + 'connectionReady', + 'connectionClosed', + 'connectionCheckOutStarted', + 'connectionCheckOutFailed', + 'connectionCheckedOut', + 'connectionCheckedIn', + 'connectionPoolCleared' +]; + +module.exports = { + CMAP_EVENT_NAMES, + ConnectionPoolCreatedEvent, + ConnectionPoolClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + ConnectionClosedEvent, + ConnectionCheckOutStartedEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckedOutEvent, + 
ConnectionCheckedInEvent, + ConnectionPoolClearedEvent +}; diff --git a/lib/core/cmap/message_stream.js b/lib/cmap/message_stream.js similarity index 82% rename from lib/core/cmap/message_stream.js rename to lib/cmap/message_stream.js index 7195e3f7457..4c648957243 100644 --- a/lib/core/cmap/message_stream.js +++ b/lib/cmap/message_stream.js @@ -2,20 +2,20 @@ const Duplex = require('stream').Duplex; const BufferList = require('bl'); -const MongoParseError = require('../error').MongoParseError; -const decompress = require('../wireprotocol/compression').decompress; -const Response = require('../connection/commands').Response; -const BinMsg = require('../connection/msg').BinMsg; -const MongoError = require('../error').MongoError; -const OP_COMPRESSED = require('../wireprotocol/shared').opcodes.OP_COMPRESSED; -const OP_MSG = require('../wireprotocol/shared').opcodes.OP_MSG; -const MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE; -const COMPRESSION_DETAILS_SIZE = require('../wireprotocol/shared').COMPRESSION_DETAILS_SIZE; -const opcodes = require('../wireprotocol/shared').opcodes; -const compress = require('../wireprotocol/compression').compress; -const compressorIDs = require('../wireprotocol/compression').compressorIDs; -const uncompressibleCommands = require('../wireprotocol/compression').uncompressibleCommands; -const Msg = require('../connection/msg').Msg; +const MongoParseError = require('../core/error').MongoParseError; +const decompress = require('../core/wireprotocol/compression').decompress; +const Response = require('../core/connection/commands').Response; +const BinMsg = require('../core/connection/msg').BinMsg; +const MongoError = require('../core/error').MongoError; +const OP_COMPRESSED = require('../core/wireprotocol/shared').opcodes.OP_COMPRESSED; +const OP_MSG = require('../core/wireprotocol/shared').opcodes.OP_MSG; +const MESSAGE_HEADER_SIZE = require('../core/wireprotocol/shared').MESSAGE_HEADER_SIZE; +const COMPRESSION_DETAILS_SIZE = require('../core/wireprotocol/shared').COMPRESSION_DETAILS_SIZE; +const opcodes = require('../core/wireprotocol/shared').opcodes; +const compress = require('../core/wireprotocol/compression').compress; +const compressorIDs = require('../core/wireprotocol/compression').compressorIDs; +const uncompressibleCommands = require('../core/wireprotocol/compression').uncompressibleCommands; +const Msg = require('../core/connection/msg').Msg; const kDefaultMaxBsonMessageSize = 1024 * 1024 * 16 * 4; const kBuffer = Symbol('buffer'); @@ -61,8 +61,9 @@ class MessageStream extends Duplex { } const messageBuffer = buffer.slice(0, sizeOfMessage); - processMessage(this, messageBuffer, callback); buffer.consume(sizeOfMessage); + + processMessage(this, messageBuffer, callback); } } @@ -76,7 +77,8 @@ class MessageStream extends Duplex { // TODO: agreed compressor should live in `StreamDescription` const shouldCompress = operationDescription && !!operationDescription.agreedCompressor; if (!shouldCompress || !canCompress(command)) { - this.push(Buffer.concat(command.toBin())); + const data = command.toBin(); + this.push(Array.isArray(data) ? 
Buffer.concat(data) : data); return; } @@ -125,7 +127,7 @@ function canCompress(command) { function processMessage(stream, message, callback) { const messageHeader = { - messageLength: message.readInt32LE(0), + length: message.readInt32LE(0), requestId: message.readInt32LE(4), responseTo: message.readInt32LE(8), opCode: message.readInt32LE(12) diff --git a/lib/cmap/stream_description.js b/lib/cmap/stream_description.js new file mode 100644 index 00000000000..e806a5f6522 --- /dev/null +++ b/lib/cmap/stream_description.js @@ -0,0 +1,45 @@ +'use strict'; +const parseServerType = require('../core/sdam/server_description').parseServerType; + +const RESPONSE_FIELDS = [ + 'minWireVersion', + 'maxWireVersion', + 'maxBsonObjectSize', + 'maxMessageSizeBytes', + 'maxWriteBatchSize', + '__nodejs_mock_server__' +]; + +class StreamDescription { + constructor(address, options) { + this.address = address; + this.type = parseServerType(null); + this.minWireVersion = undefined; + this.maxWireVersion = undefined; + this.maxBsonObjectSize = 16777216; + this.maxMessageSizeBytes = 48000000; + this.maxWriteBatchSize = 100000; + this.compressors = + options && options.compression && Array.isArray(options.compression.compressors) + ? options.compression.compressors + : []; + } + + receiveResponse(response) { + this.type = parseServerType(response); + + RESPONSE_FIELDS.forEach(field => { + if (typeof response[field] !== 'undefined') { + this[field] = response[field]; + } + }); + + if (response.compression) { + this.compressor = this.compressors.filter(c => response.compression.indexOf(c) !== -1)[0]; + } + } +} + +module.exports = { + StreamDescription +}; diff --git a/lib/core/cmap/connection.js b/lib/core/cmap/connection.js deleted file mode 100644 index 469d91d98db..00000000000 --- a/lib/core/cmap/connection.js +++ /dev/null @@ -1,220 +0,0 @@ -'use strict'; - -const EventEmitter = require('events'); -const MessageStream = require('./message_stream'); -const MongoError = require('../error').MongoError; -const MongoWriteConcernError = require('../error').MongoWriteConcernError; -const wp = require('../wireprotocol'); -const apm = require('../connection/apm'); -const updateSessionFromResponse = require('../sessions').updateSessionFromResponse; -const uuidV4 = require('../utils').uuidV4; - -const kStream = Symbol('stream'); -const kQueue = Symbol('queue'); -const kMessageStream = Symbol('messageStream'); - -class Connection extends EventEmitter { - constructor(stream, options) { - super(options); - - this.id = streamIdentifier(stream); - this.bson = options.bson; - this.description = null; - this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; - this.monitorCommands = - typeof options.monitorCommands === 'boolean' ? 
options.monitorCommands : false; - - // setup parser stream and message handling - this[kQueue] = new Map(); - this[kMessageStream] = new MessageStream(options); - this[kMessageStream].on('message', messageHandler(this)); - this[kStream] = stream; - stream.on('error', () => { - /* ignore errors, listen to `close` instead */ - }); - - stream.on('close', () => { - this[kQueue].forEach(op => op.callback(new MongoError('Connection closed'))); - this[kQueue].clear(); - - this.emit('close'); - }); - - // hook the message stream up to the passed in stream - stream.pipe(this[kMessageStream]); - this[kMessageStream].pipe(stream); - } - - // the `connect` method stores the result of the handshake ismaster on the connection - set ismaster(response) { - this.description = response; - } - - destroy(options, callback) { - if (typeof options === 'function') { - callback = options; - options = {}; - } - - options = Object.assign({ force: false }, options); - if (this[kStream] == null || this.destroyed) { - this.destroyed = true; - return; - } - - if (options.force) { - this[kStream].destroy(); - this.destroyed = true; - if (typeof callback === 'function') { - callback(null, null); - } - - return; - } - - this[kStream].end(err => { - this.destroyed = true; - if (typeof callback === 'function') { - callback(err, null); - } - }); - } - - command(ns, cmd, options, callback) { - // NOTE: The wire protocol methods will eventually be migrated to this class, but for now - // we need to pretend we _are_ a server. - const server = { - description: this.description, - s: { - bson: this.bson, - pool: { write: write.bind(this) } - } - }; - - wp.command(server, ns, cmd, options, callback); - } -} - -function messageHandler(conn) { - return function(message) { - // always emit the message, in case we are streaming - conn.emit('message', message); - if (!conn[kQueue].has(message.responseTo)) { - return; - } - - const operationDescription = conn[kQueue].get(message.responseTo); - conn[kQueue].delete(message.responseTo); - - const callback = operationDescription.cb; - if (operationDescription.socketTimeoutOverride) { - this[kStream].setSocketTimeout(this.socketTimeout); - } - - try { - // Pass in the entire description because it has BSON parsing options - message.parse(operationDescription); - } catch (err) { - callback(new MongoError(err)); - return; - } - - if (message.documents[0]) { - const document = message.documents[0]; - const session = operationDescription.session; - if (session) { - updateSessionFromResponse(session, document); - } - - if (document.$clusterTime) { - this.emit('clusterTimeReceived', document.$clusterTime); - } - - if (document.writeConcernError) { - callback(new MongoWriteConcernError(document.writeConcernError, document)); - return; - } - - if (document.ok === 0 || document.$err || document.errmsg || document.code) { - callback(new MongoError(document)); - return; - } - } - - callback(null, operationDescription.fullResult ? 
message : message.documents[0]); - }; -} - -function streamIdentifier(stream) { - if (typeof stream.address === 'function') { - return `${stream.address().address}:${stream.address().port}`; - } - - return uuidV4().toString('hex'); -} - -// Not meant to be called directly, the wire protocol methods call this assuming it is a `Pool` instance -function write(command, options, callback) { - if (typeof options === 'function') { - callback = options; - } - - options = options || {}; - const operationDescription = { - requestId: command.requestId, - cb: callback, - fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false, - session: options.session, - - // For BSON parsing - promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true, - promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true, - promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false, - raw: typeof options.raw === 'boolean' ? options.raw : false, - - // NOTE: This property is set on the connection as part of `connect`, but should - // eventually live in the `StreamDescription` attached to this connection. - agreedCompressor: this.agreedCompressor - }; - - if (typeof options.socketTimeout === 'number') { - operationDescription.socketTimeoutOverride = true; - this[kStream].setSocketTimeout(options.socketTimeout); - } - - // if command monitoring is enabled we need to modify the callback here - if (this.monitorCommands) { - this.emit('commandStarted', new apm.CommandStartedEvent(this, command)); - - operationDescription.started = process.hrtime(); - operationDescription.cb = (err, reply) => { - if (err) { - this.emit( - 'commandFailed', - new apm.CommandFailedEvent(this, command, err, operationDescription.started) - ); - } else { - if (reply && reply.result && (reply.result.ok === 0 || reply.result.$err)) { - this.emit( - 'commandFailed', - new apm.CommandFailedEvent(this, command, reply.result, operationDescription.started) - ); - } else { - this.emit( - 'commandSucceeded', - new apm.CommandSucceededEvent(this, command, reply, operationDescription.started) - ); - } - } - - if (typeof callback === 'function') { - callback(err, reply); - } - }; - } - - this[kQueue].set(operationDescription.requestId, operationDescription); - this[kMessageStream].writeCommand(command, operationDescription); -} - -module.exports = Connection; diff --git a/lib/core/connection/apm.js b/lib/core/connection/apm.js index 9bec4cec4d5..82858efda76 100644 --- a/lib/core/connection/apm.js +++ b/lib/core/connection/apm.js @@ -23,8 +23,9 @@ const namespace = command => command.ns; const databaseName = command => command.ns.split('.')[0]; const collectionName = command => command.ns.split('.')[1]; const generateConnectionId = pool => - pool.options ? `${pool.options.host}:${pool.options.port}` : pool.id; + pool.options ? `${pool.options.host}:${pool.options.port}` : pool.address; const maybeRedact = (commandName, result) => (SENSITIVE_COMMANDS.has(commandName) ? {} : result); +const isLegacyPool = pool => pool.s && pool.queue; const LEGACY_FIND_QUERY_MAP = { $query: 'filter', @@ -151,6 +152,22 @@ const extractReply = (command, reply) => { return reply && reply.result ? reply.result : reply; }; +const extractConnectionDetails = pool => { + if (isLegacyPool(pool)) { + return { + connectionId: generateConnectionId(pool) + }; + } + + // APM in the modern pool is done at the `Connection` level, so we rename it here for + // readability. 
+ const connection = pool; + return { + address: connection.address, + connectionId: connection.id + }; +}; + /** An event indicating the start of a given command */ class CommandStartedEvent { /** @@ -162,6 +179,7 @@ class CommandStartedEvent { constructor(pool, command) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); // NOTE: remove in major revision, this is not spec behavior if (SENSITIVE_COMMANDS.has(commandName)) { @@ -169,8 +187,7 @@ class CommandStartedEvent { this.commandObj[commandName] = true; } - Object.assign(this, { - connectionId: generateConnectionId(pool), + Object.assign(this, connectionDetails, { requestId: command.requestId, databaseName: databaseName(command), commandName, @@ -192,9 +209,9 @@ class CommandSucceededEvent { constructor(pool, command, reply, started) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); - Object.assign(this, { - connectionId: generateConnectionId(pool), + Object.assign(this, connectionDetails, { requestId: command.requestId, commandName, duration: calculateDurationInMs(started), @@ -216,9 +233,9 @@ class CommandFailedEvent { constructor(pool, command, error, started) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); - Object.assign(this, { - connectionId: generateConnectionId(pool), + Object.assign(this, connectionDetails, { requestId: command.requestId, commandName, duration: calculateDurationInMs(started), diff --git a/lib/core/connection/connect.js b/lib/core/connection/connect.js index 3fd8be6f8a0..2922c61b45a 100644 --- a/lib/core/connection/connect.js +++ b/lib/core/connection/connect.js @@ -3,11 +3,11 @@ const net = require('net'); const tls = require('tls'); const Connection = require('./connection'); const Query = require('./commands').Query; -const createClientInfo = require('../topologies/shared').createClientInfo; const MongoError = require('../error').MongoError; const MongoNetworkError = require('../error').MongoNetworkError; const defaultAuthProviders = require('../auth/defaultAuthProviders').defaultAuthProviders; const WIRE_CONSTANTS = require('../wireprotocol/constants'); +const makeClientMetadata = require('../utils').makeClientMetadata; const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION; const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION; const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION; @@ -36,6 +36,10 @@ function connect(options, cancellationToken, callback) { }); } +function isModernConnectionType(conn) { + return typeof conn.command === 'function'; +} + function getSaslSupportedMechs(options) { if (!(options && options.credentials)) { return {}; @@ -101,42 +105,51 @@ function performInitialHandshake(conn, options, _callback) { const handshakeDoc = Object.assign( { ismaster: true, - client: createClientInfo(options), + client: options.metadata || makeClientMetadata(options), compression: compressors }, getSaslSupportedMechs(options) ); + const handshakeOptions = Object.assign({}, options); + + // The handshake technically is a monitoring check, so its socket timeout should be connectTimeoutMS + if (options.connectTimeoutMS || options.connectionTimeout) { + handshakeOptions.socketTimeout = options.connectTimeoutMS || options.connectionTimeout; + } + const start 
= new Date().getTime(); - runCommand(conn, 'admin.$cmd', handshakeDoc, options, (err, ismaster) => { + runCommand(conn, 'admin.$cmd', handshakeDoc, handshakeOptions, (err, ismaster) => { if (err) { - callback(err, null); + callback(err); return; } if (ismaster.ok === 0) { - callback(new MongoError(ismaster), null); + callback(new MongoError(ismaster)); return; } const supportedServerErr = checkSupportedServer(ismaster, options); if (supportedServerErr) { - callback(supportedServerErr, null); + callback(supportedServerErr); return; } - // resolve compression - if (ismaster.compression) { - const agreedCompressors = compressors.filter( - compressor => ismaster.compression.indexOf(compressor) !== -1 - ); + if (!isModernConnectionType(conn)) { + // resolve compression + if (ismaster.compression) { + const agreedCompressors = compressors.filter( + compressor => ismaster.compression.indexOf(compressor) !== -1 + ); - if (agreedCompressors.length) { - conn.agreedCompressor = agreedCompressors[0]; - } + if (agreedCompressors.length) { + conn.agreedCompressor = agreedCompressors[0]; + } - if (options.compression && options.compression.zlibCompressionLevel) { - conn.zlibCompressionLevel = options.compression.zlibCompressionLevel; + if (options.compression && options.compression.zlibCompressionLevel) { + conn.zlibCompressionLevel = options.compression.zlibCompressionLevel; + } } } @@ -153,7 +166,7 @@ function performInitialHandshake(conn, options, _callback) { return; } - callback(null, conn); + callback(undefined, conn); }); } @@ -229,7 +242,11 @@ function makeConnection(family, options, cancellationToken, _callback) { typeof options.keepAliveInitialDelay === 'number' ? options.keepAliveInitialDelay : 300000; const noDelay = typeof options.noDelay === 'boolean' ? options.noDelay : true; const connectionTimeout = - typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 30000; + typeof options.connectionTimeout === 'number' + ? options.connectionTimeout + : typeof options.connectTimeoutMS === 'number' + ? options.connectTimeoutMS + : 30000; const socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; const rejectUnauthorized = typeof options.rejectUnauthorized === 'boolean' ? options.rejectUnauthorized : true; @@ -302,12 +319,23 @@ function makeConnection(family, options, cancellationToken, _callback) { const CONNECTION_ERROR_EVENTS = ['error', 'close', 'timeout', 'parseError']; function runCommand(conn, ns, command, options, callback) { - if (typeof conn.command === 'function') { - conn.command(ns, command, options, callback); + if (typeof options === 'function') (callback = options), (options = {}); + + // are we using the new connection type? if so, no need to simulate a rpc `command` method + if (isModernConnectionType(conn)) { + conn.command(ns, command, options, (err, result) => { + if (err) { + callback(err); + return; + } + + // NODE-2382: raw wire protocol messages, or command results should not be used anymore + callback(undefined, result.result); + }); + return; } - if (typeof options === 'function') (callback = options), (options = {}); const socketTimeout = typeof options.socketTimeout === 'number' ? 
options.socketTimeout : 360000; const bson = conn.options.bson; const query = new Query(bson, ns, command, { @@ -333,7 +361,7 @@ function runCommand(conn, ns, command, options, callback) { // ignore all future errors conn.on('error', noop); - _callback(err, null); + _callback(err); } function messageHandler(msg) { @@ -346,7 +374,7 @@ function runCommand(conn, ns, command, options, callback) { conn.removeListener('message', messageHandler); msg.parse({ promoteValues: true }); - _callback(null, msg.documents[0]); + _callback(undefined, msg.documents[0]); } conn.setSocketTimeout(socketTimeout); @@ -365,7 +393,7 @@ function authenticate(conn, credentials, callback) { const provider = AUTH_PROVIDERS[mechanism]; provider.auth(runCommand, [conn], credentials, err => { if (err) return callback(err); - callback(null, conn); + callback(undefined, conn); }); } diff --git a/lib/core/connection/pool.js b/lib/core/connection/pool.js index 000dbeb01ed..56d427e99c0 100644 --- a/lib/core/connection/pool.js +++ b/lib/core/connection/pool.js @@ -255,22 +255,6 @@ function connectionFailureHandler(pool, event, err, conn) { // Remove the connection removeConnection(pool, conn); - if ( - pool.state !== DRAINING && - pool.state !== DESTROYED && - pool.options.legacyCompatMode === false - ) { - // since an error/close/timeout means pool invalidation in a - // pre-CMAP world, we will issue a custom `drain` event here to - // signal that the server should be recycled - stateTransition(pool, DRAINING); - pool.emit('drain', err); - - // wait to flush work items so this server isn't selected again immediately - process.nextTick(() => conn.flush(err)); - return; - } - - // flush remaining work items conn.flush(err); } @@ -641,6 +625,9 @@ function destroy(self, connections, options, callback) { conn.removeAllListeners(eventName); } + // ignore any errors during destruction + conn.on('error', () => {}); + conn.destroy(options, cb); }, err => { diff --git a/lib/core/error.js b/lib/core/error.js index 200d8a11267..fd1eaf7b4c1 100644 --- a/lib/core/error.js +++ b/lib/core/error.js @@ -95,14 +95,35 @@ class MongoParseError extends MongoError { */ class MongoTimeoutError extends MongoError { constructor(message, reason) { - super(message); + if (reason && reason.error) { + super(reason.error.message || reason.error); + } else { + super(message); + } + this.name = 'MongoTimeoutError'; - if (reason != null) { + if (reason) { this.reason = reason; } } } +/** + * An error signifying a client-side server selection error + * + * @param {Error|string|object} message The error message + * @param {string|object} [reason] The reason the timeout occurred + * @property {string} message The error message + * @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers + * @extends MongoError + */ +class MongoServerSelectionError extends MongoTimeoutError { + constructor(message, reason) { + super(message, reason); + this.name = 'MongoServerSelectionError'; + } +} + function makeWriteConcernResultObject(input) { const output = Object.assign({}, input); @@ -241,6 +262,7 @@ module.exports = { MongoNetworkError, MongoParseError, MongoTimeoutError, + MongoServerSelectionError, MongoWriteConcernError, mongoErrorContextSymbol, isRetryableError, diff --git a/lib/core/index.js b/lib/core/index.js index a7f80738d9c..2da5573a47c 100644 --- a/lib/core/index.js +++ b/lib/core/index.js @@ -20,6 +20,7 @@ module.exports = { MongoNetworkError:
require('./error').MongoNetworkError, MongoParseError: require('./error').MongoParseError, MongoTimeoutError: require('./error').MongoTimeoutError, + MongoServerSelectionError: require('./error').MongoServerSelectionError, MongoWriteConcernError: require('./error').MongoWriteConcernError, mongoErrorContextSymbol: require('./error').mongoErrorContextSymbol, // Core diff --git a/lib/core/sdam/events.js b/lib/core/sdam/events.js new file mode 100644 index 00000000000..08a14adca54 --- /dev/null +++ b/lib/core/sdam/events.js @@ -0,0 +1,124 @@ +'use strict'; + +/** + * Published when server description changes, but does NOT include changes to the RTT. + * + * @property {Object} topologyId A unique identifier for the topology + * @property {ServerAddress} address The address (host/port pair) of the server + * @property {ServerDescription} previousDescription The previous server description + * @property {ServerDescription} newDescription The new server description + */ +class ServerDescriptionChangedEvent { + constructor(topologyId, address, previousDescription, newDescription) { + Object.assign(this, { topologyId, address, previousDescription, newDescription }); + } +} + +/** + * Published when server is initialized. + * + * @property {Object} topologyId A unique identifier for the topology + * @property {ServerAddress} address The address (host/port pair) of the server + */ +class ServerOpeningEvent { + constructor(topologyId, address) { + Object.assign(this, { topologyId, address }); + } +} + +/** + * Published when server is closed. + * + * @property {ServerAddress} address The address (host/port pair) of the server + * @property {Object} topologyId A unique identifier for the topology + */ +class ServerClosedEvent { + constructor(topologyId, address) { + Object.assign(this, { topologyId, address }); + } +} + +/** + * Published when topology description changes. + * + * @property {Object} topologyId + * @property {TopologyDescription} previousDescription The old topology description + * @property {TopologyDescription} newDescription The new topology description + */ +class TopologyDescriptionChangedEvent { + constructor(topologyId, previousDescription, newDescription) { + Object.assign(this, { topologyId, previousDescription, newDescription }); + } +} + +/** + * Published when topology is initialized. + * + * @param {Object} topologyId A unique identifier for the topology + */ +class TopologyOpeningEvent { + constructor(topologyId) { + Object.assign(this, { topologyId }); + } +} + +/** + * Published when topology is closed. + * + * @param {Object} topologyId A unique identifier for the topology + */ +class TopologyClosedEvent { + constructor(topologyId) { + Object.assign(this, { topologyId }); + } +} + +/** + * Fired when the server monitor’s ismaster command is started - immediately before + * the ismaster command is serialized into raw BSON and written to the socket. + * + * @property {Object} connectionId The connection id for the command + */ +class ServerHeartbeatStartedEvent { + constructor(connectionId) { + Object.assign(this, { connectionId }); + } +} + +/** + * Fired when the server monitor’s ismaster succeeds. 
+ * + * @param {Number} duration The execution time of the event in ms + * @param {Object} reply The command reply + * @param {Object} connectionId The connection id for the command + */ +class ServerHeartbeatSucceededEvent { + constructor(duration, reply, connectionId) { + Object.assign(this, { connectionId, duration, reply }); + } +} + +/** + * Fired when the server monitor’s ismaster fails, either with an “ok: 0” or a socket exception. + * + * @param {Number} duration The execution time of the event in ms + * @param {MongoError|Object} failure The command failure + * @param {Object} connectionId The connection id for the command + */ +class ServerHeartbeatFailedEvent { + constructor(duration, failure, connectionId) { + Object.assign(this, { connectionId, duration, failure }); + } +} + +module.exports = { + ServerDescriptionChangedEvent, + ServerOpeningEvent, + ServerClosedEvent, + TopologyDescriptionChangedEvent, + TopologyOpeningEvent, + TopologyClosedEvent, + ServerHeartbeatStartedEvent, + ServerHeartbeatSucceededEvent, + ServerHeartbeatFailedEvent +}; diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js new file mode 100644 index 00000000000..a5ddff85a39 --- /dev/null +++ b/lib/core/sdam/monitor.js @@ -0,0 +1,251 @@ +'use strict'; + +const ServerType = require('./common').ServerType; +const calculateDurationInMs = require('../utils').calculateDurationInMs; +const EventEmitter = require('events'); +const connect = require('../connection/connect'); +const Connection = require('../../cmap/connection').Connection; +const common = require('./common'); +const makeStateMachine = require('../utils').makeStateMachine; +const MongoError = require('../error').MongoError; + +const sdamEvents = require('./events'); +const ServerHeartbeatStartedEvent = sdamEvents.ServerHeartbeatStartedEvent; +const ServerHeartbeatSucceededEvent = sdamEvents.ServerHeartbeatSucceededEvent; +const ServerHeartbeatFailedEvent = sdamEvents.ServerHeartbeatFailedEvent; + +const kServer = Symbol('server'); +const kMonitorId = Symbol('monitorId'); +const kConnection = Symbol('connection'); +const kCancellationToken = Symbol('cancellationToken'); +const kLastCheckTime = Symbol('lastCheckTime'); + +const STATE_CLOSED = common.STATE_CLOSED; +const STATE_CLOSING = common.STATE_CLOSING; +const STATE_IDLE = 'idle'; +const STATE_MONITORING = 'monitoring'; +const stateTransition = makeStateMachine({ + [STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED], + [STATE_CLOSED]: [STATE_CLOSED, STATE_MONITORING], + [STATE_IDLE]: [STATE_IDLE, STATE_MONITORING, STATE_CLOSING], + [STATE_MONITORING]: [STATE_MONITORING, STATE_IDLE, STATE_CLOSING] +}); + +const INVALID_REQUEST_CHECK_STATES = new Set([STATE_CLOSING, STATE_CLOSED, STATE_MONITORING]); + +class Monitor extends EventEmitter { + constructor(server, options) { + super(options); + + this[kServer] = server; + this[kConnection] = undefined; + this[kCancellationToken] = new EventEmitter(); + this[kCancellationToken].setMaxListeners(Infinity); + this.s = { + state: STATE_CLOSED + }; + + this.address = server.description.address; + this.options = Object.freeze({ + connectTimeoutMS: + typeof options.connectionTimeout === 'number' + ? options.connectionTimeout + : typeof options.connectTimeoutMS === 'number' + ? options.connectTimeoutMS + : 10000, + heartbeatFrequencyMS: + typeof options.heartbeatFrequencyMS === 'number' ? options.heartbeatFrequencyMS : 10000, + minHeartbeatFrequencyMS: + typeof options.minHeartbeatFrequencyMS === 'number' ? 
options.minHeartbeatFrequencyMS : 500 + }); + + // TODO: refactor this to pull it directly from the pool, requires new ConnectionPool integration + const addressParts = server.description.address.split(':'); + this.connectOptions = Object.freeze( + Object.assign( + { + id: '', + host: addressParts[0], + port: parseInt(addressParts[1], 10), + bson: server.s.bson, + connectionType: Connection + }, + server.s.options, + this.options, + + // force BSON serialization options + { + raw: false, + promoteLongs: true, + promoteValues: true, + promoteBuffers: true + } + ) + ); + } + + connect() { + if (this.s.state !== STATE_CLOSED) { + return; + } + + monitorServer(this); + } + + requestCheck() { + if (INVALID_REQUEST_CHECK_STATES.has(this.s.state)) { + return; + } + + const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS; + const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS; + const remainingTime = heartbeatFrequencyMS - calculateDurationInMs(this[kLastCheckTime]); + if (remainingTime > minHeartbeatFrequencyMS && this[kMonitorId]) { + clearTimeout(this[kMonitorId]); + rescheduleMonitoring(this, minHeartbeatFrequencyMS); + return; + } + + if (this[kMonitorId]) { + clearTimeout(this[kMonitorId]); + } + + monitorServer(this); + } + + close() { + if (this.s.state === STATE_CLOSED || this.s.state === STATE_CLOSING) { + return; + } + + stateTransition(this, STATE_CLOSING); + this[kCancellationToken].emit('cancel'); + if (this[kMonitorId]) { + clearTimeout(this[kMonitorId]); + } + + if (this[kConnection]) { + this[kConnection].destroy({ force: true }); + } + + this.emit('close'); + stateTransition(this, STATE_CLOSED); + } +} + +function checkServer(monitor, callback) { + if (monitor[kConnection] && monitor[kConnection].closed) { + monitor[kConnection] = undefined; + } + + const start = process.hrtime(); + monitor.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(monitor.address)); + + function failureHandler(err) { + monitor.emit( + 'serverHeartbeatFailed', + new ServerHeartbeatFailedEvent(calculateDurationInMs(start), err, monitor.address) + ); + + callback(err); + } + + function successHandler(isMaster) { + monitor.emit( + 'serverHeartbeatSucceeded', + new ServerHeartbeatSucceededEvent(calculateDurationInMs(start), isMaster, monitor.address) + ); + + return callback(undefined, isMaster); + } + + if (monitor[kConnection] != null) { + const connectTimeoutMS = monitor.options.connectTimeoutMS; + monitor[kConnection].command( + 'admin.$cmd', + { ismaster: true }, + { socketTimeout: connectTimeoutMS }, + (err, result) => { + if (err) { + failureHandler(err); + return; + } + + successHandler(result.result); + } + ); + + return; + } + + // connecting does an implicit `ismaster` + connect(monitor.connectOptions, monitor[kCancellationToken], (err, conn) => { + if (err) { + monitor[kConnection] = undefined; + failureHandler(err); + return; + } + + if (monitor.s.state === STATE_CLOSING || monitor.s.state === STATE_CLOSED) { + conn.destroy({ force: true }); + failureHandler(new MongoError('monitor was destroyed')); + return; + } + + monitor[kConnection] = conn; + successHandler(conn.ismaster); + }); +} + +function monitorServer(monitor) { + stateTransition(monitor, STATE_MONITORING); + + // TODO: the next line is a legacy event, remove in v4 + process.nextTick(() => monitor.emit('monitoring', monitor[kServer])); + + checkServer(monitor, e0 => { + if (e0 == null) { + rescheduleMonitoring(monitor); + return; + } + + // otherwise an error occurred on initial discovery, also bail
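+    // a server that is still `Unknown` has never been successfully contacted, so per the SDAM
+    // spec there is no immediate retry; mark it and reschedule the next check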
+ if (monitor[kServer].description.type === ServerType.Unknown) { + monitor.emit('resetServer', e0); + rescheduleMonitoring(monitor); + return; + } + + // According to the SDAM specification's "Network error during server check" section, if + // an ismaster call fails we reset the server's pool. If a server was once connected, + // change its type to `Unknown` only after retrying once. + monitor.emit('resetConnectionPool'); + + checkServer(monitor, e1 => { + if (e1) { + monitor.emit('resetServer', e1); + } + + rescheduleMonitoring(monitor); + }); + }); +} + +function rescheduleMonitoring(monitor, ms) { + const heartbeatFrequencyMS = monitor.options.heartbeatFrequencyMS; + if (monitor.s.state === STATE_CLOSING || monitor.s.state === STATE_CLOSED) { + return; + } + + stateTransition(monitor, STATE_IDLE); + + monitor[kLastCheckTime] = process.hrtime(); + monitor[kMonitorId] = setTimeout(() => { + monitor[kMonitorId] = undefined; + monitor.requestCheck(); + }, ms || heartbeatFrequencyMS); +} + +module.exports = { + Monitor +}; diff --git a/lib/core/sdam/monitoring.js b/lib/core/sdam/monitoring.js deleted file mode 100644 index 4cbf2f1730f..00000000000 --- a/lib/core/sdam/monitoring.js +++ /dev/null @@ -1,241 +0,0 @@ -'use strict'; - -const ServerDescription = require('./server_description').ServerDescription; -const calculateDurationInMs = require('../utils').calculateDurationInMs; - -// pulled from `Server` implementation -const STATE_CLOSED = 'closed'; -const STATE_CLOSING = 'closing'; - -/** - * Published when server description changes, but does NOT include changes to the RTT. - * - * @property {Object} topologyId A unique identifier for the topology - * @property {ServerAddress} address The address (host/port pair) of the server - * @property {ServerDescription} previousDescription The previous server description - * @property {ServerDescription} newDescription The new server description - */ -class ServerDescriptionChangedEvent { - constructor(topologyId, address, previousDescription, newDescription) { - Object.assign(this, { topologyId, address, previousDescription, newDescription }); - } -} - -/** - * Published when server is initialized. - * - * @property {Object} topologyId A unique identifier for the topology - * @property {ServerAddress} address The address (host/port pair) of the server - */ -class ServerOpeningEvent { - constructor(topologyId, address) { - Object.assign(this, { topologyId, address }); - } -} - -/** - * Published when server is closed. - * - * @property {ServerAddress} address The address (host/port pair) of the server - * @property {Object} topologyId A unique identifier for the topology - */ -class ServerClosedEvent { - constructor(topologyId, address) { - Object.assign(this, { topologyId, address }); - } -} - -/** - * Published when topology description changes. - * - * @property {Object} topologyId - * @property {TopologyDescription} previousDescription The old topology description - * @property {TopologyDescription} newDescription The new topology description - */ -class TopologyDescriptionChangedEvent { - constructor(topologyId, previousDescription, newDescription) { - Object.assign(this, { topologyId, previousDescription, newDescription }); - } -} - -/** - * Published when topology is initialized. - * - * @param {Object} topologyId A unique identifier for the topology - */ -class TopologyOpeningEvent { - constructor(topologyId) { - Object.assign(this, { topologyId }); - } -} - -/** - * Published when topology is closed. 
- * - * @param {Object} topologyId A unique identifier for the topology - */ -class TopologyClosedEvent { - constructor(topologyId) { - Object.assign(this, { topologyId }); - } -} - -/** - * Fired when the server monitor’s ismaster command is started - immediately before - * the ismaster command is serialized into raw BSON and written to the socket. - * - * @property {Object} connectionId The connection id for the command - */ -class ServerHeartbeatStartedEvent { - constructor(connectionId) { - Object.assign(this, { connectionId }); - } -} - -/** - * Fired when the server monitor’s ismaster succeeds. - * - * @param {Number} duration The execution time of the event in ms - * @param {Object} reply The command reply - * @param {Object} connectionId The connection id for the command - */ -class ServerHeartbeatSucceededEvent { - constructor(duration, reply, connectionId) { - Object.assign(this, { duration, reply, connectionId }); - } -} - -/** - * Fired when the server monitor’s ismaster fails, either with an “ok: 0” or a socket exception. - * - * @param {Number} duration The execution time of the event in ms - * @param {MongoError|Object} failure The command failure - * @param {Object} connectionId The connection id for the command - */ -class ServerHeartbeatFailedEvent { - constructor(duration, failure, connectionId) { - Object.assign(this, { duration, failure, connectionId }); - } -} - -/** - * Performs a server check as described by the SDAM spec. - * - * NOTE: This method automatically reschedules itself, so that there is always an active - * monitoring process - * - * @param {Server} server The server to monitor - */ -function monitorServer(server, options) { - options = options || {}; - const heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; - - if (options.initial === true) { - server.s.monitorId = setTimeout(() => monitorServer(server), heartbeatFrequencyMS); - return; - } - - const rescheduleMonitoring = () => { - server.s.monitoring = false; - server.s.monitorId = setTimeout(() => { - server.s.monitorId = undefined; - server.monitor(); - }, heartbeatFrequencyMS); - }; - - // executes a single check of a server - const checkServer = callback => { - let start = process.hrtime(); - - // emit a signal indicating we have started the heartbeat - server.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(server.name)); - - // NOTE: legacy monitoring event - process.nextTick(() => server.emit('monitoring', server)); - - server.command( - 'admin.$cmd', - { ismaster: true }, - { - monitoring: true, - socketTimeout: server.s.options.connectionTimeout || 2000 - }, - (err, result) => { - let duration = calculateDurationInMs(start); - - if (err) { - server.emit( - 'serverHeartbeatFailed', - new ServerHeartbeatFailedEvent(duration, err, server.name) - ); - - return callback(err, null); - } - - // save round trip time - server.description.roundTripTime = duration; - - const isMaster = result.result; - server.emit( - 'serverHeartbeatSucceeded', - new ServerHeartbeatSucceededEvent(duration, isMaster, server.name) - ); - - return callback(null, isMaster); - } - ); - }; - - const successHandler = isMaster => { - // emit an event indicating that our description has changed - server.emit('descriptionReceived', new ServerDescription(server.description.address, isMaster)); - if (server.s.state === STATE_CLOSED || server.s.state === STATE_CLOSING) { - return; - } - - rescheduleMonitoring(); - }; - - // run the actual monitoring loop - server.s.monitoring = true; - checkServer((err, 
isMaster) => { - if (!err) { - successHandler(isMaster); - return; - } - - // According to the SDAM specification's "Network error during server check" section, if - // an ismaster call fails we reset the server's pool. If a server was once connected, - // change its type to `Unknown` only after retrying once. - server.s.pool.reset(() => { - // otherwise re-attempt monitoring once - checkServer((error, isMaster) => { - if (error) { - // we revert to an `Unknown` by emitting a default description with no isMaster - server.emit( - 'descriptionReceived', - new ServerDescription(server.description.address, null, { error }) - ); - - rescheduleMonitoring(); - return; - } - - successHandler(isMaster); - }); - }); - }); -} - -module.exports = { - ServerDescriptionChangedEvent, - ServerOpeningEvent, - ServerClosedEvent, - TopologyDescriptionChangedEvent, - TopologyOpeningEvent, - TopologyClosedEvent, - ServerHeartbeatStartedEvent, - ServerHeartbeatSucceededEvent, - ServerHeartbeatFailedEvent, - monitorServer -}; diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index 32cea9b2278..0da1031b467 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -1,16 +1,14 @@ 'use strict'; const EventEmitter = require('events'); +const ConnectionPool = require('../../cmap/connection_pool').ConnectionPool; +const CMAP_EVENT_NAMES = require('../../cmap/events').CMAP_EVENT_NAMES; const MongoError = require('../error').MongoError; -const Pool = require('../connection/pool'); const relayEvents = require('../utils').relayEvents; -const wireProtocol = require('../wireprotocol'); const BSON = require('../connection/utils').retrieveBSON(); -const createClientInfo = require('../topologies/shared').createClientInfo; const Logger = require('../connection/logger'); const ServerDescription = require('./server_description').ServerDescription; const ReadPreference = require('../topologies/read_preference'); -const monitorServer = require('./monitoring').monitorServer; -const MongoParseError = require('../error').MongoParseError; +const Monitor = require('./monitor').Monitor; const MongoNetworkError = require('../error').MongoNetworkError; const collationNotSupported = require('../utils').collationNotSupported; const debugOptions = require('../connection/utils').debugOptions; @@ -57,6 +55,8 @@ const stateTransition = makeStateMachine({ [STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED] }); +const kMonitor = Symbol('monitor'); + /** * * @fires Server#serverHeartbeatStarted @@ -99,19 +99,68 @@ class Server extends EventEmitter { BSON.Symbol, BSON.Timestamp ]), - // client metadata for the initial handshake - clientInfo: createClientInfo(options), - // state variable to determine if there is an active server check in progress - monitoring: false, - // the implementation of the monitoring method - monitorFunction: options.monitorFunction || monitorServer, - // the connection pool - pool: null, // the server state state: STATE_CLOSED, credentials: options.credentials, topology }; + + // create the connection pool + // NOTE: this used to happen in `connect`, we supported overriding pool options there + const addressParts = this.description.address.split(':'); + const poolOptions = Object.assign( + { host: addressParts[0], port: parseInt(addressParts[1], 10), bson: this.s.bson }, + options + ); + + this.s.pool = new ConnectionPool(poolOptions); + relayEvents( + this.s.pool, + this, + ['commandStarted', 'commandSucceeded', 'commandFailed'].concat(CMAP_EVENT_NAMES) + ); + + this.s.pool.on('clusterTimeReceived', 
clusterTime => { + this.clusterTime = clusterTime; + }); + + // create the monitor + this[kMonitor] = new Monitor(this, this.s.options); + relayEvents(this[kMonitor], this, [ + 'serverHeartbeatStarted', + 'serverHeartbeatSucceeded', + 'serverHeartbeatFailed', + + // legacy events + 'monitoring' + ]); + + this[kMonitor].on('resetConnectionPool', () => { + this.s.pool.clear(); + }); + + this[kMonitor].on('resetServer', error => { + // Revert to an `Unknown` state by emitting a default description with no isMaster, and the + // error from the heartbeat attempt + this.emit( + 'descriptionReceived', + new ServerDescription(this.description.address, null, { error }) + ); + }); + + this[kMonitor].on('serverHeartbeatSucceeded', event => { + this.emit( + 'descriptionReceived', + new ServerDescription(this.description.address, event.reply, { + roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration) + }) + ); + + if (this.s.state === STATE_CONNECTING) { + stateTransition(this, STATE_CONNECTED); + this.emit('connect', this); + } + }); } get description() { @@ -132,57 +181,25 @@ class Server extends EventEmitter { /** * Initiate server connect */ - connect(options) { - options = options || {}; - - // do not allow connect to be called on anything that's not disconnected - if (this.s.pool && !this.s.pool.isDisconnected() && !this.s.pool.isDestroyed()) { - throw new MongoError(`Server instance in invalid state ${this.s.pool.state}`); + connect() { + if (this.s.state !== STATE_CLOSED) { + return; } - // create a pool - const addressParts = this.description.address.split(':'); - const poolOptions = Object.assign( - { host: addressParts[0], port: parseInt(addressParts[1], 10) }, - this.s.options, - options, - { bson: this.s.bson } - ); - - // NOTE: reconnect is explicitly false because of the server selection loop - poolOptions.reconnect = false; - poolOptions.legacyCompatMode = false; - - this.s.pool = new Pool(this, poolOptions); - - // setup listeners - this.s.pool.on('parseError', parseErrorEventHandler(this)); - - this.s.pool.on('drain', err => { - this.emit('error', err); - }); - - // it is unclear whether consumers should even know about these events - // this.s.pool.on('timeout', timeoutEventHandler(this)); - // this.s.pool.on('reconnect', reconnectEventHandler(this)); - // this.s.pool.on('reconnectFailed', errorEventHandler(this)); - - // relay all command monitoring events - relayEvents(this.s.pool, this, ['commandStarted', 'commandSucceeded', 'commandFailed']); - stateTransition(this, STATE_CONNECTING); - - this.s.pool.connect(connectEventHandler(this)); + this[kMonitor].connect(); } /** * Destroy the server connection * + * @param {object} [options] Optional settings * @param {Boolean} [options.force=false] Force destroy the pool */ destroy(options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = Object.assign({}, { force: false }, options); + if (this.s.state === STATE_CLOSED) { if (typeof callback === 'function') { callback(); @@ -193,38 +210,22 @@ class Server extends EventEmitter { stateTransition(this, STATE_CLOSING); - const done = err => { + this[kMonitor].close(); + this.s.pool.close(options, err => { stateTransition(this, STATE_CLOSED); this.emit('closed'); if (typeof callback === 'function') { - callback(err, null); + callback(err); } - }; - - if (!this.s.pool) { - return done(); - } - - ['close', 'error', 'timeout', 'parseError', 'connect'].forEach(event => { - this.s.pool.removeAllListeners(event); }); - - if 
(this.s.monitorId) { - clearTimeout(this.s.monitorId); - } - - this.s.pool.destroy(options.force, done); } /** * Immediately schedule monitoring of this server. If there already an attempt being made * this will be a no-op. */ - monitor(options) { - options = options || {}; - if (this.s.state !== STATE_CONNECTED || this.s.monitoring) return; - if (this.s.monitorId) clearTimeout(this.s.monitorId); - this.s.monitorFunction(this, options); + requestCheck() { + this[kMonitor].requestCheck(); } /** @@ -232,12 +233,13 @@ class Server extends EventEmitter { * * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object} cmd The command hash + * @param {object} [options] Optional settings * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document. - * @param {ClientSession} [options.session=null] Session to use for the operation + * @param {ClientSession} [options.session] Session to use for the operation * @param {opResultCallback} callback A callback function */ command(ns, cmd, options, callback) { @@ -252,7 +254,7 @@ class Server extends EventEmitter { const error = basicReadValidations(this, options); if (error) { - return callback(error, null); + return callback(error); } // Clone the options @@ -275,19 +277,23 @@ class Server extends EventEmitter { return; } - wireProtocol.command(this, ns, cmd, options, (err, result) => { - if (err) { - if (options.session && err instanceof MongoNetworkError) { - options.session.serverSession.isDirty = true; - } + this.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (isSDAMUnrecoverableError(err, this)) { - this.emit('error', err); + conn.command(ns, cmd, options, (err, result) => { + if (err) { + if (options.session && err instanceof MongoNetworkError) { + options.session.serverSession.isDirty = true; + } + + if (isSDAMUnrecoverableError(err, this)) { + this.emit('error', err); + } } - } - callback(err, result); - }); + cb(err, result); + }); + }, callback); } /** @@ -304,19 +310,23 @@ class Server extends EventEmitter { return; } - wireProtocol.query(this, ns, cmd, cursorState, options, (err, result) => { - if (err) { - if (options.session && err instanceof MongoNetworkError) { - options.session.serverSession.isDirty = true; - } + this.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (isSDAMUnrecoverableError(err, this)) { - this.emit('error', err); + conn.query(ns, cmd, cursorState, options, (err, result) => { + if (err) { + if (options.session && err instanceof MongoNetworkError) { + options.session.serverSession.isDirty = true; + } + + if (isSDAMUnrecoverableError(err, this)) { + this.emit('error', err); + } } - } - callback(err, result); - }); + cb(err, result); + }); + }, callback); } /** @@ -333,19 +343,23 @@ class Server extends EventEmitter { return; } - wireProtocol.getMore(this, ns, cursorState, batchSize, options, (err, result) => { - if (err) { - if (options.session && err instanceof MongoNetworkError) { - options.session.serverSession.isDirty = true; - } + this.s.pool.withConnection((err, conn, cb) 
=> { + if (err) return cb(err); - if (isSDAMUnrecoverableError(err, this)) { - this.emit('error', err); + conn.getMore(ns, cursorState, batchSize, options, (err, result) => { + if (err) { + if (options.session && err instanceof MongoNetworkError) { + options.session.serverSession.isDirty = true; + } + + if (isSDAMUnrecoverableError(err, this)) { + this.emit('error', err); + } } - } - callback(err, result); - }); + cb(err, result); + }); + }, callback); } /** @@ -364,15 +378,17 @@ class Server extends EventEmitter { return; } - wireProtocol.killCursors(this, ns, cursorState, (err, result) => { - if (err && isSDAMUnrecoverableError(err, this)) { - this.emit('error', err); - } + this.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (typeof callback === 'function') { - callback(err, result); - } - }); + conn.killCursors(ns, cursorState, (err, result) => { + if (err && isSDAMUnrecoverableError(err, this)) { + this.emit('error', err); + } + + cb(err, result); + }); + }, callback); } /** @@ -384,7 +400,7 @@ class Server extends EventEmitter { * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation + * @param {ClientSession} [options.session] Session to use for the operation * @param {opResultCallback} callback A callback function */ insert(ns, ops, options, callback) { @@ -400,7 +416,7 @@ class Server extends EventEmitter { * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation + * @param {ClientSession} [options.session] Session to use for the operation * @param {opResultCallback} callback A callback function */ update(ns, ops, options, callback) { @@ -416,7 +432,7 @@ class Server extends EventEmitter { * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
- * @param {ClientSession} [options.session=null] Session to use for the operation + * @param {ClientSession} [options.session] Session to use for the operation * @param {opResultCallback} callback A callback function */ remove(ns, ops, options, callback) { @@ -433,24 +449,12 @@ Object.defineProperty(Server.prototype, 'clusterTime', { } }); -function basicWriteValidations(server) { - if (!server.s.pool) { - return new MongoError('server instance is not connected'); - } - - if (server.s.pool.isDestroyed()) { - return new MongoError('server instance pool was destroyed'); - } - - return null; +function calculateRoundTripTime(oldRtt, duration) { + const alpha = 0.2; + return alpha * duration + (1 - alpha) * oldRtt; } function basicReadValidations(server, options) { - const error = basicWriteValidations(server, options); - if (error) { - return error; - } - if (options.readPreference && !(options.readPreference instanceof ReadPreference)) { return new MongoError('readPreference must be an instance of ReadPreference'); } @@ -471,82 +475,28 @@ function executeWriteOperation(args, options, callback) { return; } - const error = basicWriteValidations(server, options); - if (error) { - callback(error, null); - return; - } - if (collationNotSupported(server, options)) { callback(new MongoError(`server ${server.name} does not support collation`)); return; } - return wireProtocol[op](server, ns, ops, options, (err, result) => { - if (err) { - if (options.session && err instanceof MongoNetworkError) { - options.session.serverSession.isDirty = true; - } + server.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (isSDAMUnrecoverableError(err, server)) { - server.emit('error', err); - } - } - - callback(err, result); - }); -} - -function connectEventHandler(server) { - return function(err, conn) { - if (server.s.state === STATE_CLOSING || server.s.state === STATE_CLOSED) { - return; - } - - if (err) { - server.emit('error', new MongoNetworkError(err)); - - stateTransition(server, STATE_CLOSED); - server.emit('close'); - return; - } - - const ismaster = conn.ismaster; - server.s.lastIsMasterMS = conn.lastIsMasterMS; - if (conn.agreedCompressor) { - server.s.pool.options.agreedCompressor = conn.agreedCompressor; - } - - if (conn.zlibCompressionLevel) { - server.s.pool.options.zlibCompressionLevel = conn.zlibCompressionLevel; - } - - if (conn.ismaster.$clusterTime) { - const $clusterTime = conn.ismaster.$clusterTime; - server.s.sclusterTime = $clusterTime; - } - - // log the connection event if requested - if (server.s.logger.isInfo()) { - server.s.logger.info( - `server ${server.name} connected with ismaster [${JSON.stringify(ismaster)}]` - ); - } - - // we are connected and handshaked (guaranteed by the pool) - stateTransition(server, STATE_CONNECTED); - server.emit('connect', server); + conn[op](ns, ops, options, (err, result) => { + if (err) { + if (options.session && err instanceof MongoNetworkError) { + options.session.serverSession.isDirty = true; + } - // emit an event indicating that our description has changed - server.emit('descriptionReceived', new ServerDescription(server.description.address, ismaster)); - }; -} + if (isSDAMUnrecoverableError(err, server)) { + server.emit('error', err); + } + } -function parseErrorEventHandler(server) { - return function(err) { - stateTransition(this, STATE_CLOSED); - server.emit('error', new MongoParseError(err)); - }; + cb(err, result); + }); + }, callback); } module.exports = { diff --git a/lib/core/sdam/server_description.js 
b/lib/core/sdam/server_description.js index fe528814a09..1a26c0609fc 100644 --- a/lib/core/sdam/server_description.js +++ b/lib/core/sdam/server_description.js @@ -2,6 +2,7 @@ const arrayStrictEqual = require('../utils').arrayStrictEqual; const tagsStrictEqual = require('../utils').tagsStrictEqual; +const errorStrictEqual = require('../utils').errorStrictEqual; const ServerType = require('./common').ServerType; const WRITABLE_SERVER_TYPES = new Set([ @@ -67,8 +68,8 @@ class ServerDescription { ); this.address = address; - this.error = options.error || null; - this.roundTripTime = options.roundTripTime || 0; + this.error = options.error; + this.roundTripTime = options.roundTripTime || -1; this.lastUpdateTime = Date.now(); this.lastWriteDate = ismaster.lastWrite ? ismaster.lastWrite.lastWriteDate : null; this.opTime = ismaster.lastWrite ? ismaster.lastWrite.opTime : null; @@ -121,7 +122,7 @@ class ServerDescription { equals(other) { return ( other != null && - this.error === other.error && + errorStrictEqual(this.error, other.error) && this.type === other.type && this.minWireVersion === other.minWireVersion && this.me === other.me && @@ -175,5 +176,6 @@ function parseServerType(ismaster) { } module.exports = { - ServerDescription + ServerDescription, + parseServerType }; diff --git a/lib/core/sdam/server_selection.js b/lib/core/sdam/server_selection.js index bb01b1d6a98..483703c680a 100644 --- a/lib/core/sdam/server_selection.js +++ b/lib/core/sdam/server_selection.js @@ -4,7 +4,7 @@ const TopologyType = require('./common').TopologyType; const ReadPreference = require('../topologies/read_preference'); const MongoError = require('../error').MongoError; const calculateDurationInMs = require('../utils').calculateDurationInMs; -const MongoTimeoutError = require('../error').MongoTimeoutError; +const MongoServerSelectionError = require('../error').MongoServerSelectionError; const common = require('./common'); const STATE_CLOSED = common.STATE_CLOSED; @@ -260,8 +260,8 @@ function selectServers(topology, selector, timeout, start, callback) { const duration = calculateDurationInMs(start); if (duration >= timeout) { return callback( - new MongoTimeoutError(`Server selection timed out after ${timeout} ms`), - topology.description.error + new MongoServerSelectionError(`Server selection timed out after ${timeout} ms`), + topology.description ); } @@ -299,16 +299,14 @@ function selectServers(topology, selector, timeout, start, callback) { const retrySelection = () => { // ensure all server monitors attempt monitoring soon - topology.s.servers.forEach(server => - server.monitor({ heartbeatFrequencyMS: topology.description.heartbeatFrequencyMS }) - ); + topology.s.servers.forEach(server => process.nextTick(() => server.requestCheck())); const iterationTimer = setTimeout(() => { topology.removeListener('topologyDescriptionChanged', descriptionChangedHandler); callback( - new MongoTimeoutError( + new MongoServerSelectionError( `Server selection timed out after ${timeout} ms`, - topology.description.error + topology.description ) ); }, timeout - duration); diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index 0863c7c164e..f15e2b6352c 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -4,7 +4,7 @@ const ServerDescription = require('./server_description').ServerDescription; const ServerType = require('./common').ServerType; const TopologyDescription = require('./topology_description').TopologyDescription; const TopologyType = require('./common').TopologyType; -const 
monitoring = require('./monitoring'); +const events = require('./events'); const Server = require('./server').Server; const relayEvents = require('../utils').relayEvents; const ReadPreference = require('../topologies/read_preference'); @@ -16,7 +16,6 @@ const createCompressionInfo = require('../topologies/shared').createCompressionI const isRetryableError = require('../error').isRetryableError; const isSDAMUnrecoverableError = require('../error').isSDAMUnrecoverableError; const ClientSession = require('../sessions').ClientSession; -const createClientInfo = require('../topologies/shared').createClientInfo; const MongoError = require('../error').MongoError; const resolveClusterTime = require('../topologies/shared').resolveClusterTime; const SrvPoller = require('./srv_polling').SrvPoller; @@ -24,6 +23,9 @@ const getMMAPError = require('../topologies/shared').getMMAPError; const makeStateMachine = require('../utils').makeStateMachine; const eachAsync = require('../utils').eachAsync; const emitDeprecationWarning = require('../../utils').emitDeprecationWarning; +const ServerSessionPool = require('../sessions').ServerSessionPool; +const makeClientMetadata = require('../utils').makeClientMetadata; +const CMAP_EVENT_NAMES = require('../../cmap/events').CMAP_EVENT_NAMES; const common = require('./common'); const drainTimerQueue = common.drainTimerQueue; @@ -48,16 +50,10 @@ const SERVER_RELAY_EVENTS = [ // NOTE: Legacy events 'monitoring' -]; +].concat(CMAP_EVENT_NAMES); // all events we listen to from `Server` instances -const LOCAL_SERVER_EVENTS = SERVER_RELAY_EVENTS.concat([ - 'error', - 'connect', - 'descriptionReceived', - 'close', - 'ended' -]); +const LOCAL_SERVER_EVENTS = ['error', 'connect', 'descriptionReceived', 'close', 'ended']; const STATE_CLOSING = common.STATE_CLOSING; const STATE_CLOSED = common.STATE_CLOSED; @@ -118,6 +114,13 @@ class Topology extends EventEmitter { } options = Object.assign({}, common.TOPOLOGY_DEFAULTS, options); + options = Object.freeze( + Object.assign(options, { + metadata: makeClientMetadata(options), + compression: { compressors: createCompressionInfo(options) } + }) + ); + DEPRECATED_OPTIONS.forEach(optionName => { if (options[optionName]) { emitDeprecationWarning( @@ -182,7 +185,7 @@ class Topology extends EventEmitter { // a map of server instances to normalized addresses servers: new Map(), // Server Session Pool - sessionPool: null, + sessionPool: new ServerSessionPool(this), // Active client sessions sessions: new Set(), // Promise library @@ -195,12 +198,6 @@ class Topology extends EventEmitter { connectionTimers: new Set() }; - // amend options for server instance creation - this.s.options.compression = { compressors: createCompressionInfo(options) }; - - // add client info - this.s.clientInfo = createClientInfo(options); - if (options.srvHost) { this.s.srvPoller = options.srvPoller || @@ -239,17 +236,6 @@ class Topology extends EventEmitter { return BSON.native ? 
'c++' : 'js'; } - /** - * All raw connections - * @method - * @return {Connection[]} - */ - connections() { - return Array.from(this.s.servers.values()).reduce((result, server) => { - return result.concat(server.s.pool.allConnections()); - }, []); - } - /** * Initiate server connect * @@ -271,12 +257,12 @@ class Topology extends EventEmitter { stateTransition(this, STATE_CONNECTING); // emit SDAM monitoring events - this.emit('topologyOpening', new monitoring.TopologyOpeningEvent(this.s.id)); + this.emit('topologyOpening', new events.TopologyOpeningEvent(this.s.id)); // emit an event for the topology change this.emit( 'topologyDescriptionChanged', - new monitoring.TopologyDescriptionChangedEvent( + new events.TopologyDescriptionChangedEvent( this.s.id, new TopologyDescription(TopologyType.Unknown), // initial is always Unknown this.s.description @@ -290,9 +276,10 @@ class Topology extends EventEmitter { const readPreference = options.readPreference || ReadPreference.primary; this.selectServer(readPreferenceServerSelector(readPreference), options, (err, server) => { if (err) { - stateTransition(this, STATE_CLOSED); + this.close(); + if (typeof callback === 'function') { - callback(err, null); + callback(err); } else { this.emit('error', err); } @@ -315,7 +302,6 @@ class Topology extends EventEmitter { if (typeof callback === 'function') callback(err, this); }; - const STATE_CONNECTING = 1; if (server.s.state === STATE_CONNECTING) { server.once('error', errorHandler); server.once('connect', connectHandler); @@ -352,11 +338,6 @@ class Topology extends EventEmitter { drainTimerQueue(this.s.iterationTimers); drainTimerQueue(this.s.connectionTimers); - if (this.s.sessionPool) { - this.s.sessions.forEach(session => session.endSession()); - this.s.sessionPool.endAllPooledSessions(); - } - if (this.s.srvPoller) { this.s.srvPoller.stop(); if (this.s.handleSrvPolling) { @@ -370,26 +351,28 @@ class Topology extends EventEmitter { delete this.s.detectTopologyDescriptionChange; } - // defer state transition because we may need to send an `endSessions` command above stateTransition(this, STATE_CLOSING); - eachAsync( - Array.from(this.s.servers.values()), - (server, cb) => destroyServer(server, this, options, cb), - () => { - this.s.servers.clear(); + this.s.sessions.forEach(session => session.endSession()); + this.s.sessionPool.endAllPooledSessions(() => { + eachAsync( + Array.from(this.s.servers.values()), + (server, cb) => destroyServer(server, this, options, cb), + err => { + this.s.servers.clear(); - // emit an event for close - this.emit('topologyClosed', new monitoring.TopologyClosedEvent(this.s.id)); + // emit an event for close + this.emit('topologyClosed', new events.TopologyClosedEvent(this.s.id)); - stateTransition(this, STATE_CLOSED); - this.emit('close'); + stateTransition(this, STATE_CLOSED); + this.emit('close'); - if (typeof callback === 'function') { - callback(); + if (typeof callback === 'function') { + callback(err); + } } - } - ); + ); + }); } /** @@ -550,7 +533,7 @@ class Topology extends EventEmitter { // emit monitoring events for this change this.emit( 'serverDescriptionChanged', - new monitoring.ServerDescriptionChangedEvent( + new events.ServerDescriptionChangedEvent( this.s.id, serverDescription.address, previousServerDescription, @@ -563,7 +546,7 @@ class Topology extends EventEmitter { this.emit( 'topologyDescriptionChanged', - new monitoring.TopologyDescriptionChangedEvent( + new events.TopologyDescriptionChangedEvent( this.s.id, previousTopologyDescription, 
this.s.description @@ -718,8 +701,8 @@ class Topology extends EventEmitter { return new CursorClass(topology, ns, cmd, options); } - get clientInfo() { - return this.s.clientInfo; + get clientMetadata() { + return this.s.options.metadata; } isConnected() { @@ -788,10 +771,13 @@ function destroyServer(server, topology, options, callback) { server.destroy(options, () => { topology.emit( 'serverClosed', - new monitoring.ServerClosedEvent(topology.s.id, server.description.address) + new events.ServerClosedEvent(topology.s.id, server.description.address) ); - if (typeof callback === 'function') callback(null, null); + SERVER_RELAY_EVENTS.forEach(event => server.removeAllListeners(event)); + if (typeof callback === 'function') { + callback(); + } }); } @@ -821,13 +807,12 @@ function randomSelection(array) { function createAndConnectServer(topology, serverDescription, connectDelay) { topology.emit( 'serverOpening', - new monitoring.ServerOpeningEvent(topology.s.id, serverDescription.address) + new events.ServerOpeningEvent(topology.s.id, serverDescription.address) ); const server = new Server(serverDescription, topology.s.options, topology); relayEvents(server, topology, SERVER_RELAY_EVENTS); - server.once('connect', serverConnectEventHandler(server, topology)); server.on('descriptionReceived', topology.serverUpdateHandler.bind(topology)); server.on('error', serverErrorEventHandler(server, topology)); @@ -845,25 +830,6 @@ function createAndConnectServer(topology, serverDescription, connectDelay) { return server; } -function resetServer(topology, serverDescription) { - if (!topology.s.servers.has(serverDescription.address)) { - return; - } - - // first remove the old server - const server = topology.s.servers.get(serverDescription.address); - destroyServer(server, topology); - - // add the new server, and attempt connection after a delay - const newServer = createAndConnectServer( - topology, - serverDescription, - topology.s.minHeartbeatFrequencyMS - ); - - topology.s.servers.set(serverDescription.address, newServer); -} - /** * Create `Server` instances for all initially known servers, connect them, and assign * them to the passed in `Topology`. @@ -880,15 +846,6 @@ function connectServers(topology, serverDescriptions) { } function updateServers(topology, incomingServerDescription) { - // if the server was reset internally because of an error, we need to replace the - // `Server` instance for it so we can attempt reconnect. 
- // - // TODO: this logical can change once CMAP is put in place - if (incomingServerDescription && incomingServerDescription.error) { - resetServer(topology, incomingServerDescription); - return; - } - // update the internal server's description if (incomingServerDescription && topology.s.servers.has(incomingServerDescription.address)) { const server = topology.s.servers.get(incomingServerDescription.address); @@ -918,15 +875,6 @@ function updateServers(topology, incomingServerDescription) { } } -function serverConnectEventHandler(server, topology) { - return function(/* isMaster, err */) { - server.monitor({ - initial: true, - heartbeatFrequencyMS: topology.description.heartbeatFrequencyMS - }); - }; -} - function serverErrorEventHandler(server, topology) { return function(err) { if (topology.s.state === STATE_CLOSING || topology.s.state === STATE_CLOSED) { @@ -936,7 +884,7 @@ function serverErrorEventHandler(server, topology) { if (isSDAMUnrecoverableError(err, server)) { // NOTE: this must be commented out until we switch to the new CMAP pool because // we presently _always_ clear the pool on error. - resetServerState(server, err /*, { clearPool: true } */); + resetServerState(server, err, { clearPool: true }); return; } @@ -1009,21 +957,16 @@ function executeWriteOperation(args, options, callback) { function resetServerState(server, error, options) { options = Object.assign({}, { clearPool: false }, options); - function resetState() { - server.emit( - 'descriptionReceived', - new ServerDescription(server.description.address, null, { error }) - ); - - process.nextTick(() => server.monitor()); - } - if (options.clearPool && server.s.pool) { - server.s.pool.reset(() => resetState()); - return; + server.s.pool.clear(); } - resetState(); + server.emit( + 'descriptionReceived', + new ServerDescription(server.description.address, null, { error }) + ); + + process.nextTick(() => server.requestCheck()); } function translateReadPreference(options) { @@ -1061,7 +1004,7 @@ function srvPollingHandler(topology) { topology.emit( 'topologyDescriptionChanged', - new monitoring.TopologyDescriptionChangedEvent( + new events.TopologyDescriptionChangedEvent( topology.s.id, previousTopologyDescription, topology.s.description diff --git a/lib/core/sdam/topology_description.js b/lib/core/sdam/topology_description.js index 51b4ecde9ad..ba6a2507ee6 100644 --- a/lib/core/sdam/topology_description.js +++ b/lib/core/sdam/topology_description.js @@ -28,8 +28,7 @@ class TopologyDescription { maxSetVersion, maxElectionId, commonWireVersion, - options, - error + options ) { options = options || {}; @@ -47,7 +46,6 @@ class TopologyDescription { this.logicalSessionTimeoutMinutes = null; this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 0; this.localThresholdMS = options.localThresholdMS || 0; - this.error = error; this.commonWireVersion = commonWireVersion || null; // save this locally, but don't display when printing the instance out @@ -133,7 +131,6 @@ class TopologyDescription { let maxSetVersion = this.maxSetVersion; let maxElectionId = this.maxElectionId; let commonWireVersion = this.commonWireVersion; - let error = serverDescription.error || this.error; const serverType = serverDescription.type; let serverDescriptions = new Map(this.servers); @@ -159,8 +156,7 @@ class TopologyDescription { maxSetVersion, maxElectionId, commonWireVersion, - this.options, - error + this.options ); } @@ -241,11 +237,17 @@ class TopologyDescription { maxSetVersion, maxElectionId, commonWireVersion, - this.options, - error 
+ this.options ); } + get error() { + const descriptionsWithError = Array.from(this.servers.values()).filter(sd => sd.error); + if (descriptionsWithError.length > 0) { + return descriptionsWithError[0].error; + } + } + /** * Determines if the topology description has any known servers */ diff --git a/lib/core/sessions.js b/lib/core/sessions.js index 13576a46cc9..fcd3384645e 100644 --- a/lib/core/sessions.js +++ b/lib/core/sessions.js @@ -588,10 +588,23 @@ class ServerSessionPool { * Ends all sessions in the session pool. * @ignore */ - endAllPooledSessions() { + endAllPooledSessions(callback) { if (this.sessions.length) { - this.topology.endSessions(this.sessions.map(session => session.id)); - this.sessions = []; + this.topology.endSessions( + this.sessions.map(session => session.id), + () => { + this.sessions = []; + if (typeof callback === 'function') { + callback(); + } + } + ); + + return; + } + + if (typeof callback === 'function') { + callback(); } } diff --git a/lib/core/topologies/mongos.js b/lib/core/topologies/mongos.js index 681b01fd70e..29371931af7 100644 --- a/lib/core/topologies/mongos.js +++ b/lib/core/topologies/mongos.js @@ -8,16 +8,15 @@ const Logger = require('../connection/logger'); const retrieveBSON = require('../connection/utils').retrieveBSON; const MongoError = require('../error').MongoError; const Server = require('./server'); -const clone = require('./shared').clone; const diff = require('./shared').diff; const cloneOptions = require('./shared').cloneOptions; -const createClientInfo = require('./shared').createClientInfo; const SessionMixins = require('./shared').SessionMixins; const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; const relayEvents = require('../utils').relayEvents; const isRetryableError = require('../error').isRetryableError; const BSON = retrieveBSON(); const getMMAPError = require('./shared').getMMAPError; +const makeClientMetadata = require('../utils').makeClientMetadata; /** * @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is @@ -116,7 +115,7 @@ var Mongos = function(seedlist, options) { // Internal state this.s = { - options: Object.assign({}, options), + options: Object.assign({ metadata: makeClientMetadata(options) }, options), // BSON instance bson: options.bson || @@ -153,14 +152,9 @@ var Mongos = function(seedlist, options) { // Are we running in debug mode debug: typeof options.debug === 'boolean' ? options.debug : false, // localThresholdMS - localThresholdMS: options.localThresholdMS || 15, - // Client info - clientInfo: createClientInfo(options) + localThresholdMS: options.localThresholdMS || 15 }; - // Set the client info - this.s.options.clientInfo = createClientInfo(options); - // Log info warning if the socketTimeout < haInterval as it will cause // a lot of recycled connections to happen. 
if ( @@ -265,8 +259,7 @@ Mongos.prototype.connect = function(options) { Object.assign({}, self.s.options, x, options, { reconnect: false, monitoring: false, - parent: self, - clientInfo: clone(self.s.clientInfo) + parent: self }) ); @@ -607,8 +600,7 @@ function reconnectProxies(self, proxies, callback) { port: parseInt(_server.name.split(':')[1], 10), reconnect: false, monitoring: false, - parent: self, - clientInfo: clone(self.s.clientInfo) + parent: self }) ); diff --git a/lib/core/topologies/replset.js b/lib/core/topologies/replset.js index 0f03e9940de..b289d59a345 100644 --- a/lib/core/topologies/replset.js +++ b/lib/core/topologies/replset.js @@ -10,10 +10,8 @@ const Logger = require('../connection/logger'); const MongoError = require('../error').MongoError; const Server = require('./server'); const ReplSetState = require('./replset_state'); -const clone = require('./shared').clone; const Timeout = require('./shared').Timeout; const Interval = require('./shared').Interval; -const createClientInfo = require('./shared').createClientInfo; const SessionMixins = require('./shared').SessionMixins; const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; const relayEvents = require('../utils').relayEvents; @@ -21,6 +19,7 @@ const isRetryableError = require('../error').isRetryableError; const BSON = retrieveBSON(); const calculateDurationInMs = require('../utils').calculateDurationInMs; const getMMAPError = require('./shared').getMMAPError; +const makeClientMetadata = require('../utils').makeClientMetadata; // // States @@ -140,7 +139,7 @@ var ReplSet = function(seedlist, options) { // Internal state this.s = { - options: Object.assign({}, options), + options: Object.assign({ metadata: makeClientMetadata(options) }, options), // BSON instance bson: options.bson || @@ -187,9 +186,7 @@ var ReplSet = function(seedlist, options) { // Connect function options passed in connectOptions: {}, // Are we running in debug mode - debug: typeof options.debug === 'boolean' ? options.debug : false, - // Client info - clientInfo: createClientInfo(options) + debug: typeof options.debug === 'boolean' ? 
options.debug : false }; // Add handler for topology change @@ -369,8 +366,7 @@ function connectNewServers(self, servers, callback) { port: parseInt(_server.split(':')[1], 10), reconnect: false, monitoring: false, - parent: self, - clientInfo: clone(self.s.clientInfo) + parent: self }) ); @@ -918,8 +914,7 @@ ReplSet.prototype.connect = function(options) { Object.assign({}, self.s.options, x, options, { reconnect: false, monitoring: false, - parent: self, - clientInfo: clone(self.s.clientInfo) + parent: self }) ); }); diff --git a/lib/core/topologies/server.js b/lib/core/topologies/server.js index c81d5e8e40c..6f6de12eaa7 100644 --- a/lib/core/topologies/server.js +++ b/lib/core/topologies/server.js @@ -13,13 +13,13 @@ var inherits = require('util').inherits, wireProtocol = require('../wireprotocol'), CoreCursor = require('../cursor').CoreCursor, sdam = require('./shared'), - createClientInfo = require('./shared').createClientInfo, createCompressionInfo = require('./shared').createCompressionInfo, resolveClusterTime = require('./shared').resolveClusterTime, SessionMixins = require('./shared').SessionMixins, relayEvents = require('../utils').relayEvents; const collationNotSupported = require('../utils').collationNotSupported; +const makeClientMetadata = require('../utils').makeClientMetadata; // Used for filtering out fields for loggin var debugFields = [ @@ -120,7 +120,7 @@ var Server = function(options) { // Internal state this.s = { // Options - options: options, + options: Object.assign({ metadata: makeClientMetadata(options) }, options), // Logger logger: Logger('Server', options), // Factory overrides @@ -175,8 +175,6 @@ var Server = function(options) { this.initialConnect = true; // Default type this._type = 'server'; - // Set the client info - this.clientInfo = createClientInfo(options); // Max Stalleness values // last time we updated the ismaster state @@ -212,6 +210,13 @@ Object.defineProperty(Server.prototype, 'logicalSessionTimeoutMinutes', { } }); +Object.defineProperty(Server.prototype, 'clientMetadata', { + enumerable: true, + get: function() { + return this.s.options.metadata; + } +}); + // In single server deployments we track the clusterTime directly on the topology, however // in Mongos and ReplSet deployments we instead need to delegate the clusterTime up to the // tracking objects so we can ensure we are gossiping the maximum time received from the diff --git a/lib/core/topologies/shared.js b/lib/core/topologies/shared.js index d69e88bf40d..c0d0f14d69c 100644 --- a/lib/core/topologies/shared.js +++ b/lib/core/topologies/shared.js @@ -1,8 +1,5 @@ 'use strict'; - -const os = require('os'); const ReadPreference = require('./read_preference'); -const Buffer = require('safe-buffer').Buffer; const TopologyType = require('../sdam/common').TopologyType; const MongoError = require('../error').MongoError; @@ -18,62 +15,6 @@ function emitSDAMEvent(self, event, description) { } } -// Get package.json variable -const driverVersion = require('../../../package.json').version; -const nodejsVersion = `'Node.js ${process.version}, ${os.endianness}`; -const type = os.type(); -const name = process.platform; -const architecture = process.arch; -const release = os.release(); - -function createClientInfo(options) { - const clientInfo = options.clientInfo - ? 
clone(options.clientInfo) - : { - driver: { - name: 'nodejs', - version: driverVersion - }, - os: { - type: type, - name: name, - architecture: architecture, - version: release - } - }; - - if (options.useUnifiedTopology) { - clientInfo.platform = `${nodejsVersion} (${options.useUnifiedTopology ? 'unified' : 'legacy'})`; - } - - // Do we have an application specific string - if (options.appname) { - // Cut at 128 bytes - var buffer = Buffer.from(options.appname); - // Return the truncated appname - var appname = buffer.length > 128 ? buffer.slice(0, 128).toString('utf8') : options.appname; - // Add to the clientInfo - clientInfo.application = { name: appname }; - } - - // support optionally provided wrapping driver info - if (options.driverInfo) { - if (options.driverInfo.name) { - clientInfo.driver.name = `${clientInfo.driver.name}|${options.driverInfo.name}`; - } - - if (options.driverInfo.version) { - clientInfo.driver.version = `${clientInfo.driver.version}|${options.driverInfo.version}`; - } - - if (options.driverInfo.platform) { - clientInfo.platform = `${clientInfo.platform}|${options.driverInfo.platform}`; - } - } - - return clientInfo; -} - function createCompressionInfo(options) { if (!options.compression || !options.compression.compressors) { return []; @@ -475,7 +416,6 @@ module.exports.getTopologyType = getTopologyType; module.exports.emitServerDescriptionChanged = emitServerDescriptionChanged; module.exports.emitTopologyDescriptionChanged = emitTopologyDescriptionChanged; module.exports.cloneOptions = cloneOptions; -module.exports.createClientInfo = createClientInfo; module.exports.createCompressionInfo = createCompressionInfo; module.exports.clone = clone; module.exports.diff = diff; diff --git a/lib/core/utils.js b/lib/core/utils.js index 5abd467a3ec..d9f487db01e 100644 --- a/lib/core/utils.js +++ b/lib/core/utils.js @@ -1,5 +1,5 @@ 'use strict'; - +const os = require('os'); const crypto = require('crypto'); const requireOptional = require('require_optional'); @@ -131,30 +131,29 @@ function isPromiseLike(maybePromise) { * @param {function} callback The callback called after every item has been iterated */ function eachAsync(arr, eachFn, callback) { - if (arr.length === 0) { - callback(null); + arr = arr || []; + + let idx = 0; + let awaiting = 0; + for (idx = 0; idx < arr.length; ++idx) { + awaiting++; + eachFn(arr[idx], eachCallback); + } + + if (awaiting === 0) { + callback(); return; } - const length = arr.length; - let completed = 0; function eachCallback(err) { + awaiting--; if (err) { - callback(err, null); + callback(err); return; } - if (++completed === length) { - callback(null); - } - } - - for (let idx = 0; idx < length; ++idx) { - try { - eachFn(arr[idx], eachCallback); - } catch (err) { - callback(err); - return; + if (idx === arr.length && awaiting <= 0) { + callback(); } } } @@ -177,6 +176,26 @@ function tagsStrictEqual(tags, tags2) { return tagsKeys.length === tags2Keys.length && tagsKeys.every(key => tags2[key] === tags[key]); } +function errorStrictEqual(lhs, rhs) { + if (lhs === rhs) { + return true; + } + + if ((lhs == null && rhs != null) || (lhs != null && rhs == null)) { + return false; + } + + if (lhs.constructor.name !== rhs.constructor.name) { + return false; + } + + if (lhs.message !== rhs.message) { + return false; + } + + return true; +} + function makeStateMachine(stateTable) { return function stateTransition(target, newState) { const legalStates = stateTable[target.s.state]; @@ -191,6 +210,51 @@ function makeStateMachine(stateTable) { }; } 
+function makeClientMetadata(options) { + options = options || {}; + + const metadata = { + driver: { + name: 'nodejs', + version: require('../../package.json').version + }, + os: { + type: os.type(), + name: process.platform, + architecture: process.arch, + version: os.release() + }, + platform: `Node.js ${process.version}, ${os.endianness()} (${ + options.useUnifiedTopology ? 'unified' : 'legacy' + })` + }; + + // support optionally provided wrapping driver info + if (options.driverInfo) { + if (options.driverInfo.name) { + metadata.driver.name = `${metadata.driver.name}|${options.driverInfo.name}`; + } + + if (options.driverInfo.version) { + metadata.driver.version = `${metadata.driver.version}|${options.driverInfo.version}`; + } + + if (options.driverInfo.platform) { + metadata.platform = `${metadata.platform}|${options.driverInfo.platform}`; + } + } + + if (options.appname) { + // MongoDB requires the appname not exceed a byte length of 128 + const buffer = Buffer.from(options.appname); + metadata.application = { + name: buffer.length > 128 ? buffer.slice(0, 128).toString('utf8') : options.appname + }; + } + + return metadata; +} + module.exports = { uuidV4, calculateDurationInMs, @@ -204,5 +268,7 @@ module.exports = { isUnifiedTopology, arrayStrictEqual, tagsStrictEqual, - makeStateMachine + errorStrictEqual, + makeStateMachine, + makeClientMetadata }; diff --git a/lib/core/wireprotocol/command.js b/lib/core/wireprotocol/command.js index e6af5dabb9e..214385bbc6f 100644 --- a/lib/core/wireprotocol/command.js +++ b/lib/core/wireprotocol/command.js @@ -12,7 +12,8 @@ const MongoNetworkError = require('../error').MongoNetworkError; const maxWireVersion = require('../utils').maxWireVersion; function isClientEncryptionEnabled(server) { - return server.autoEncrypter; + const wireVersion = maxWireVersion(server); + return wireVersion && server.autoEncrypter; } function command(server, ns, cmd, options, callback) { @@ -152,9 +153,6 @@ function supportsOpMsg(topologyOrServer) { } function _cryptCommand(server, ns, cmd, options, callback) { - const shouldBypassAutoEncryption = !!( - server.s.options.autoEncryption && server.s.options.autoEncryption.bypassAutoEncryption - ); const autoEncrypter = server.autoEncrypter; function commandResponseHandler(err, response) { if (err || response == null) { @@ -174,11 +172,6 @@ function _cryptCommand(server, ns, cmd, options, callback) { }); } - if (shouldBypassAutoEncryption) { - _command(server, ns, cmd, options, commandResponseHandler); - return; - } - autoEncrypter.encrypt(ns, cmd, options, (err, encrypted) => { if (err) { callback(err, null); diff --git a/lib/gridfs-stream/upload.js b/lib/gridfs-stream/upload.js index 40e1eab3ffe..578949a53d7 100644 --- a/lib/gridfs-stream/upload.js +++ b/lib/gridfs-stream/upload.js @@ -422,7 +422,7 @@ function doWrite(_this, chunk, encoding, callback) { if (_this.md5) { _this.md5.update(_this.bufToStore); } - var doc = createChunkDoc(_this.id, _this.n, _this.bufToStore); + var doc = createChunkDoc(_this.id, _this.n, Buffer.from(_this.bufToStore)); ++_this.state.outstandingRequests; ++outstandingRequests; diff --git a/lib/mongo_client.js b/lib/mongo_client.js index eaad74a22ef..090682f163d 100644 --- a/lib/mongo_client.js +++ b/lib/mongo_client.js @@ -87,7 +87,7 @@ const CloseOperation = require('./operations/close'); * @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer *deprecated* use `tls` variants * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server
identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. *deprecated* use `tls` variants * @param {boolean} [options.tls=false] Enable TLS connections - * @param {boolean} [options.tlsinsecure=false] Relax TLS constraints, disabling validation + * @param {boolean} [options.tlsInsecure=false] Relax TLS constraints, disabling validation * @param {string} [options.tlsCAFile] A path to file with either a single or bundle of certificate authorities to be considered trusted when making a TLS connection * @param {string} [options.tlsCertificateKeyFile] A path to the client certificate file or the client private key file; in the case that they both are needed, the files should be concatenated * @param {string} [options.tlsCertificateKeyFilePassword] The password to decrypt the client private key to be used for TLS connections @@ -97,10 +97,10 @@ const CloseOperation = require('./operations/close'); * @param {boolean} [options.noDelay=true] TCP Connection no delay * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting + * @param {number} [options.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {number} [options.family] Version of IP stack. Can be 4, 6 or null (default). * If null, will attempt to connect with IPv6, and will fall back to IPv4 on failure - * @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries * @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies @@ -320,7 +320,7 @@ MongoClient.prototype.isConnected = function(options) { * @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer *deprecated* use `tls` variants * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. 
*deprecated* use `tls` variants * @param {boolean} [options.tls=false] Enable TLS connections - * @param {boolean} [options.tlsinsecure=false] Relax TLS constraints, disabling validation + * @param {boolean} [options.tlsInsecure=false] Relax TLS constraints, disabling validation * @param {string} [options.tlsCAFile] A path to file with either a single or bundle of certificate authorities to be considered trusted when making a TLS connection * @param {string} [options.tlsCertificateKeyFile] A path to the client certificate file or the client private key file; in the case that they both are needed, the files should be concatenated * @param {string} [options.tlsCertificateKeyFilePassword] The password to decrypt the client private key to be used for TLS connections @@ -330,10 +330,10 @@ MongoClient.prototype.isConnected = function(options) { * @param {boolean} [options.noDelay=true] TCP Connection no delay * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled * @param {boolean} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting + * @param {number} [options.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {number} [options.family] Version of IP stack. Can be 4, 6 or null (default). * If null, will attempt to connect with IPv6, and will fall back to IPv4 on failure - * @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries * @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies diff --git a/lib/operations/connect.js b/lib/operations/connect.js index 172c099102c..0ba93b5580f 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -17,6 +17,8 @@ const Server = require('../topologies/server'); const ServerSessionPool = require('../core').Sessions.ServerSessionPool; const emitDeprecationWarning = require('../utils').emitDeprecationWarning; const fs = require('fs'); +const BSON = require('../core/connection/utils').retrieveBSON(); +const CMAP_EVENT_NAMES = require('../cmap/events').CMAP_EVENT_NAMES; let client; function loadClient() { @@ -142,13 +144,17 @@ const validOptionNames = [ 'autoEncryption', 'driverInfo', 'tls', + 'tlsInsecure', 'tlsinsecure', 'tlsAllowInvalidCertificates', 'tlsAllowInvalidHostnames', 'tlsCAFile', 'tlsCertificateFile', 'tlsCertificateKeyFile', - 'tlsCertificateKeyFilePassword' + 'tlsCertificateKeyFilePassword', + 'minHeartbeatFrequencyMS', + 'heartbeatFrequencyMS', + 'waitQueueTimeoutMS' ]; const ignoreOptionNames = ['native_parser']; @@ -221,10 +227,10 @@ function addListeners(mongoClient, topology) { function assignTopology(client, topology) { client.topology = topology; - topology.s.sessionPool = - topology instanceof NativeTopology - ? 
new ServerSessionPool(topology) - : new ServerSessionPool(topology.s.coreTopology); + + if (!(topology instanceof NativeTopology)) { + topology.s.sessionPool = new ServerSessionPool(topology.s.coreTopology); + } } // Clear out all events @@ -300,7 +306,7 @@ function connect(mongoClient, url, options, callback) { // Check if we have connection and socket timeout set if (_finalOptions.socketTimeoutMS == null) _finalOptions.socketTimeoutMS = 360000; - if (_finalOptions.connectTimeoutMS == null) _finalOptions.connectTimeoutMS = 30000; + if (_finalOptions.connectTimeoutMS == null) _finalOptions.connectTimeoutMS = 10000; if (_finalOptions.retryWrites == null) _finalOptions.retryWrites = true; if (_finalOptions.useRecoveryToken == null) _finalOptions.useRecoveryToken = true; if (_finalOptions.readPreference == null) _finalOptions.readPreference = 'primary'; @@ -463,7 +469,19 @@ function createServer(mongoClient, options, callback) { }); } -const DEPRECATED_UNIFIED_EVENTS = new Set(['reconnect', 'reconnectFailed', 'attemptReconnect']); +const DEPRECATED_UNIFIED_EVENTS = new Set([ + 'reconnect', + 'reconnectFailed', + 'attemptReconnect', + 'joined', + 'left', + 'ping', + 'ha', + 'all', + 'fullsetup', + 'open' +]); + function registerDeprecatedEventNotifiers(client) { client.on('newListener', eventName => { if (DEPRECATED_UNIFIED_EVENTS.has(eventName)) { @@ -485,6 +503,62 @@ function createTopology(mongoClient, topologyType, options, callback) { // Set default options const servers = translateOptions(options, translationOptions); + // determine CSFLE support + if (options.autoEncryption != null) { + let AutoEncrypter; + try { + require.resolve('mongodb-client-encryption'); + } catch (err) { + callback( + new MongoError( + 'Auto-encryption requested, but the module is not installed. Please add `mongodb-client-encryption` as a dependency of your project' + ) + ); + return; + } + + try { + let mongodbClientEncryption = require('mongodb-client-encryption'); + if (typeof mongodbClientEncryption.extension !== 'function') { + callback( + new MongoError( + 'loaded version of `mongodb-client-encryption` does not have property `extension`. 
Please make sure you are loading the correct version of `mongodb-client-encryption`' + ) + ); + return; + } + AutoEncrypter = mongodbClientEncryption.extension(require('../../index')).AutoEncrypter; + } catch (err) { + callback(err); + return; + } + + const mongoCryptOptions = Object.assign( + { + bson: + options.bson || + new BSON([ + BSON.Binary, + BSON.Code, + BSON.DBRef, + BSON.Decimal128, + BSON.Double, + BSON.Int32, + BSON.Long, + BSON.Map, + BSON.MaxKey, + BSON.MinKey, + BSON.ObjectId, + BSON.BSONRegExp, + BSON.Symbol, + BSON.Timestamp + ]) + }, + options.autoEncryption + ); + + options.autoEncrypter = new AutoEncrypter(mongoClient, mongoCryptOptions); + } + // Create the topology let topology; if (topologyType === 'mongos') { @@ -504,48 +578,38 @@ function createTopology(mongoClient, topologyType, options, callback) { // Open the connection assignTopology(mongoClient, topology); + + // initialize CSFLE if requested + if (options.autoEncrypter) { + options.autoEncrypter.init(err => { + if (err) { + callback(err); + return; + } + + topology.connect(options, err => { + if (err) { + topology.close(true); + callback(err); + return; + } + + callback(undefined, topology); + }); + }); + + return; + } + + // otherwise connect normally topology.connect(options, err => { if (err) { topology.close(true); return callback(err); } - if (options.autoEncryption == null) { - callback(null, topology); - return; - } - - // setup for client side encryption - let AutoEncrypter; - try { - require.resolve('mongodb-client-encryption'); - } catch (err) { - callback( - new MongoError( - 'Auto-encryption requested, but the module is not installed. Please add `mongodb-client-encryption` as a dependency of your project' - ) - ); - return; - } - try { - let mongodbClientEncryption = require('mongodb-client-encryption'); - if (typeof mongodbClientEncryption.extension !== 'function') { - throw new MongoError( - 'loaded version of `mongodb-client-encryption` does not have property `extension`.
Please make sure you are loading the correct version of `mongodb-client-encryption`' - ); - } - AutoEncrypter = mongodbClientEncryption.extension(require('../../index')).AutoEncrypter; - } catch (err) { - callback(err); - return; - } - - const mongoCryptOptions = Object.assign({}, options.autoEncryption); - topology.s.options.autoEncrypter = new AutoEncrypter(mongoClient, mongoCryptOptions); - topology.s.options.autoEncrypter.init(err => { - if (err) return callback(err, null); - callback(null, topology); - }); + callback(undefined, topology); + return; }); } @@ -638,23 +702,28 @@ function mergeOptions(target, source, flatten) { function relayEvents(mongoClient, topology) { const serverOrCommandEvents = [ + // APM + 'commandStarted', + 'commandSucceeded', + 'commandFailed', + + // SDAM 'serverOpening', + 'serverClosed', 'serverDescriptionChanged', 'serverHeartbeatStarted', 'serverHeartbeatSucceeded', 'serverHeartbeatFailed', - 'serverClosed', 'topologyOpening', 'topologyClosed', 'topologyDescriptionChanged', - 'commandStarted', - 'commandSucceeded', - 'commandFailed', + + // Legacy 'joined', 'left', 'ping', 'ha' - ]; + ].concat(CMAP_EVENT_NAMES); serverOrCommandEvents.forEach(event => { topology.on(event, (object1, object2) => { @@ -737,7 +806,7 @@ function translateOptions(options, translationOptions) { // Set the socket and connection timeouts if (options.socketTimeoutMS == null) options.socketTimeoutMS = 360000; - if (options.connectTimeoutMS == null) options.connectTimeoutMS = 30000; + if (options.connectTimeoutMS == null) options.connectTimeoutMS = 10000; if (!translationOptions.createServers) { return; diff --git a/lib/operations/execute_operation.js b/lib/operations/execute_operation.js index 77aa903a966..da487279e24 100644 --- a/lib/operations/execute_operation.js +++ b/lib/operations/execute_operation.js @@ -53,54 +53,45 @@ function executeOperation(topology, operation, callback) { } } - const makeExecuteCallback = (resolve, reject) => - function executeCallback(err, result) { - if (session && session.owner === owner) { - session.endSession(() => { - if (operation.session === session) { - operation.clearSession(); - } - if (err) return reject(err); - resolve(result); - }); - } else { + let result; + if (typeof callback !== 'function') { + result = new Promise((resolve, reject) => { + callback = (err, res) => { if (err) return reject(err); - resolve(result); - } - }; - - // Execute using callback - if (typeof callback === 'function') { - const handler = makeExecuteCallback( - result => callback(null, result), - err => callback(err, null) - ); + resolve(res); + }; + }); + } - try { - if (operation.hasAspect(Aspect.EXECUTE_WITH_SELECTION)) { - return executeWithServerSelection(topology, operation, handler); - } else { - return operation.execute(handler); + function executeCallback(err, result) { + if (session && session.owner === owner) { + session.endSession(); + if (operation.session === session) { + operation.clearSession(); } - } catch (e) { - handler(e); - throw e; } - } - return new Promise(function(resolve, reject) { - const handler = makeExecuteCallback(resolve, reject); + callback(err, result); + } - try { - if (operation.hasAspect(Aspect.EXECUTE_WITH_SELECTION)) { - return executeWithServerSelection(topology, operation, handler); - } else { - return operation.execute(handler); + try { + if (operation.hasAspect(Aspect.EXECUTE_WITH_SELECTION)) { + executeWithServerSelection(topology, operation, executeCallback); + } else { + operation.execute(executeCallback); + } + } catch 
(e) { + if (session && session.owner === owner) { + session.endSession(); + if (operation.session === session) { + operation.clearSession(); } - } catch (e) { - handler(e); } - }); + + throw e; + } + + return result; } function supportsRetryableReads(server) { diff --git a/lib/operations/find_one.js b/lib/operations/find_one.js index d3037a6dbfa..b584db643d9 100644 --- a/lib/operations/find_one.js +++ b/lib/operations/find_one.js @@ -17,16 +17,20 @@ class FindOneOperation extends OperationBase { const query = this.query; const options = this.options; - const cursor = coll - .find(query, options) - .limit(-1) - .batchSize(1); + try { + const cursor = coll + .find(query, options) + .limit(-1) + .batchSize(1); - // Return the item - cursor.next((err, item) => { - if (err != null) return handleCallback(callback, toError(err), null); - handleCallback(callback, null, item); - }); + // Return the item + cursor.next((err, item) => { + if (err != null) return handleCallback(callback, toError(err), null); + handleCallback(callback, null, item); + }); + } catch (e) { + callback(e); + } } } diff --git a/lib/topologies/mongos.js b/lib/topologies/mongos.js index ec14f48516c..10e66d2151b 100644 --- a/lib/topologies/mongos.js +++ b/lib/topologies/mongos.js @@ -83,8 +83,8 @@ var legalOptionNames = [ * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting - * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting + * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology * @fires Mongos#connect @@ -168,13 +168,6 @@ class Mongos extends TopologyBase { // Translate all the options to the core types clonedOptions = translateOptions(clonedOptions, socketOptions); - // Build default client information - clonedOptions.clientInfo = this.clientInfo; - // Do we have an application specific string - if (options.appname) { - clonedOptions.clientInfo.application = { name: options.appname }; - } - // Internal state this.s = { // Create the Mongos diff --git a/lib/topologies/native_topology.js b/lib/topologies/native_topology.js index 51574878d74..778ddc9fab7 100644 --- a/lib/topologies/native_topology.js +++ b/lib/topologies/native_topology.js @@ -15,7 +15,8 @@ class NativeTopology extends Topology { cursorFactory: Cursor, reconnect: false, emitError: typeof options.emitError === 'boolean' ? options.emitError : true, - size: typeof options.poolSize === 'number' ? options.poolSize : 5, + maxPoolSize: typeof options.poolSize === 'number' ? options.poolSize : 5, + minPoolSize: typeof options.minSize === 'number' ? options.minSize : 0, monitorCommands: typeof options.monitorCommands === 'boolean' ? 
options.monitorCommands : false } @@ -34,11 +35,6 @@ class NativeTopology extends Topology { clonedOptions = translateOptions(clonedOptions, socketOptions); super(servers, clonedOptions); - - // Do we have an application specific string - if (options.appname) { - this.s.clientInfo.application = { name: options.appname }; - } } capabilities() { diff --git a/lib/topologies/replset.js b/lib/topologies/replset.js index 44e83d11fea..69df26d19e0 100644 --- a/lib/topologies/replset.js +++ b/lib/topologies/replset.js @@ -93,8 +93,8 @@ var legalOptionNames = [ * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=10000] TCP Connection timeout setting - * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting + * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. * @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed); * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology @@ -175,13 +175,6 @@ class ReplSet extends TopologyBase { // Translate all the options to the core types clonedOptions = translateOptions(clonedOptions, socketOptions); - // Build default client information - clonedOptions.clientInfo = this.clientInfo; - // Do we have an application specific string - if (options.appname) { - clonedOptions.clientInfo.application = { name: options.appname }; - } - // Create the ReplSet var coreTopology = new CReplSet(seedlist, clonedOptions); diff --git a/lib/topologies/server.js b/lib/topologies/server.js index 9bbe4350ec7..3079cb9953e 100644 --- a/lib/topologies/server.js +++ b/lib/topologies/server.js @@ -85,8 +85,8 @@ var legalOptionNames = [ * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. 
* @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting - * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting + * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries * @param {boolean} [options.monitoring=true] Triggers the server instance to call ismaster @@ -168,13 +168,6 @@ class Server extends TopologyBase { // Translate all the options to the core types clonedOptions = translateOptions(clonedOptions, socketOptions); - // Build default client information - clonedOptions.clientInfo = this.clientInfo; - // Do we have an application specific string - if (options.appname) { - clonedOptions.clientInfo.application = { name: options.appname }; - } - // Define the internal properties this.s = { // Create an instance of a server instance from core module diff --git a/lib/topologies/topology_base.js b/lib/topologies/topology_base.js index e74cb9ff601..967b4cd4627 100644 --- a/lib/topologies/topology_base.js +++ b/lib/topologies/topology_base.js @@ -3,7 +3,6 @@ const EventEmitter = require('events'), MongoError = require('../core').MongoError, f = require('util').format, - os = require('os'), translateReadPreference = require('../utils').translateReadPreference, ClientSession = require('../core').Sessions.ClientSession; @@ -254,33 +253,9 @@ var ServerCapabilities = function(ismaster) { setup_get_property(this, 'commandsTakeCollation', commandsTakeCollation); }; -// Get package.json variable -const driverVersion = require('../../package.json').version, - nodejsversion = f('Node.js %s, %s', process.version, os.endianness()), - type = os.type(), - name = process.platform, - architecture = process.arch, - release = os.release(); - class TopologyBase extends EventEmitter { constructor() { super(); - - // Build default client information - this.clientInfo = { - driver: { - name: 'nodejs', - version: driverVersion - }, - os: { - type: type, - name: name, - architecture: architecture, - version: release - }, - platform: nodejsversion - }; - this.setMaxListeners(Infinity); } @@ -304,6 +279,10 @@ class TopologyBase extends EventEmitter { return this.s.coreTopology.endSessions(sessions, callback); } + get clientMetadata() { + return this.s.coreTopology.s.options.metadata; + } + // Server capabilities capabilities() { if (this.s.sCapabilities) return this.s.sCapabilities; diff --git a/lib/utils.js b/lib/utils.js index 7140e9ba0b1..dd6cbe8ce88 100644 --- a/lib/utils.js +++ b/lib/utils.js @@ -1,5 +1,4 @@ 'use strict'; - const MongoError = require('./core/error').MongoError; const ReadPreference = require('./core/topologies/read_preference'); const WriteConcern = require('./write_concern'); @@ -685,6 +684,15 @@ class MongoDBNamespace { } } +function* makeCounter(seed) { + let count = seed || 0; + while (true) { + const newCount = count; + count += 1; + yield newCount; + } +} + module.exports = { filterOptions, 
mergeOptions, @@ -713,5 +721,6 @@ module.exports = { SUPPORTS, MongoDBNamespace, resolveReadPreference, - emitDeprecationWarning + emitDeprecationWarning, + makeCounter }; diff --git a/output b/output new file mode 100644 index 00000000000..e041d078a51 Binary files /dev/null and b/output differ diff --git a/package-lock.json b/package-lock.json index 6bcee628548..c8d35bc56a8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "mongodb", - "version": "3.4.1", + "version": "3.5.0", "lockfileVersion": 1, "requires": true, "dependencies": { @@ -290,7 +290,6 @@ "version": "2.2.0", "resolved": "https://registry.npmjs.org/bl/-/bl-2.2.0.tgz", "integrity": "sha512-wbgvOpqopSr7uq6fJrLH8EsvYMJf9gzfo2jCsL2eTy75qXPukA4pCgHamOQkZtY5vmfVtjB+P3LNlMHW5CEZXA==", - "dev": true, "requires": { "readable-stream": "^2.3.5", "safe-buffer": "^5.1.1" @@ -830,8 +829,7 @@ "core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", - "dev": true + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" }, "coveralls": { "version": "2.13.3", @@ -982,6 +980,11 @@ "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", "dev": true }, + "denque": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/denque/-/denque-1.4.1.tgz", + "integrity": "sha512-OfzPuSZKGcgr96rf1oODnfjqBFmr1DVoc/TrItj3Ohe0Ah1C5WX5Baquw/9U9KovnQ88EqmJbD66rKYUQYN1tQ==" + }, "detect-libc": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", @@ -1836,8 +1839,7 @@ "inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "ini": { "version": "1.3.5", @@ -1996,8 +1998,7 @@ "isarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" }, "isexe": { "version": "2.0.0", @@ -2974,8 +2975,7 @@ "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", - "dev": true + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" }, "progress": { "version": "2.0.3", @@ -3137,7 +3137,6 @@ "version": "2.3.6", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", - "dev": true, "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -3151,8 +3150,7 @@ "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - 
"dev": true + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" } } }, @@ -3698,7 +3696,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, "requires": { "safe-buffer": "~5.1.0" }, @@ -3706,8 +3703,7 @@ "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" } } }, @@ -3946,8 +3942,7 @@ "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", - "dev": true + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" }, "uuid": { "version": "3.3.3", diff --git a/package.json b/package.json index 8aa159756a6..248baf630f3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "mongodb", - "version": "3.4.1", + "version": "3.5.0", "description": "The official MongoDB driver for Node.js", "main": "index.js", "files": [ @@ -24,13 +24,14 @@ "bson-ext": "^2.0.0" }, "dependencies": { + "bl": "^2.2.0", "bson": "^1.1.1", + "denque": "^1.4.1", "require_optional": "^1.0.1", "safe-buffer": "^5.1.2" }, "devDependencies": { "bluebird": "3.5.0", - "bl": "^2.2.0", "chai": "^4.1.1", "chai-subset": "^1.6.0", "chalk": "^2.4.2", diff --git a/test/examples/change_streams.js b/test/examples/change_streams.js index 95221f4afa2..2e1eed9ff02 100644 --- a/test/examples/change_streams.js +++ b/test/examples/change_streams.js @@ -129,7 +129,7 @@ describe('examples(change-stream):', function() { const changeStream = collection.watch(); let newChangeStream; - changeStream.on('change', next => { + changeStream.once('change', next => { const resumeToken = changeStream.resumeToken; changeStream.close(); diff --git a/test/functional/apm.test.js b/test/functional/apm.test.js index 6a80a056d2e..3f229b751af 100644 --- a/test/functional/apm.test.js +++ b/test/functional/apm.test.js @@ -224,8 +224,15 @@ describe('APM', function() { .then(() => { expect(started).to.have.lengthOf(2); - // Ensure command was not sent to the primary - expect(started[0].connectionId).to.not.equal(started[1].connectionId); + if (self.configuration.usingUnifiedTopology()) { + expect(started[0]) + .property('address') + .to.not.equal(started[1].address); + } else { + // Ensure command was not sent to the primary + expect(started[0].connectionId).to.not.equal(started[1].connectionId); + } + return client.close(); }); }); @@ -274,7 +281,14 @@ describe('APM', function() { expect(started).to.have.lengthOf(2); // Ensure command was not sent to the primary - expect(started[0].connectionId).to.not.equal(started[1].connectionId); + if (self.configuration.usingUnifiedTopology()) { + expect(started[0]) + .property('address') + .to.not.equal(started[1].address); + } else { + expect(started[0].connectionId).to.not.equal(started[1].connectionId); + } + return client.close(); }); }); diff --git a/test/functional/change_stream_spec.test.js b/test/functional/change_stream_spec.test.js index 0fb707f8eb8..251b84fad49 100644 
--- a/test/functional/change_stream_spec.test.js +++ b/test/functional/change_stream_spec.test.js @@ -48,7 +48,9 @@ describe('Change Stream Spec', function() { ctx.database = ctx.client.db(sDB); ctx.collection = ctx.database.collection(sColl); - ctx.client.on('commandStarted', e => _events.push(e)); + ctx.client.on('commandStarted', e => { + if (e.commandName !== 'ismaster') _events.push(e); + }); }); }); @@ -124,7 +126,7 @@ describe('Change Stream Spec', function() { if (result.success) { expect(value).to.have.a.lengthOf(result.success.length); - assertEquality(value, result.success); + expect(value).to.matchMongoSpec(result.success); } }; } @@ -137,7 +139,7 @@ describe('Change Stream Spec', function() { throw err; } - assertEquality(err, result.error); + expect(err).to.matchMongoSpec(result.error); }; } @@ -154,7 +156,8 @@ describe('Change Stream Spec', function() { `Expected there to be an APM event at index ${idx}, but there was none` ); } - assertEquality(events[idx], expected); + + expect(events[idx]).to.matchMongoSpec(expected); }); }; } @@ -254,41 +257,4 @@ describe('Change Stream Spec', function() { } return () => target[command].apply(target, args); } - - function assertEquality(actual, expected) { - try { - _assertEquality(actual, expected); - } catch (e) { - console.dir(actual, { depth: 999 }); - console.dir(expected, { depth: 999 }); - throw e; - } - } - - function _assertEquality(actual, expected) { - try { - if (expected === '42' || expected === 42) { - expect(actual).to.exist; - return; - } - - const expectedType = - expected && expected.code ? 'error' : Array.isArray(expected) ? 'array' : typeof expected; - expect(actual).to.be.a(expectedType); - - if (expected == null) { - expect(actual).to.not.exist; - } else if (Array.isArray(expected)) { - expected.forEach((ex, idx) => _assertEquality(actual[idx], ex)); - } else if (typeof expected === 'object') { - for (let i in expected) { - _assertEquality(actual[i], expected[i]); - } - } else { - expect(actual).to.equal(expected); - } - } catch (e) { - throw e; - } - } }); diff --git a/test/functional/cmap/connection.test.js b/test/functional/cmap/connection.test.js index b299f89f93f..35bb90747bf 100644 --- a/test/functional/cmap/connection.test.js +++ b/test/functional/cmap/connection.test.js @@ -1,6 +1,6 @@ 'use strict'; -const Connection = require('../../../lib/core/cmap/connection'); +const Connection = require('../../../lib/cmap/connection').Connection; const connect = require('../../../lib/core/connection/connect'); const expect = require('chai').expect; const BSON = require('bson'); @@ -16,7 +16,10 @@ describe('Connection', function() { expect(err).to.not.exist; this.defer(_done => conn.destroy(_done)); - conn.command('admin.$cmd', { ismaster: 1 }, (err, ismaster) => { + conn.command('admin.$cmd', { ismaster: 1 }, (err, result) => { + // NODE-2382: remove `result.result` when command returns just a raw response + const ismaster = result.result; + expect(err).to.not.exist; expect(ismaster).to.exist; expect(ismaster.ok).to.equal(1); @@ -40,7 +43,10 @@ describe('Connection', function() { conn.on('commandSucceeded', event => events.push(event)); conn.on('commandFailed', event => events.push(event)); - conn.command('admin.$cmd', { ismaster: 1 }, (err, ismaster) => { + conn.command('admin.$cmd', { ismaster: 1 }, (err, result) => { + // NODE-2382: remove `result.result` when command returns just a raw response + const ismaster = result.result; + expect(err).to.not.exist; expect(ismaster).to.exist; expect(ismaster.ok).to.equal(1); 
@@ -49,4 +55,19 @@ describe('Connection', function() { }); }); }); + + it('should support socket timeouts', function(done) { + const connectOptions = Object.assign({ + host: '240.0.0.1', + connectionType: Connection, + bson: new BSON(), + connectionTimeout: 500 + }); + + connect(connectOptions, err => { + expect(err).to.exist; + expect(err).to.match(/timed out/); + done(); + }); + }); }); diff --git a/test/functional/connection.test.js b/test/functional/connection.test.js index dfc16d9c473..3b6084275c3 100644 --- a/test/functional/connection.test.js +++ b/test/functional/connection.test.js @@ -19,7 +19,7 @@ describe('Connection', function() { var configuration = this.configuration; var client = configuration.newClient( { w: 1 }, - { poolSize: 1, host: '/tmp/mongodb-27017.sock' } + { poolSize: 1, host: '/tmp/mongodb-27017.sock', heartbeatFrequencyMS: 250 } ); client.connect(function(err, client) { @@ -459,7 +459,7 @@ describe('Connection', function() { * @ignore */ it('should correctly reconnect and finish query operation', { - metadata: { requires: { topology: 'single' } }, + metadata: { requires: { topology: 'single', unifiedTopology: false } }, // The actual test we wish to run test: function(done) { diff --git a/test/functional/core/client_metadata.test.js b/test/functional/core/client_metadata.test.js index 7bdcf014457..62089f36f50 100644 --- a/test/functional/core/client_metadata.test.js +++ b/test/functional/core/client_metadata.test.js @@ -22,7 +22,7 @@ describe('Client metadata tests', function() { } ); - expect(server.clientInfo.application.name).to.equal('My application name'); + expect(server.clientMetadata.application.name).to.equal('My application name'); done(); } }); @@ -53,9 +53,8 @@ describe('Client metadata tests', function() { server.on('connect', function(_server) { _server.s.replicaSetState.allServers().forEach(function(x) { - // console.dir(x.clientInfo) - expect(x.clientInfo.application.name).to.equal('My application name'); - expect(x.clientInfo.platform.split('mongodb-core').length).to.equal(2); + expect(x.clientMetadata.application.name).to.equal('My application name'); + expect(x.clientMetadata.platform.split('mongodb-core').length).to.equal(2); }); _server.destroy(done); @@ -86,9 +85,8 @@ describe('Client metadata tests', function() { // Add event listeners _server.once('connect', function(server) { server.connectedProxies.forEach(function(x) { - // console.dir(x.clientInfo) - expect(x.clientInfo.application.name).to.equal('My application name'); - expect(x.clientInfo.platform.split('mongodb-core').length).to.equal(2); + expect(x.clientMetadata.application.name).to.equal('My application name'); + expect(x.clientMetadata.platform.split('mongodb-core').length).to.equal(2); }); server.destroy(done); diff --git a/test/functional/core/server.test.js b/test/functional/core/server.test.js index 0c2a8e2aeb7..c758ad4279f 100644 --- a/test/functional/core/server.test.js +++ b/test/functional/core/server.test.js @@ -1002,8 +1002,7 @@ describe('Server tests', function() { let err; try { expect(error).to.be.an.instanceOf(Error); - const errorMessage = error.reason ? 
error.reason.message : error.message; - expect(errorMessage).to.match(/but this version of the Node.js Driver requires/); + expect(error).to.match(/but this version of the Node.js Driver requires/); } catch (e) { err = e; } diff --git a/test/functional/core/single_mocks/compression.test.js b/test/functional/core/single_mocks/compression.test.js index 513d008b6aa..6d135c01b6a 100644 --- a/test/functional/core/single_mocks/compression.test.js +++ b/test/functional/core/single_mocks/compression.test.js @@ -1,10 +1,11 @@ 'use strict'; const expect = require('chai').expect; -const co = require('co'); const mock = require('mongodb-mock-server'); describe('Single Compression (mocks)', function() { + let server; afterEach(() => mock.cleanup()); + beforeEach(() => mock.createServer().then(s => (server = s))); it("server should recieve list of client's supported compressors in handshake", { metadata: { @@ -19,29 +20,23 @@ describe('Single Compression (mocks)', function() { var serverResponse = Object.assign({}, mock.DEFAULT_ISMASTER); const config = this.configuration; - // Boot the mock - co(function*() { - const server = yield mock.createServer(); - - server.setMessageHandler(request => { - expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); - request.reply(serverResponse); - }); - - const client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'], zlibCompressionLevel: -1 } - }); + server.setMessageHandler(request => { + expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); + request.reply(serverResponse); + }); - client.on('connect', function() { - client.destroy(); - done(); - }); + const client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'], zlibCompressionLevel: -1 } + }); - client.connect(); + client.on('connect', function() { + client.destroy(done); }); + + client.connect(); } }); @@ -63,75 +58,82 @@ describe('Single Compression (mocks)', function() { let serverResponse = Object.assign({}, mock.DEFAULT_ISMASTER); // Boot the mock - co(function*() { - const server = yield mock.createServer(); + let firstIsMasterSeen = false; + server.setMessageHandler(request => { + var doc = request.document; - server.setMessageHandler(request => { - var doc = request.document; - if (currentStep === 0) { + if (doc.ismaster) { + if (!firstIsMasterSeen) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); + expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with no compression request.reply(serverResponse, { compression: { compressor: 'no_compression' } }); - } else if (currentStep === 1) { - expect(server.isCompressed).to.be.false; - // Acknowledge insertion using OP_COMPRESSED with no compression - request.reply( - { ok: 1, n: doc.documents.length, lastOp: new Date() }, - { compression: { compressor: 'no_compression' } } - ); - } else if (currentStep === 2 || currentStep === 3) { - expect(server.isCompressed).to.be.false; - // Acknowledge update using OP_COMPRESSED with no compression - request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'no_compression' } }); - } else if (currentStep === 4) { - expect(server.isCompressed).to.be.false; - request.reply({ ok: 1 }, { compression: { compressor: 'no_compression' 
} }); + currentStep = 1; + firstIsMasterSeen = true; + } else { + // this is an ismaster for initial connection setup in the pool + request.reply(serverResponse); } - currentStep++; - }); - // Attempt to connect - var client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'] } - }); + return; + } + + if (currentStep === 1) { + expect(server.isCompressed).to.be.false; + + // Acknowledge insertion using OP_COMPRESSED with no compression + request.reply( + { ok: 1, n: doc.documents.length, lastOp: new Date() }, + { compression: { compressor: 'no_compression' } } + ); + } else if (currentStep === 2 || currentStep === 3) { + expect(server.isCompressed).to.be.false; + // Acknowledge update using OP_COMPRESSED with no compression + request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'no_compression' } }); + } else if (currentStep === 4) { + expect(server.isCompressed).to.be.false; + request.reply({ ok: 1 }, { compression: { compressor: 'no_compression' } }); + } + currentStep++; + }); + + // Attempt to connect + var client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'] } + }); + + // Connect and try inserting, updating, and removing + // All outbound messages from the driver will be uncompressed + // Inbound messages from the server should be OP_COMPRESSED with no compression + client.on('connect', function(_server) { + _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { + expect(err).to.be.null; + expect(r.result.n).to.equal(1); + + _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function(_err, _r) { + expect(_err).to.be.null; + expect(_r.result.n).to.equal(1); + + _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { + expect(__err).to.be.null; + expect(__r.result.n).to.equal(1); - // Connect and try inserting, updating, and removing - // All outbound messages from the driver will be uncompressed - // Inbound messages from the server should be OP_COMPRESSED with no compression - client.on('connect', function(_server) { - _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { - expect(err).to.be.null; - expect(r.result.n).to.equal(1); - - _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function( - _err, - _r - ) { - expect(_err).to.be.null; - expect(_r.result.n).to.equal(1); - - _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { - expect(__err).to.be.null; - expect(__r.result.n).to.equal(1); - - _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { - expect(___err).to.be.null; - expect(___r.result.ok).to.equal(1); - - client.destroy(); - done(); - }); + _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { + expect(___err).to.be.null; + expect(___r.result.ok).to.equal(1); + + client.destroy(done); }); }); }); }); - - client.connect(); }); + + client.connect(); } } ); @@ -155,75 +157,77 @@ describe('Single Compression (mocks)', function() { compression: ['snappy'] }); - // Boot the mock - co(function*() { - const server = yield mock.createServer(); - - server.setMessageHandler(request => { - var doc = request.document; - if (currentStep === 0) { + let firstIsMasterSeen = false; + server.setMessageHandler(request => { + var doc = request.document; + if (doc.ismaster) { + if 
(!firstIsMasterSeen) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with snappy request.reply(serverResponse, { compression: { compressor: 'snappy' } }); - } else if (currentStep === 1) { - expect(server.isCompressed).to.be.true; - // Acknowledge insertion using OP_COMPRESSED with snappy - request.reply( - { ok: 1, n: doc.documents.length, lastOp: new Date() }, - { compression: { compressor: 'snappy' } } - ); - } else if (currentStep === 2 || currentStep === 3) { - expect(server.isCompressed).to.be.true; - // Acknowledge update using OP_COMPRESSED with snappy - request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'snappy' } }); - } else if (currentStep === 4) { - expect(server.isCompressed).to.be.true; - request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + currentStep = 1; + firstIsMasterSeen = true; + } else { + request.reply(serverResponse); } - currentStep++; - }); - var client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'] } - }); + return; + } + + if (currentStep === 1) { + expect(server.isCompressed).to.be.true; + // Acknowledge insertion using OP_COMPRESSED with snappy + request.reply( + { ok: 1, n: doc.documents.length, lastOp: new Date() }, + { compression: { compressor: 'snappy' } } + ); + } else if (currentStep === 2 || currentStep === 3) { + expect(server.isCompressed).to.be.true; + // Acknowledge update using OP_COMPRESSED with snappy + request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'snappy' } }); + } else if (currentStep === 4) { + expect(server.isCompressed).to.be.true; + request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + } + currentStep++; + }); + + var client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'] } + }); + + // Connect and try inserting, updating, and removing + // All outbound messages from the driver (after initial connection) will be OP_COMPRESSED using snappy + // Inbound messages from the server should be OP_COMPRESSED with snappy + client.on('connect', function(_server) { + _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { + expect(err).to.be.null; + expect(r.result.n).to.equal(1); - // Connect and try inserting, updating, and removing - // All outbound messages from the driver (after initial connection) will be OP_COMPRESSED using snappy - // Inbound messages from the server should be OP_COMPRESSED with snappy - client.on('connect', function(_server) { - _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { - expect(err).to.be.null; - expect(r.result.n).to.equal(1); - - _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function( - _err, - _r - ) { - expect(_err).to.be.null; - expect(_r.result.n).to.equal(1); - - _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { - expect(__err).to.be.null; - expect(__r.result.n).to.equal(1); - - _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { - expect(___err).to.be.null; - expect(___r.result.ok).to.equal(1); - - client.destroy(); - done(); - }); + _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function(_err, _r) { + expect(_err).to.be.null; + 
expect(_r.result.n).to.equal(1); + + _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { + expect(__err).to.be.null; + expect(__r.result.n).to.equal(1); + + _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { + expect(___err).to.be.null; + expect(___r.result.ok).to.equal(1); + + client.destroy(done); }); }); }); }); - - client.connect(); }); + + client.connect(); } } ); @@ -240,7 +244,6 @@ describe('Single Compression (mocks)', function() { test: function(done) { const config = this.configuration; - var server = null; var currentStep = 0; // Prepare the server's response @@ -248,76 +251,79 @@ describe('Single Compression (mocks)', function() { compression: ['zlib'] }); - // Boot the mock - co(function*() { - server = yield mock.createServer(); - - server.setMessageHandler(request => { - var doc = request.document; - if (currentStep === 0) { + let firstIsMasterSeen = false; + server.setMessageHandler(request => { + var doc = request.document; + if (doc.ismaster) { + if (!firstIsMasterSeen) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with zlib request.reply(serverResponse, { compression: { compressor: 'zlib' } }); - } else if (currentStep === 1) { - expect(server.isCompressed).to.be.true; - // Acknowledge insertion using OP_COMPRESSED with zlib - request.reply( - { ok: 1, n: doc.documents.length, lastOp: new Date() }, - { compression: { compressor: 'zlib' } } - ); - } else if (currentStep === 2 || currentStep === 3) { - // Acknowledge update using OP_COMPRESSED with zlib - expect(server.isCompressed).to.be.true; - request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'zlib' } }); - } else if (currentStep === 4) { - expect(server.isCompressed).to.be.true; - request.reply({ ok: 1 }, { compression: { compressor: 'zlib' } }); + currentStep = 1; + firstIsMasterSeen = true; + return; + } else { + request.reply(serverResponse); + return; } - currentStep++; - }); + } - // Attempt to connect - var client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'] } - }); + if (currentStep === 1) { + expect(server.isCompressed).to.be.true; + + // Acknowledge insertion using OP_COMPRESSED with zlib + request.reply( + { ok: 1, n: doc.documents.length, lastOp: new Date() }, + { compression: { compressor: 'zlib' } } + ); + } else if (currentStep === 2 || currentStep === 3) { + // Acknowledge update using OP_COMPRESSED with zlib + expect(server.isCompressed).to.be.true; + request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'zlib' } }); + } else if (currentStep === 4) { + expect(server.isCompressed).to.be.true; + request.reply({ ok: 1 }, { compression: { compressor: 'zlib' } }); + } + currentStep++; + }); + + // Attempt to connect + var client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'] } + }); + + // Connect and try inserting, updating, and removing + // All outbound messages from the driver (after initial connection) will be OP_COMPRESSED using zlib + // Inbound messages from the server should be OP_COMPRESSED with zlib + client.on('connect', function(_server) { + _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { + expect(err).to.be.null; + 
expect(r.result.n).to.equal(1); + + _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function(_err, _r) { + expect(_err).to.be.null; + expect(_r.result.n).to.equal(1); + + _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { + expect(__err).to.be.null; + expect(__r.result.n).to.equal(1); - // Connect and try inserting, updating, and removing - // All outbound messages from the driver (after initial connection) will be OP_COMPRESSED using zlib - // Inbound messages from the server should be OP_COMPRESSED with zlib - client.on('connect', function(_server) { - _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { - expect(err).to.be.null; - expect(r.result.n).to.equal(1); - - _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function( - _err, - _r - ) { - expect(_err).to.be.null; - expect(_r.result.n).to.equal(1); - - _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { - expect(__err).to.be.null; - expect(__r.result.n).to.equal(1); - - _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { - expect(___err).to.be.null; - expect(___r.result.ok).to.equal(1); - - client.destroy(); - done(); - }); + _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { + expect(___err).to.be.null; + expect(___r.result.ok).to.equal(1); + + client.destroy(done); }); }); }); }); - - client.connect(); }); + + client.connect(); } } ); @@ -332,7 +338,6 @@ describe('Single Compression (mocks)', function() { test: function(done) { const config = this.configuration; - var server = null; var currentStep = 0; // Prepare the server's response @@ -340,63 +345,75 @@ describe('Single Compression (mocks)', function() { compression: ['snappy'] }); - // Boot the mock - co(function*() { - server = yield mock.createServer(); + let firstIsMasterSeen = false; + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + if (!firstIsMasterSeen) { + if (doc.compression == null) { + expect(server.isCompressed).to.be.false; + request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + return; + } - server.setMessageHandler(request => { - if (currentStep === 0) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with snappy request.reply(serverResponse, { compression: { compressor: 'snappy' } }); - } else if (currentStep === 1) { - expect(server.isCompressed).to.be.true; - // Acknowledge ping using OP_COMPRESSED with snappy - request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); - } else if (currentStep >= 2) { - expect(server.isCompressed).to.be.false; - // Acknowledge further uncompressible commands using OP_COMPRESSED with snappy - request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + currentStep = 1; + firstIsMasterSeen = true; + return; + } else { + request.reply(serverResponse); + return; } - currentStep++; - }); + } - var client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'] } - }); + if (currentStep === 1) { + expect(server.isCompressed).to.be.true; + // Acknowledge ping using OP_COMPRESSED with snappy + request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + } else if (currentStep >= 2) { + expect(server.isCompressed).to.be.false; + // Acknowledge further uncompressible 
commands using OP_COMPRESSED with snappy + request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + } + currentStep++; + }); - // Connect and try some commands, checking that uncompressible commands are indeed not compressed - client.on('connect', function(_server) { - _server.command('system.$cmd', { ping: 1 }, function(err, r) { - expect(err).to.be.null; - expect(r.result.ok).to.equal(1); + var client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'] } + }); - _server.command('system.$cmd', { ismaster: 1 }, function(_err, _r) { - expect(_err).to.be.null; - expect(_r.result.ok).to.equal(1); + // Connect and try some commands, checking that uncompressible commands are indeed not compressed + client.on('connect', function(_server) { + _server.command('system.$cmd', { ping: 1 }, function(err, r) { + expect(err).to.be.null; + expect(r.result.ok).to.equal(1); - _server.command('system.$cmd', { getnonce: 1 }, function(__err, __r) { - expect(__err).to.be.null; - expect(__r.result.ok).to.equal(1); + _server.command('system.$cmd', { ismaster: 1 }, function(_err, _r) { + expect(_err).to.be.null; + expect(_r.result.ok).to.equal(1); - _server.command('system.$cmd', { ismaster: 1 }, function(___err, ___r) { - expect(___err).to.be.null; - expect(___r.result.ok).to.equal(1); + _server.command('system.$cmd', { getnonce: 1 }, function(__err, __r) { + expect(__err).to.be.null; + expect(__r.result.ok).to.equal(1); - client.destroy(); - done(); - }); + _server.command('system.$cmd', { ismaster: 1 }, function(___err, ___r) { + expect(___err).to.be.null; + expect(___r.result.ok).to.equal(1); + + client.destroy(done); }); }); }); }); - - client.connect(); }); + + client.connect(); } }); }); diff --git a/test/functional/cursor.test.js b/test/functional/cursor.test.js index 8e5c42cece8..5661376778b 100644 --- a/test/functional/cursor.test.js +++ b/test/functional/cursor.test.js @@ -2148,7 +2148,8 @@ describe('Cursor', function() { metadata: { requires: { mongodb: '<=3.5.0', // NOTE: remove this when SERVER-30576 is resolved - topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] + topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], + unifiedTopology: false } }, diff --git a/test/functional/db.test.js b/test/functional/db.test.js index 4f003d3ee4a..b9503f35034 100644 --- a/test/functional/db.test.js +++ b/test/functional/db.test.js @@ -667,25 +667,32 @@ describe('Db', function() { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], - mongodb: '>= 2.8.0' + mongodb: '>= 2.8.0', + unifiedTopology: false } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); + client.connect(function(err, client) { - test.equal(null, err); - var items = []; + expect(err).to.not.exist; + + // run one command to ensure connections exist, otherwise `close` is near immediate + client.db('admin').command({ ping: 1 }, err => { + expect(err).to.not.exist; + + var items = []; + items.push(1); + client.close(function() { + expect(items).to.have.length(2); + done(); + }); - items.push(1); - client.close(function() { - test.equal(2, items.length); - done(); + items.push(2); }); - items.push(2); }); } }); diff --git a/test/functional/mapreduce.test.js 
b/test/functional/mapreduce.test.js index 1991d3e96c4..402dc088aff 100644 --- a/test/functional/mapreduce.test.js +++ b/test/functional/mapreduce.test.js @@ -364,44 +364,54 @@ describe('MapReduce', function() { var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); + const outDb = client.db('outputCollectionDb'); // Create a test collection db.createCollection('test_map_reduce_functions', function(err, collection) { - // Insert some documents to perform map reduce over - collection.insert( - [{ user_id: 1 }, { user_id: 2 }], - configuration.writeConcernMax(), - function(err) { - test.equal(null, err); - // Map function - var map = function() { - emit(this.user_id, 1); // eslint-disable-line - }; - // Reduce function - var reduce = function() { - return 1; - }; - - // Perform the map reduce - collection.mapReduce( - map, - reduce, - { out: { replace: 'tempCollection', db: 'outputCollectionDb' } }, - function(err, collection) { - // Mapreduce returns the temporary collection with the results - collection.findOne({ _id: 1 }, function(err, result) { - test.equal(1, result.value); - - collection.findOne({ _id: 2 }, function(err, result) { + // create the output collection + outDb.createCollection('tempCollection', err => { + test.equal(null, err); + + // Insert some documents to perform map reduce over + collection.insert( + [{ user_id: 1 }, { user_id: 2 }], + configuration.writeConcernMax(), + function(err) { + test.equal(null, err); + // Map function + var map = function() { + emit(this.user_id, 1); // eslint-disable-line + }; + // Reduce function + var reduce = function() { + return 1; + }; + + // Perform the map reduce + collection.mapReduce( + map, + reduce, + { out: { replace: 'tempCollection', db: 'outputCollectionDb' } }, + function(err, collection) { + test.equal(null, err); + + // Mapreduce returns the temporary collection with the results + collection.findOne({ _id: 1 }, function(err, result) { + test.equal(null, err); test.equal(1, result.value); - client.close(done); + collection.findOne({ _id: 2 }, function(err, result) { + test.equal(null, err); + test.equal(1, result.value); + + client.close(done); + }); }); - }); - } - ); - } - ); + } + ); + } + ); + }); }); }); } diff --git a/test/functional/mongo_client.test.js b/test/functional/mongo_client.test.js index 42f52cb4541..c46c81aea9d 100644 --- a/test/functional/mongo_client.test.js +++ b/test/functional/mongo_client.test.js @@ -345,7 +345,8 @@ describe('MongoClient', function() { it('Should correctly set MaxPoolSize on replicaset server', { metadata: { requires: { - topology: ['replicaset'] + topology: ['replicaset'], + unifiedTopology: false } }, @@ -365,7 +366,7 @@ describe('MongoClient', function() { var connections = client.topology.connections(); for (var i = 0; i < connections.length; i++) { - test.equal(30000, connections[i].connectionTimeout); + test.equal(10000, connections[i].connectionTimeout); test.equal(360000, connections[i].socketTimeout); } @@ -396,7 +397,8 @@ describe('MongoClient', function() { it('Should correctly set MaxPoolSize on sharded server', { metadata: { requires: { - topology: ['sharded'] + topology: ['sharded'], + unifiedTopology: false } }, @@ -497,35 +499,15 @@ describe('MongoClient', function() { } }); - /** - * @ignore - */ - it('correctly error out when no socket available on MongoClient `connect` with domain', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 
'ssl', 'heap', 'wiredtiger'] } - }, - - // The actual test we wish to run - test: function(done) { - var configuration = this.configuration; - const client = configuration.newClient('mongodb://test.does.not.exist.com:80/test', { - serverSelectionTimeoutMS: 10 - }); - - client.connect(function(err) { - test.ok(err != null); - - done(); - }); - } - }); - /** * @ignore */ it('correctly connect setting keepAlive to 100', { metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + requires: { + topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], + unifiedTopology: false + } }, // The actual test we wish to run @@ -566,7 +548,10 @@ describe('MongoClient', function() { */ it('default keepAlive behavior', { metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + requires: { + topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], + unifiedTopology: false + } }, // The actual test we wish to run @@ -657,7 +642,7 @@ describe('MongoClient', function() { const client = configuration.newClient(url); client.connect(function(err, client) { test.equal(null, err); - test.equal('hello world', client.topology.clientInfo.application.name); + test.equal('hello world', client.topology.clientMetadata.application.name); client.close(done); }); @@ -679,7 +664,7 @@ describe('MongoClient', function() { const client = configuration.newClient(url, { appname: 'hello world' }); client.connect(err => { test.equal(null, err); - test.equal('hello world', client.topology.clientInfo.application.name); + test.equal('hello world', client.topology.clientMetadata.application.name); client.close(done); }); @@ -806,11 +791,7 @@ describe('MongoClient', function() { }); it('Should use compression from URI', { - metadata: { - requires: { - topology: ['single'] - } - }, + metadata: { requires: { topology: ['single'], unifiedTopology: false } }, // The actual test we wish to run test: function(done) { diff --git a/test/functional/object_id.test.js b/test/functional/object_id.test.js index 1b3eb814f56..93b6aaa9ecd 100644 --- a/test/functional/object_id.test.js +++ b/test/functional/object_id.test.js @@ -213,15 +213,21 @@ describe('ObjectID', function() { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { + test.equal(null, err); + var db = client.db(configuration.db); var collection = db.collection('shouldCorrectlyInsertWithObjectId'); collection.insert({}, { w: 1 }, function(err) { test.equal(null, err); + const firstCompareDate = new Date(); + setTimeout(function() { collection.insert({}, { w: 1 }, function(err) { test.equal(null, err); + const secondCompareDate = new Date(); + collection.find().toArray(function(err, items) { - var compareDate = new Date(); + test.equal(null, err); // Date 1 var date1 = new Date(); @@ -231,15 +237,15 @@ describe('ObjectID', function() { date2.setTime(items[1]._id.generationTime * 1000); // Compare - test.equal(compareDate.getFullYear(), date1.getFullYear()); - test.equal(compareDate.getDate(), date1.getDate()); - test.equal(compareDate.getMonth(), date1.getMonth()); - test.equal(compareDate.getHours(), date1.getHours()); - - test.equal(compareDate.getFullYear(), date2.getFullYear()); - test.equal(compareDate.getDate(), date2.getDate()); - test.equal(compareDate.getMonth(), date2.getMonth()); - test.equal(compareDate.getHours(), date2.getHours()); + 
test.equal(firstCompareDate.getFullYear(), date1.getFullYear()); + test.equal(firstCompareDate.getDate(), date1.getDate()); + test.equal(firstCompareDate.getMonth(), date1.getMonth()); + test.equal(firstCompareDate.getHours(), date1.getHours()); + + test.equal(secondCompareDate.getFullYear(), date2.getFullYear()); + test.equal(secondCompareDate.getDate(), date2.getDate()); + test.equal(secondCompareDate.getMonth(), date2.getMonth()); + test.equal(secondCompareDate.getHours(), date2.getHours()); // Let's close the db client.close(done); }); diff --git a/test/functional/operation_example.test.js b/test/functional/operation_example.test.js index 95939260ff2..45b89d3b5b2 100644 --- a/test/functional/operation_example.test.js +++ b/test/functional/operation_example.test.js @@ -3718,7 +3718,7 @@ describe('Operation Examples', function() { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { - test.equal(null, err); + expect(err).to.not.exist; // LINE var MongoClient = require('mongodb').MongoClient, // LINE test = require('assert'); @@ -3732,8 +3732,7 @@ describe('Operation Examples', function() { // BEGIN // Close the connection with a callback that is optional client.close(function(err) { - test.equal(null, err); - + expect(err).to.not.exist; done(); }); }); diff --git a/test/functional/promises_collection.test.js b/test/functional/promises_collection.test.js index 94f5b9de827..eb92f7f8ec8 100644 --- a/test/functional/promises_collection.test.js +++ b/test/functional/promises_collection.test.js @@ -26,7 +26,6 @@ describe('Promises (Collection)', function() { const client = configuration.newClient(url); client.connect().then(function(client) { - test.equal(1, client.topology.connections().length); var db = client.db(configuration.db); db.collection('insertOne') diff --git a/test/functional/promises_cursor.test.js b/test/functional/promises_cursor.test.js deleted file mode 100644 index b8c613055f3..00000000000 --- a/test/functional/promises_cursor.test.js +++ /dev/null @@ -1,40 +0,0 @@ -'use strict'; -var test = require('./shared').assert; -var setupDatabase = require('./shared').setupDatabase; -var f = require('util').format; - -describe('Promises (Cursor)', function() { - before(function() { - return setupDatabase(this.configuration); - }); - - it('Should correctly execute Collection.prototype.insertOne as promise', { - metadata: { - requires: { - topology: ['single'] - } - }, - - // The actual test we wish to run - test: function(done) { - var configuration = this.configuration; - var url = configuration.url(); - url = - url.indexOf('?') !== -1 - ? 
f('%s&%s', url, 'maxPoolSize=100') - : f('%s?%s', url, 'maxPoolSize=100'); - - const client = configuration.newClient(url); - client.connect().then(function(client) { - var db = client.db(configuration.db); - test.equal(1, client.topology.connections().length); - - db.collection('insertOne') - .insertOne({ a: 1 }) - .then(function() { - client.close(done); - }); - }); - } - }); -}); diff --git a/test/functional/promises_db.test.js b/test/functional/promises_db.test.js index a5f45c9b687..e06a9e706f4 100644 --- a/test/functional/promises_db.test.js +++ b/test/functional/promises_db.test.js @@ -16,7 +16,7 @@ describe('Promises (Db)', function() { }, // The actual test we wish to run - test: function(done) { + test: function() { var configuration = this.configuration; var url = configuration.url(); url = @@ -25,11 +25,7 @@ describe('Promises (Db)', function() { : f('%s?%s', url, 'maxPoolSize=100'); const client = configuration.newClient(url); - client.connect().then(function(client) { - test.equal(1, client.topology.connections().length); - - client.close(done); - }); + return client.connect().then(() => client.close()); } }); diff --git a/test/functional/scram_sha_256.test.js b/test/functional/scram_sha_256.test.js index 36d3d41eb7b..76edef5a503 100644 --- a/test/functional/scram_sha_256.test.js +++ b/test/functional/scram_sha_256.test.js @@ -186,10 +186,7 @@ describe('SCRAM-SHA-256 auth', function() { return withClient( this.configuration.newClient({}, options), () => Promise.reject(new Error('This request should have failed to authenticate')), - err => { - const errMessage = err.reason ? err.reason.message : err; - expect(errMessage).to.match(/Authentication failed/); - } + err => expect(err).to.match(/Authentication failed/) ); } }); @@ -223,10 +220,7 @@ describe('SCRAM-SHA-256 auth', function() { withClient( this.configuration.newClient({}, options), () => Promise.reject(new Error('This request should have failed to authenticate')), - err => { - const errMessage = err.reason ? err.reason.message : err; - expect(errMessage).to.match(/Authentication failed/); - } + err => expect(err).to.match(/Authentication failed/) ); return Promise.all([getErrorMsg(noUsernameOptions), getErrorMsg(badPasswordOptions)]); diff --git a/test/functional/uri.test.js b/test/functional/uri.test.js index c99c2014bc6..a98b77e0375 100644 --- a/test/functional/uri.test.js +++ b/test/functional/uri.test.js @@ -14,7 +14,7 @@ describe('URI', function() { { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { requires: { topology: 'single' } }, + metadata: { requires: { topology: 'single', unifiedTopology: false } }, // The actual test we wish to run test: function(done) { diff --git a/test/spec/connection-monitoring-and-pooling/README.rst b/test/spec/connection-monitoring-and-pooling/README.rst new file mode 100644 index 00000000000..6480d7f43b7 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/README.rst @@ -0,0 +1,158 @@ +.. role:: javascript(code) + :language: javascript + +======================================== +Connection Monitoring and Pooling (CMAP) +======================================== + +.. contents:: + +-------- + +Introduction +============ + +The YAML and JSON files in this directory are platform-independent tests that +drivers can use to prove their conformance to the Connection Monitoring and Pooling (CMAP) Spec. 
+ +Several prose tests, which are not easily expressed in YAML, are also presented +in this file. Those tests will need to be manually implemented by each driver. + +Common Test Format +================== + +Each YAML file has the following keys: + +- ``version``: A version number indicating the expected format of the spec tests (current version = 1) +- ``style``: A string indicating what style of tests this file contains. Currently ``unit`` is the only valid value +- ``description``: A text description of what the test is meant to assert + +Unit Test Format: +================= + +All Unit Tests have some of the following fields: + +- ``poolOptions``: if present, connection pool options to use when creating a pool +- ``operations``: A list of operations to perform. All operations support the following fields: + + - ``name``: A string describing which operation to issue. + - ``thread``: The name of the thread in which to run this operation. If not specified, runs in the default thread + +- ``error``: Indicates that the main thread is expected to error during this test. An error may include the following fields: + + - ``type``: the type of error emitted + - ``message``: the message associated with that error + - ``address``: Address of the pool emitting the error + +- ``events``: An array of all connection monitoring events expected to occur while running ``operations``. An event may contain any of the following fields: + + - ``type``: The type of event emitted + - ``address``: The address of the pool emitting the event + - ``connectionId``: The id of a connection associated with the event + - ``options``: Options used to create the pool + - ``reason``: A reason giving more information on why the event was emitted + +- ``ignore``: An array of event names to ignore + +Valid Unit Test Operations are the following: + +- ``start(target)``: Starts a new thread named ``target`` + + - ``target``: The name of the new thread to start + +- ``wait(ms)``: Sleep the current thread for ``ms`` milliseconds + + - ``ms``: The number of milliseconds to sleep the current thread for + +- ``waitForThread(target)``: wait for thread ``target`` to finish executing. Propagate any errors to the main thread. + + - ``target``: The name of the thread to wait for. + +- ``waitForEvent(event, count)``: block the current thread until ``event`` has occurred ``count`` times + + - ``event``: The name of the event + - ``count``: The number of times the event must occur (counting from the start of the test) + +- ``label = pool.checkOut()``: call ``checkOut`` on pool, returning the checked out connection + + - ``label``: If specified, associate this label with the returned connection, so that it may be referenced in later operations + +- ``pool.checkIn(connection)``: call ``checkIn`` on pool + + - ``connection``: A string label identifying which connection to check in.
Should be a label that was previously set with ``checkOut`` + +- ``pool.clear()``: call ``clear`` on Pool +- ``pool.close()``: call ``close`` on Pool + +Spec Test Match Function +======================== + +The definition of MATCH or MATCHES in the Spec Test Runner is as follows: + +- MATCH takes two values, ``expected`` and ``actual`` +- Notation is "Assert [actual] MATCHES [expected]" +- Assertion passes if ``expected`` is a subset of ``actual``, with the values ``42`` and ``"42"`` acting as placeholders for "any value" + +Pseudocode implementation of ``actual`` MATCHES ``expected``: + +:: + + If expected is "42" or 42: + Assert that actual exists (is not null or undefined) + Else: + Assert that actual is of the same JSON type as expected + If expected is a JSON array: + For every idx/value in expected: + Assert that actual[idx] MATCHES value + Else if expected is a JSON object: + For every key/value in expected: + Assert that actual[key] MATCHES value + Else: + Assert that expected equals actual + +Unit Test Runner: +================= + +For the unit tests, the behavior of a Connection is irrelevant beyond the need to assert ``connection.id``. Drivers MAY use a mock connection class for testing the pool behavior in unit tests. + +For each YAML file with ``style: unit``: + +- Create a Pool ``pool``, subscribe to and capture any Connection Monitoring events emitted in order. + + - If ``poolOptions`` is specified, use those options to initialize the pool + - The returned pool must have an ``address`` set as a string value. + +- Execute each ``operation`` in ``operations`` + + - If a ``thread`` is specified, execute in that corresponding thread. Otherwise, execute in the main thread. + +- Wait for the main thread to finish executing all of its operations +- If ``error`` is present: + + - Assert that an actual error ``actualError`` was thrown by the main thread + - Assert that ``actualError`` MATCHES ``error`` + +- Else: + + - Assert that no errors were thrown by the main thread + +- Calculate ``actualEvents`` as every Connection Event emitted whose ``type`` is not in ``ignore`` +- If ``events`` is not empty, then for every ``idx``/``expectedEvent`` in ``events``: + + - Assert that ``actualEvents[idx]`` exists + - Assert that ``actualEvents[idx]`` MATCHES ``expectedEvent`` + + +It is important to note that the ``ignore`` list is used for calculating ``actualEvents``, but is NOT used for the ``waitForEvent`` command. + +Prose Tests +=========== + +The following tests have not yet been automated, but MUST still be tested: + +#. All ConnectionPoolOptions MUST be specified at the MongoClient level +#. All ConnectionPoolOptions MUST be the same for all pools created by a MongoClient +#. A user MUST be able to specify all ConnectionPoolOptions via a URI string +#. A user MUST be able to subscribe to Connection Monitoring Events in a manner idiomatic to their language and driver +#. When a check out attempt fails because connection set up throws an error, + assert that a ConnectionCheckOutFailedEvent with reason="connectionError" is emitted.
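As a concrete illustration of the MATCH rules described in the README above, the following is a minimal JavaScript sketch (the repository's test language). It is illustrative only: the ``matches`` helper name and the use of Node's built-in ``assert`` module are assumptions made for this sketch, not part of the driver or of the spec::

    'use strict';
    const assert = require('assert');

    // Sketch of the spec's MATCH rules: `expected` must be a subset of `actual`,
    // with 42 / "42" acting as "any value" placeholders.
    function matches(actual, expected) {
      if (expected === 42 || expected === '42') {
        // placeholder: only assert that the actual value exists
        assert(actual !== null && actual !== undefined);
      } else if (Array.isArray(expected)) {
        assert(Array.isArray(actual));
        expected.forEach((value, idx) => matches(actual[idx], value));
      } else if (expected !== null && typeof expected === 'object') {
        assert(actual !== null && typeof actual === 'object');
        Object.keys(expected).forEach(key => matches(actual[key], expected[key]));
      } else {
        // scalars must compare strictly equal
        assert.strictEqual(actual, expected);
      }
    }

    // Example: connectionId/address use the 42 placeholder, type must match exactly
    matches(
      { type: 'ConnectionCheckedOut', connectionId: 1, address: '127.0.0.1:27017' },
      { type: 'ConnectionCheckedOut', connectionId: 42, address: 42 }
    );

Per the runner description above, a driver would apply such a check to each ``expectedEvent`` in ``events`` against ``actualEvents[idx]``, after filtering out the event types listed in ``ignore``.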
diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json new file mode 100644 index 00000000000..7ed67902285 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json @@ -0,0 +1,48 @@ +{ + "version": 1, + "style": "unit", + "description": "must have an ID number associated with it", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolClosed", + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml new file mode 100644 index 00000000000..5b7b660e54a --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must have an ID number associated with it +operations: + - name: checkOut + - name: checkOut +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionPoolClosed + - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json new file mode 100644 index 00000000000..9b839e8f060 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json @@ -0,0 +1,48 @@ +{ + "version": 1, + "style": "unit", + "description": "must have IDs assigned in order of creation", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolClosed", + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml new file mode 100644 index 00000000000..162acfa7975 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must have IDs assigned in order of creation +operations: + - name: checkOut + - name: checkOut +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: 
ConnectionCreated + connectionId: 1 + address: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 2 + address: 42 + - type: ConnectionCheckedOut + connectionId: 2 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionPoolClosed + - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json new file mode 100644 index 00000000000..a73afbf752b --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json @@ -0,0 +1,46 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy checked in connection if pool has been closed", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "close" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "poolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml new file mode 100644 index 00000000000..cf9bdfc1d70 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must destroy checked in connection if pool has been closed +operations: + - name: checkOut + label: conn + - name: close + - name: checkIn + connection: conn +events: + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionPoolClosed + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + - type: ConnectionClosed + connectionId: 1 + reason: poolClosed + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json new file mode 100644 index 00000000000..600c0520719 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json @@ -0,0 +1,46 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy checked in connection if it is stale", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "clear" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "stale", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml new file mode 
100644 index 00000000000..2c95d5c03b6 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must destroy checked in connection if it is stale +operations: + - name: checkOut + label: conn + - name: clear + - name: checkIn + connection: conn +events: + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionPoolCleared + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + - type: ConnectionClosed + connectionId: 1 + reason: stale + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json new file mode 100644 index 00000000000..015928c50d3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json @@ -0,0 +1,41 @@ +{ + "version": 1, + "style": "unit", + "description": "must make valid checked in connection available", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml new file mode 100644 index 00000000000..bebc035f702 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml @@ -0,0 +1,24 @@ +version: 1 +style: unit +description: must make valid checked in connection available +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: checkOut +events: + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted \ No newline at end of file diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin.json b/test/spec/connection-monitoring-and-pooling/pool-checkin.json new file mode 100644 index 00000000000..7073895ad2a --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin.json @@ -0,0 +1,30 @@ +{ + "version": 1, + "style": "unit", + "description": "must have a method of allowing the driver to check in a connection", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionCheckOutStarted", + "ConnectionCheckedOut" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin.yml new file mode 100644 index 00000000000..c2560a5cd3b --- /dev/null +++ 
b/test/spec/connection-monitoring-and-pooling/pool-checkin.yml @@ -0,0 +1,19 @@ +version: 1 +style: unit +description: must have a method of allowing the driver to check in a connection +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn +events: + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionCheckOutStarted + - ConnectionCheckedOut diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json new file mode 100644 index 00000000000..0343fa75568 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json @@ -0,0 +1,26 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to check out a connection", + "operations": [ + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml new file mode 100644 index 00000000000..b0f61a275d6 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml @@ -0,0 +1,15 @@ +version: 1 +style: unit +description: must be able to check out a connection +operations: + - name: checkOut +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady \ No newline at end of file diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json new file mode 100644 index 00000000000..3823c23a780 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json @@ -0,0 +1,64 @@ +{ + "version": 1, + "style": "unit", + "description": "must throw error if checkOut is called on a closed pool", + "operations": [ + { + "name": "checkOut", + "label": "conn1" + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "close" + }, + { + "name": "checkOut" + } + ], + "error": { + "type": "PoolClosedError", + "message": "Attempted to check out a connection from closed connection pool" + }, + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "connectionId": 42 + }, + { + "type": "ConnectionCheckedIn", + "address": 42, + "connectionId": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "address": 42, + "reason": "poolClosed" + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml new file mode 100644 index 00000000000..6621685545a --- /dev/null +++ 
b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml @@ -0,0 +1,36 @@ +version: 1 +style: unit +description: must throw error if checkOut is called on a closed pool +operations: + - name: checkOut + label: conn1 + - name: checkIn + connection: conn1 + - name: close + - name: checkOut +error: + type: PoolClosedError + message: Attempted to check out a connection from closed connection pool +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + address: 42 + connectionId: 42 + - type: ConnectionCheckedIn + address: 42 + connectionId: 42 + - type: ConnectionPoolClosed + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutFailed + address: 42 + reason: poolClosed +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json new file mode 100644 index 00000000000..fee0d076cf1 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json @@ -0,0 +1,66 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to check out multiple connections at the same time", + "operations": [ + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForThread", + "target": "thread1" + }, + { + "name": "waitForThread", + "target": "thread2" + }, + { + "name": "waitForThread", + "target": "thread3" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml new file mode 100644 index 00000000000..714506ef7fe --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml @@ -0,0 +1,37 @@ +version: 1 +style: unit +description: must be able to check out multiple connections at the same time +operations: + - name: start + target: thread1 + - name: start + target: thread2 + - name: start + target: thread3 + - name: checkOut + thread: thread1 + - name: checkOut + thread: thread2 + - name: checkOut + thread: thread3 + - name: waitForThread + target: thread1 + - name: waitForThread + target: thread2 + - name: waitForThread + target: thread3 +events: + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionPoolCreated + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json new file mode 100644 index 00000000000..74325d655d3 --- /dev/null +++ 
b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json @@ -0,0 +1,58 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy and must not check out an idle connection if found while iterating available connections", + "poolOptions": { + "maxIdleTimeMS": 10 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "wait", + "ms": 50 + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "idle", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml new file mode 100644 index 00000000000..415906bb576 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml @@ -0,0 +1,35 @@ +version: 1 +style: unit +description: must destroy and must not check out an idle connection if found while iterating available connections +poolOptions: + maxIdleTimeMS: 10 +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: wait + ms: 50 + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + # In between these, wait so connection becomes idle + - type: ConnectionClosed + connectionId: 1 + reason: idle + address: 42 + - type: ConnectionCheckedOut + connectionId: 2 + address: 42 +ignore: + - ConnectionReady + - ConnectionCreated + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json new file mode 100644 index 00000000000..67ee507fe88 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json @@ -0,0 +1,58 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy and must not check out a stale connection if found while iterating available connections", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "clear" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "stale", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml new file mode 100644 index 00000000000..c434f4b0656 --- 
/dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml @@ -0,0 +1,33 @@ +version: 1 +style: unit +description: must destroy and must not check out a stale connection if found while iterating available connections +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: clear + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + - type: ConnectionPoolCleared + address: 42 + - type: ConnectionClosed + connectionId: 1 + reason: stale + address: 42 + - type: ConnectionCheckedOut + connectionId: 2 + address: 42 +ignore: + - ConnectionReady + - ConnectionCreated + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json new file mode 100644 index 00000000000..e1fb9d07837 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json @@ -0,0 +1,48 @@ +{ + "version": 1, + "style": "unit", + "description": "When a pool is closed, it MUST first destroy all available connections in that pool", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkOut" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionCheckedIn", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 2, + "reason": "poolClosed", + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted", + "ConnectionCheckedOut" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml new file mode 100644 index 00000000000..65b13a6d51b --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml @@ -0,0 +1,28 @@ +version: 1 +style: unit +description: When a pool is closed, it MUST first destroy all available connections in that pool +operations: + - name: checkOut + - name: checkOut + label: conn + - name: checkOut + - name: checkIn + connection: conn + - name: close +events: + - type: ConnectionCheckedIn + connectionId: 2 + address: 42 + - type: ConnectionClosed + connectionId: 2 + reason: poolClosed + address: 42 + - type: ConnectionPoolClosed + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionPoolCreated + - ConnectionCheckOutStarted + - ConnectionCheckedOut + diff --git a/test/spec/connection-monitoring-and-pooling/pool-close.json b/test/spec/connection-monitoring-and-pooling/pool-close.json new file mode 100644 index 00000000000..fe083d73e63 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-close.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to manually close a pool", + "operations": [ + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-close.yml b/test/spec/connection-monitoring-and-pooling/pool-close.yml new file mode 
100644 index 00000000000..2562224b43c --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-close.yml @@ -0,0 +1,11 @@ +version: 1 +style: unit +description: must be able to manually close a pool +operations: + - name: close +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionPoolClosed + address: 42 diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json new file mode 100644 index 00000000000..b585d0daec7 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json @@ -0,0 +1,129 @@ +{ + "version": 1, + "style": "unit", + "description": "must never exceed maxPoolSize total connections", + "poolOptions": { + "maxPoolSize": 3 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn1" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn2" + }, + { + "name": "checkIn", + "connection": "conn2" + }, + { + "name": "checkOut" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 5 + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "waitForThread", + "target": "thread1" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml new file mode 100644 index 00000000000..64e521c7ec3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml @@ -0,0 +1,71 @@ +version: 1 +style: unit +description: must never exceed maxPoolSize total connections +poolOptions: + maxPoolSize: 3 +operations: + - name: checkOut + label: conn1 + - name: checkOut + - name: checkOut + label: conn2 + - name: checkIn + connection: conn2 + - name: checkOut + - name: start + target: thread1 + - name: checkOut + thread: thread1 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 5 + - name: checkIn + connection: conn1 + - name: waitForThread + target: thread1 +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: 
ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json new file mode 100644 index 00000000000..7b5cf202b31 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json @@ -0,0 +1,50 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to start a pool with minPoolSize connections", + "poolOptions": { + "minPoolSize": 3 + }, + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 3 + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionClosed", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml new file mode 100644 index 00000000000..d87f7feec34 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml @@ -0,0 +1,31 @@ +version: 1 +style: unit +description: must be able to start a pool with minPoolSize connections +poolOptions: + minPoolSize: 3 +operations: + - name: waitForEvent + event: ConnectionCreated + count: 3 + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + # Ensures that by the time pool is closed, there are at least 3 connections + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionReady + - ConnectionClosed + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json new file mode 100644 index 00000000000..4e8223f91e3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json @@ -0,0 +1,32 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to start a pool with various options set", + "poolOptions": { + "maxPoolSize": 50, + "minPoolSize": 5, + "maxIdleTimeMS": 100 + }, + "operations": [ + { + "name": "waitForEvent", + "event": 
"ConnectionPoolCreated", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": { + "maxPoolSize": 50, + "minPoolSize": 5, + "maxIdleTimeMS": 100 + } + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml new file mode 100644 index 00000000000..32c8d0e54c8 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml @@ -0,0 +1,21 @@ +version: 1 +style: unit +description: must be able to start a pool with various options set +poolOptions: + maxPoolSize: 50 + minPoolSize: 5 + maxIdleTimeMS: 100 +operations: + - name: waitForEvent + event: ConnectionPoolCreated + count: 1 +events: + - type: ConnectionPoolCreated + address: 42 + options: + maxPoolSize: 50 + minPoolSize: 5 + maxIdleTimeMS: 100 +ignore: + - ConnectionCreated + - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-create.json b/test/spec/connection-monitoring-and-pooling/pool-create.json new file mode 100644 index 00000000000..8c1f85537f9 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to create a pool", + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionPoolCreated", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + } + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-create.yml b/test/spec/connection-monitoring-and-pooling/pool-create.yml new file mode 100644 index 00000000000..f4989e8d4b3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create.yml @@ -0,0 +1,12 @@ +version: 1 +style: unit +description: must be able to create a pool +operations: + - name: waitForEvent + event: ConnectionPoolCreated + count: 1 +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json new file mode 100644 index 00000000000..c58fbadcff2 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json @@ -0,0 +1,192 @@ +{ + "version": 1, + "style": "unit", + "description": "must issue Connections to threads in the order that the threads entered the queue", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1", + "label": "conn1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 2 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2", + "label": "conn2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 3 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3", + "label": "conn3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "start", + "target": "thread4" + }, + { + "name": "checkOut", + "thread": "thread4", + "label": "conn4" + 
}, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 5 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForThread", + "target": "thread1" + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "waitForThread", + "target": "thread2" + }, + { + "name": "checkIn", + "connection": "conn2" + }, + { + "name": "waitForThread", + "target": "thread3" + }, + { + "name": "checkIn", + "connection": "conn3" + }, + { + "name": "waitForThread", + "target": "thread4" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionPoolCreated" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml new file mode 100644 index 00000000000..024ec69316a --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml @@ -0,0 +1,124 @@ +version: 1 +style: unit +description: must issue Connections to threads in the order that the threads entered the queue +poolOptions: + maxPoolSize: 1 + waitQueueTimeoutMS: 5000 +operations: + # Check out sole connection in pool + - name: checkOut + label: conn0 + # Create 4 threads, have them all queue up for connections + # Note: this might become non-deterministic depending on how you + # implement your test runner. The goal is for each thread to + # have started and begun checkOut before the next thread starts. + # The sleep operations should make this more consistent. + - name: start + target: thread1 + - name: checkOut + thread: thread1 + label: conn1 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 2 + # Give thread1 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 + - name: start + target: thread2 + - name: checkOut + thread: thread2 + label: conn2 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 3 + # Give thread2 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 + - name: start + target: thread3 + - name: checkOut + thread: thread3 + label: conn3 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 4 + # Give thread3 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. 
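+  # (note: the waitForEvent counts are cumulative — the main thread's initial checkOut is
+  #  event 1, and each newly started thread contributes one more ConnectionCheckOutStarted)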
+ - name: wait + ms: 100 + - name: start + target: thread4 + - name: checkOut + thread: thread4 + label: conn4 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 5 + # Give thread4 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 + # From main thread, keep checking in connection and then wait for appropriate thread + # Test will timeout if threads are not enqueued in proper order + - name: checkIn + connection: conn0 + - name: waitForThread + target: thread1 + - name: checkIn + connection: conn1 + - name: waitForThread + target: thread2 + - name: checkIn + connection: conn2 + - name: waitForThread + target: thread3 + - name: checkIn + connection: conn3 + - name: waitForThread + target: thread4 +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionPoolCreated diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json new file mode 100644 index 00000000000..f28d69f61c3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json @@ -0,0 +1,71 @@ +{ + "version": 1, + "style": "unit", + "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 100 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForThread", + "target": "thread1" + } + ], + "error": { + "type": "WaitQueueTimeoutError", + "message": "Timed out while checking out a connection from connection pool" + }, + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "timeout", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionPoolCreated" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml new file mode 100644 index 00000000000..1a98cba8359 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml @@ -0,0 
+1,46 @@ +version: 1 +style: unit +description: must aggressively timeout threads enqueued longer than waitQueueTimeoutMS +poolOptions: + maxPoolSize: 1 + waitQueueTimeoutMS: 100 +operations: + # Check out only possible connection + - name: checkOut + label: conn0 + # Start a thread, have it enter the wait queue + - name: start + target: thread1 + - name: checkOut + thread: thread1 + # Wait for other thread to time out, then check in connection + - name: waitForEvent + event: ConnectionCheckOutFailed + count: 1 + - name: checkIn + connection: conn0 + # Rejoin thread1, should experience error + - name: waitForThread + target: thread1 +error: + type: WaitQueueTimeoutError + message: Timed out while checking out a connection from connection pool +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutFailed + reason: timeout + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionPoolCreated diff --git a/test/tools/run_each_test.sh b/test/tools/run_each_test.sh index f7722b8f6c0..78921c8b331 100755 --- a/test/tools/run_each_test.sh +++ b/test/tools/run_each_test.sh @@ -6,4 +6,4 @@ if [ "$#" -ne 1 ]; then fi TEST_PATH=$1 -find $TEST_PATH -type f \( -iname "*_tests.js" ! -iname "*atlas*" ! -path "*node-next*" \) -exec npx mocha {} \; +find $TEST_PATH -type f \( -iname "*.test.js" ! -iname "*atlas*" ! -path "*node-next*" \) -exec npx mocha {} \; diff --git a/test/tools/runner/config.js b/test/tools/runner/config.js index 808c50e098f..faca173b6fb 100644 --- a/test/tools/runner/config.js +++ b/test/tools/runner/config.js @@ -62,7 +62,7 @@ class NativeConfiguration { return new MongoClient( dbOptions, this.usingUnifiedTopology() - ? Object.assign({ useUnifiedTopology: true }, serverOptions) + ? 
Object.assign({ useUnifiedTopology: true, minHeartbeatFrequencyMS: 100 }, serverOptions) : serverOptions ); } @@ -71,6 +71,7 @@ class NativeConfiguration { serverOptions = Object.assign({}, { haInterval: 100 }, serverOptions); if (this.usingUnifiedTopology()) { serverOptions.useUnifiedTopology = true; + serverOptions.minHeartbeatFrequencyMS = 100; } // Fall back diff --git a/test/tools/runner/index.js b/test/tools/runner/index.js index d477b4a0043..d70967837b3 100644 --- a/test/tools/runner/index.js +++ b/test/tools/runner/index.js @@ -98,3 +98,12 @@ after(() => mock.cleanup()); require('./plugins/deferred'); require('./plugins/session_leak_checker'); require('./plugins/client_leak_checker'); + +// configure mocha and chai +require('mocha-sinon'); +const chai = require('chai'); +chai.use(require('sinon-chai')); +chai.use(require('../../functional/spec-runner/matcher').default); +chai.config.includeStack = true; +chai.config.showDiff = true; +chai.config.truncateThreshold = 0; diff --git a/test/tools/sdam_viz b/test/tools/sdam_viz index d9ad4053de1..31b69e81453 100755 --- a/test/tools/sdam_viz +++ b/test/tools/sdam_viz @@ -2,7 +2,7 @@ 'use strict'; const { MongoClient } = require('../..'); -const arrayStrictEqual = require('../../lib/core/utils').arrayStrictEqual; +const visualizeMonitoringEvents = require('./utils').visualizeMonitoringEvents; const chalk = require('chalk'); const argv = require('yargs') .usage('Usage: $0 [options] ') @@ -24,7 +24,6 @@ const client = new MongoClient(uri, { useUnifiedTopology: !argv.legacy }); -let workloadInterrupt = false; async function run() { print( `connecting to: ${chalk.bold(uri)} using ${chalk.bold( @@ -32,139 +31,53 @@ async function run() { )} topology` ); - client.on('serverHeartbeatSucceeded', event => - print( - `${chalk.yellow('heartbeat')} ${chalk.green('succeeded')} host: '${ - event.connectionId - }' ${chalk.gray(`(${event.duration} ms)`)}` - ) - ); - - client.on('serverHeartbeatFailed', event => - print( - `${chalk.yellow('heartbeat')} ${chalk.red('failed')} host: '${ - event.connectionId - }' ${chalk.gray(`(${event.duration} ms)`)}` - ) - ); - - // server information - client.on('serverOpening', event => { - print( - `${chalk.cyan('server')} [${event.address}] ${chalk.bold('opening')} in topology#${ - event.topologyId - }` - ); - }); - - client.on('serverClosed', event => { - print( - `${chalk.cyan('server')} [${event.address}] ${chalk.bold('closed')} in topology#${ - event.topologyId - }` - ); - }); - - client.on('serverDescriptionChanged', event => { - print(`${chalk.cyan('server')} [${event.address}] changed:`); - console.log(serverDescriptionDiff(event.previousDescription, event.newDescription)); - }); - - // topology information - client.on('topologyOpening', event => { - print(`${chalk.magenta('topology')} adding topology#${event.topologyId}`); - }); - - client.on('topologyClosed', event => { - print(`${chalk.magenta('topology')} removing topology#${event.topologyId}`); - }); - - client.on('topologyDescriptionChanged', event => { - const diff = topologyDescriptionDiff(event.previousDescription, event.newDescription); - if (diff !== '') { - print(`${chalk.magenta('topology')} [topology#${event.topologyId}] changed:`); - console.log(diff); - } - }); - + visualizeMonitoringEvents(client); await client.connect(); if (argv.workload) { - while (!workloadInterrupt) { - await wait(2000); - - try { - print(`${chalk.yellow('workload')} issuing find...`); - const result = await client - .db('test') - .collection('test') - .find({}) - .limit(1) 
- .toArray(); - print(`${chalk.yellow('workload')} find completed: ${JSON.stringify(result)}`); - } catch (e) { - print(`${chalk.yellow('workload')} find failed: ${e.message}`); - } - } + scheduleWorkload(client); } } -const wait = ms => new Promise(resolve => setTimeout(resolve, ms)); - -function diff(lhs, rhs, fields, comparator) { - return fields.reduce((diff, field) => { - if (lhs[field] == null || rhs[field] == null) { - return diff; - } - - if (!comparator(lhs[field], rhs[field])) { - diff.push( - ` ${field}: ${chalk.green(`[${lhs[field]}]`)} => ${chalk.green(`[${rhs[field]}]`)}` - ); - } - - return diff; - }, []); -} - -function serverDescriptionDiff(lhs, rhs) { - const objectIdFields = ['electionId']; - const arrayFields = ['hosts', 'tags']; - const simpleFields = [ - 'type', - 'minWireVersion', - 'me', - 'setName', - 'setVersion', - 'electionId', - 'primary', - 'logicalSessionTimeoutMinutes' - ]; +let workloadTimer; +let workloadCounter = 0; +let workloadInterrupt = false; +async function scheduleWorkload(client) { + if (!workloadInterrupt) { + // immediately reschedule work + workloadTimer = setTimeout(() => scheduleWorkload(client), 7000); + } - return diff(lhs, rhs, simpleFields, (x, y) => x === y) - .concat(diff(lhs, rhs, arrayFields, (x, y) => arrayStrictEqual(x, y))) - .concat(diff(lhs, rhs, objectIdFields, (x, y) => x.equals(y))) - .join(',\n'); -} + const currentWorkload = workloadCounter++; -function topologyDescriptionDiff(lhs, rhs) { - const simpleFields = [ - 'type', - 'setName', - 'maxSetVersion', - 'stale', - 'compatible', - 'compatibilityError', - 'logicalSessionTimeoutMinutes', - 'error', - 'commonWireVersion' - ]; + try { + print(`${chalk.yellow(`workload#${currentWorkload}`)} issuing find...`); + const result = await client + .db('test') + .collection('test') + .find({}, { socketTimeout: 2000 }) + .limit(1) + .toArray(); - return diff(lhs, rhs, simpleFields, (x, y) => x === y).join(',\n'); + print( + `${chalk.yellow(`workload#${currentWorkload}`)} find completed: ${JSON.stringify(result)}` + ); + } catch (e) { + print(`${chalk.yellow(`workload#${currentWorkload}`)} find failed: ${e.message}`); + } } -run().catch(error => console.log('Caught', error)); +let exitRequestCount = 0; process.on('SIGINT', async function() { + exitRequestCount++; + if (exitRequestCount > 3) { + console.log('force quitting...'); + process.exit(1); + } + workloadInterrupt = true; + clearTimeout(workloadTimer); await client.close(); }); + +run().catch(error => console.log('Caught', error)); diff --git a/test/tools/utils.js b/test/tools/utils.js index ec8826d88db..a766d8cda57 100644 --- a/test/tools/utils.js +++ b/test/tools/utils.js @@ -2,6 +2,9 @@ const Logger = require('../../lib/core').Logger; const deprecateOptions = require('../../lib/utils').deprecateOptions; +const arrayStrictEqual = require('../../lib/core/utils').arrayStrictEqual; +const errorStrictEqual = require('../../lib/core/utils').errorStrictEqual; +const chalk = require('chalk'); const chai = require('chai'); const expect = chai.expect; const sinonChai = require('sinon-chai'); @@ -55,10 +58,129 @@ ClassWithUndefinedLogger.prototype.getLogger = function() { return undefined; }; +function diff(lhs, rhs, fields, comparator) { + return fields.reduce((diff, field) => { + if ((lhs[field] == null || rhs[field] == null) && field !== 'error') { + return diff; + } + + if (!comparator(lhs[field], rhs[field])) { + diff.push( + ` ${field}: ${chalk.green(`[${lhs[field]}]`)} => ${chalk.green(`[${rhs[field]}]`)}` + ); + } + + return diff; 
+ }, []); +} + +function serverDescriptionDiff(lhs, rhs) { + const objectIdFields = ['electionId']; + const arrayFields = ['hosts', 'tags']; + const simpleFields = [ + 'type', + 'minWireVersion', + 'me', + 'setName', + 'setVersion', + 'electionId', + 'primary', + 'logicalSessionTimeoutMinutes' + ]; + + return diff(lhs, rhs, simpleFields, (x, y) => x === y) + .concat(diff(lhs, rhs, ['error'], (x, y) => errorStrictEqual(x, y))) + .concat(diff(lhs, rhs, arrayFields, (x, y) => arrayStrictEqual(x, y))) + .concat(diff(lhs, rhs, objectIdFields, (x, y) => x.equals(y))) + .join(',\n'); +} + +function topologyDescriptionDiff(lhs, rhs) { + const simpleFields = [ + 'type', + 'setName', + 'maxSetVersion', + 'stale', + 'compatible', + 'compatibilityError', + 'logicalSessionTimeoutMinutes', + 'error', + 'commonWireVersion' + ]; + + return diff(lhs, rhs, simpleFields, (x, y) => x === y).join(',\n'); +} + +function visualizeMonitoringEvents(client) { + function print(msg) { + console.log(`${chalk.white(new Date().toISOString())} ${msg}`); + } + + client.on('serverHeartbeatStarted', event => + print(`${chalk.yellow('heartbeat')} ${chalk.bold('started')} host: '${event.connectionId}`) + ); + + client.on('serverHeartbeatSucceeded', event => + print( + `${chalk.yellow('heartbeat')} ${chalk.green('succeeded')} host: '${ + event.connectionId + }' ${chalk.gray(`(${event.duration} ms)`)}` + ) + ); + + client.on('serverHeartbeatFailed', event => + print( + `${chalk.yellow('heartbeat')} ${chalk.red('failed')} host: '${ + event.connectionId + }' ${chalk.gray(`(${event.duration} ms)`)}` + ) + ); + + // server information + client.on('serverOpening', event => { + print( + `${chalk.cyan('server')} [${event.address}] ${chalk.bold('opening')} in topology#${ + event.topologyId + }` + ); + }); + + client.on('serverClosed', event => { + print( + `${chalk.cyan('server')} [${event.address}] ${chalk.bold('closed')} in topology#${ + event.topologyId + }` + ); + }); + + client.on('serverDescriptionChanged', event => { + print(`${chalk.cyan('server')} [${event.address}] changed:`); + console.log(serverDescriptionDiff(event.previousDescription, event.newDescription)); + }); + + // topology information + client.on('topologyOpening', event => { + print(`${chalk.magenta('topology')} adding topology#${event.topologyId}`); + }); + + client.on('topologyClosed', event => { + print(`${chalk.magenta('topology')} removing topology#${event.topologyId}`); + }); + + client.on('topologyDescriptionChanged', event => { + const diff = topologyDescriptionDiff(event.previousDescription, event.newDescription); + if (diff !== '') { + print(`${chalk.magenta('topology')} [topology#${event.topologyId}] changed:`); + console.log(diff); + } + }); +} + module.exports = { makeTestFunction, ensureCalledWith, ClassWithLogger, ClassWithoutLogger, - ClassWithUndefinedLogger + ClassWithUndefinedLogger, + visualizeMonitoringEvents }; diff --git a/test/unit/client_metadata.test.js b/test/unit/client_metadata.test.js new file mode 100644 index 00000000000..21b51274189 --- /dev/null +++ b/test/unit/client_metadata.test.js @@ -0,0 +1,51 @@ +'use strict'; +const mock = require('mongodb-mock-server'); +const expect = require('chai').expect; + +describe('Client Metadata', function() { + let mockServer; + before(() => mock.createServer().then(server => (mockServer = server))); + after(() => mock.cleanup()); + + it('should report the correct platform in client metadata', function(done) { + const ismasters = []; + mockServer.setMessageHandler(request => { + const doc = 
request.document; + if (doc.ismaster) { + ismasters.push(doc); + request.reply(mock.DEFAULT_ISMASTER); + } else { + request.reply({ ok: 1 }); + } + }); + + const isUnifiedTopology = this.configuration.usingUnifiedTopology(); + const client = this.configuration.newClient(`mongodb://${mockServer.uri()}/`); + client.connect(err => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + client.db().command({ ping: 1 }, err => { + expect(err).to.not.exist; + + if (isUnifiedTopology) { + expect(ismasters).to.have.length.greaterThan(1); + ismasters.forEach(ismaster => + expect(ismaster) + .nested.property('client.platform') + .to.match(/unified/) + ); + } else { + expect(ismasters).to.have.length(1); + ismasters.forEach(ismaster => + expect(ismaster) + .nested.property('client.platform') + .to.match(/legacy/) + ); + } + + done(); + }); + }); + }); +}); diff --git a/test/unit/cmap/connection.test.js b/test/unit/cmap/connection.test.js new file mode 100644 index 00000000000..d944c6c4099 --- /dev/null +++ b/test/unit/cmap/connection.test.js @@ -0,0 +1,71 @@ +'use strict'; + +const BSON = require('bson'); +const mock = require('mongodb-mock-server'); +const connect = require('../../../lib/core/connection/connect'); +const Connection = require('../../../lib/cmap/connection').Connection; +const expect = require('chai').expect; + +describe('Connection', function() { + let server; + after(() => mock.cleanup()); + before(() => { + mock.createServer().then(s => (server = s)); + }); + + it('should support fire-and-forget messages', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + + // blackhole all other requests + }); + + connect( + Object.assign({ bson: new BSON(), connectionType: Connection }, server.address()), + (err, conn) => { + expect(err).to.not.exist; + expect(conn).to.exist; + + conn.command('$admin.cmd', { ping: 1 }, { noResponse: true }, (err, result) => { + expect(err).to.not.exist; + expect(result).to.not.exist; + + done(); + }); + } + ); + }); + + it('should destroy streams which time out', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + + // blackhole all other requests + }); + + connect( + Object.assign({ bson: new BSON(), connectionType: Connection }, server.address()), + (err, conn) => { + expect(err).to.not.exist; + expect(conn).to.exist; + + conn.command('$admin.cmd', { ping: 1 }, { socketTimeout: 50 }, (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + + expect(conn) + .property('stream') + .property('destroyed').to.be.true; + + done(); + }); + } + ); + }); +}); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js new file mode 100644 index 00000000000..a41919cf707 --- /dev/null +++ b/test/unit/cmap/connection_pool.test.js @@ -0,0 +1,465 @@ +'use strict'; + +const Promise = require('bluebird'); +const loadSpecTests = require('../../spec').loadSpecTests; +const ConnectionPool = require('../../../lib/cmap/connection_pool').ConnectionPool; +const EventEmitter = require('events').EventEmitter; +const mock = require('mongodb-mock-server'); +const BSON = require('bson'); +const cmapEvents = require('../../../lib/cmap/events'); + +const chai = require('chai'); +chai.use(require('../../functional/spec-runner/matcher').default); +const expect = chai.expect; + +const ALL_POOL_EVENTS = new Set([ + 
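+  // every event the CMAP connection pool emits; the spec runner subscribes to all of
+  // these and filters them per test via the "ignore" list in each spec file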
'connectionPoolCreated', + 'connectionPoolClosed', + 'connectionCreated', + 'connectionReady', + 'connectionClosed', + 'connectionCheckOutStarted', + 'connectionCheckOutFailed', + 'connectionCheckedOut', + 'connectionCheckedIn', + 'connectionPoolCleared' +]); + +const PROMISIFIED_POOL_FUNCTIONS = { + checkOut: Promise.promisify(ConnectionPool.prototype.checkOut), + close: Promise.promisify(ConnectionPool.prototype.close) +}; + +function closePool(pool) { + return new Promise(resolve => { + ALL_POOL_EVENTS.forEach(ev => pool.removeAllListeners(ev)); + pool.close(resolve); + }); +} + +describe('Connection Pool', function() { + let server; + after(() => mock.cleanup()); + before(() => { + mock.createServer().then(s => (server = s)); + }); + + it('should destroy connections which have been closed', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } else { + // destroy on any other command + request.connection.destroy(); + } + }); + + const pool = new ConnectionPool( + Object.assign({ bson: new BSON(), maxPoolSize: 1 }, server.address()) + ); + + const events = []; + pool.on('connectionClosed', event => events.push(event)); + + pool.checkOut((err, conn) => { + expect(err).to.not.exist; + + conn.command('admin.$cmd', { ping: 1 }, (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + + pool.checkIn(conn); + + expect(events).to.have.length(1); + const closeEvent = events[0]; + expect(closeEvent) + .have.property('reason') + .equal('error'); + }); + }); + + pool.withConnection( + (err, conn, cb) => { + expect(err).to.not.exist; + cb(); + }, + () => { + pool.close(done); + } + ); + }); + + it('should propagate socket timeouts to connections', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } else { + // blackhole other requests + } + }); + + const pool = new ConnectionPool( + Object.assign({ bson: new BSON(), maxPoolSize: 1, socketTimeout: 50 }, server.address()) + ); + + pool.withConnection( + (err, conn, cb) => { + expect(err).to.not.exist; + conn.command('admin.$cmd', { ping: 1 }, (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + expect(err).to.match(/timed out/); + cb(); + }); + }, + () => pool.close(done) + ); + }); + + describe('withConnection', function() { + it('should manage a connection for a successful operation', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + + const pool = new ConnectionPool(Object.assign({ bson: new BSON() }, server.address())); + const callback = (err, result) => { + expect(err).to.not.exist; + expect(result).to.exist; + pool.close(done); + }; + + pool.withConnection((err, conn, cb) => { + expect(err).to.not.exist; + + conn.command('$admin.cmd', { ismaster: 1 }, (cmdErr, ismaster) => { + expect(cmdErr).to.not.exist; + cb(undefined, ismaster); + }); + }, callback); + }); + + it('should allow user interaction with an error', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.connection.destroy(); + } + }); + + const pool = new ConnectionPool( + Object.assign({ bson: new BSON(), waitQueueTimeoutMS: 200 }, server.address()) + ); + + const callback = err => { + expect(err).to.exist; + expect(err).to.match(/Timed out/); + 
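// the checkOut timed out (the mock server destroys incoming connections and
// waitQueueTimeoutMS is 200), so the same error reaches both this final callback
// and the withConnection handler below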
pool.close(done); + }; + + pool.withConnection((err, conn, cb) => { + expect(err).to.exist; + expect(err).to.match(/Timed out/); + cb(err); + }, callback); + }); + + it('should return an error to the original callback', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + + const pool = new ConnectionPool(Object.assign({ bson: new BSON() }, server.address())); + const callback = (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + expect(err).to.match(/my great error/); + pool.close(done); + }; + + pool.withConnection((err, conn, cb) => { + expect(err).to.not.exist; + cb(new Error('my great error')); + }, callback); + }); + + it('should still manage a connection if no callback is provided', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + + const pool = new ConnectionPool( + Object.assign({ bson: new BSON(), maxPoolSize: 1 }, server.address()) + ); + + const events = []; + pool.on('connectionCheckedOut', event => events.push(event)); + pool.on('connectionCheckedIn', event => { + events.push(event); + + expect(events).to.have.length(2); + expect(events[0]).to.be.instanceOf(cmapEvents.ConnectionCheckedOutEvent); + expect(events[1]).to.be.instanceOf(cmapEvents.ConnectionCheckedInEvent); + pool.close(done); + }); + + pool.withConnection((err, conn, cb) => { + expect(err).to.not.exist; + cb(); + }); + }); + }); + + describe('spec tests', function() { + const threads = new Map(); + const connections = new Map(); + const orphans = new Set(); + const poolEvents = []; + const poolEventsEventEmitter = new EventEmitter(); + let pool = undefined; + + function createPool(options) { + options = Object.assign({}, options, { bson: new BSON() }, server.address()); + pool = new ConnectionPool(options); + ALL_POOL_EVENTS.forEach(ev => { + pool.on(ev, x => { + poolEvents.push(x); + poolEventsEventEmitter.emit('poolEvent'); + }); + }); + } + + function getThread(name) { + let thread = threads.get(name); + if (!thread) { + thread = new Thread(); + threads.set(name, thread); + } + + return thread; + } + + function eventType(event) { + const eventName = event.constructor.name; + return eventName.substring(0, eventName.lastIndexOf('Event')); + } + + const OPERATION_FUNCTIONS = { + checkOut: function(op) { + return PROMISIFIED_POOL_FUNCTIONS.checkOut.call(pool).then(connection => { + if (op.label != null) { + connections.set(op.label, connection); + } else { + orphans.add(connection); + } + }); + }, + checkIn: function(op) { + const connection = connections.get(op.connection); + connections.delete(op.connection); + + if (!connection) { + throw new Error(`Attempted to release non-existient connection ${op.connection}`); + } + + return pool.checkIn(connection); + }, + clear: function() { + return pool.clear(); + }, + close: function() { + return PROMISIFIED_POOL_FUNCTIONS.close.call(pool); + }, + wait: function(options) { + const ms = options.ms; + return new Promise(r => setTimeout(r, ms)); + }, + start: function(options) { + const target = options.target; + const thread = getThread(target); + thread.start(); + }, + waitForThread: function(options) { + const name = options.name; + const target = options.target; + const suppressError = options.suppressError; + + const threadObj = threads.get(target); + + if (!threadObj) { + throw new Error(`Attempted to run op ${name} 
on non-existent thread ${target}`); + } + + return threadObj.finish().catch(e => { + if (!suppressError) { + throw e; + } + }); + }, + waitForEvent: function(options) { + const event = options.event; + const count = options.count; + return new Promise(resolve => { + function run() { + if (poolEvents.filter(ev => eventType(ev) === event).length >= count) { + return resolve(); + } + + poolEventsEventEmitter.once('poolEvent', run); + } + run(); + }); + } + }; + + class Thread { + constructor() { + this._killed = false; + this._error = undefined; + this._promise = new Promise(resolve => { + this.start = () => setTimeout(resolve); + }); + } + + run(op) { + if (this._killed || this._error) { + return; + } + + this._promise = this._promise + .then(() => this._runOperation(op)) + .catch(e => (this._error = e)); + } + + _runOperation(op) { + const operationFn = OPERATION_FUNCTIONS[op.name]; + if (!operationFn) { + throw new Error(`Invalid command ${op.name}`); + } + + return Promise.resolve() + .then(() => operationFn(op, this)) + .then(() => new Promise(r => setTimeout(r))); + } + + finish() { + this._killed = true; + return this._promise.then(() => { + if (this._error) { + throw this._error; + } + }); + } + } + + before(() => { + // we aren't testing errors yet, so it's fine for the mock server to just accept + // and establish valid connections + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + }); + + afterEach(() => { + const p = pool ? closePool(pool) : Promise.resolve(); + return p + .then(() => { + const connectionsToDestroy = Array.from(orphans).concat(Array.from(connections.values())); + return Promise.each(connectionsToDestroy, conn => { + return new Promise((resolve, reject) => + conn.destroy({ force: true }, err => { + if (err) return reject(err); + resolve(); + }) + ); + }); + }) + .then(() => { + pool = undefined; + threads.clear(); + connections.clear(); + orphans.clear(); + poolEvents.length = 0; + poolEventsEventEmitter.removeAllListeners(); + }); + }); + + loadSpecTests('connection-monitoring-and-pooling').forEach(test => { + it(test.description, function() { + const operations = test.operations; + const expectedEvents = test.events || []; + const ignoreEvents = test.ignore || []; + const expectedError = test.error; + const poolOptions = test.poolOptions || {}; + + let actualError; + + const MAIN_THREAD_KEY = Symbol('Main Thread'); + const mainThread = new Thread(); + threads.set(MAIN_THREAD_KEY, mainThread); + mainThread.start(); + + createPool(poolOptions); + + let basePromise = Promise.resolve(); + + for (let idx in operations) { + const op = operations[idx]; + + const threadKey = op.thread || MAIN_THREAD_KEY; + const thread = getThread(threadKey); + + basePromise = basePromise.then(() => { + if (!thread) { + throw new Error(`Invalid thread ${threadKey}`); + } + + return Promise.resolve() + .then(() => thread.run(op)) + .then(() => new Promise(r => setTimeout(r))); + }); + } + + return basePromise + .then(() => mainThread.finish()) + .catch(e => (actualError = e)) + .then(() => { + const actualEvents = poolEvents.filter(ev => ignoreEvents.indexOf(eventType(ev)) < 0); + + if (expectedError) { + expect(actualError).to.exist; + expect(actualError) + .property('message') + .to.equal(expectedError.message); + } else if (actualError) { + throw actualError; + } + + expectedEvents.forEach((expected, index) => { + const actual = actualEvents[index]; + if (expected.type) { + 
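+            // spec files identify events as e.g. "ConnectionCheckedOut", while the driver's
+            // event classes carry an "Event" suffix, so append it before comparing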
expect(actual.constructor.name).to.equal(`${expected.type}Event`); + delete expected.type; + } + + expect(actual).to.matchMongoSpec(expected); + }); + }); + }); + }); + }); +}); diff --git a/test/unit/cmap/message_stream.test.js b/test/unit/cmap/message_stream.test.js index da0bea1b2ef..6487dcf57e7 100644 --- a/test/unit/cmap/message_stream.test.js +++ b/test/unit/cmap/message_stream.test.js @@ -2,7 +2,7 @@ const BSON = require('bson'); const Readable = require('stream').Readable; const Writable = require('stream').Writable; -const MessageStream = require('../../../lib/core/cmap/message_stream'); +const MessageStream = require('../../../lib/cmap/message_stream'); const Msg = require('../../../lib/core/connection/msg').Msg; const expect = require('chai').expect; diff --git a/test/unit/core/connect.test.js b/test/unit/core/connect.test.js index a3db4940c60..312553771cc 100644 --- a/test/unit/core/connect.test.js +++ b/test/unit/core/connect.test.js @@ -100,35 +100,6 @@ describe('Connect Tests', function() { }); }); - it( - 'should report the correct metadata for unified topology', - { requires: { unifiedTopology: true, topology: ['single'] } }, - function(done) { - let ismaster; - test.server.setMessageHandler(request => { - const doc = request.document; - const $clusterTime = genClusterTime(Date.now()); - if (doc.ismaster) { - ismaster = doc; - request.reply( - Object.assign({}, mock.DEFAULT_ISMASTER, { - $clusterTime, - arbiterOnly: true - }) - ); - } - }); - - const topology = this.configuration.newTopology(test.connectOptions); - topology.connect(test.connectOptions, err => { - expect(err).to.not.exist; - const platform = ismaster.client.platform; - expect(platform).to.match(/unified/); - topology.close(done); - }); - } - ); - it('should allow a cancellaton token', function(done) { const cancellationToken = new EventEmitter(); setTimeout(() => cancellationToken.emit('cancel'), 500); diff --git a/test/unit/core/sdam_spec.test.js b/test/unit/core/sdam_spec.test.js index 2cfbe63fd8c..b7a0efbc6f3 100644 --- a/test/unit/core/sdam_spec.test.js +++ b/test/unit/core/sdam_spec.test.js @@ -4,7 +4,7 @@ const path = require('path'); const Topology = require('../../../lib/core/sdam/topology').Topology; const Server = require('../../../lib/core/sdam/server').Server; const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription; -const monitoring = require('../../../lib/core/sdam/monitoring'); +const sdamEvents = require('../../../lib/core/sdam/events'); const parse = require('../../../lib/core/uri_parser'); const sinon = require('sinon'); @@ -37,7 +37,9 @@ function collectTests() { describe('Server Discovery and Monitoring (spec)', function() { let serverConnect; before(() => { - serverConnect = sinon.stub(Server.prototype, 'connect'); + serverConnect = sinon.stub(Server.prototype, 'connect').callsFake(function() { + this.s.state = 'connected'; + }); }); after(() => { @@ -97,7 +99,7 @@ function convertOutcomeEvents(events) { let eventClass = eventType.replace(/_\w/g, c => c[1].toUpperCase()); eventClass = eventClass.charAt(0).toUpperCase() + eventClass.slice(1); args.unshift(null); - const eventConstructor = monitoring[eventClass]; + const eventConstructor = sdamEvents[eventClass]; const eventInstance = new (Function.prototype.bind.apply(eventConstructor, args))(); return eventInstance; }); diff --git a/test/unit/sdam/monitoring.test.js b/test/unit/sdam/monitoring.test.js index c80e1638057..6554d1e6c32 100644 --- a/test/unit/sdam/monitoring.test.js +++ 
b/test/unit/sdam/monitoring.test.js @@ -1,18 +1,34 @@ 'use strict'; const mock = require('mongodb-mock-server'); +const BSON = require('bson'); const Topology = require('../../../lib/core/sdam/topology').Topology; +const Monitor = require('../../../lib/core/sdam/monitor').Monitor; +const ServerType = require('../../../lib/core/sdam/common').ServerType; const expect = require('chai').expect; +class MockServer { + constructor(options) { + this.s = { + bson: new BSON() + }; + + this.description = { + type: ServerType.Unknown, + address: `${options.host}:${options.port}` + }; + } +} + describe('monitoring', function() { - let server; + let mockServer; after(() => mock.cleanup()); beforeEach(function() { - return mock.createServer().then(_server => (server = _server)); + return mock.createServer().then(server => (mockServer = server)); }); it('should record roundTripTime', function(done) { - server.setMessageHandler(request => { + mockServer.setMessageHandler(request => { const doc = request.document; if (doc.ismaster) { request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); @@ -22,7 +38,7 @@ describe('monitoring', function() { }); // set `heartbeatFrequencyMS` to 250ms to force a quick monitoring check, and wait 500ms to validate below - const topology = new Topology(server.uri(), { heartbeatFrequencyMS: 250 }); + const topology = new Topology(mockServer.uri(), { heartbeatFrequencyMS: 250 }); topology.connect(err => { expect(err).to.not.exist; @@ -41,4 +57,329 @@ describe('monitoring', function() { }, 500); }); }); + + it('should recover on error during initial connect', function(done) { + let acceptConnections = false; + mockServer.setMessageHandler(request => { + if (!acceptConnections) { + request.connection.destroy(); + return; + } + + const doc = request.document; + if (doc.ismaster) { + request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); + } else if (doc.endSessions) { + request.reply({ ok: 1 }); + } + }); + + setTimeout(() => { + acceptConnections = true; + }, 250); + + const topology = new Topology(mockServer.uri()); + topology.connect(err => { + expect(err).to.not.exist; + + setTimeout(() => { + expect(topology) + .property('description') + .property('servers') + .to.have.length(1); + + const serverDescription = Array.from(topology.description.servers.values())[0]; + expect(serverDescription) + .property('roundTripTime') + .to.be.greaterThan(0); + + topology.close(done); + }, 500); + }); + }); + + describe('Monitor', function() { + it('should connect and issue an initial server check', function(done) { + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, {}); + this.defer(() => monitor.close()); + + monitor.on('serverHeartbeatFailed', () => done(new Error('unexpected heartbeat failure'))); + monitor.on('serverHeartbeatSucceeded', () => done()); + monitor.connect(); + }); + + it('should ignore attempts to connect when not already closed', function(done) { + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, {}); + this.defer(() => monitor.close()); + + monitor.on('serverHeartbeatFailed', () => done(new Error('unexpected heartbeat failure'))); + 
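// done() would fire twice and fail the test if the repeated connect() call below
// started a second server check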
monitor.on('serverHeartbeatSucceeded', () => done()); + monitor.connect(); + monitor.connect(); + }); + + it('should not initiate another check if one is in progress', function(done) { + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + setTimeout(() => request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)), 250); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, {}); + + const startedEvents = []; + monitor.on('serverHeartbeatStarted', event => startedEvents.push(event)); + monitor.on('close', () => { + expect(startedEvents).to.have.length(2); + done(); + }); + + monitor.connect(); + monitor.once('serverHeartbeatSucceeded', () => { + monitor.requestCheck(); + monitor.requestCheck(); + monitor.requestCheck(); + monitor.requestCheck(); + monitor.requestCheck(); + + const minHeartbeatFrequencyMS = 500; + setTimeout(() => { + // wait for minHeartbeatFrequencyMS, then request a check and verify another check occurred + monitor.once('serverHeartbeatSucceeded', () => { + monitor.close(); + }); + + monitor.requestCheck(); + }, minHeartbeatFrequencyMS); + }); + }); + + it('should not close the monitor on a failed heartbeat', function(done) { + let isMasterCount = 0; + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + isMasterCount++; + if (isMasterCount === 2) { + request.reply({ ok: 0, errmsg: 'forced from mock server' }); + return; + } + + if (isMasterCount === 3) { + request.connection.destroy(); + return; + } + + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, { + heartbeatFrequencyMS: 250, + minHeartbeatFrequencyMS: 50 + }); + + const events = []; + monitor.on('serverHeartbeatFailed', event => events.push(event)); + + let successCount = 0; + monitor.on('serverHeartbeatSucceeded', () => { + if (successCount++ === 2) { + monitor.close(); + } + }); + + monitor.on('close', () => { + expect(events).to.have.length(2); + done(); + }); + + monitor.connect(); + }); + + it('should signal to reset the connection pool after first failed heartbeat', function(done) { + let isMasterCount = 0; + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + isMasterCount++; + request.reply( + isMasterCount === 2 + ? 
+              ? { ok: 0, errmsg: 'forced from mock server' }
+              : mock.DEFAULT_ISMASTER_36
+          );
+        }
+      });
+
+      const server = new MockServer(mockServer.address());
+      const monitor = new Monitor(server, {
+        heartbeatFrequencyMS: 250,
+        minHeartbeatFrequencyMS: 50
+      });
+      this.defer(() => monitor.close());
+
+      let resetRequested = false;
+      monitor.on('serverHeartbeatFailed', () => {
+        if (resetRequested) {
+          done(new Error('unexpected heartbeat failure'));
+        }
+      });
+
+      monitor.on('resetConnectionPool', () => (resetRequested = true));
+      monitor.on('serverHeartbeatSucceeded', () => {
+        if (server.description.type === ServerType.Unknown) {
+          // this is the first successful heartbeat, set the server type
+          server.description.type = ServerType.Standalone;
+          return;
+        }
+
+        // otherwise, this is the second heartbeat success and we should verify
+        // a reset was requested
+        expect(resetRequested).to.be.true;
+        done();
+      });
+
+      monitor.connect();
+    });
+
+    it('should report the most recent error on second monitoring failure', function(done) {
+      let failedCount = 0;
+      let initialConnectCompleted = false;
+      mockServer.setMessageHandler(request => {
+        const doc = request.document;
+        if (doc.ismaster) {
+          if (!initialConnectCompleted) {
+            request.reply(mock.DEFAULT_ISMASTER_36);
+            initialConnectCompleted = true;
+            return;
+          }
+
+          if (failedCount === 0) {
+            failedCount++;
+            request.reply({ ok: 0, errmsg: 'first error message' });
+          } else if (failedCount === 1) {
+            failedCount++;
+            request.reply({ ok: 0, errmsg: 'second error message' });
+          } else {
+            request.reply(mock.DEFAULT_ISMASTER_36);
+          }
+        }
+      });
+
+      const server = new MockServer(mockServer.address());
+      const monitor = new Monitor(server, {
+        heartbeatFrequencyMS: 250,
+        minHeartbeatFrequencyMS: 50
+      });
+      this.defer(() => monitor.close());
+
+      let resetRequested = false;
+      monitor.on('resetConnectionPool', () => (resetRequested = true));
+      monitor.once('serverHeartbeatSucceeded', () => {
+        // this is the first successful heartbeat, set the server type
+        server.description.type = ServerType.Standalone;
+
+        let failureCount = 0;
+        monitor.on('serverHeartbeatFailed', event => {
+          failureCount++;
+          if (failureCount === 2) {
+            expect(resetRequested).to.be.true;
+            expect(event)
+              .property('failure')
+              .to.match(/second error message/);
+            done();
+          }
+        });
+      });
+
+      monitor.connect();
+    });
+
+    it('should report events in the correct order during monitoring failure', function(done) {
+      let failedCount = 0;
+      let initialConnectCompleted = false;
+      mockServer.setMessageHandler(request => {
+        const doc = request.document;
+        if (doc.ismaster) {
+          if (!initialConnectCompleted) {
+            request.reply(mock.DEFAULT_ISMASTER_36);
+            initialConnectCompleted = true;
+            return;
+          }
+
+          if (failedCount === 0) {
+            failedCount++;
+            request.reply({ ok: 0, errmsg: 'first error message' });
+          } else {
+            failedCount++;
+            request.reply({ ok: 0, errmsg: 'second error message' });
+          }
+        }
+      });
+
+      const server = new MockServer(mockServer.address());
+      const monitor = new Monitor(server, {
+        heartbeatFrequencyMS: 250,
+        minHeartbeatFrequencyMS: 50
+      });
+      this.defer(() => monitor.close());
+
+      let poolResetRequested = false;
+      let serverResetRequested = false;
+      monitor.on('resetConnectionPool', () => (poolResetRequested = true));
+      monitor.on('resetServer', () => (serverResetRequested = true));
+
+      const events = [];
+      monitor.once('serverHeartbeatSucceeded', () => {
+        // this is the first successful heartbeat, set the server type
+        server.description.type = ServerType.Standalone;
+
+        monitor.on('serverHeartbeatStarted', event => events.push(event));
+        monitor.on('serverHeartbeatFailed', event => events.push(event));
+        monitor.once('resetServer', err => {
+          expect(poolResetRequested).to.be.true;
+          expect(serverResetRequested).to.be.true;
+          expect(events.map(e => e.constructor.name)).to.eql([
+            'ServerHeartbeatStartedEvent',
+            'ServerHeartbeatFailedEvent',
+            'ServerHeartbeatStartedEvent',
+            'ServerHeartbeatFailedEvent'
+          ]);
+
+          expect(events[1])
+            .property('failure')
+            .to.match(/first error message/);
+          expect(events[3])
+            .property('failure')
+            .to.match(/second error message/);
+          expect(events[3])
+            .property('failure')
+            .to.eql(err);
+
+          done();
+        });
+      });
+
+      monitor.connect();
+    });
+  });
 });
diff --git a/test/unit/sdam/server_description.test.js b/test/unit/sdam/server_description.test.js
new file mode 100644
index 00000000000..30de1592691
--- /dev/null
+++ b/test/unit/sdam/server_description.test.js
@@ -0,0 +1,44 @@
+'use strict';
+const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription;
+const expect = require('chai').expect;
+
+describe('ServerDescription', function() {
+  describe('error equality', function() {
+    [
+      {
+        description: 'equal error types and messages',
+        lhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }),
+        rhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }),
+        equal: true
+      },
+      {
+        description: 'equal error types and unequal messages',
+        lhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }),
+        rhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('blah') }),
+        equal: false
+      },
+      {
+        description: 'unequal error types and equal messages',
+        lhs: new ServerDescription('127.0.0.1:27017', null, { error: new TypeError('test') }),
+        rhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }),
+        equal: false
+      },
+      {
+        description: 'null lhs',
+        lhs: new ServerDescription('127.0.0.1:27017', null, { error: null }),
+        rhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }),
+        equal: false
+      },
+      {
+        description: 'null rhs',
+        lhs: new ServerDescription('127.0.0.1:27017', null, { error: new TypeError('test') }),
+        rhs: new ServerDescription('127.0.0.1:27017', null, { error: undefined }),
+        equal: false
+      }
+    ].forEach(test => {
+      it(test.description, function() {
+        expect(test.lhs.equals(test.rhs)).to.equal(test.equal);
+      });
+    });
+  });
+});
diff --git a/test/unit/sdam/server_selection/select_servers.test.js b/test/unit/sdam/server_selection/select_servers.test.js
index fe55235545b..e20d34f6785 100644
--- a/test/unit/sdam/server_selection/select_servers.test.js
+++ b/test/unit/sdam/server_selection/select_servers.test.js
@@ -35,7 +35,7 @@ describe('selectServers', function() {
     selectServers(topology, ReadPreference.primary, 500, process.hrtime(), err => {
       expect(err).to.exist;
       expect(err).to.match(/Server selection timed out/);
-      expect(err).to.not.have.property('reason');
+      expect(err).to.have.property('reason');
 
       done();
     });
@@ -43,7 +43,7 @@
   it('should schedule monitoring if no suitable server is found', function(done) {
     const topology = new Topology('someserver:27019');
-    const serverMonitor = this.sinon.stub(Server.prototype, 'monitor');
+    const requestCheck = this.sinon.stub(Server.prototype, 'requestCheck');
 
     this.sinon
       .stub(Topology.prototype, 'selectServer')
@@ -53,6 +53,7 @@ describe('selectServers', function() {
       });
 
     this.sinon.stub(Server.prototype, 'connect').callsFake(function() {
+      this.s.state = 'connected';
       this.emit('connect');
     });
 
@@ -60,12 +61,13 @@
     selectServers(topology, ReadPreference.primary, 1000, process.hrtime(), err => {
       expect(err).to.exist;
       expect(err).to.match(/Server selection timed out/);
-      expect(err).to.not.have.property('reason');
+      expect(err).to.have.property('reason');
 
-      // expect a call to monitor for initial server creation, and another for the server selection
-      expect(serverMonitor)
+      // When server is created `connect` is called on the monitor. When server selection
+      // occurs `requestCheck` will be called for an immediate check.
+      expect(requestCheck)
         .property('callCount')
-        .to.equal(2);
+        .to.equal(1);
 
       topology.close(done);
     });
@@ -75,6 +77,7 @@
   it('should disallow selection when the topology is explicitly closed', function(done) {
     const topology = new Topology('someserver:27019');
     this.sinon.stub(Server.prototype, 'connect').callsFake(function() {
+      this.s.state = 'connected';
      this.emit('connect');
     });
 
diff --git a/test/unit/sdam/server_selection/spec.test.js b/test/unit/sdam/server_selection/spec.test.js
index a80fa2b11b6..dddae2501d4 100644
--- a/test/unit/sdam/server_selection/spec.test.js
+++ b/test/unit/sdam/server_selection/spec.test.js
@@ -3,7 +3,7 @@ const path = require('path');
 const fs = require('fs');
 const core = require('../../../../lib/core');
 const Topology = core.Topology;
-const MongoTimeoutError = core.MongoTimeoutError;
+const MongoServerSelectionError = core.MongoServerSelectionError;
 const ReadPreference = core.ReadPreference;
 
 // TODO: these should be from `core` when legacy topologies are removed
@@ -55,7 +55,9 @@ function collectSelectionTests(specDir) {
 describe('Server Selection (spec)', function() {
   let serverConnect;
   before(() => {
-    serverConnect = sinon.stub(Server.prototype, 'connect');
+    serverConnect = sinon.stub(Server.prototype, 'connect').callsFake(function() {
+      this.s.state = 'connected';
+    });
   });
 
   after(() => {
@@ -119,7 +121,9 @@ function collectStalenessTests(specDir) {
 describe('Max Staleness (spec)', function() {
   let serverConnect;
   before(() => {
-    serverConnect = sinon.stub(Server.prototype, 'connect');
+    serverConnect = sinon.stub(Server.prototype, 'connect').callsFake(function() {
+      this.s.state = 'connected';
+    });
   });
 
   after(() => {
@@ -275,7 +279,7 @@ function executeServerSelectionTest(testDefinition, options, testDone) {
   }
 
   // default to serverSelectionTimeoutMS of `100` for unit tests
-  topology.selectServer(selector, { serverSelectionTimeoutMS: 100 }, (err, server) => {
+  topology.selectServer(selector, { serverSelectionTimeoutMS: 50 }, (err, server) => {
     // are we expecting an error?
     if (testDefinition.error) {
       if (!err) {
@@ -287,7 +291,7 @@ function executeServerSelectionTest(testDefinition, options, testDone) {
 
     if (err) {
       // this is another expected error case
-      if (expectedServers.length === 0 && err instanceof MongoTimeoutError) return done();
+      if (expectedServers.length === 0 && err instanceof MongoServerSelectionError) return done();
       return done(err);
     }
 
diff --git a/test/unit/sdam/srv_polling.test.js b/test/unit/sdam/srv_polling.test.js
index 5c06712177d..e81340b11ed 100644
--- a/test/unit/sdam/srv_polling.test.js
+++ b/test/unit/sdam/srv_polling.test.js
@@ -4,7 +4,7 @@ const Topology = require('../../../lib/core/sdam/topology').Topology;
 const TopologyDescription = require('../../../lib/core/sdam/topology_description')
   .TopologyDescription;
 const TopologyType = require('../../../lib/core/sdam/common').TopologyType;
-const monitoring = require('../../../lib/core/sdam/monitoring');
+const sdamEvents = require('../../../lib/core/sdam/events');
 const SrvPoller = require('../../../lib/core/sdam/srv_polling').SrvPoller;
 const SrvPollingEvent = require('../../../lib/core/sdam/srv_polling').SrvPollingEvent;
 
@@ -283,7 +283,7 @@ describe('Mongos SRV Polling', function() {
     function emit(prev, current) {
       topology.emit(
         'topologyDescriptionChanged',
-        new monitoring.TopologyDescriptionChangedEvent(topology.s.id, prev, current)
+        new sdamEvents.TopologyDescriptionChangedEvent(topology.s.id, prev, current)
       );
     }
 
diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js
index 445bdaed9df..e9755511626 100644
--- a/test/unit/sdam/topology.test.js
+++ b/test/unit/sdam/topology.test.js
@@ -12,12 +12,14 @@ describe('Topology (unit)', function() {
     this.sinon = sinon.sandbox.create();
 
     // these are mocks we want across all tests
-    this.sinon.stub(Server.prototype, 'monitor');
+    this.sinon.stub(Server.prototype, 'requestCheck');
     this.sinon
       .stub(Topology.prototype, 'selectServer')
       .callsFake(function(selector, options, callback) {
-        const server = Array.from(this.s.servers.values())[0];
-        callback(null, server);
+        setTimeout(() => {
+          const server = Array.from(this.s.servers.values())[0];
+          callback(null, server);
+        }, 50);
       });
   });
 
@@ -28,6 +30,7 @@ describe('Topology (unit)', function() {
   it('should check for sessions if connected to a single server and has no known servers', function(done) {
     const topology = new Topology('someserver:27019');
     this.sinon.stub(Server.prototype, 'connect').callsFake(function() {
+      this.s.state = 'connected';
       this.emit('connect');
     });
 
@@ -40,12 +43,15 @@
   it('should not check for sessions if connected to a single server', function(done) {
     const topology = new Topology('someserver:27019');
     this.sinon.stub(Server.prototype, 'connect').callsFake(function() {
-      this.emit(
-        'descriptionReceived',
-        new ServerDescription('someserver:27019', { ok: 1, maxWireVersion: 5 })
-      );
-
+      this.s.state = 'connected';
       this.emit('connect');
+
+      setTimeout(() => {
+        this.emit(
+          'descriptionReceived',
+          new ServerDescription('someserver:27019', { ok: 1, maxWireVersion: 5 })
+        );
+      }, 20);
     });
 
     topology.connect(() => {
@@ -57,12 +63,15 @@
   it('should check for sessions if there are no data-bearing nodes', function(done) {
     const topology = new Topology('mongos:27019,mongos:27018,mongos:27017');
     this.sinon.stub(Server.prototype, 'connect').callsFake(function() {
-      this.emit(
-        'descriptionReceived',
-        new ServerDescription(this.name, { ok: 1, msg: 'isdbgrid', maxWireVersion: 5 })
-      );
-
+      this.s.state = 'connected';
       this.emit('connect');
+
+      setTimeout(() => {
+        this.emit(
+          'descriptionReceived',
+          new ServerDescription(this.name, { ok: 1, msg: 'isdbgrid', maxWireVersion: 5 })
+        );
+      }, 20);
     });
 
     topology.connect(() => {
@@ -94,7 +103,7 @@ describe('Topology (unit)', function() {
     topology.connect(err => {
       expect(err).to.not.exist;
 
-      topology.command('admin.$cmd', { ping: 1 }, { socketTimeout: 1500 }, (err, result) => {
+      topology.command('admin.$cmd', { ping: 1 }, { socketTimeout: 250 }, (err, result) => {
        expect(result).to.not.exist;
        expect(err).to.exist;
        expect(err).to.match(/timed out/);
 
diff --git a/test/unit/utils.test.js b/test/unit/utils.test.js
new file mode 100644
index 00000000000..367e43625b7
--- /dev/null
+++ b/test/unit/utils.test.js
@@ -0,0 +1,36 @@
+'use strict';
+const eachAsync = require('../../lib/core/utils').eachAsync;
+const expect = require('chai').expect;
+
+describe('utils', function() {
+  describe('eachAsync', function() {
+    it('should callback with an error', function(done) {
+      eachAsync(
+        [{ error: false }, { error: true }],
+        (item, cb) => {
+          cb(item.error ? new Error('error requested') : null);
+        },
+        err => {
+          expect(err).to.exist;
+          done();
+        }
+      );
+    });
+
+    it('should propagate a synchronously thrown error', function(done) {
+      expect(() =>
+        eachAsync(
+          [{}],
+          () => {
+            throw new Error('something wicked');
+          },
+          err => {
+            expect(err).to.not.exist;
+            done(err);
+          }
+        )
+      ).to.throw(/something wicked/);
+      done();
+    });
+  });
+});