From 70056520a4a7277b11675fab5479fbff5581349d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 12 Dec 2019 12:11:25 -0500 Subject: [PATCH 001/130] chore: sync CMAP spec tests --- .../README.rst | 158 ++++++++++++++ .../connection-must-have-id.json | 48 +++++ .../connection-must-have-id.yml | 27 +++ .../connection-must-order-ids.json | 48 +++++ .../connection-must-order-ids.yml | 27 +++ .../pool-checkin-destroy-closed.json | 46 +++++ .../pool-checkin-destroy-closed.yml | 27 +++ .../pool-checkin-destroy-stale.json | 46 +++++ .../pool-checkin-destroy-stale.yml | 27 +++ .../pool-checkin-make-available.json | 41 ++++ .../pool-checkin-make-available.yml | 24 +++ .../pool-checkin.json | 30 +++ .../pool-checkin.yml | 19 ++ .../pool-checkout-connection.json | 26 +++ .../pool-checkout-connection.yml | 15 ++ .../pool-checkout-error-closed.json | 64 ++++++ .../pool-checkout-error-closed.yml | 36 ++++ .../pool-checkout-multiple.json | 66 ++++++ .../pool-checkout-multiple.yml | 37 ++++ .../pool-checkout-no-idle.json | 58 ++++++ .../pool-checkout-no-idle.yml | 35 ++++ .../pool-checkout-no-stale.json | 58 ++++++ .../pool-checkout-no-stale.yml | 33 +++ .../pool-close-destroy-conns.json | 48 +++++ .../pool-close-destroy-conns.yml | 28 +++ .../pool-close.json | 21 ++ .../pool-close.yml | 11 + .../pool-create-max-size.json | 129 ++++++++++++ .../pool-create-max-size.yml | 71 +++++++ .../pool-create-min-size.json | 50 +++++ .../pool-create-min-size.yml | 31 +++ .../pool-create-with-options.json | 32 +++ .../pool-create-with-options.yml | 21 ++ .../pool-create.json | 19 ++ .../pool-create.yml | 12 ++ .../wait-queue-fairness.json | 192 ++++++++++++++++++ .../wait-queue-fairness.yml | 124 +++++++++++ .../wait-queue-timeout.json | 71 +++++++ .../wait-queue-timeout.yml | 46 +++++ 39 files changed, 1902 insertions(+) create mode 100644 test/spec/connection-monitoring-and-pooling/README.rst create mode 100644 test/spec/connection-monitoring-and-pooling/connection-must-have-id.json create mode 100644 test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml create mode 100644 test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json create mode 100644 test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkin.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkin.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json create mode 100644 
test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-close.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-close.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-create-max-size.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-create-min-size.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-create-with-options.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml create mode 100644 test/spec/connection-monitoring-and-pooling/pool-create.json create mode 100644 test/spec/connection-monitoring-and-pooling/pool-create.yml create mode 100644 test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json create mode 100644 test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml create mode 100644 test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json create mode 100644 test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml diff --git a/test/spec/connection-monitoring-and-pooling/README.rst b/test/spec/connection-monitoring-and-pooling/README.rst new file mode 100644 index 00000000000..6480d7f43b7 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/README.rst @@ -0,0 +1,158 @@ +.. role:: javascript(code) + :language: javascript + +======================================== +Connection Monitoring and Pooling (CMAP) +======================================== + +.. contents:: + +-------- + +Introduction +============ + +The YAML and JSON files in this directory are platform-independent tests that +drivers can use to prove their conformance to the Connection Monitoring and Pooling (CMAP) Spec. + +Several prose tests, which are not easily expressed in YAML, are also presented +in this file. Those tests will need to be manually implemented by each driver. + +Common Test Format +================== + +Each YAML file has the following keys: + +- ``version``: A version number indicating the expected format of the spec tests (current version = 1) +- ``style``: A string indicating what style of tests this file contains. Currently ``unit`` is the only valid value +- ``description``: A text description of what the test is meant to assert + +Unit Test Format: +================= + +All Unit Tests have some of the following fields: + +- ``poolOptions``: if present, connection pool options to use when creating a pool +- ``operations``: A list of operations to perform. All operations support the following fields: + + - ``name``: A string describing which operation to issue. + - ``thread``: The name of the thread in which to run this operation. 
If not specified, runs in the default thread +- ``error``: Indicates that the main thread is expected to error during this test. An error may include any of the following fields: + + - ``type``: the type of error emitted + - ``message``: the message associated with that error + - ``address``: Address of pool emitting error + +- ``events``: An array of all connection monitoring events expected to occur while running ``operations``. An event may contain any of the following fields: + + - ``type``: The type of event emitted + - ``address``: The address of the pool emitting the event + - ``connectionId``: The id of a connection associated with the event + - ``options``: Options used to create the pool + - ``reason``: A reason giving more information on why the event was emitted + +- ``ignore``: An array of event names to ignore + +Valid Unit Test Operations are the following: + +- ``start(target)``: Starts a new thread named ``target`` + + - ``target``: The name of the new thread to start + +- ``wait(ms)``: Sleep the current thread for ``ms`` milliseconds + + - ``ms``: The number of milliseconds to sleep the current thread for + +- ``waitForThread(target)``: wait for thread ``target`` to finish executing. Propagate any errors to the main thread. + + - ``target``: The name of the thread to wait for. + +- ``waitForEvent(event, count)``: block the current thread until ``event`` has occurred ``count`` times + + - ``event``: The name of the event + - ``count``: The number of times the event must occur (counting from the start of the test) + +- ``label = pool.checkOut()``: call ``checkOut`` on pool, returning the checked out connection + + - ``label``: If specified, associate this label with the returned connection, so that it may be referenced in later operations + +- ``pool.checkIn(connection)``: call ``checkIn`` on pool + + - ``connection``: A string label identifying which connection to check in. Should be a label that was previously set with ``checkOut`` + +- ``pool.clear()``: call ``clear`` on Pool +- ``pool.close()``: call ``close`` on Pool + +Spec Test Match Function +======================== + +The definition of MATCH or MATCHES in the Spec Test Runner is as follows: + +- MATCH takes two values, ``expected`` and ``actual`` +- Notation is "Assert [actual] MATCHES [expected]" +- Assertion passes if ``expected`` is a subset of ``actual``, with the values ``42`` and ``"42"`` acting as placeholders for "any value" + +Pseudocode implementation of ``actual`` MATCHES ``expected``: + +:: + + If expected is "42" or 42: + Assert that actual exists (is not null or undefined) + Else: + Assert that actual is of the same JSON type as expected + If expected is a JSON array: + For every idx/value in expected: + Assert that actual[idx] MATCHES value + Else if expected is a JSON object: + For every key/value in expected: + Assert that actual[key] MATCHES value + Else: + Assert that expected equals actual + +Unit Test Runner: +================= + +For the unit tests, the behavior of a Connection is irrelevant beyond the need to assert ``connection.id``. Drivers MAY use a mock connection class for testing the pool behavior in unit tests. + +For each YAML file with ``style: unit``: + +- Create a Pool ``pool``, subscribe and capture any Connection Monitoring events emitted in order. + + - If ``poolOptions`` is specified, use those options to initialize the pool + - The returned pool must have an ``address`` set as a string value. 
+ +- Execute each ``operation`` in ``operations`` + + - If a ``thread`` is specified, execute in that corresponding thread. Otherwise, execute in the main thread. + +- Wait for the main thread to finish executing all of its operations +- If ``error`` is present + + - Assert that an actual error ``actualError`` was thrown by the main thread + - Assert that ``actualError`` MATCHES ``error`` + +- Else: + + - Assert that no errors were thrown by the main thread + +- Calculate ``actualEvents`` as every Connection Event emitted whose ``type`` is not in ``ignore`` +- If ``events`` is not empty, then for every ``idx``/``expectedEvent`` in ``events`` + + - Assert that ``actualEvents[idx]`` exists + - Assert that ``actualEvents[idx]`` MATCHES ``expectedEvent`` + + +It is important to note that the ``ignore`` list is used for calculating ``actualEvents``, but is NOT used for the ``waitForEvent`` command + +Prose Tests +=========== + +The following tests have not yet been automated, but MUST still be tested + +#. All ConnectionPoolOptions MUST be specified at the MongoClient level +#. All ConnectionPoolOptions MUST be the same for all pools created by a MongoClient +#. A user MUST be able to specify all ConnectionPoolOptions via a URI string +#. A user MUST be able to subscribe to Connection Monitoring Events in a manner idiomatic to their language and driver +#. When a check out attempt fails because connection set up throws an error, + assert that a ConnectionCheckOutFailedEvent with reason="connectionError" is emitted. diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json new file mode 100644 index 00000000000..7ed67902285 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json @@ -0,0 +1,48 @@ +{ + "version": 1, + "style": "unit", + "description": "must have an ID number associated with it", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolClosed", + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml new file mode 100644 index 00000000000..5b7b660e54a --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must have an ID number associated with it +operations: + - name: checkOut + - name: checkOut +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionPoolClosed + - ConnectionReady diff --git 
a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json new file mode 100644 index 00000000000..9b839e8f060 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json @@ -0,0 +1,48 @@ +{ + "version": 1, + "style": "unit", + "description": "must have IDs assigned in order of creation", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolClosed", + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml new file mode 100644 index 00000000000..162acfa7975 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must have IDs assigned in order of creation +operations: + - name: checkOut + - name: checkOut +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 1 + address: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 2 + address: 42 + - type: ConnectionCheckedOut + connectionId: 2 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionPoolClosed + - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json new file mode 100644 index 00000000000..a73afbf752b --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json @@ -0,0 +1,46 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy checked in connection if pool has been closed", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "close" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "poolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml new file mode 100644 index 00000000000..cf9bdfc1d70 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must destroy checked in connection if pool has been closed +operations: + - name: checkOut + label: conn + - name: close + - name: checkIn + connection: conn +events: + 
- type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionPoolClosed + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + - type: ConnectionClosed + connectionId: 1 + reason: poolClosed + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json new file mode 100644 index 00000000000..600c0520719 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json @@ -0,0 +1,46 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy checked in connection if it is stale", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "clear" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "stale", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml new file mode 100644 index 00000000000..2c95d5c03b6 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must destroy checked in connection if it is stale +operations: + - name: checkOut + label: conn + - name: clear + - name: checkIn + connection: conn +events: + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionPoolCleared + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + - type: ConnectionClosed + connectionId: 1 + reason: stale + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json new file mode 100644 index 00000000000..015928c50d3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json @@ -0,0 +1,41 @@ +{ + "version": 1, + "style": "unit", + "description": "must make valid checked in connection available", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml new file mode 100644 index 00000000000..bebc035f702 --- /dev/null +++ 
b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml @@ -0,0 +1,24 @@ +version: 1 +style: unit +description: must make valid checked in connection available +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: checkOut +events: + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted \ No newline at end of file diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin.json b/test/spec/connection-monitoring-and-pooling/pool-checkin.json new file mode 100644 index 00000000000..7073895ad2a --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin.json @@ -0,0 +1,30 @@ +{ + "version": 1, + "style": "unit", + "description": "must have a method of allowing the driver to check in a connection", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionCheckOutStarted", + "ConnectionCheckedOut" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin.yml new file mode 100644 index 00000000000..c2560a5cd3b --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin.yml @@ -0,0 +1,19 @@ +version: 1 +style: unit +description: must have a method of allowing the driver to check in a connection +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn +events: + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionCheckOutStarted + - ConnectionCheckedOut diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json new file mode 100644 index 00000000000..0343fa75568 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json @@ -0,0 +1,26 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to check out a connection", + "operations": [ + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml new file mode 100644 index 00000000000..b0f61a275d6 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml @@ -0,0 +1,15 @@ +version: 1 +style: unit +description: must be able to check out a connection +operations: + - name: checkOut +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady \ No newline at end of file diff --git 
a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json new file mode 100644 index 00000000000..3823c23a780 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json @@ -0,0 +1,64 @@ +{ + "version": 1, + "style": "unit", + "description": "must throw error if checkOut is called on a closed pool", + "operations": [ + { + "name": "checkOut", + "label": "conn1" + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "close" + }, + { + "name": "checkOut" + } + ], + "error": { + "type": "PoolClosedError", + "message": "Attempted to check out a connection from closed connection pool" + }, + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "connectionId": 42 + }, + { + "type": "ConnectionCheckedIn", + "address": 42, + "connectionId": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "address": 42, + "reason": "poolClosed" + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml new file mode 100644 index 00000000000..6621685545a --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml @@ -0,0 +1,36 @@ +version: 1 +style: unit +description: must throw error if checkOut is called on a closed pool +operations: + - name: checkOut + label: conn1 + - name: checkIn + connection: conn1 + - name: close + - name: checkOut +error: + type: PoolClosedError + message: Attempted to check out a connection from closed connection pool +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + address: 42 + connectionId: 42 + - type: ConnectionCheckedIn + address: 42 + connectionId: 42 + - type: ConnectionPoolClosed + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutFailed + address: 42 + reason: poolClosed +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json new file mode 100644 index 00000000000..fee0d076cf1 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json @@ -0,0 +1,66 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to check out multiple connections at the same time", + "operations": [ + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForThread", + "target": "thread1" + }, + { + "name": "waitForThread", + "target": "thread2" + }, + { + "name": "waitForThread", + "target": "thread3" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": 
"ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml new file mode 100644 index 00000000000..714506ef7fe --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml @@ -0,0 +1,37 @@ +version: 1 +style: unit +description: must be able to check out multiple connections at the same time +operations: + - name: start + target: thread1 + - name: start + target: thread2 + - name: start + target: thread3 + - name: checkOut + thread: thread1 + - name: checkOut + thread: thread2 + - name: checkOut + thread: thread3 + - name: waitForThread + target: thread1 + - name: waitForThread + target: thread2 + - name: waitForThread + target: thread3 +events: + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionPoolCreated + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json new file mode 100644 index 00000000000..74325d655d3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json @@ -0,0 +1,58 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy and must not check out an idle connection if found while iterating available connections", + "poolOptions": { + "maxIdleTimeMS": 10 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "wait", + "ms": 50 + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "idle", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml new file mode 100644 index 00000000000..415906bb576 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml @@ -0,0 +1,35 @@ +version: 1 +style: unit +description: must destroy and must not check out an idle connection if found while iterating available connections +poolOptions: + maxIdleTimeMS: 10 +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: wait + ms: 50 + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + # In between these, wait so connection becomes idle + - type: ConnectionClosed + connectionId: 1 + reason: idle + address: 42 + - type: 
ConnectionCheckedOut + connectionId: 2 + address: 42 +ignore: + - ConnectionReady + - ConnectionCreated + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json new file mode 100644 index 00000000000..67ee507fe88 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json @@ -0,0 +1,58 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy and must not check out a stale connection if found while iterating available connections", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "clear" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "stale", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml new file mode 100644 index 00000000000..c434f4b0656 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml @@ -0,0 +1,33 @@ +version: 1 +style: unit +description: must destroy and must not check out a stale connection if found while iterating available connections +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: clear + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckedOut + connectionId: 1 + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + address: 42 + - type: ConnectionPoolCleared + address: 42 + - type: ConnectionClosed + connectionId: 1 + reason: stale + address: 42 + - type: ConnectionCheckedOut + connectionId: 2 + address: 42 +ignore: + - ConnectionReady + - ConnectionCreated + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json new file mode 100644 index 00000000000..e1fb9d07837 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json @@ -0,0 +1,48 @@ +{ + "version": 1, + "style": "unit", + "description": "When a pool is closed, it MUST first destroy all available connections in that pool", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkOut" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionCheckedIn", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 2, + "reason": "poolClosed", + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted", + "ConnectionCheckedOut" + ] +} diff --git 
a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml new file mode 100644 index 00000000000..65b13a6d51b --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml @@ -0,0 +1,28 @@ +version: 1 +style: unit +description: When a pool is closed, it MUST first destroy all available connections in that pool +operations: + - name: checkOut + - name: checkOut + label: conn + - name: checkOut + - name: checkIn + connection: conn + - name: close +events: + - type: ConnectionCheckedIn + connectionId: 2 + address: 42 + - type: ConnectionClosed + connectionId: 2 + reason: poolClosed + address: 42 + - type: ConnectionPoolClosed + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionPoolCreated + - ConnectionCheckOutStarted + - ConnectionCheckedOut + diff --git a/test/spec/connection-monitoring-and-pooling/pool-close.json b/test/spec/connection-monitoring-and-pooling/pool-close.json new file mode 100644 index 00000000000..fe083d73e63 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-close.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to manually close a pool", + "operations": [ + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-close.yml b/test/spec/connection-monitoring-and-pooling/pool-close.yml new file mode 100644 index 00000000000..2562224b43c --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-close.yml @@ -0,0 +1,11 @@ +version: 1 +style: unit +description: must be able to manually close a pool +operations: + - name: close +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionPoolClosed + address: 42 diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json new file mode 100644 index 00000000000..b585d0daec7 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json @@ -0,0 +1,129 @@ +{ + "version": 1, + "style": "unit", + "description": "must never exceed maxPoolSize total connections", + "poolOptions": { + "maxPoolSize": 3 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn1" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn2" + }, + { + "name": "checkIn", + "connection": "conn2" + }, + { + "name": "checkOut" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 5 + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "waitForThread", + "target": "thread1" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": 
"ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml new file mode 100644 index 00000000000..64e521c7ec3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml @@ -0,0 +1,71 @@ +version: 1 +style: unit +description: must never exceed maxPoolSize total connections +poolOptions: + maxPoolSize: 3 +operations: + - name: checkOut + label: conn1 + - name: checkOut + - name: checkOut + label: conn2 + - name: checkIn + connection: conn2 + - name: checkOut + - name: start + target: thread1 + - name: checkOut + thread: thread1 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 5 + - name: checkIn + connection: conn1 + - name: waitForThread + target: thread1 +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json new file mode 100644 index 00000000000..7b5cf202b31 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json @@ -0,0 +1,50 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to start a pool with minPoolSize connections", + "poolOptions": { + "minPoolSize": 3 + }, + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 3 + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + 
"ConnectionClosed", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml new file mode 100644 index 00000000000..d87f7feec34 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml @@ -0,0 +1,31 @@ +version: 1 +style: unit +description: must be able to start a pool with minPoolSize connections +poolOptions: + minPoolSize: 3 +operations: + - name: waitForEvent + event: ConnectionCreated + count: 3 + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + - type: ConnectionCreated + connectionId: 42 + address: 42 + # Ensures that by the time pool is closed, there are at least 3 connections + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionReady + - ConnectionClosed + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json new file mode 100644 index 00000000000..4e8223f91e3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json @@ -0,0 +1,32 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to start a pool with various options set", + "poolOptions": { + "maxPoolSize": 50, + "minPoolSize": 5, + "maxIdleTimeMS": 100 + }, + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionPoolCreated", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": { + "maxPoolSize": 50, + "minPoolSize": 5, + "maxIdleTimeMS": 100 + } + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml new file mode 100644 index 00000000000..32c8d0e54c8 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml @@ -0,0 +1,21 @@ +version: 1 +style: unit +description: must be able to start a pool with various options set +poolOptions: + maxPoolSize: 50 + minPoolSize: 5 + maxIdleTimeMS: 100 +operations: + - name: waitForEvent + event: ConnectionPoolCreated + count: 1 +events: + - type: ConnectionPoolCreated + address: 42 + options: + maxPoolSize: 50 + minPoolSize: 5 + maxIdleTimeMS: 100 +ignore: + - ConnectionCreated + - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-create.json b/test/spec/connection-monitoring-and-pooling/pool-create.json new file mode 100644 index 00000000000..8c1f85537f9 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to create a pool", + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionPoolCreated", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + } + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/pool-create.yml b/test/spec/connection-monitoring-and-pooling/pool-create.yml new file mode 100644 index 00000000000..f4989e8d4b3 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/pool-create.yml @@ -0,0 +1,12 @@ +version: 1 +style: unit 
+description: must be able to create a pool +operations: + - name: waitForEvent + event: ConnectionPoolCreated + count: 1 +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json new file mode 100644 index 00000000000..c58fbadcff2 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json @@ -0,0 +1,192 @@ +{ + "version": 1, + "style": "unit", + "description": "must issue Connections to threads in the order that the threads entered the queue", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1", + "label": "conn1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 2 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2", + "label": "conn2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 3 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3", + "label": "conn3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "start", + "target": "thread4" + }, + { + "name": "checkOut", + "thread": "thread4", + "label": "conn4" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 5 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForThread", + "target": "thread1" + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "waitForThread", + "target": "thread2" + }, + { + "name": "checkIn", + "connection": "conn2" + }, + { + "name": "waitForThread", + "target": "thread3" + }, + { + "name": "checkIn", + "connection": "conn3" + }, + { + "name": "waitForThread", + "target": "thread4" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionPoolCreated" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml 
b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml new file mode 100644 index 00000000000..024ec69316a --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml @@ -0,0 +1,124 @@ +version: 1 +style: unit +description: must issue Connections to threads in the order that the threads entered the queue +poolOptions: + maxPoolSize: 1 + waitQueueTimeoutMS: 5000 +operations: + # Check out sole connection in pool + - name: checkOut + label: conn0 + # Create 4 threads, have them all queue up for connections + # Note: this might become non-deterministic depending on how you + # implement your test runner. The goal is for each thread to + # have started and begun checkOut before the next thread starts. + # The sleep operations should make this more consistent. + - name: start + target: thread1 + - name: checkOut + thread: thread1 + label: conn1 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 2 + # Give thread1 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 + - name: start + target: thread2 + - name: checkOut + thread: thread2 + label: conn2 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 3 + # Give thread2 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 + - name: start + target: thread3 + - name: checkOut + thread: thread3 + label: conn3 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 4 + # Give thread3 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 + - name: start + target: thread4 + - name: checkOut + thread: thread4 + label: conn4 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 5 + # Give thread4 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. 
+ - name: wait + ms: 100 + # From main thread, keep checking in connection and then wait for appropriate thread + # Test will timeout if threads are not enqueued in proper order + - name: checkIn + connection: conn0 + - name: waitForThread + target: thread1 + - name: checkIn + connection: conn1 + - name: waitForThread + target: thread2 + - name: checkIn + connection: conn2 + - name: waitForThread + target: thread3 + - name: checkIn + connection: conn3 + - name: waitForThread + target: thread4 +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionPoolCreated diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json new file mode 100644 index 00000000000..ee7cf279552 --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json @@ -0,0 +1,71 @@ +{ + "version": 1, + "style": "unit", + "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 20 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForThread", + "target": "thread1" + } + ], + "error": { + "type": "WaitQueueTimeoutError", + "message": "Timed out while checking out a connection from connection pool" + }, + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "timeout", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionPoolCreated" + ] +} diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml new file mode 100644 index 00000000000..eba4ab638da --- /dev/null +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml @@ -0,0 +1,46 @@ +version: 1 +style: unit +description: must aggressively timeout threads enqueued longer than waitQueueTimeoutMS +poolOptions: + maxPoolSize: 1 + waitQueueTimeoutMS: 20 +operations: + # Check out only possible connection + - name: checkOut + label: conn0 + # Start a thread, have it enter the wait 
queue + - name: start + target: thread1 + - name: checkOut + thread: thread1 + # Wait for other thread to time out, then check in connection + - name: waitForEvent + event: ConnectionCheckOutFailed + count: 1 + - name: checkIn + connection: conn0 + # Rejoin thread1, should experience error + - name: waitForThread + target: thread1 +error: + type: WaitQueueTimeoutError + message: Timed out while checking out a connection from connection pool +events: + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckedOut + connectionId: 42 + address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutFailed + reason: timeout + address: 42 + - type: ConnectionCheckedIn + connectionId: 42 + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionPoolCreated From 44c647ee8b264e4520f84a6cb121a9559bd49508 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 17 Dec 2019 15:33:40 -0500 Subject: [PATCH 002/130] chore: update google analytics code --- conf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf.json b/conf.json index fd24c95adb3..18324b38d6e 100644 --- a/conf.json +++ b/conf.json @@ -49,7 +49,7 @@ "outputSourceFiles" : true }, "applicationName": "Node.js MongoDB Driver API", - "googleAnalytics": "UA-29229787-1", + "googleAnalytics": "UA-7301842-14", "openGraph": { "title": "MongoDB Driver API for Node.js", "type": "website", From bf988467e6d558e8a21296bf07ecbd06c7d199f8 Mon Sep 17 00:00:00 2001 From: Dan Aprahamian Date: Mon, 17 Dec 2018 13:47:46 -0500 Subject: [PATCH 003/130] WIP: PoC of CMAP - WIP: more updates - WIP more changes - updating pool and runner to properly handle new test - updating to follow spec compliance with allowing not-ready conns - adding spec compliance so connect does not block acquisition - adding spec tests - remove old pool spec tests - updating spec tests - update cmap runner for latest tests - updaating spec definitions - ensure that pool spec tests are not run in pre-node8 env - move file up a directory to avoid being run - updating spec tests - make pool node 4 compliant - updating test runner for cmap to use node 4 - move tests back to where it belongs - updating stale connection test - ACTUAL updated to pool-checkout-no-stale - make Connection be identicial to placeholder connection - remove old parts of spec like WaitQueueFullError and waitQueueSize - make sure waitQueueTimeoutMS default is 0 in waitQueue --- lib/pool/connection.js | 63 ++++ lib/pool/connection_manager.js | 45 +++ lib/pool/counter.js | 12 + lib/pool/errors.js | 35 ++ lib/pool/events.js | 100 ++++++ lib/pool/index.js | 5 + lib/pool/pool.js | 289 ++++++++++++++++ lib/pool/wait_queue.js | 130 +++++++ .../README.rst | 156 +++++++++ .../connection-must-have-id.json | 42 +++ .../connection-must-have-id.yml | 21 ++ .../connection-must-order-ids.json | 42 +++ .../connection-must-order-ids.yml | 21 ++ .../pool-checkin-destroy-closed.json | 43 +++ .../pool-checkin-destroy-closed.yml | 24 ++ .../pool-checkin-destroy-stale.json | 43 +++ .../pool-checkin-destroy-stale.yml | 24 ++ .../pool-checkin-make-available.json | 38 ++ .../pool-checkin-make-available.yml | 21 ++ .../pool-checkin.json | 29 ++ .../pool-checkin.yml | 18 + .../pool-checkout-connection.json | 24 ++ .../pool-checkout-connection.yml | 13 + .../pool-checkout-error-closed.json | 50 +++ .../pool-checkout-error-closed.yml | 28 ++ .../pool-checkout-multiple.json | 63 ++++ .../pool-checkout-multiple.yml | 34 ++ 
.../pool-checkout-no-idle.json | 54 +++ .../pool-checkout-no-idle.yml | 31 ++ .../pool-checkout-no-stale.json | 54 +++ .../pool-checkout-no-stale.yml | 29 ++ .../pool-close-destroy-conns.json | 46 +++ .../pool-close-destroy-conns.yml | 26 ++ .../pool-close.json | 21 ++ .../pool-close.yml | 11 + .../pool-create-max-size.json | 114 ++++++ .../pool-create-max-size.yml | 56 +++ .../pool-create-min-size.json | 46 +++ .../pool-create-min-size.yml | 27 ++ .../pool-create-with-options.json | 31 ++ .../pool-create-with-options.yml | 20 ++ .../pool-create.json | 19 + .../pool-create.yml | 12 + .../wait-queue-fairness.json | 162 +++++++++ .../wait-queue-fairness.yml | 94 +++++ .../wait-queue-timeout.json | 66 ++++ .../wait-queue-timeout.yml | 41 +++ test/core/unit/pool_spec_tests.js | 324 ++++++++++++++++++ test/match_spec.js | 76 ++++ 49 files changed, 2773 insertions(+) create mode 100644 lib/pool/connection.js create mode 100644 lib/pool/connection_manager.js create mode 100644 lib/pool/counter.js create mode 100644 lib/pool/errors.js create mode 100644 lib/pool/events.js create mode 100644 lib/pool/index.js create mode 100644 lib/pool/pool.js create mode 100644 lib/pool/wait_queue.js create mode 100644 test/core/spec/connection-monitoring-and-pooling/README.rst create mode 100644 test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json create mode 100644 
test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-close.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-close.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml create mode 100644 test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.json create mode 100644 test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml create mode 100644 test/core/unit/pool_spec_tests.js create mode 100644 test/match_spec.js diff --git a/lib/pool/connection.js b/lib/pool/connection.js new file mode 100644 index 00000000000..f4b459b0605 --- /dev/null +++ b/lib/pool/connection.js @@ -0,0 +1,63 @@ +'use strict'; + +class Connection { + constructor(options) { + options = options || {}; + this.generation = options.generation; + this.id = options.id; + this.maxIdleTimeMS = options.maxIdleTimeMS; + this.poolId = options.poolId; + this.address = options.address; + this.readyToUse = false; + this.lastMadeAvailable = undefined; + this.callbacks = []; + } + + get metadata() { + return { + id: this.id, + generation: this.generation, + poolId: this.poolId, + address: this.address + }; + } + + timeIdle() { + return this.readyToUse ?
Date.now() - this.lastMadeAvailable : 0; + } + + write(callback) { + setTimeout(() => callback()); + } + + makeReadyToUse() { + this.readyToUse = true; + this.lastMadeAvailable = Date.now(); + } + + makeInUse() { + this.readyToUse = false; + this.lastMadeAvailable = undefined; + } + + waitUntilConnect(callback) { + if (this.readyToUse) { + return callback(null, this); + } + + this.callbacks.push(callback); + } + + connect(callback) { + this.callbacks.push(callback); + setTimeout(() => { + this.makeReadyToUse(); + this.callbacks.forEach(c => c(null, this)); + this.callbacks = []; + }); + } + + destroy() {} +} + +module.exports.Connection = Connection; diff --git a/lib/pool/connection_manager.js b/lib/pool/connection_manager.js new file mode 100644 index 00000000000..1fed9433ed6 --- /dev/null +++ b/lib/pool/connection_manager.js @@ -0,0 +1,45 @@ +'use strict'; + +class ConnectionManager { + constructor() { + this._totalConnections = new Set(); + this._availableConnections = new Set(); + } + + get totalConnectionCount() { + return this._totalConnections.size; + } + + get availableConnectionCount() { + return this._availableConnections.size; + } + + add(connection) { + this._totalConnections.add(connection); + } + + has(connection) { + return this._totalConnections.has(connection); + } + + remove(connections) { + this._availableConnections.delete(connections); + this._totalConnections.delete(connections); + } + + makeAvailable(connection) { + this._availableConnections.add(connection); + } + + markInUse(connection) { + this._availableConnections.delete(connection); + } + + getAvailable() { + const connection = this._availableConnections.values().next().value; + this._availableConnections.delete(connection); + return connection; + } +} + +module.exports = { ConnectionManager }; diff --git a/lib/pool/counter.js b/lib/pool/counter.js new file mode 100644 index 00000000000..9a2033257ea --- /dev/null +++ b/lib/pool/counter.js @@ -0,0 +1,12 @@ +'use strict'; + +function* makeCounter(seed) { + let count = seed || 0; + while (true) { + const newCount = count; + count += 1; + yield newCount; + } +} + +module.exports = { makeCounter }; diff --git a/lib/pool/errors.js b/lib/pool/errors.js new file mode 100644 index 00000000000..71c4a33215c --- /dev/null +++ b/lib/pool/errors.js @@ -0,0 +1,35 @@ +'use strict'; + +class PoolClosedError extends Error { + constructor(pool) { + super('Attempted to check out a connection from closed connection pool'); + Error.captureStackTrace(this, this.constructor); + this.type = 'PoolClosedError'; + this.address = pool.address; + } +} + +class WaitQueueTimeoutError extends Error { + constructor(pool) { + super('Timed out while checking out a connection from connection pool'); + Error.captureStackTrace(this, this.constructor); + this.type = 'WaitQueueTimeoutError'; + this.address = pool.address; + } +} + +// Technically not part of the spec. 
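+// checkIn passes this error to its callback when it is handed a connection
+// that this pool does not track (i.e. one created by a different pool).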
+class PoolReleaseForeignConnectionError extends Error { + constructor(pool) { + super('Attempted to check in a connection created by a different pool'); + Error.captureStackTrace(this, this.constructor); + this.errorType = 'poolReleaseForeignConnectionError'; + this.address = pool.address; + } +} + +module.exports = { + PoolClosedError, + WaitQueueTimeoutError, + PoolReleaseForeignConnectionError +}; diff --git a/lib/pool/events.js b/lib/pool/events.js new file mode 100644 index 00000000000..4abe591f4a8 --- /dev/null +++ b/lib/pool/events.js @@ -0,0 +1,100 @@ +'use strict'; + +class PoolMonitoringEvent { + constructor(type, pool) { + this.time = new Date(); + this.type = type; + this.address = pool.address; + } +} + +class PoolCreatedEvent extends PoolMonitoringEvent { + constructor(pool) { + super('ConnectionPoolCreated', pool); + this.options = pool.options; + } +} +PoolCreatedEvent.eventType = 'connectionPoolCreated'; + +class PoolClosedEvent extends PoolMonitoringEvent { + constructor(pool) { + super('ConnectionPoolClosed', pool); + } +} +PoolClosedEvent.eventType = 'connectionPoolClosed'; + +class ConnectionCreatedEvent extends PoolMonitoringEvent { + constructor(pool, connection) { + super('ConnectionCreated', pool); + this.connectionId = connection.id; + } +} +ConnectionCreatedEvent.eventType = 'connectionCreated'; + +class ConnectionReadyEvent extends PoolMonitoringEvent { + constructor(pool, connection) { + super('ConnectionReady', pool); + this.connectionId = connection.id; + } +} +ConnectionReadyEvent.eventType = 'connectionReady'; + +class ConnectionClosedEvent extends PoolMonitoringEvent { + constructor(pool, connection, reason) { + super('ConnectionClosed', pool); + this.connectionId = connection.id; + this.reason = reason || 'unknown'; + } +} +ConnectionClosedEvent.eventType = 'connectionClosed'; + +class ConnectionCheckOutStarted extends PoolMonitoringEvent { + constructor(pool) { + super('ConnectionCheckOutStarted', pool); + } +} +ConnectionCheckOutStarted.eventType = 'connectionCheckOutStarted'; + +class ConnectionCheckOutFailed extends PoolMonitoringEvent { + constructor(pool, reason) { + super('ConnectionCheckOutFailed', pool); + this.reason = reason; + } +} +ConnectionCheckOutFailed.eventType = 'connectionCheckOutFailed'; + +class ConnectionCheckedOutEvent extends PoolMonitoringEvent { + constructor(pool, connection) { + super('ConnectionCheckedOut', pool); + this.connectionId = connection.id; + } +} +ConnectionCheckedOutEvent.eventType = 'connectionCheckedOut'; + +class ConnectionCheckedInEvent extends PoolMonitoringEvent { + constructor(pool, connection) { + super('ConnectionCheckedIn', pool); + this.connectionId = connection.id; + } +} +ConnectionCheckedInEvent.eventType = 'connectionCheckedIn'; + +class PoolClearedEvent extends PoolMonitoringEvent { + constructor(pool) { + super('ConnectionPoolCleared', pool); + } +} +PoolClearedEvent.eventType = 'connectionPoolCleared'; + +module.exports = { + PoolCreatedEvent, + PoolClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + ConnectionClosedEvent, + ConnectionCheckOutStarted, + ConnectionCheckOutFailed, + ConnectionCheckedOutEvent, + ConnectionCheckedInEvent, + PoolClearedEvent +}; diff --git a/lib/pool/index.js b/lib/pool/index.js new file mode 100644 index 00000000000..0b8167cc739 --- /dev/null +++ b/lib/pool/index.js @@ -0,0 +1,5 @@ +'use strict'; + +const Pool = require('./pool').Pool; + +module.exports = { Pool }; diff --git a/lib/pool/pool.js b/lib/pool/pool.js new file mode 100644 index 
00000000000..7acb75f7d67 --- /dev/null +++ b/lib/pool/pool.js @@ -0,0 +1,289 @@ +'use strict'; + +const EventEmitter = require('events').EventEmitter; +const makeCounter = require('./counter').makeCounter; +const Connection = require('./connection').Connection; +const WaitQueue = require('./wait_queue').WaitQueue; +const ConnectionManager = require('./connection_manager').ConnectionManager; + +const errors = require('./errors'); +const PoolClosedError = errors.PoolClosedError; +const WaitQueueTimeoutError = errors.WaitQueueTimeoutError; +const PoolReleaseForeignConnectionError = errors.PoolReleaseForeignConnectionError; + +const events = require('./events'); +const PoolCreatedEvent = events.PoolCreatedEvent; +const PoolClosedEvent = events.PoolClosedEvent; +const ConnectionCreatedEvent = events.ConnectionCreatedEvent; +const ConnectionReadyEvent = events.ConnectionReadyEvent; +const ConnectionClosedEvent = events.ConnectionClosedEvent; +const ConnectionCheckOutStarted = events.ConnectionCheckOutStarted; +const ConnectionCheckOutFailed = events.ConnectionCheckOutFailed; +const ConnectionCheckedOutEvent = events.ConnectionCheckedOutEvent; +const ConnectionCheckedInEvent = events.ConnectionCheckedInEvent; +const PoolClearedEvent = events.PoolClearedEvent; + +const VALID_OPTIONS = [ + 'maxPoolSize', + 'minPoolSize', + 'maxIdleTimeMS', + 'waitQueueTimeoutMS', + 'enableConnectionMonitoring' +]; + +function getSpecOptions(options) { + const newOptions = VALID_OPTIONS.reduce((obj, key) => { + if (options.hasOwnProperty(key)) { + obj[key] = options[key]; + } + return obj; + }, {}); + + return Object.freeze(newOptions); +} + +class Pool extends EventEmitter { + constructor(options) { + super(); + options = options || {}; + + this.options = getSpecOptions(options); + + const counter = makeCounter(1); + const connections = new ConnectionManager(); + const waitQueue = new WaitQueue({ + pool: this, + waitQueueTimeoutMS: + typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 0 + }); + + this.s = { + // Wait queue that handles queueing for connections + waitQueue, + + // Connection Manager that handles state of various connections + connections, + + // Counter that increments for each new connection. + counter, + + // Spec mandated fields + maxPoolSize: typeof options.maxPoolSize === 'number' ? options.maxPoolSize : 100, + minPoolSize: typeof options.minPoolSize === 'number' ? options.minPoolSize : 0, + maxIdleTimeMS: typeof options.maxIdleTimeMS === 'number' ? 
options.maxIdleTimeMS : 0, + + // Allows us to override the Connection constructor for testing purposes + Connection: options.Connection || Connection, + + // State variables that do not fall into any other category + pid: process.pid, + generation: 0, + isClosed: false, + address: options.address + }; + + process.nextTick(() => { + this._emitMonitoringEvent(PoolCreatedEvent); + this._satisfyMinPoolSize(); + }); + } + + // Public API + checkOut(callback) { + this._emitMonitoringEvent(ConnectionCheckOutStarted); + + if (this.s.isClosed) { + this._emitMonitoringEvent(ConnectionCheckOutFailed, 'poolClosed'); + return callback(new PoolClosedError(this)); + } + + const self = this; + + this.s.waitQueue.enter(function() { + const args = [callback].concat(Array.from(arguments)); + self._acquisitionHandler.apply(self, args); + }); + } + + checkIn(connection, force, callback) { + if (typeof force === 'function' && typeof callback !== 'function') { + callback = force; + force = false; + } + + if (!this.s.connections.has(connection)) { + return callback(new PoolReleaseForeignConnectionError(this, connection)); + } + + const closed = this.s.isClosed; + const stale = this._connectionIsStale(connection); + const willDestroy = !!(force || closed || stale); + + // Properly adjust state of connection + if (!willDestroy) { + connection.makeReadyToUse(); + this.s.connections.makeAvailable(connection); + } + + this._emitMonitoringEvent(ConnectionCheckedInEvent, connection); + + if (willDestroy) { + const reason = force ? 'force' : closed ? 'poolClosed' : 'stale'; + this._destroyConnection(connection, reason); + } + + callback(null); + } + + clear(callback) { + this.s.generation += 1; + this._emitMonitoringEvent(PoolClearedEvent); + callback(); + } + + close(callback) { + if (this.s.isClosed) { + return callback(); + } + + this.s.isClosed = true; + this.s.waitQueue.destroy(); + while (this.availableConnectionCount) { + this._destroyConnection(this.s.connections.getAvailable(), 'poolClosed'); + } + this._emitMonitoringEvent(PoolClosedEvent); + callback(); + } + + destroy(callback) { + this.close(() => { + if (typeof this.s.counter.return === 'function') { + this.s.counter.return(); + } + callback(); + }); + } + + // Accessors required by spec + get totalConnectionCount() { + return this.s.connections.totalConnectionCount; + } + + get availableConnectionCount() { + return this.s.connections.availableConnectionCount; + } + + get address() { + return this.s.address; + } + + // Private Helpers + _acquisitionHandler(callback, err, connection) { + if (!err) { + this.s.connections.markInUse(connection); + this._emitMonitoringEvent(ConnectionCheckedOutEvent, connection); + return callback(null, connection); + } + + let reason = 'unknown'; + + if (err instanceof WaitQueueTimeoutError) { + reason = 'timeout'; + } + + this._emitMonitoringEvent(ConnectionCheckOutFailed, reason); + + return callback(err, connection); + } + + _satisfyMinPoolSize() { + const minPoolSize = this.s.minPoolSize; + if (this.totalConnectionCount < minPoolSize) { + this._createConnection(() => this._satisfyMinPoolSize()); + } + } + + _propagateError() { + return; + } + + _createConnection(callback) { + const connection = new this.s.Connection({ + id: this.s.counter.next().value, + generation: this.s.generation, + maxIdleTimeMS: this.s.maxIdleTimeMS, + address: this.s.address + }); + + this.s.connections.add(connection); + this.s.connections.makeAvailable(connection); + this._emitMonitoringEvent(ConnectionCreatedEvent, connection); + + 
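+    // Note (implementation detail, not from the spec text): the connection is
+    // registered and made available before connect() completes, so checkOut is
+    // never blocked on connection establishment; the wait queue hands it to a
+    // waiter only after waitUntilConnect reports it ready.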
connection.connect(err => { + if (err) { + this.s.connections.remove(connection); + return this._propagateError(err); + } + + connection.makeReadyToUse(); + + this._emitMonitoringEvent(ConnectionReadyEvent, connection); + }); + + if (callback) { + callback(null, connection); + } + } + + _destroyConnection(connection, reason) { + this.s.connections.remove(connection); + this._emitMonitoringEvent(ConnectionClosedEvent, connection, reason); + setTimeout(() => connection.destroy()); + } + + _tryToGetConnection(callback) { + const maxPoolSize = this.s.maxPoolSize; + if (this.availableConnectionCount) { + const connection = this.s.connections.getAvailable(); + const isStale = this._connectionIsStale(connection); + const isIdle = this._connectionIsIdle(connection); + if (isStale || isIdle) { + this._destroyConnection(connection, isStale ? 'stale' : 'idle'); + return setTimeout(() => this._tryToGetConnection(callback)); + } + + return callback(null, connection); + } + + if (maxPoolSize <= 0 || this.totalConnectionCount < maxPoolSize) { + return this._createConnection(() => this._tryToGetConnection(callback)); + } + + return callback(null, null); + } + + _connectionIsStale(connection) { + return connection.generation !== this.s.generation; + } + + _connectionIsIdle(connection) { + return !!(this.s.maxIdleTimeMS && connection.timeIdle() > this.s.maxIdleTimeMS); + } + + _emitMonitoringEvent() { + // Node >=6 impl: + // _emitMonitoringEvent(Ctor, ...args) { + // const ev = new Ctor(this, ...args); + // this.emit(Ctor.eventType, ev); + // } + const args = Array.from(arguments); + const Ctor = args.shift(); + args.unshift(null, this); + const BoundCtor = Function.bind.apply(Ctor, args); + + const ev = new BoundCtor(); + this.emit(Ctor.eventType, ev); + } +} + +exports.Pool = Pool; diff --git a/lib/pool/wait_queue.js b/lib/pool/wait_queue.js new file mode 100644 index 00000000000..742feb0e67c --- /dev/null +++ b/lib/pool/wait_queue.js @@ -0,0 +1,130 @@ +'use strict'; + +const errors = require('./errors'); +const PoolClosedError = errors.PoolClosedError; +const WaitQueueTimeoutError = errors.WaitQueueTimeoutError; + +class WaitQueueMember { + constructor(callback) { + this.callback = callback; + this.finished = false; + this.timeout = null; + } + + _finish(err, ret) { + if (!this.finished) { + this.finished = true; + process.nextTick(() => this.callback.call(null, err, ret)); + } + + if (this.timeout) { + clearTimeout(this.timeout); + } + } + + success(connection) { + this._finish(null, connection); + } + + failure(err) { + this._finish(err); + } + + setTimeout(cb, ms) { + this.timeout = setTimeout(cb, ms); + } +} + +class WaitQueue { + constructor(options) { + this._destroyed = false; + + this.timeoutMS = + typeof options.waitQueueTimeoutMS === 'number' ? 
options.waitQueueTimeoutMS : 0; + this.periodMS = options.waitQueuePeriodMS || 10; + + this._pool = options.pool; + this._queue = []; + this._timeout = null; + } + + // Returns true if managed to enter wait queue + enter(callback) { + const item = new WaitQueueMember(callback); + this._queue.push(item); + if (this.timeoutMS > 0) { + item.setTimeout(() => this._timeoutHandler(item), this.timeoutMS); + } + + this._start(); + + return true; + } + + destroy() { + this._destroyed = true; + this._stop(); + this._clear(); + this._queue = undefined; + this._pool = undefined; + } + + _timeoutHandler(item) { + if (!item.finished) { + this._queue.splice(this._queue.indexOf(item), 1); + item.failure(new WaitQueueTimeoutError(this._pool)); + } + } + + _clear() { + while (this._queue && this._queue.length) { + const item = this._queue.shift(); + item.failure(new PoolClosedError(this._pool)); + } + } + + _start() { + if (!this._timeout) { + this._timeout = setTimeout(() => this._run()); + } + } + + _stop() { + if (this._timeout) { + clearTimeout(this._timeout); + this._timeout = undefined; + } + } + + _run() { + // If we're closed, destroy entire wait queue + if (this._destroyed) { + this._clear(); + } + + if (!(this._queue && this._queue.length)) { + return this._stop(); + } + + const item = this._queue.shift(); + if (item.finished) { + return setTimeout(() => this._run()); + } + + this._pool._tryToGetConnection((err, connection) => { + setTimeout(() => this._run()); + if (connection) { + connection.waitUntilConnect(err => { + if (err) { + return item.failure(err); + } + item.success(connection); + }); + } else { + this._queue.unshift(item); + } + }); + } +} + +module.exports = { WaitQueue }; diff --git a/test/core/spec/connection-monitoring-and-pooling/README.rst b/test/core/spec/connection-monitoring-and-pooling/README.rst new file mode 100644 index 00000000000..b1605c14f0e --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/README.rst @@ -0,0 +1,156 @@ +.. role:: javascript(code) + :language: javascript + +======================================== +Connection Monitoring and Pooling (CMAP) +======================================== + +.. contents:: + +-------- + +Introduction +============ + +The YAML and JSON files in this directory are platform-independent tests that +drivers can use to prove their conformance to the Connection Monitoring and Pooling (CMAP) Spec. + +Several prose tests, which are not easily expressed in YAML, are also presented +in this file. Those tests will need to be manually implemented by each driver. + +Common Test Format +================== + +Each YAML file has the following keys: + +- ``version``: A version number indicating the expected format of the spec tests (current version = 1) +- ``style``: A string indicating what style of tests this file contains. Currently ``unit`` is the only valid value +- ``description``: A text description of what the test is meant to assert + +Unit Test Format: +================= + +All Unit Tests have some of the following fields: + +- ``poolOptions``: if present, connection pool options to use when creating a pool +- ``operations``: A list of operations to perform. All operations support the following fields: + + - ``name``: A string describing which operation to issue. + - ``thread``: The name of the thread in which to run this operation. If not specified, runs in the default thread + +- ``error``: Indicates that the main thread is expected to error during this test.
An error may include any of the following fields: + + - ``type``: the type of error emitted + - ``message``: the message associated with that error + - ``address``: Address of pool emitting error + +- ``events``: An array of all connection monitoring events expected to occur while running ``operations``. An event may contain any of the following fields + + - ``type``: The type of event emitted + - ``address``: The address of the pool emitting the event + - ``connectionId``: The id of a connection associated with the event + - ``options``: Options used to create the pool + - ``reason``: A reason giving more information on why the event was emitted + +- ``ignore``: An array of event names to ignore + +Valid Unit Test Operations are the following: + +- ``start(target)``: Starts a new thread named ``target`` + + - ``target``: The name of the new thread to start + +- ``wait(ms)``: Sleep the current thread for ``ms`` milliseconds + + - ``ms``: The number of milliseconds to sleep the current thread for + +- ``waitForThread(target)``: wait for thread ``target`` to finish executing. Propagate any errors to the main thread. + + - ``target``: The name of the thread to wait for. + +- ``waitForEvent(event, count)``: block the current thread until ``event`` has occurred ``count`` times + + - ``event``: The name of the event + - ``count``: The number of times the event must occur (counting from the start of the test) + +- ``label = pool.checkOut()``: call ``checkOut`` on pool, returning the checked out connection + + - ``label``: If specified, associate this label with the returned connection, so that it may be referenced in later operations + +- ``pool.checkIn(connection)``: call ``checkIn`` on pool + + - ``connection``: A string label identifying which connection to check in. Should be a label that was previously set with ``checkOut`` + +- ``pool.clear()``: call ``clear`` on Pool +- ``pool.close()``: call ``close`` on Pool + +Spec Test Match Function +======================== + +The definition of MATCH or MATCHES in the Spec Test Runner is as follows: + +- MATCH takes two values, ``expected`` and ``actual`` +- Notation is "Assert [actual] MATCHES [expected]" +- Assertion passes if ``expected`` is a subset of ``actual``, with the values ``42`` and ``"42"`` acting as placeholders for "any value" + +Pseudocode implementation of ``actual`` MATCHES ``expected``: + +:: + + If expected is "42" or 42: + Assert that actual exists (is not null or undefined) + Else: + Assert that actual is of the same JSON type as expected + If expected is a JSON array: + For every idx/value in expected: + Assert that actual[idx] MATCHES value + Else if expected is a JSON object: + For every key/value in expected: + Assert that actual[key] MATCHES value + Else: + Assert that expected equals actual + +Unit Test Runner: +================= + +For the unit tests, the behavior of a Connection is irrelevant beyond the need to assert ``connection.id``. Drivers MAY use a mock connection class for testing the pool behavior in unit tests. + +For each YAML file with ``style: unit``: + +- Create a Pool ``pool``, subscribe and capture any Connection Monitoring events emitted in order. + + - If ``poolOptions`` is specified, use those options to initialize the pool + - The returned pool must have an ``address`` set as a string value. + +- Execute each ``operation`` in ``operations`` + + - If a ``thread`` is specified, execute in that corresponding thread. Otherwise, execute in the main thread.
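+
+A minimal, non-normative JavaScript sketch of the MATCH pseudocode above (the helper
+name ``matchesSpec`` is illustrative only, and it returns a boolean instead of asserting):
+
+::
+
+    function matchesSpec(expected, actual) {
+      // "42" and 42 are wildcards: only require that the actual value exists
+      if (expected === 42 || expected === '42') {
+        return actual !== null && actual !== undefined;
+      }
+      if (Array.isArray(expected)) {
+        return Array.isArray(actual) && expected.every((value, idx) => matchesSpec(value, actual[idx]));
+      }
+      if (expected !== null && typeof expected === 'object') {
+        return (
+          actual !== null &&
+          typeof actual === 'object' &&
+          Object.keys(expected).every(key => matchesSpec(expected[key], actual[key]))
+        );
+      }
+      return expected === actual;
+    }
+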
+ +- Wait for the main thread to finish executing all of its operations +- If ``error`` is present + + - Assert that an actual error ``actualError`` was thrown by the main thread + - Assert that ``actualError`` MATCHES ``error`` + +- Else: + + - Assert that no errors were thrown by the main thread + +- calculate ``actualEvents`` as every Connection Event emitted whose ``type`` is not in ``ignore`` +- if ``events`` is not empty, then for every ``idx``/``expectedEvent`` in ``events`` + + - Assert that ``actualEvents[idx]`` exists + - Assert that ``actualEvents[idx]`` MATCHES ``expectedEvent`` + + +It is important to note that the ``ignore`` list is used for calculating ``actualEvents``, but is NOT used for the ``waitForEvent`` command + +Prose Tests +=========== + +The following tests have not yet been automated, but MUST still be tested. + +#. All ConnectionPoolOptions MUST be specified at the MongoClient level +#. All ConnectionPoolOptions MUST be the same for all pools created by a MongoClient +#. A user MUST be able to specify all ConnectionPoolOptions via a URI string +#. A user MUST be able to subscribe to Connection Monitoring Events in a manner idiomatic to their language and driver diff --git a/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.json b/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.json new file mode 100644 index 00000000000..487a5979d08 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.json @@ -0,0 +1,42 @@ +{ + "version": 1, + "style": "unit", + "description": "must have an ID number associated with it", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolClosed", + "ConnectionReady" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.yml b/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.yml new file mode 100644 index 00000000000..16d7fc2d8fd --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.yml @@ -0,0 +1,21 @@ +version: 1 +style: unit +description: must have an ID number associated with it +operations: + - name: checkOut + - name: checkOut +events: + - type: ConnectionCheckOutStarted + - type: ConnectionCreated + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckOutStarted + - type: ConnectionCreated + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 +ignore: + - ConnectionPoolCreated + - ConnectionPoolClosed + - ConnectionReady diff --git a/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.json b/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.json new file mode 100644 index 00000000000..dda515c1a91 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.json @@ -0,0 +1,42 @@ +{ + "version": 1, + "style": "unit", + "description": "must have IDs assigned in order of creation", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut" + } + ], + "events": [ + { +
"type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated", + "connectionId": 1 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated", + "connectionId": 2 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolClosed", + "ConnectionReady" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml b/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml new file mode 100644 index 00000000000..c554fd2717e --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml @@ -0,0 +1,21 @@ +version: 1 +style: unit +description: must have IDs assigned in order of creation +operations: + - name: checkOut + - name: checkOut +events: + - type: ConnectionCheckOutStarted + - type: ConnectionCreated + connectionId: 1 + - type: ConnectionCheckedOut + connectionId: 1 + - type: ConnectionCheckOutStarted + - type: ConnectionCreated + connectionId: 2 + - type: ConnectionCheckedOut + connectionId: 2 +ignore: + - ConnectionPoolCreated + - ConnectionPoolClosed + - ConnectionReady diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json new file mode 100644 index 00000000000..3b6f1d24840 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json @@ -0,0 +1,43 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy checked in connection if pool has been closed", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "close" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "poolClosed" + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml new file mode 100644 index 00000000000..f1eaaae14db --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml @@ -0,0 +1,24 @@ +version: 1 +style: unit +description: must destroy checked in connection if pool has been closed +operations: + - name: checkOut + label: conn + - name: close + - name: checkIn + connection: conn +events: + - type: ConnectionCheckedOut + connectionId: 1 + - type: ConnectionPoolClosed + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + - type: ConnectionClosed + connectionId: 1 + reason: poolClosed +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json new file mode 100644 index 00000000000..7faa44d33cb --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json @@ -0,0 +1,43 @@ +{ + "version": 1, + "style": "unit", + "description": 
"must destroy checked in connection if it is stale", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "clear" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "stale" + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml new file mode 100644 index 00000000000..a1851101f0e --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml @@ -0,0 +1,24 @@ +version: 1 +style: unit +description: must destroy checked in connection if it is stale +operations: + - name: checkOut + label: conn + - name: clear + - name: checkIn + connection: conn +events: + - type: ConnectionCheckedOut + connectionId: 1 + - type: ConnectionPoolCleared + address: 42 + - type: ConnectionCheckedIn + connectionId: 1 + - type: ConnectionClosed + connectionId: 1 + reason: stale +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json new file mode 100644 index 00000000000..838194fe8eb --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json @@ -0,0 +1,38 @@ +{ + "version": 1, + "style": "unit", + "description": "must make valid checked in connection available", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml new file mode 100644 index 00000000000..44272ebf42d --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml @@ -0,0 +1,21 @@ +version: 1 +style: unit +description: must make valid checked in connection available +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: checkOut +events: + - type: ConnectionCheckedOut + connectionId: 1 + - type: ConnectionCheckedIn + connectionId: 1 + - type: ConnectionCheckedOut + connectionId: 1 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionCheckOutStarted \ No newline at end of file diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkin.json new file mode 100644 index 00000000000..5e93c207a9e --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkin.json @@ -0,0 +1,29 @@ +{ + "version": 1, + 
"style": "unit", + "description": "must have a method of allowing the driver to check in a connection", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionCheckOutStarted", + "ConnectionCheckedOut" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkin.yml new file mode 100644 index 00000000000..da78c34c8e6 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkin.yml @@ -0,0 +1,18 @@ +version: 1 +style: unit +description: must have a method of allowing the driver to check in a connection +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn +events: + - type: ConnectionCheckedIn + connectionId: 42 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionCheckOutStarted + - ConnectionCheckedOut diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.json new file mode 100644 index 00000000000..e6e108ce58e --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.json @@ -0,0 +1,24 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to check out a connection", + "operations": [ + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml new file mode 100644 index 00000000000..34e9ae493e3 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml @@ -0,0 +1,13 @@ +version: 1 +style: unit +description: must be able to check out a connection +operations: + - name: checkOut +events: + - type: ConnectionCheckOutStarted + - type: ConnectionCheckedOut + connectionId: 1 +ignore: + - ConnectionPoolCreated + - ConnectionCreated + - ConnectionReady \ No newline at end of file diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json new file mode 100644 index 00000000000..4b32ecb55d8 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json @@ -0,0 +1,50 @@ +{ + "version": 1, + "style": "unit", + "description": "must throw error if checkOut is called on a closed pool", + "operations": [ + { + "name": "checkOut", + "label": "conn1" + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "close" + }, + { + "name": "checkOut" + } + ], + "error": { + "type": "PoolClosedError", + "message": "Attempted to check out a connection from closed connection pool" + }, + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + }, + { + "type": 
"ConnectionPoolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml new file mode 100644 index 00000000000..3a8d85e8e20 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml @@ -0,0 +1,28 @@ +version: 1 +style: unit +description: must throw error if checkOut is called on a closed pool +operations: + - name: checkOut + label: conn1 + - name: checkIn + connection: conn1 + - name: close + - name: checkOut +error: + type: PoolClosedError + message: Attempted to check out a connection from closed connection pool +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckedIn + connectionId: 42 + - type: ConnectionPoolClosed + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json new file mode 100644 index 00000000000..f3ecdb9be90 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json @@ -0,0 +1,63 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to check out multiple connections at the same time", + "operations": [ + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForThread", + "target": "thread1" + }, + { + "name": "waitForThread", + "target": "thread2" + }, + { + "name": "waitForThread", + "target": "thread3" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml new file mode 100644 index 00000000000..1ac3236588c --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml @@ -0,0 +1,34 @@ +version: 1 +style: unit +description: must be able to check out multiple connections at the same time +operations: + - name: start + target: thread1 + - name: start + target: thread2 + - name: start + target: thread3 + - name: checkOut + thread: thread1 + - name: checkOut + thread: thread2 + - name: checkOut + thread: thread3 + - name: waitForThread + target: thread1 + - name: waitForThread + target: thread2 + - name: waitForThread + target: thread3 +events: + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionPoolCreated + - ConnectionCheckOutStarted diff --git 
a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json new file mode 100644 index 00000000000..77ce40deacf --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json @@ -0,0 +1,54 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy and must not check out an idle connection if found while iterating available connections", + "poolOptions": { + "maxIdleTimeMS": 10 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "wait", + "ms": 50 + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "idle" + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml new file mode 100644 index 00000000000..77f36b19583 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml @@ -0,0 +1,31 @@ +version: 1 +style: unit +description: must destroy and must not check out an idle connection if found while iterating available connections +poolOptions: + maxIdleTimeMS: 10 +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: wait + ms: 50 + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckedOut + connectionId: 1 + - type: ConnectionCheckedIn + connectionId: 1 + # In between these, wait so connection becomes idle + - type: ConnectionClosed + connectionId: 1 + reason: idle + - type: ConnectionCheckedOut + connectionId: 2 +ignore: + - ConnectionReady + - ConnectionCreated + - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json new file mode 100644 index 00000000000..e5ebedfbe52 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json @@ -0,0 +1,54 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy and must not check out a stale connection if found while iterating available connections", + "operations": [ + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "clear" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "stale" + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml 
b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml new file mode 100644 index 00000000000..a4389b81ef3 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml @@ -0,0 +1,29 @@ +version: 1 +style: unit +description: must destroy and must not check out a stale connection if found while iterating available connections +operations: + - name: checkOut + label: conn + - name: checkIn + connection: conn + - name: clear + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckedOut + connectionId: 1 + - type: ConnectionCheckedIn + connectionId: 1 + - type: ConnectionPoolCleared + address: 42 + - type: ConnectionClosed + connectionId: 1 + reason: stale + - type: ConnectionCheckedOut + connectionId: 2 +ignore: + - ConnectionReady + - ConnectionCreated + - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json b/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json new file mode 100644 index 00000000000..2bc50419b47 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json @@ -0,0 +1,46 @@ +{ + "version": 1, + "style": "unit", + "description": "When a pool is closed, it MUST first destroy all available connections in that pool", + "operations": [ + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkOut" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionCheckedIn", + "connectionId": 2 + }, + { + "type": "ConnectionClosed", + "connectionId": 2, + "reason": "poolClosed" + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted", + "ConnectionCheckedOut" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml b/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml new file mode 100644 index 00000000000..ddfd1fad1bb --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml @@ -0,0 +1,26 @@ +version: 1 +style: unit +description: When a pool is closed, it MUST first destroy all available connections in that pool +operations: + - name: checkOut + - name: checkOut + label: conn + - name: checkOut + - name: checkIn + connection: conn + - name: close +events: + - type: ConnectionCheckedIn + connectionId: 2 + - type: ConnectionClosed + connectionId: 2 + reason: poolClosed + - type: ConnectionPoolClosed + address: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionPoolCreated + - ConnectionCheckOutStarted + - ConnectionCheckedOut + diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-close.json b/test/core/spec/connection-monitoring-and-pooling/pool-close.json new file mode 100644 index 00000000000..fe083d73e63 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-close.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to manually close a pool", + "operations": [ + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-close.yml 
b/test/core/spec/connection-monitoring-and-pooling/pool-close.yml new file mode 100644 index 00000000000..2562224b43c --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-close.yml @@ -0,0 +1,11 @@ +version: 1 +style: unit +description: must be able to manually close a pool +operations: + - name: close +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionPoolClosed + address: 42 diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.json b/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.json new file mode 100644 index 00000000000..2ba7bdf62bf --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.json @@ -0,0 +1,114 @@ +{ + "version": 1, + "style": "unit", + "description": "must never exceed maxPoolSize total connections", + "poolOptions": { + "maxPoolSize": 3 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn1" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn2" + }, + { + "name": "checkIn", + "connection": "conn2" + }, + { + "name": "checkOut" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 5 + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "waitForThread", + "target": "thread1" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + } + ], + "ignore": [ + "ConnectionReady" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.yml b/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.yml new file mode 100644 index 00000000000..534917bc755 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.yml @@ -0,0 +1,56 @@ +version: 1 +style: unit +description: must never exceed maxPoolSize total connections +poolOptions: + maxPoolSize: 3 +operations: + - name: checkOut + label: conn1 + - name: checkOut + - name: checkOut + label: conn2 + - name: checkIn + connection: conn2 + - name: checkOut + - name: start + target: thread1 + - name: checkOut + thread: thread1 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 5 + - name: checkIn + connection: conn1 + - name: waitForThread + target: thread1 +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCheckOutStarted + - type: ConnectionCreated + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckOutStarted + - type: ConnectionCreated + 
connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckOutStarted + - type: ConnectionCreated + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckedIn + connectionId: 42 + - type: ConnectionCheckOutStarted + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckOutStarted + - type: ConnectionCheckedIn + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 +ignore: + - ConnectionReady diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.json b/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.json new file mode 100644 index 00000000000..470988043f3 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.json @@ -0,0 +1,46 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to start a pool with minPoolSize connections", + "poolOptions": { + "minPoolSize": 3 + }, + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 3 + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionClosed", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.yml b/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.yml new file mode 100644 index 00000000000..848de835ddf --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.yml @@ -0,0 +1,27 @@ +version: 1 +style: unit +description: must be able to start a pool with minPoolSize connections +poolOptions: + minPoolSize: 3 +operations: + - name: waitForEvent + event: ConnectionCreated + count: 3 + - name: checkOut +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + - type: ConnectionCreated + connectionId: 42 + - type: ConnectionCreated + connectionId: 42 + - type: ConnectionCreated + connectionId: 42 + # Ensures that by the time pool is closed, there are at least 3 connections + - type: ConnectionCheckedOut + connectionId: 42 +ignore: + - ConnectionReady + - ConnectionClosed + - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.json b/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.json new file mode 100644 index 00000000000..ab689448f4e --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.json @@ -0,0 +1,31 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to start a pool with various options set", + "poolOptions": { + "maxPoolSize": 50, + "minPoolSize": 5, + "maxIdleTimeMS": 100 + }, + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionPoolCreated", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": { + "maxPoolSize": 50, + "minPoolSize": 5, + "maxIdleTimeMS": 100 + } + } + ], + "ignore": [ + "ConnectionCreated" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.yml b/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.yml new file 
mode 100644 index 00000000000..2915eb60017 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.yml @@ -0,0 +1,20 @@ +version: 1 +style: unit +description: must be able to start a pool with various options set +poolOptions: + maxPoolSize: 50 + minPoolSize: 5 + maxIdleTimeMS: 100 +operations: + - name: waitForEvent + event: ConnectionPoolCreated + count: 1 +events: + - type: ConnectionPoolCreated + address: 42 + options: + maxPoolSize: 50 + minPoolSize: 5 + maxIdleTimeMS: 100 +ignore: + - ConnectionCreated diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create.json b/test/core/spec/connection-monitoring-and-pooling/pool-create.json new file mode 100644 index 00000000000..8c1f85537f9 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-create.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to create a pool", + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionPoolCreated", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + } + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create.yml b/test/core/spec/connection-monitoring-and-pooling/pool-create.yml new file mode 100644 index 00000000000..f4989e8d4b3 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/pool-create.yml @@ -0,0 +1,12 @@ +version: 1 +style: unit +description: must be able to create a pool +operations: + - name: waitForEvent + event: ConnectionPoolCreated + count: 1 +events: + - type: ConnectionPoolCreated + address: 42 + options: 42 + diff --git a/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.json b/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.json new file mode 100644 index 00000000000..36c8a6dc1ba --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.json @@ -0,0 +1,162 @@ +{ + "version": 1, + "style": "unit", + "description": "must issue Connections to threads in the order that the threads entered the queue", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 1000 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1", + "label": "conn1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 2 + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2", + "label": "conn2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 3 + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3", + "label": "conn3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "start", + "target": "thread4" + }, + { + "name": "checkOut", + "thread": "thread4", + "label": "conn4" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 5 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForThread", + "target": "thread1" + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "waitForThread", + "target": "thread2" + }, + { + "name": "checkIn", + "connection": "conn2" + }, + { + "name": "waitForThread", + "target": "thread3" + }, + { + "name": "checkIn", + "connection": "conn3" + }, + { + "name": "waitForThread", + "target": 
"thread4" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionPoolCreated" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml b/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml new file mode 100644 index 00000000000..564c010bab9 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml @@ -0,0 +1,94 @@ +version: 1 +style: unit +description: must issue Connections to threads in the order that the threads entered the queue +poolOptions: + maxPoolSize: 1 + waitQueueTimeoutMS: 1000 +operations: + # Check out sole connection in pool + - name: checkOut + label: conn0 + # Create 4 threads, have them all queue up for connections + # Note: this might become non-deterministic depending on how you + # implement your test runner. The goal is for each thread to + # have started and begun checkOut before the next thread starts. + # The sleep operations should make this more consistent. 
+ - name: start + target: thread1 + - name: checkOut + thread: thread1 + label: conn1 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 2 + - name: start + target: thread2 + - name: checkOut + thread: thread2 + label: conn2 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 3 + - name: start + target: thread3 + - name: checkOut + thread: thread3 + label: conn3 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 4 + - name: start + target: thread4 + - name: checkOut + thread: thread4 + label: conn4 + - name: waitForEvent + event: ConnectionCheckOutStarted + count: 5 + # From main thread, keep checking in connection and then wait for appropriate thread + # Test will timeout if threads are not enqueued in proper order + - name: checkIn + connection: conn0 + - name: waitForThread + target: thread1 + - name: checkIn + connection: conn1 + - name: waitForThread + target: thread2 + - name: checkIn + connection: conn2 + - name: waitForThread + target: thread3 + - name: checkIn + connection: conn3 + - name: waitForThread + target: thread4 +events: + - type: ConnectionCheckOutStarted + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckOutStarted + - type: ConnectionCheckOutStarted + - type: ConnectionCheckOutStarted + - type: ConnectionCheckOutStarted + - type: ConnectionCheckedIn + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckedIn + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckedIn + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckedIn + connectionId: 42 + - type: ConnectionCheckedOut + connectionId: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionPoolCreated diff --git a/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.json b/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.json new file mode 100644 index 00000000000..90ec2f62d95 --- /dev/null +++ b/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.json @@ -0,0 +1,66 @@ +{ + "version": 1, + "style": "unit", + "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 20 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForThread", + "target": "thread1" + } + ], + "error": { + "type": "WaitQueueTimeoutError", + "message": "Timed out while checking out a connection from connection pool" + }, + "events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42 + }, + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "timeout" + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionPoolCreated" + ] +} diff --git a/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml b/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml new file mode 100644 index 00000000000..49c18df4cd7 --- /dev/null +++ 
b/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml @@ -0,0 +1,41 @@ +version: 1 +style: unit +description: must aggressively timeout threads enqueued longer than waitQueueTimeoutMS +poolOptions: + maxPoolSize: 1 + waitQueueTimeoutMS: 20 +operations: + # Check out only possible connection + - name: checkOut + label: conn0 + # Start a thread, have it enter the wait queue + - name: start + target: thread1 + - name: checkOut + thread: thread1 + # Wait for other thread to time out, then check in connection + - name: waitForEvent + event: ConnectionCheckOutFailed + count: 1 + - name: checkIn + connection: conn0 + # Rejoin thread1, should experience error + - name: waitForThread + target: thread1 +error: + type: WaitQueueTimeoutError + message: Timed out while checking out a connection from connection pool +events: + - type: ConnectionCheckOutStarted + - type: ConnectionCheckedOut + connectionId: 42 + - type: ConnectionCheckOutStarted + - type: ConnectionCheckOutFailed + reason: timeout + - type: ConnectionCheckedIn + connectionId: 42 +ignore: + - ConnectionCreated + - ConnectionReady + - ConnectionClosed + - ConnectionPoolCreated diff --git a/test/core/unit/pool_spec_tests.js b/test/core/unit/pool_spec_tests.js new file mode 100644 index 00000000000..8ffd7c58884 --- /dev/null +++ b/test/core/unit/pool_spec_tests.js @@ -0,0 +1,324 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const expect = require('chai').expect; + +require('chai').use(require('../../match_spec').default); +const Pool = require('../../../lib/pool').Pool; +const EventEmitter = require('events').EventEmitter; + +class Connection { + constructor(options) { + options = options || {}; + this.generation = options.generation; + this.id = options.id; + this.maxIdleTimeMS = options.maxIdleTimeMS; + this.poolId = options.poolId; + this.address = options.address; + this.readyToUse = false; + this.lastMadeAvailable = undefined; + this.callbacks = []; + } + + get metadata() { + return { + id: this.id, + generation: this.generation, + poolId: this.poolId, + address: this.adress + }; + } + + timeIdle() { + return this.readyToUse ? 
Date.now() - this.lastMadeAvailable : 0; + } + + write(callback) { + setTimeout(() => callback()); + } + + makeReadyToUse() { + this.readyToUse = true; + this.lastMadeAvailable = Date.now(); + } + + makeInUse() { + this.readyToUse = false; + this.lastMadeAvailable = undefined; + } + + waitUntilConnect(callback) { + if (this.readyToUse) { + return callback(null, this); + } + + this.callbacks.push(callback); + } + + connect(callback) { + this.callbacks.push(callback); + setTimeout(() => { + this.makeReadyToUse(); + this.callbacks.forEach(c => c(null, this)); + this.callbacks = []; + }); + } + + destroy() {} +} + +const events = require('../../../lib/pool/events'); + +const ALL_EVENTS = Object.keys(events) + .map(key => events[key]) + .filter(Ctor => Ctor.eventType) + .map(Ctor => Ctor.eventType); + +function promisify(fn) { + return function() { + const args = Array.from(arguments); + return new Promise((resolve, reject) => { + const cb = (err, value) => { + if (err) { + return reject(err); + } + return resolve(value); + }; + fn.apply(this, args.concat([cb])); + }); + }; +} + +const PROMISIFIED_POOL_FUNCTIONS = { + checkOut: promisify(Pool.prototype.checkOut), + checkIn: promisify(Pool.prototype.checkIn), + clear: promisify(Pool.prototype.clear), + close: promisify(Pool.prototype.close) +}; + +function destroyPool(pool) { + return new Promise(r => pool.destroy(r)).then(() => { + ALL_EVENTS.forEach(ev => pool.removeAllListeners(ev)); + }); +} + +describe('Pool Spec Tests', function() { + const threads = new Map(); + const connections = new Map(); + const poolEvents = []; + const poolEventsEventEmitter = new EventEmitter(); + let pool = undefined; + + afterEach(() => { + const p = pool ? destroyPool(pool) : Promise.resolve(); + return p.then(() => { + pool = undefined; + threads.clear(); + connections.clear(); + poolEvents.length = 0; + poolEventsEventEmitter.removeAllListeners(); + }); + }); + + function createPool(options) { + const address = 'localhost:27017'; + options = Object.assign({}, options, { Connection, address }); + + pool = new Pool(options); + ALL_EVENTS.forEach(ev => { + pool.on(ev, x => { + poolEvents.push(x); + poolEventsEventEmitter.emit('poolEvent'); + }); + }); + } + + function getThread(name) { + let thread = threads.get(name); + if (!thread) { + thread = new Thread(); + threads.set(name, thread); + } + + return thread; + } + + const OPERATION_FUNCTIONS = { + checkOut: function(op) { + return PROMISIFIED_POOL_FUNCTIONS.checkOut.call(pool).then(connection => { + if (op.label != null) { + connections.set(op.label, connection); + } + }); + }, + checkIn: function(op) { + const connection = connections.get(op.connection); + const force = op.force; + + if (!connection) { + throw new Error(`Attempted to release non-existient connection ${op.connection}`); + } + + return PROMISIFIED_POOL_FUNCTIONS.checkIn.call(pool, connection, force); + }, + clear: function() { + return PROMISIFIED_POOL_FUNCTIONS.clear.call(pool); + }, + close: function() { + return PROMISIFIED_POOL_FUNCTIONS.close.call(pool); + }, + wait: function(options) { + const ms = options.ms; + return new Promise(r => setTimeout(r, ms)); + }, + start: function(options) { + const target = options.target; + const thread = getThread(target); + thread.start(); + }, + waitForThread: function(options) { + const name = options.name; + const target = options.target; + const suppressError = options.suppressError; + + const threadObj = threads.get(target); + + if (!threadObj) { + throw new Error(`Attempted to run op ${name} on 
non-existent thread ${target}`); + } + + return threadObj.finish().catch(e => { + if (!suppressError) { + throw e; + } + }); + }, + waitForEvent: function(options) { + const event = options.event; + const count = options.count; + return new Promise(resolve => { + function run() { + if (poolEvents.filter(ev => ev.type === event).length >= count) { + return resolve(); + } + + poolEventsEventEmitter.once('poolEvent', run); + } + run(); + }); + } + }; + + class Thread { + constructor() { + this._killed = false; + this._error = undefined; + this._promise = new Promise(resolve => { + this.start = () => setTimeout(resolve); + }); + } + + run(op) { + if (this._killed || this._error) { + return; + } + this._promise = this._promise + .then(() => this._runOperation(op)) + .catch(e => (this._error = e)); + } + + _runOperation(op) { + const operationFn = OPERATION_FUNCTIONS[op.name]; + if (!operationFn) { + throw new Error(`Invalid command ${op.name}`); + } + + return Promise.resolve() + .then(() => operationFn(op, this)) + .then(() => new Promise(r => setTimeout(r))); + } + + finish() { + this._killed = true; + return this._promise.then(() => { + if (this._error) { + throw this._error; + } + }); + } + } + + const specPath = path.join(__dirname, '../spec', 'connection-monitoring-and-pooling'); + const testFiles = fs + .readdirSync(specPath) + .filter(x => x.indexOf('.json') !== -1) + .map(x => [x, fs.readFileSync(path.join(specPath, x), 'utf8')]) + .map(x => [path.basename(x[0], '.json'), JSON.parse(x[1])]) + .filter(testFile => testFile[1].style === 'unit') + .filter(testFile => testFile[1].version === 1); + + testFiles.forEach(testFile => { + const singleTest = testFile[1]; + const itFn = singleTest.only ? it.only : it; + + itFn(singleTest.description, function() { + const operations = singleTest.operations; + const expectedEvents = singleTest.events || []; + const ignoreEvents = singleTest.ignore || []; + const expectedError = singleTest.error; + const poolOptions = singleTest.poolOptions || {}; + + let actualError; + + const MAIN_THREAD_KEY = Symbol('Main Thread'); + const mainThread = new Thread(); + threads.set(MAIN_THREAD_KEY, mainThread); + mainThread.start(); + + createPool(poolOptions); + + let basePromise = Promise.resolve(); + + for (let idx in operations) { + const op = operations[idx]; + + const threadKey = op.thread || MAIN_THREAD_KEY; + const thread = getThread(threadKey); + + basePromise = basePromise.then(() => { + if (!thread) { + throw new Error(`Invalid thread ${threadKey}`); + } + + return Promise.resolve() + .then(() => thread.run(op)) + .then(() => new Promise(r => setTimeout(r))); + }); + } + + return basePromise + .then(() => mainThread.finish()) + .catch(e => (actualError = e)) + .then(() => { + const actualEvents = poolEvents.filter(ev => ignoreEvents.indexOf(ev.type) < 0); + + if (expectedError) { + if (!actualError) { + expect(actualError).to.matchSpec(expectedError); + } else { + const ae = Object.assign({}, actualError, { message: actualError.message }); + expect(ae).to.matchSpec(expectedError); + } + } else if (actualError) { + throw actualError; + } + + expectedEvents.forEach((expected, index) => { + const actual = actualEvents[index]; + expect(actual).to.matchSpec(expected); + }); + }); + }); + }); +}); diff --git a/test/match_spec.js b/test/match_spec.js new file mode 100644 index 00000000000..68d46fc98b9 --- /dev/null +++ b/test/match_spec.js @@ -0,0 +1,76 @@ +'use strict'; + +const SYMBOL_ANY = Symbol('[[any]]'); + +function transformSpecCompare(obj) { + if (obj 
=== 42 || obj === '42') { + return SYMBOL_ANY; + } + + if (typeof obj !== 'object' || obj === null) { + return obj; + } + + if (obj instanceof Date) { + return obj; + } + + if (Array.isArray(obj)) { + return obj.map(transformSpecCompare); + } + + return Object.keys(obj).reduce((memo, key) => { + memo[key] = transformSpecCompare(obj[key]); + return memo; + }, {}); +} + +function matchSpecCompare(expected, actual) { + const typeOfExpected = typeof expected; + + if (expected === 42 || expected === '42') { + return actual != null; + } + + if (typeOfExpected !== typeof actual) { + return false; + } + + if (typeOfExpected !== 'object' || expected == null) { + return expected === actual; + } + + if (Array.isArray(expected)) { + if (!Array.isArray(actual)) { + return false; + } + + return expected.every((val, idx) => matchSpecCompare(val, actual[idx])); + } else if (expected instanceof Date) { + return actual instanceof Date ? expected.getTime() === actual.getTime() : false; + } + + return Object.keys(expected).every(key => matchSpecCompare(expected[key], actual[key])); +} + +function matchSpec(chai, utils) { + chai.Assertion.addMethod('matchSpec', function(expected) { + const actual = utils.flag(this, 'object'); + + chai.Assertion.prototype.assert.call( + this, + matchSpecCompare(expected, actual), + 'expected #{act} to match spec #{exp}', + 'expected #{act} to not match spec #{exp}', + transformSpecCompare(expected), + actual, + chai.config.showDiff + ); + }); + + chai.assert.matchSpec = function(val, exp, msg) { + new chai.Assertion(val, msg).to.matchSpec(exp); + }; +} + +module.exports.default = matchSpec; From 20017f38eac49260658aa3012c0e5392c494a34d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 3 Nov 2019 12:07:07 -0500 Subject: [PATCH 004/130] chore: rename `lib/pool` => `lib/core/cmap` --- lib/core/cmap/connection.js | 66 ++++++++++++++++++- lib/{pool => core/cmap}/connection_manager.js | 0 lib/{pool => core/cmap}/counter.js | 0 lib/{pool => core/cmap}/errors.js | 0 lib/{pool => core/cmap}/events.js | 0 lib/{pool => core/cmap}/index.js | 0 lib/{pool => core/cmap}/pool.js | 2 +- lib/{pool => core/cmap}/wait_queue.js | 0 lib/pool/connection.js | 63 ------------------ 9 files changed, 66 insertions(+), 65 deletions(-) rename lib/{pool => core/cmap}/connection_manager.js (100%) rename lib/{pool => core/cmap}/counter.js (100%) rename lib/{pool => core/cmap}/errors.js (100%) rename lib/{pool => core/cmap}/events.js (100%) rename lib/{pool => core/cmap}/index.js (100%) rename lib/{pool => core/cmap}/pool.js (99%) rename lib/{pool => core/cmap}/wait_queue.js (100%) delete mode 100644 lib/pool/connection.js diff --git a/lib/core/cmap/connection.js b/lib/core/cmap/connection.js index 469d91d98db..c07dbfd4882 100644 --- a/lib/core/cmap/connection.js +++ b/lib/core/cmap/connection.js @@ -217,4 +217,68 @@ function write(command, options, callback) { this[kMessageStream].writeCommand(command, operationDescription); } -module.exports = Connection; +class CMAPConnection { + constructor(options) { + options = options || {}; + this.generation = options.generation; + this.id = options.id; + this.maxIdleTimeMS = options.maxIdleTimeMS; + this.poolId = options.poolId; + this.address = options.address; + this.readyToUse = false; + this.lastMadeAvailable = undefined; + this.callbacks = []; + } + + get metadata() { + return { + id: this.id, + generation: this.generation, + poolId: this.poolId, + address: this.adress + }; + } + + timeIdle() { + return this.readyToUse ? 
Date.now() - this.lastMadeAvailable : 0; + } + + write(callback) { + setTimeout(() => callback()); + } + + makeReadyToUse() { + this.readyToUse = true; + this.lastMadeAvailable = Date.now(); + } + + makeInUse() { + this.readyToUse = false; + this.lastMadeAvailable = undefined; + } + + waitUntilConnect(callback) { + if (this.readyToUse) { + return callback(null, this); + } + + this.callbacks.push(callback); + } + + connect(callback) { + this.callbacks.push(callback); + setTimeout(() => { + this.makeReadyToUse(); + this.callbacks.forEach(c => c(null, this)); + this.callbacks = []; + }); + } + + destroy() {} +} + + +module.exports = { + Connection, + CMAPConnection +}; diff --git a/lib/pool/connection_manager.js b/lib/core/cmap/connection_manager.js similarity index 100% rename from lib/pool/connection_manager.js rename to lib/core/cmap/connection_manager.js diff --git a/lib/pool/counter.js b/lib/core/cmap/counter.js similarity index 100% rename from lib/pool/counter.js rename to lib/core/cmap/counter.js diff --git a/lib/pool/errors.js b/lib/core/cmap/errors.js similarity index 100% rename from lib/pool/errors.js rename to lib/core/cmap/errors.js diff --git a/lib/pool/events.js b/lib/core/cmap/events.js similarity index 100% rename from lib/pool/events.js rename to lib/core/cmap/events.js diff --git a/lib/pool/index.js b/lib/core/cmap/index.js similarity index 100% rename from lib/pool/index.js rename to lib/core/cmap/index.js diff --git a/lib/pool/pool.js b/lib/core/cmap/pool.js similarity index 99% rename from lib/pool/pool.js rename to lib/core/cmap/pool.js index 7acb75f7d67..b0b58da46c8 100644 --- a/lib/pool/pool.js +++ b/lib/core/cmap/pool.js @@ -2,7 +2,7 @@ const EventEmitter = require('events').EventEmitter; const makeCounter = require('./counter').makeCounter; -const Connection = require('./connection').Connection; +const Connection = require('./connection').CMAPConnection; const WaitQueue = require('./wait_queue').WaitQueue; const ConnectionManager = require('./connection_manager').ConnectionManager; diff --git a/lib/pool/wait_queue.js b/lib/core/cmap/wait_queue.js similarity index 100% rename from lib/pool/wait_queue.js rename to lib/core/cmap/wait_queue.js diff --git a/lib/pool/connection.js b/lib/pool/connection.js deleted file mode 100644 index f4b459b0605..00000000000 --- a/lib/pool/connection.js +++ /dev/null @@ -1,63 +0,0 @@ -'use strict'; - -class Connection { - constructor(options) { - options = options || {}; - this.generation = options.generation; - this.id = options.id; - this.maxIdleTimeMS = options.maxIdleTimeMS; - this.poolId = options.poolId; - this.address = options.address; - this.readyToUse = false; - this.lastMadeAvailable = undefined; - this.callbacks = []; - } - - get metadata() { - return { - id: this.id, - generation: this.generation, - poolId: this.poolId, - address: this.adress - }; - } - - timeIdle() { - return this.readyToUse ? 
Date.now() - this.lastMadeAvailable : 0; - } - - write(callback) { - setTimeout(() => callback()); - } - - makeReadyToUse() { - this.readyToUse = true; - this.lastMadeAvailable = Date.now(); - } - - makeInUse() { - this.readyToUse = false; - this.lastMadeAvailable = undefined; - } - - waitUntilConnect(callback) { - if (this.readyToUse) { - return callback(null, this); - } - - this.callbacks.push(callback); - } - - connect(callback) { - this.callbacks.push(callback); - setTimeout(() => { - this.makeReadyToUse(); - this.callbacks.forEach(c => c(null, this)); - this.callbacks = []; - }); - } - - destroy() {} -} - -module.exports.Connection = Connection; From f2ce6cebc6a25a2e1c4b966009067c0ad8843613 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 23 Nov 2019 17:34:07 -0500 Subject: [PATCH 005/130] chore: move tests to `test/unit/cmap` --- lib/core/cmap/index.js | 4 +- lib/core/cmap/pool.js | 6 +- .../README.rst | 156 ----------------- .../connection-must-have-id.json | 42 ----- .../connection-must-have-id.yml | 21 --- .../connection-must-order-ids.json | 42 ----- .../connection-must-order-ids.yml | 21 --- .../pool-checkin-destroy-closed.json | 43 ----- .../pool-checkin-destroy-closed.yml | 24 --- .../pool-checkin-destroy-stale.json | 43 ----- .../pool-checkin-destroy-stale.yml | 24 --- .../pool-checkin-make-available.json | 38 ---- .../pool-checkin-make-available.yml | 21 --- .../pool-checkin.json | 29 ---- .../pool-checkin.yml | 18 -- .../pool-checkout-connection.json | 24 --- .../pool-checkout-connection.yml | 13 -- .../pool-checkout-error-closed.json | 50 ------ .../pool-checkout-error-closed.yml | 28 --- .../pool-checkout-multiple.json | 63 ------- .../pool-checkout-multiple.yml | 34 ---- .../pool-checkout-no-idle.json | 54 ------ .../pool-checkout-no-idle.yml | 31 ---- .../pool-checkout-no-stale.json | 54 ------ .../pool-checkout-no-stale.yml | 29 ---- .../pool-close-destroy-conns.json | 46 ----- .../pool-close-destroy-conns.yml | 26 --- .../pool-close.json | 21 --- .../pool-close.yml | 11 -- .../pool-create-max-size.json | 114 ------------ .../pool-create-max-size.yml | 56 ------ .../pool-create-min-size.json | 46 ----- .../pool-create-min-size.yml | 27 --- .../pool-create-with-options.json | 31 ---- .../pool-create-with-options.yml | 20 --- .../pool-create.json | 19 -- .../pool-create.yml | 12 -- .../wait-queue-fairness.json | 162 ------------------ .../wait-queue-fairness.yml | 94 ---------- .../wait-queue-timeout.json | 66 ------- .../wait-queue-timeout.yml | 41 ----- test/functional/cmap/connection.test.js | 2 +- .../README.rst | 2 - .../connection-must-have-id.json | 18 +- .../connection-must-have-id.yml | 6 - .../connection-must-order-ids.json | 18 +- .../connection-must-order-ids.yml | 6 - .../pool-checkin-destroy-closed.json | 9 +- .../pool-checkin-destroy-closed.yml | 3 - .../pool-checkin-destroy-stale.json | 9 +- .../pool-checkin-destroy-stale.yml | 3 - .../pool-checkin-make-available.json | 9 +- .../pool-checkin-make-available.yml | 3 - .../pool-checkin.json | 3 +- .../pool-checkin.yml | 1 - .../pool-checkout-connection.json | 6 +- .../pool-checkout-connection.yml | 2 - .../pool-checkout-error-closed.json | 18 +- .../pool-checkout-error-closed.yml | 10 +- .../pool-checkout-multiple.json | 9 +- .../pool-checkout-multiple.yml | 3 - .../pool-checkout-no-idle.json | 12 +- .../pool-checkout-no-idle.yml | 4 - .../pool-checkout-no-stale.json | 12 +- .../pool-checkout-no-stale.yml | 4 - .../pool-close-destroy-conns.json | 6 +- .../pool-close-destroy-conns.yml | 2 - 
.../pool-create-max-size.json | 45 ++--- .../pool-create-max-size.yml | 15 -- .../pool-create-min-size.json | 12 +- .../pool-create-min-size.yml | 4 - .../pool-create-with-options.json | 3 +- .../pool-create-with-options.yml | 1 - .../wait-queue-fairness.json | 60 ++----- .../wait-queue-fairness.yml | 32 +--- .../wait-queue-timeout.json | 15 +- .../wait-queue-timeout.yml | 5 - .../cmap/spec_tests.js} | 18 +- 78 files changed, 97 insertions(+), 1997 deletions(-) delete mode 100644 test/core/spec/connection-monitoring-and-pooling/README.rst delete mode 100644 test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkin.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-close.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-close.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.json delete mode 100644 
test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/pool-create.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml delete mode 100644 test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.json delete mode 100644 test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml rename test/{core/unit/pool_spec_tests.js => unit/cmap/spec_tests.js} (94%) diff --git a/lib/core/cmap/index.js b/lib/core/cmap/index.js index 0b8167cc739..0662f610e2d 100644 --- a/lib/core/cmap/index.js +++ b/lib/core/cmap/index.js @@ -1,5 +1,5 @@ 'use strict'; -const Pool = require('./pool').Pool; +const ConnectionPool = require('./pool').ConnectionPool; -module.exports = { Pool }; +module.exports = { ConnectionPool }; diff --git a/lib/core/cmap/pool.js b/lib/core/cmap/pool.js index b0b58da46c8..3eccd481cf7 100644 --- a/lib/core/cmap/pool.js +++ b/lib/core/cmap/pool.js @@ -42,7 +42,7 @@ function getSpecOptions(options) { return Object.freeze(newOptions); } -class Pool extends EventEmitter { +class ConnectionPool extends EventEmitter { constructor(options) { super(); options = options || {}; @@ -286,4 +286,6 @@ class Pool extends EventEmitter { } } -exports.Pool = Pool; +module.exports = { + ConnectionPool +}; diff --git a/test/core/spec/connection-monitoring-and-pooling/README.rst b/test/core/spec/connection-monitoring-and-pooling/README.rst deleted file mode 100644 index b1605c14f0e..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/README.rst +++ /dev/null @@ -1,156 +0,0 @@ -.. role:: javascript(code) - :language: javascript - -======================================== -Connection Monitoring and Pooling (CMAP) -======================================== - -.. contents:: - --------- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests that -drivers can use to prove their conformance to the Connection Monitoring and Pooling (CMAP) Spec. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Common Test Format -================== - -Each YAML file has the following keys: - -- ``version``: A version number indicating the expected format of the spec tests (current version = 1) -- ``style``: A string indicating what style of tests this file contains. Currently ``unit`` is the only valid value -- ``description``: A text description of what the test is meant to assert - -Unit Test Format: -================= - -All Unit Tests have some of the following fields: - -- ``poolOptions``: if present, connection pool options to use when creating a pool -- ``operations``: A list of operations to perform. All operations support the following fields: - - - ``name``: A string describing which operation to issue. - - ``thread``: The name of the thread in which to run this operation. If not specified, runs in the default thread - -- ``error``: Indicates that the main thread is expected to error during this test. 
An error may include of the following fields: - - - ``type``: the type of error emitted - - ``message``: the message associated with that error - - ``address``: Address of pool emitting error - -- ``events``: An array of all connection monitoring events expected to occur while running ``operations``. An event may contain any of the following fields - - - ``type``: The type of event emitted - - ``address``: The address of the pool emitting the event - - ``connectionId``: The id of a connection associated with the event - - ``options``: Options used to create the pool - - ``reason``: A reason giving mroe information on why the event was emitted - -- ``ignore``: An array of event names to ignore - -Valid Unit Test Operations are the following: - -- ``start(target)``: Starts a new thread named ``target`` - - - ``target``: The name of the new thread to start - -- ``wait(ms)``: Sleep the current thread for ``ms`` milliseconds - - - ``ms``: The number of milliseconds to sleep the current thread for - -- ``waitForThread(target)``: wait for thread ``target`` to finish executing. Propagate any errors to the main thread. - - - ``target``: The name of the thread to wait for. - -- ``waitForEvent(event, count)``: block the current thread until ``event`` has occurred ``count`` times - - - ``event``: The name of the event - - ``count``: The number of times the event must occur (counting from the start of the test) - -- ``label = pool.checkOut()``: call ``checkOut`` on pool, returning the checked out connection - - - ``label``: If specified, associate this label with the returned connection, so that it may be referenced in later operations - -- ``pool.checkIn(connection)``: call ``checkIn`` on pool - - - ``connection``: A string label identifying which connection to check in. Should be a label that was previously set with ``checkOut`` - -- ``pool.clear()``: call ``clear`` on Pool -- ``pool.close()``: call ``close`` on Pool - -Spec Test Match Function -======================== - -The definition of MATCH or MATCHES in the Spec Test Runner is as follows: - -- MATCH takes two values, ``expected`` and ``actual`` -- Notation is "Assert [actual] MATCHES [expected] -- Assertion passes if ``expected`` is a subset of ``actual``, with the values ``42`` and ``"42"`` acting as placeholders for "any value" - -Pseudocode implementation of ``actual`` MATCHES ``expected``: - -:: - - If expected is "42" or 42: - Assert that actual exists (is not null or undefined) - Else: - Assert that actual is of the same JSON type as expected - If expected is a JSON array: - For every idx/value in expected: - Assert that actual[idx] MATCHES value - Else if expected is a JSON object: - For every key/value in expected - Assert that actual[key] MATCHES value - Else: - Assert that expected equals actual - -Unit Test Runner: -================= - -For the unit tests, the behavior of a Connection is irrelevant beyond the need to asserting ``connection.id``. Drivers MAY use a mock connection class for testing the pool behavior in unit tests - -For each YAML file with ``style: unit``: - -- Create a Pool ``pool``, subscribe and capture any Connection Monitoring events emitted in order. - - - If ``poolOptions`` is specified, use those options to initialize both pools - - The returned pool must have an ``address`` set as a string value. - -- Execute each ``operation`` in ``operations`` - - - If a ``thread`` is specified, execute in that corresponding thread. Otherwise, execute in the main thread. 
- -- Wait for the main thread to finish executing all of its operations -- If ``error`` is presented - - - Assert that an actual error ``actualError`` was thrown by the main thread - - Assert that ``actualError`` MATCHES ``error`` - -- Else: - - - Assert that no errors were thrown by the main thread - -- calculate ``actualEvents`` as every Connection Event emitted whose ``type`` is not in ``ignore`` -- if ``events`` is not empty, then for every ``idx``/``expectedEvent`` in ``events`` - - - Assert that ``actualEvents[idx]`` exists - - Assert that ``actualEvents[idx]`` MATCHES ``expectedEvent`` - - -It is important to note that the ``ignore`` list is used for calculating ``actualEvents``, but is NOT used for the ``waitForEvent`` command - -Prose Tests -=========== - -The following tests have not yet been automated, but MUST still be tested - -#. All ConnectionPoolOptions MUST be specified at the MongoClient level -#. All ConnectionPoolOptions MUST be the same for all pools created by a MongoClient -#. A user MUST be able to specify all ConnectionPoolOptions via a URI string -#. A user MUST be able to subscribe to Connection Monitoring Events in a manner idiomatic to their language and driver diff --git a/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.json b/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.json deleted file mode 100644 index 487a5979d08..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must have an ID number associated with it", - "operations": [ - { - "name": "checkOut" - }, - { - "name": "checkOut" - } - ], - "events": [ - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCreated", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCreated", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - } - ], - "ignore": [ - "ConnectionPoolCreated", - "ConnectionPoolClosed", - "ConnectionReady" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.yml b/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.yml deleted file mode 100644 index 16d7fc2d8fd..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/connection-must-have-id.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: 1 -style: unit -description: must have an ID number associated with it -operations: - - name: checkOut - - name: checkOut -events: - - type: ConnectionCheckOutStarted - - type: ConnectionCreated - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckOutStarted - - type: ConnectionCreated - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 -ignore: - - ConnectionPoolCreated - - ConnectionPoolClosed - - ConnectionReady diff --git a/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.json b/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.json deleted file mode 100644 index dda515c1a91..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must have IDs assigned in order of creation", - "operations": [ - { - "name": "checkOut" - }, - { - "name": "checkOut" - } - ], - 
"events": [ - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCreated", - "connectionId": 1 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 1 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCreated", - "connectionId": 2 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 2 - } - ], - "ignore": [ - "ConnectionPoolCreated", - "ConnectionPoolClosed", - "ConnectionReady" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml b/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml deleted file mode 100644 index c554fd2717e..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: 1 -style: unit -description: must have IDs assigned in order of creation -operations: - - name: checkOut - - name: checkOut -events: - - type: ConnectionCheckOutStarted - - type: ConnectionCreated - connectionId: 1 - - type: ConnectionCheckedOut - connectionId: 1 - - type: ConnectionCheckOutStarted - - type: ConnectionCreated - connectionId: 2 - - type: ConnectionCheckedOut - connectionId: 2 -ignore: - - ConnectionPoolCreated - - ConnectionPoolClosed - - ConnectionReady diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json deleted file mode 100644 index 3b6f1d24840..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must destroy checked in connection if pool has been closed", - "operations": [ - { - "name": "checkOut", - "label": "conn" - }, - { - "name": "close" - }, - { - "name": "checkIn", - "connection": "conn" - } - ], - "events": [ - { - "type": "ConnectionCheckedOut", - "connectionId": 1 - }, - { - "type": "ConnectionPoolClosed", - "address": 42 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 1 - }, - { - "type": "ConnectionClosed", - "connectionId": 1, - "reason": "poolClosed" - } - ], - "ignore": [ - "ConnectionPoolCreated", - "ConnectionCreated", - "ConnectionReady", - "ConnectionCheckOutStarted" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml deleted file mode 100644 index f1eaaae14db..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: 1 -style: unit -description: must destroy checked in connection if pool has been closed -operations: - - name: checkOut - label: conn - - name: close - - name: checkIn - connection: conn -events: - - type: ConnectionCheckedOut - connectionId: 1 - - type: ConnectionPoolClosed - address: 42 - - type: ConnectionCheckedIn - connectionId: 1 - - type: ConnectionClosed - connectionId: 1 - reason: poolClosed -ignore: - - ConnectionPoolCreated - - ConnectionCreated - - ConnectionReady - - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json deleted file mode 100644 index 7faa44d33cb..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "version": 1, - 
"style": "unit", - "description": "must destroy checked in connection if it is stale", - "operations": [ - { - "name": "checkOut", - "label": "conn" - }, - { - "name": "clear" - }, - { - "name": "checkIn", - "connection": "conn" - } - ], - "events": [ - { - "type": "ConnectionCheckedOut", - "connectionId": 1 - }, - { - "type": "ConnectionPoolCleared", - "address": 42 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 1 - }, - { - "type": "ConnectionClosed", - "connectionId": 1, - "reason": "stale" - } - ], - "ignore": [ - "ConnectionPoolCreated", - "ConnectionCreated", - "ConnectionReady", - "ConnectionCheckOutStarted" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml deleted file mode 100644 index a1851101f0e..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: 1 -style: unit -description: must destroy checked in connection if it is stale -operations: - - name: checkOut - label: conn - - name: clear - - name: checkIn - connection: conn -events: - - type: ConnectionCheckedOut - connectionId: 1 - - type: ConnectionPoolCleared - address: 42 - - type: ConnectionCheckedIn - connectionId: 1 - - type: ConnectionClosed - connectionId: 1 - reason: stale -ignore: - - ConnectionPoolCreated - - ConnectionCreated - - ConnectionReady - - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json deleted file mode 100644 index 838194fe8eb..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must make valid checked in connection available", - "operations": [ - { - "name": "checkOut", - "label": "conn" - }, - { - "name": "checkIn", - "connection": "conn" - }, - { - "name": "checkOut" - } - ], - "events": [ - { - "type": "ConnectionCheckedOut", - "connectionId": 1 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 1 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 1 - } - ], - "ignore": [ - "ConnectionPoolCreated", - "ConnectionCreated", - "ConnectionReady", - "ConnectionCheckOutStarted" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml deleted file mode 100644 index 44272ebf42d..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: 1 -style: unit -description: must make valid checked in connection available -operations: - - name: checkOut - label: conn - - name: checkIn - connection: conn - - name: checkOut -events: - - type: ConnectionCheckedOut - connectionId: 1 - - type: ConnectionCheckedIn - connectionId: 1 - - type: ConnectionCheckedOut - connectionId: 1 -ignore: - - ConnectionPoolCreated - - ConnectionCreated - - ConnectionReady - - ConnectionCheckOutStarted \ No newline at end of file diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkin.json deleted file mode 100644 index 5e93c207a9e..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkin.json +++ 
/dev/null @@ -1,29 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must have a method of allowing the driver to check in a connection", - "operations": [ - { - "name": "checkOut", - "label": "conn" - }, - { - "name": "checkIn", - "connection": "conn" - } - ], - "events": [ - { - "type": "ConnectionCheckedIn", - "connectionId": 42 - } - ], - "ignore": [ - "ConnectionPoolCreated", - "ConnectionCreated", - "ConnectionReady", - "ConnectionClosed", - "ConnectionCheckOutStarted", - "ConnectionCheckedOut" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkin.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkin.yml deleted file mode 100644 index da78c34c8e6..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkin.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 1 -style: unit -description: must have a method of allowing the driver to check in a connection -operations: - - name: checkOut - label: conn - - name: checkIn - connection: conn -events: - - type: ConnectionCheckedIn - connectionId: 42 -ignore: - - ConnectionPoolCreated - - ConnectionCreated - - ConnectionReady - - ConnectionClosed - - ConnectionCheckOutStarted - - ConnectionCheckedOut diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.json deleted file mode 100644 index e6e108ce58e..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must be able to check out a connection", - "operations": [ - { - "name": "checkOut" - } - ], - "events": [ - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 1 - } - ], - "ignore": [ - "ConnectionPoolCreated", - "ConnectionCreated", - "ConnectionReady" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml deleted file mode 100644 index 34e9ae493e3..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: 1 -style: unit -description: must be able to check out a connection -operations: - - name: checkOut -events: - - type: ConnectionCheckOutStarted - - type: ConnectionCheckedOut - connectionId: 1 -ignore: - - ConnectionPoolCreated - - ConnectionCreated - - ConnectionReady \ No newline at end of file diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json deleted file mode 100644 index 4b32ecb55d8..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must throw error if checkOut is called on a closed pool", - "operations": [ - { - "name": "checkOut", - "label": "conn1" - }, - { - "name": "checkIn", - "connection": "conn1" - }, - { - "name": "close" - }, - { - "name": "checkOut" - } - ], - "error": { - "type": "PoolClosedError", - "message": "Attempted to check out a connection from closed connection pool" - }, - "events": [ - { - "type": "ConnectionPoolCreated", - "address": 42, - "options": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": 
"ConnectionCheckedIn", - "connectionId": 42 - }, - { - "type": "ConnectionPoolClosed", - "address": 42 - } - ], - "ignore": [ - "ConnectionCreated", - "ConnectionReady", - "ConnectionClosed", - "ConnectionCheckOutStarted" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml deleted file mode 100644 index 3a8d85e8e20..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: 1 -style: unit -description: must throw error if checkOut is called on a closed pool -operations: - - name: checkOut - label: conn1 - - name: checkIn - connection: conn1 - - name: close - - name: checkOut -error: - type: PoolClosedError - message: Attempted to check out a connection from closed connection pool -events: - - type: ConnectionPoolCreated - address: 42 - options: 42 - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckedIn - connectionId: 42 - - type: ConnectionPoolClosed - address: 42 -ignore: - - ConnectionCreated - - ConnectionReady - - ConnectionClosed - - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json deleted file mode 100644 index f3ecdb9be90..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must be able to check out multiple connections at the same time", - "operations": [ - { - "name": "start", - "target": "thread1" - }, - { - "name": "start", - "target": "thread2" - }, - { - "name": "start", - "target": "thread3" - }, - { - "name": "checkOut", - "thread": "thread1" - }, - { - "name": "checkOut", - "thread": "thread2" - }, - { - "name": "checkOut", - "thread": "thread3" - }, - { - "name": "waitForThread", - "target": "thread1" - }, - { - "name": "waitForThread", - "target": "thread2" - }, - { - "name": "waitForThread", - "target": "thread3" - } - ], - "events": [ - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - } - ], - "ignore": [ - "ConnectionCreated", - "ConnectionReady", - "ConnectionPoolCreated", - "ConnectionCheckOutStarted" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml deleted file mode 100644 index 1ac3236588c..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml +++ /dev/null @@ -1,34 +0,0 @@ -version: 1 -style: unit -description: must be able to check out multiple connections at the same time -operations: - - name: start - target: thread1 - - name: start - target: thread2 - - name: start - target: thread3 - - name: checkOut - thread: thread1 - - name: checkOut - thread: thread2 - - name: checkOut - thread: thread3 - - name: waitForThread - target: thread1 - - name: waitForThread - target: thread2 - - name: waitForThread - target: thread3 -events: - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 -ignore: - - ConnectionCreated - - ConnectionReady - - ConnectionPoolCreated - - 
ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json deleted file mode 100644 index 77ce40deacf..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must destroy and must not check out an idle connection if found while iterating available connections", - "poolOptions": { - "maxIdleTimeMS": 10 - }, - "operations": [ - { - "name": "checkOut", - "label": "conn" - }, - { - "name": "checkIn", - "connection": "conn" - }, - { - "name": "wait", - "ms": 50 - }, - { - "name": "checkOut" - } - ], - "events": [ - { - "type": "ConnectionPoolCreated", - "address": 42, - "options": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 1 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 1 - }, - { - "type": "ConnectionClosed", - "connectionId": 1, - "reason": "idle" - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 2 - } - ], - "ignore": [ - "ConnectionReady", - "ConnectionCreated", - "ConnectionCheckOutStarted" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml deleted file mode 100644 index 77f36b19583..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml +++ /dev/null @@ -1,31 +0,0 @@ -version: 1 -style: unit -description: must destroy and must not check out an idle connection if found while iterating available connections -poolOptions: - maxIdleTimeMS: 10 -operations: - - name: checkOut - label: conn - - name: checkIn - connection: conn - - name: wait - ms: 50 - - name: checkOut -events: - - type: ConnectionPoolCreated - address: 42 - options: 42 - - type: ConnectionCheckedOut - connectionId: 1 - - type: ConnectionCheckedIn - connectionId: 1 - # In between these, wait so connection becomes idle - - type: ConnectionClosed - connectionId: 1 - reason: idle - - type: ConnectionCheckedOut - connectionId: 2 -ignore: - - ConnectionReady - - ConnectionCreated - - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json deleted file mode 100644 index e5ebedfbe52..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must destroy and must not check out a stale connection if found while iterating available connections", - "operations": [ - { - "name": "checkOut", - "label": "conn" - }, - { - "name": "checkIn", - "connection": "conn" - }, - { - "name": "clear" - }, - { - "name": "checkOut" - } - ], - "events": [ - { - "type": "ConnectionPoolCreated", - "address": 42, - "options": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 1 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 1 - }, - { - "type": "ConnectionPoolCleared", - "address": 42 - }, - { - "type": "ConnectionClosed", - "connectionId": 1, - "reason": "stale" - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 2 - } - ], - "ignore": [ - "ConnectionReady", - "ConnectionCreated", - "ConnectionCheckOutStarted" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml 
b/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml deleted file mode 100644 index a4389b81ef3..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: 1 -style: unit -description: must destroy and must not check out a stale connection if found while iterating available connections -operations: - - name: checkOut - label: conn - - name: checkIn - connection: conn - - name: clear - - name: checkOut -events: - - type: ConnectionPoolCreated - address: 42 - options: 42 - - type: ConnectionCheckedOut - connectionId: 1 - - type: ConnectionCheckedIn - connectionId: 1 - - type: ConnectionPoolCleared - address: 42 - - type: ConnectionClosed - connectionId: 1 - reason: stale - - type: ConnectionCheckedOut - connectionId: 2 -ignore: - - ConnectionReady - - ConnectionCreated - - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json b/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json deleted file mode 100644 index 2bc50419b47..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "When a pool is closed, it MUST first destroy all available connections in that pool", - "operations": [ - { - "name": "checkOut" - }, - { - "name": "checkOut", - "label": "conn" - }, - { - "name": "checkOut" - }, - { - "name": "checkIn", - "connection": "conn" - }, - { - "name": "close" - } - ], - "events": [ - { - "type": "ConnectionCheckedIn", - "connectionId": 2 - }, - { - "type": "ConnectionClosed", - "connectionId": 2, - "reason": "poolClosed" - }, - { - "type": "ConnectionPoolClosed", - "address": 42 - } - ], - "ignore": [ - "ConnectionCreated", - "ConnectionReady", - "ConnectionPoolCreated", - "ConnectionCheckOutStarted", - "ConnectionCheckedOut" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml b/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml deleted file mode 100644 index ddfd1fad1bb..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml +++ /dev/null @@ -1,26 +0,0 @@ -version: 1 -style: unit -description: When a pool is closed, it MUST first destroy all available connections in that pool -operations: - - name: checkOut - - name: checkOut - label: conn - - name: checkOut - - name: checkIn - connection: conn - - name: close -events: - - type: ConnectionCheckedIn - connectionId: 2 - - type: ConnectionClosed - connectionId: 2 - reason: poolClosed - - type: ConnectionPoolClosed - address: 42 -ignore: - - ConnectionCreated - - ConnectionReady - - ConnectionPoolCreated - - ConnectionCheckOutStarted - - ConnectionCheckedOut - diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-close.json b/test/core/spec/connection-monitoring-and-pooling/pool-close.json deleted file mode 100644 index fe083d73e63..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-close.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must be able to manually close a pool", - "operations": [ - { - "name": "close" - } - ], - "events": [ - { - "type": "ConnectionPoolCreated", - "address": 42, - "options": 42 - }, - { - "type": "ConnectionPoolClosed", - "address": 42 - } - ] -} diff --git 
a/test/core/spec/connection-monitoring-and-pooling/pool-close.yml b/test/core/spec/connection-monitoring-and-pooling/pool-close.yml deleted file mode 100644 index 2562224b43c..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-close.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 1 -style: unit -description: must be able to manually close a pool -operations: - - name: close -events: - - type: ConnectionPoolCreated - address: 42 - options: 42 - - type: ConnectionPoolClosed - address: 42 diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.json b/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.json deleted file mode 100644 index 2ba7bdf62bf..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.json +++ /dev/null @@ -1,114 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must never exceed maxPoolSize total connections", - "poolOptions": { - "maxPoolSize": 3 - }, - "operations": [ - { - "name": "checkOut", - "label": "conn1" - }, - { - "name": "checkOut" - }, - { - "name": "checkOut", - "label": "conn2" - }, - { - "name": "checkIn", - "connection": "conn2" - }, - { - "name": "checkOut" - }, - { - "name": "start", - "target": "thread1" - }, - { - "name": "checkOut", - "thread": "thread1" - }, - { - "name": "waitForEvent", - "event": "ConnectionCheckOutStarted", - "count": 5 - }, - { - "name": "checkIn", - "connection": "conn1" - }, - { - "name": "waitForThread", - "target": "thread1" - } - ], - "events": [ - { - "type": "ConnectionPoolCreated", - "address": 42, - "options": 42 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCreated", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCreated", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCreated", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 42 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - } - ], - "ignore": [ - "ConnectionReady" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.yml b/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.yml deleted file mode 100644 index 534917bc755..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-create-max-size.yml +++ /dev/null @@ -1,56 +0,0 @@ -version: 1 -style: unit -description: must never exceed maxPoolSize total connections -poolOptions: - maxPoolSize: 3 -operations: - - name: checkOut - label: conn1 - - name: checkOut - - name: checkOut - label: conn2 - - name: checkIn - connection: conn2 - - name: checkOut - - name: start - target: thread1 - - name: checkOut - thread: thread1 - - name: waitForEvent - event: ConnectionCheckOutStarted - count: 5 - - name: checkIn - connection: conn1 - - name: waitForThread - target: thread1 -events: - - type: ConnectionPoolCreated - address: 42 - options: 42 - - type: ConnectionCheckOutStarted - - type: ConnectionCreated - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 
42 - - type: ConnectionCheckOutStarted - - type: ConnectionCreated - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckOutStarted - - type: ConnectionCreated - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckedIn - connectionId: 42 - - type: ConnectionCheckOutStarted - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckOutStarted - - type: ConnectionCheckedIn - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 -ignore: - - ConnectionReady diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.json b/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.json deleted file mode 100644 index 470988043f3..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must be able to start a pool with minPoolSize connections", - "poolOptions": { - "minPoolSize": 3 - }, - "operations": [ - { - "name": "waitForEvent", - "event": "ConnectionCreated", - "count": 3 - }, - { - "name": "checkOut" - } - ], - "events": [ - { - "type": "ConnectionPoolCreated", - "address": 42, - "options": 42 - }, - { - "type": "ConnectionCreated", - "connectionId": 42 - }, - { - "type": "ConnectionCreated", - "connectionId": 42 - }, - { - "type": "ConnectionCreated", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - } - ], - "ignore": [ - "ConnectionReady", - "ConnectionClosed", - "ConnectionCheckOutStarted" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.yml b/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.yml deleted file mode 100644 index 848de835ddf..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-create-min-size.yml +++ /dev/null @@ -1,27 +0,0 @@ -version: 1 -style: unit -description: must be able to start a pool with minPoolSize connections -poolOptions: - minPoolSize: 3 -operations: - - name: waitForEvent - event: ConnectionCreated - count: 3 - - name: checkOut -events: - - type: ConnectionPoolCreated - address: 42 - options: 42 - - type: ConnectionCreated - connectionId: 42 - - type: ConnectionCreated - connectionId: 42 - - type: ConnectionCreated - connectionId: 42 - # Ensures that by the time pool is closed, there are at least 3 connections - - type: ConnectionCheckedOut - connectionId: 42 -ignore: - - ConnectionReady - - ConnectionClosed - - ConnectionCheckOutStarted diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.json b/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.json deleted file mode 100644 index ab689448f4e..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must be able to start a pool with various options set", - "poolOptions": { - "maxPoolSize": 50, - "minPoolSize": 5, - "maxIdleTimeMS": 100 - }, - "operations": [ - { - "name": "waitForEvent", - "event": "ConnectionPoolCreated", - "count": 1 - } - ], - "events": [ - { - "type": "ConnectionPoolCreated", - "address": 42, - "options": { - "maxPoolSize": 50, - "minPoolSize": 5, - "maxIdleTimeMS": 100 - } - } - ], - "ignore": [ - "ConnectionCreated" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.yml 
b/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.yml deleted file mode 100644 index 2915eb60017..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-create-with-options.yml +++ /dev/null @@ -1,20 +0,0 @@ -version: 1 -style: unit -description: must be able to start a pool with various options set -poolOptions: - maxPoolSize: 50 - minPoolSize: 5 - maxIdleTimeMS: 100 -operations: - - name: waitForEvent - event: ConnectionPoolCreated - count: 1 -events: - - type: ConnectionPoolCreated - address: 42 - options: - maxPoolSize: 50 - minPoolSize: 5 - maxIdleTimeMS: 100 -ignore: - - ConnectionCreated diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create.json b/test/core/spec/connection-monitoring-and-pooling/pool-create.json deleted file mode 100644 index 8c1f85537f9..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-create.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must be able to create a pool", - "operations": [ - { - "name": "waitForEvent", - "event": "ConnectionPoolCreated", - "count": 1 - } - ], - "events": [ - { - "type": "ConnectionPoolCreated", - "address": 42, - "options": 42 - } - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/pool-create.yml b/test/core/spec/connection-monitoring-and-pooling/pool-create.yml deleted file mode 100644 index f4989e8d4b3..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/pool-create.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 1 -style: unit -description: must be able to create a pool -operations: - - name: waitForEvent - event: ConnectionPoolCreated - count: 1 -events: - - type: ConnectionPoolCreated - address: 42 - options: 42 - diff --git a/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.json b/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.json deleted file mode 100644 index 36c8a6dc1ba..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.json +++ /dev/null @@ -1,162 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must issue Connections to threads in the order that the threads entered the queue", - "poolOptions": { - "maxPoolSize": 1, - "waitQueueTimeoutMS": 1000 - }, - "operations": [ - { - "name": "checkOut", - "label": "conn0" - }, - { - "name": "start", - "target": "thread1" - }, - { - "name": "checkOut", - "thread": "thread1", - "label": "conn1" - }, - { - "name": "waitForEvent", - "event": "ConnectionCheckOutStarted", - "count": 2 - }, - { - "name": "start", - "target": "thread2" - }, - { - "name": "checkOut", - "thread": "thread2", - "label": "conn2" - }, - { - "name": "waitForEvent", - "event": "ConnectionCheckOutStarted", - "count": 3 - }, - { - "name": "start", - "target": "thread3" - }, - { - "name": "checkOut", - "thread": "thread3", - "label": "conn3" - }, - { - "name": "waitForEvent", - "event": "ConnectionCheckOutStarted", - "count": 4 - }, - { - "name": "start", - "target": "thread4" - }, - { - "name": "checkOut", - "thread": "thread4", - "label": "conn4" - }, - { - "name": "waitForEvent", - "event": "ConnectionCheckOutStarted", - "count": 5 - }, - { - "name": "checkIn", - "connection": "conn0" - }, - { - "name": "waitForThread", - "target": "thread1" - }, - { - "name": "checkIn", - "connection": "conn1" - }, - { - "name": "waitForThread", - "target": "thread2" - }, - { - "name": "checkIn", - "connection": "conn2" - }, - { - "name": "waitForThread", - "target": "thread3" - }, 
- { - "name": "checkIn", - "connection": "conn3" - }, - { - "name": "waitForThread", - "target": "thread4" - } - ], - "events": [ - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - } - ], - "ignore": [ - "ConnectionCreated", - "ConnectionReady", - "ConnectionClosed", - "ConnectionPoolCreated" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml b/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml deleted file mode 100644 index 564c010bab9..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml +++ /dev/null @@ -1,94 +0,0 @@ -version: 1 -style: unit -description: must issue Connections to threads in the order that the threads entered the queue -poolOptions: - maxPoolSize: 1 - waitQueueTimeoutMS: 1000 -operations: - # Check out sole connection in pool - - name: checkOut - label: conn0 - # Create 4 threads, have them all queue up for connections - # Note: this might become non-deterministic depending on how you - # implement your test runner. The goal is for each thread to - # have started and begun checkOut before the next thread starts. - # The sleep operations should make this more consistent. 
- - name: start - target: thread1 - - name: checkOut - thread: thread1 - label: conn1 - - name: waitForEvent - event: ConnectionCheckOutStarted - count: 2 - - name: start - target: thread2 - - name: checkOut - thread: thread2 - label: conn2 - - name: waitForEvent - event: ConnectionCheckOutStarted - count: 3 - - name: start - target: thread3 - - name: checkOut - thread: thread3 - label: conn3 - - name: waitForEvent - event: ConnectionCheckOutStarted - count: 4 - - name: start - target: thread4 - - name: checkOut - thread: thread4 - label: conn4 - - name: waitForEvent - event: ConnectionCheckOutStarted - count: 5 - # From main thread, keep checking in connection and then wait for appropriate thread - # Test will timeout if threads are not enqueued in proper order - - name: checkIn - connection: conn0 - - name: waitForThread - target: thread1 - - name: checkIn - connection: conn1 - - name: waitForThread - target: thread2 - - name: checkIn - connection: conn2 - - name: waitForThread - target: thread3 - - name: checkIn - connection: conn3 - - name: waitForThread - target: thread4 -events: - - type: ConnectionCheckOutStarted - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckOutStarted - - type: ConnectionCheckOutStarted - - type: ConnectionCheckOutStarted - - type: ConnectionCheckOutStarted - - type: ConnectionCheckedIn - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckedIn - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckedIn - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckedIn - connectionId: 42 - - type: ConnectionCheckedOut - connectionId: 42 -ignore: - - ConnectionCreated - - ConnectionReady - - ConnectionClosed - - ConnectionPoolCreated diff --git a/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.json b/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.json deleted file mode 100644 index 90ec2f62d95..00000000000 --- a/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "version": 1, - "style": "unit", - "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", - "poolOptions": { - "maxPoolSize": 1, - "waitQueueTimeoutMS": 20 - }, - "operations": [ - { - "name": "checkOut", - "label": "conn0" - }, - { - "name": "start", - "target": "thread1" - }, - { - "name": "checkOut", - "thread": "thread1" - }, - { - "name": "waitForEvent", - "event": "ConnectionCheckOutFailed", - "count": 1 - }, - { - "name": "checkIn", - "connection": "conn0" - }, - { - "name": "waitForThread", - "target": "thread1" - } - ], - "error": { - "type": "WaitQueueTimeoutError", - "message": "Timed out while checking out a connection from connection pool" - }, - "events": [ - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 42 - }, - { - "type": "ConnectionCheckOutStarted" - }, - { - "type": "ConnectionCheckOutFailed", - "reason": "timeout" - }, - { - "type": "ConnectionCheckedIn", - "connectionId": 42 - } - ], - "ignore": [ - "ConnectionCreated", - "ConnectionReady", - "ConnectionClosed", - "ConnectionPoolCreated" - ] -} diff --git a/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml b/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml deleted file mode 100644 index 49c18df4cd7..00000000000 --- 
a/test/core/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml +++ /dev/null @@ -1,41 +0,0 @@ -version: 1 -style: unit -description: must aggressively timeout threads enqueued longer than waitQueueTimeoutMS -poolOptions: - maxPoolSize: 1 - waitQueueTimeoutMS: 20 -operations: - # Check out only possible connection - - name: checkOut - label: conn0 - # Start a thread, have it enter the wait queue - - name: start - target: thread1 - - name: checkOut - thread: thread1 - # Wait for other thread to time out, then check in connection - - name: waitForEvent - event: ConnectionCheckOutFailed - count: 1 - - name: checkIn - connection: conn0 - # Rejoin thread1, should experience error - - name: waitForThread - target: thread1 -error: - type: WaitQueueTimeoutError - message: Timed out while checking out a connection from connection pool -events: - - type: ConnectionCheckOutStarted - - type: ConnectionCheckedOut - connectionId: 42 - - type: ConnectionCheckOutStarted - - type: ConnectionCheckOutFailed - reason: timeout - - type: ConnectionCheckedIn - connectionId: 42 -ignore: - - ConnectionCreated - - ConnectionReady - - ConnectionClosed - - ConnectionPoolCreated diff --git a/test/functional/cmap/connection.test.js b/test/functional/cmap/connection.test.js index b299f89f93f..e90c0eea0c5 100644 --- a/test/functional/cmap/connection.test.js +++ b/test/functional/cmap/connection.test.js @@ -1,6 +1,6 @@ 'use strict'; -const Connection = require('../../../lib/core/cmap/connection'); +const Connection = require('../../../lib/core/cmap/connection').Connection; const connect = require('../../../lib/core/connection/connect'); const expect = require('chai').expect; const BSON = require('bson'); diff --git a/test/spec/connection-monitoring-and-pooling/README.rst b/test/spec/connection-monitoring-and-pooling/README.rst index 6480d7f43b7..b1605c14f0e 100644 --- a/test/spec/connection-monitoring-and-pooling/README.rst +++ b/test/spec/connection-monitoring-and-pooling/README.rst @@ -154,5 +154,3 @@ The following tests have not yet been automated, but MUST still be tested #. All ConnectionPoolOptions MUST be the same for all pools created by a MongoClient #. A user MUST be able to specify all ConnectionPoolOptions via a URI string #. A user MUST be able to subscribe to Connection Monitoring Events in a manner idiomatic to their language and driver -#. When a check out attempt fails because connection set up throws an error, - assert that a ConnectionCheckOutFailedEvent with reason="connectionError" is emitted. 
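Note on the fixture hunks that follow: the expected events in these files name only the fields a driver must assert on, and the literal 42 is used throughout as a "match anything" placeholder, so dropping the address: 42 entries (as the hunks below do) should not change what a conforming test runner effectively verifies. For illustration only, a wildcard matcher in that spirit, using hypothetical helper names (matchesSpec, filterIgnoredEvents, assertEventsInOrder) rather than the runner's actual match_spec implementation shown later in this patch, might look like:

    // Illustrative only -- hypothetical helpers, not the runner's match_spec module.
    function matchesSpec(expected, actual) {
      if (expected === 42) {
        // 42 acts as a wildcard: any concrete value satisfies the expectation.
        return actual !== undefined;
      }

      if (expected && typeof expected === 'object') {
        // Only the fields listed in the expected document are asserted on;
        // extra fields on the observed event are ignored.
        return Object.keys(expected).every(key =>
          matchesSpec(expected[key], actual == null ? undefined : actual[key])
        );
      }

      return expected === actual;
    }

    // Drop observed events named in the fixture's `ignore` list (assuming each
    // recorded event carries its spec name in a `type` field).
    function filterIgnoredEvents(observed, ignore) {
      return observed.filter(event => ignore.indexOf(event.type) === -1);
    }

    // Compare the remaining events, in order, against the fixture's `events` list.
    function assertEventsInOrder(expectedEvents, observedEvents) {
      expectedEvents.forEach((expected, i) => {
        if (!matchesSpec(expected, observedEvents[i])) {
          throw new Error('observed event at index ' + i + ' did not match the expectation');
        }
      });
    }

Under such a matcher, an expectation like { type: 'ConnectionCheckedOut', connectionId: 42 } passes for any checked-out connection regardless of its id, which is presumably why the redundant address fields could be removed below.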
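The two wait-queue suites updated further below pin down related behaviour: wait-queue-fairness requires that queued check-out requests are granted connections in the order their threads entered the queue, and wait-queue-timeout requires that a request waiting longer than waitQueueTimeoutMS fails with a WaitQueueTimeoutError. As a rough, hypothetical sketch of that contract (not the driver's actual lib/cmap/wait_queue.js), a FIFO queue with per-waiter timers could look like:

    // Hypothetical FIFO wait queue with per-waiter timeouts; illustrative only.
    class WaitQueueTimeoutError extends Error {}

    class SimpleWaitQueue {
      constructor(waitQueueTimeoutMS) {
        this.waitQueueTimeoutMS = waitQueueTimeoutMS;
        this.members = [];
      }

      // A thread wanting a connection registers a callback and waits its turn.
      enter(callback) {
        const member = { callback, timer: null };
        if (this.waitQueueTimeoutMS) {
          member.timer = setTimeout(() => {
            // Aggressively fail waiters that have been enqueued too long.
            const idx = this.members.indexOf(member);
            if (idx !== -1) this.members.splice(idx, 1);
            callback(
              new WaitQueueTimeoutError(
                'Timed out while checking out a connection from connection pool'
              )
            );
          }, this.waitQueueTimeoutMS);
        }

        this.members.push(member);
      }

      // Called when a connection is checked in or newly created: hand it to the
      // waiter at the head of the queue, preserving first-come-first-served order.
      advance(connection) {
        const member = this.members.shift();
        if (!member) {
          return false;
        }

        if (member.timer) {
          clearTimeout(member.timer);
        }

        member.callback(null, connection);
        return true;
      }
    }

With a queue of this shape, each checkIn simply calls advance() with the returned connection, so the earliest waiter is always served first (what the fairness fixture asserts) and a late waiter's callback receives the timeout error message that the wait-queue-timeout fixture expects.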
diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json index 7ed67902285..487a5979d08 100644 --- a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json +++ b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json @@ -12,32 +12,26 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCreated", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCreated", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml index 5b7b660e54a..16d7fc2d8fd 100644 --- a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml +++ b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml @@ -6,21 +6,15 @@ operations: - name: checkOut events: - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCreated connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCreated connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 ignore: - ConnectionPoolCreated - ConnectionPoolClosed diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json index 9b839e8f060..dda515c1a91 100644 --- a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json +++ b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json @@ -12,32 +12,26 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCreated", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionCheckedOut", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCreated", - "connectionId": 2, - "address": 42 + "connectionId": 2 }, { "type": "ConnectionCheckedOut", - "connectionId": 2, - "address": 42 + "connectionId": 2 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml index 162acfa7975..c554fd2717e 100644 --- a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml +++ b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml @@ -6,21 +6,15 @@ operations: - name: checkOut events: - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCreated connectionId: 1 - address: 42 - type: ConnectionCheckedOut connectionId: 1 - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCreated connectionId: 2 - address: 42 - type: ConnectionCheckedOut connectionId: 2 - address: 42 ignore: - ConnectionPoolCreated - ConnectionPoolClosed diff 
--git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json index a73afbf752b..3b6f1d24840 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json @@ -18,8 +18,7 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionPoolClosed", @@ -27,14 +26,12 @@ }, { "type": "ConnectionCheckedIn", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "poolClosed", - "address": 42 + "reason": "poolClosed" } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml index cf9bdfc1d70..f1eaaae14db 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml @@ -10,16 +10,13 @@ operations: events: - type: ConnectionCheckedOut connectionId: 1 - address: 42 - type: ConnectionPoolClosed address: 42 - type: ConnectionCheckedIn connectionId: 1 - address: 42 - type: ConnectionClosed connectionId: 1 reason: poolClosed - address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json index 600c0520719..7faa44d33cb 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json @@ -18,8 +18,7 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionPoolCleared", @@ -27,14 +26,12 @@ }, { "type": "ConnectionCheckedIn", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "stale", - "address": 42 + "reason": "stale" } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml index 2c95d5c03b6..a1851101f0e 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml @@ -10,16 +10,13 @@ operations: events: - type: ConnectionCheckedOut connectionId: 1 - address: 42 - type: ConnectionPoolCleared address: 42 - type: ConnectionCheckedIn connectionId: 1 - address: 42 - type: ConnectionClosed connectionId: 1 reason: stale - address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json index 015928c50d3..838194fe8eb 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json @@ -18,18 +18,15 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionCheckedIn", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionCheckedOut", - 
"connectionId": 1, - "address": 42 + "connectionId": 1 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml index bebc035f702..44272ebf42d 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml @@ -10,13 +10,10 @@ operations: events: - type: ConnectionCheckedOut connectionId: 1 - address: 42 - type: ConnectionCheckedIn connectionId: 1 - address: 42 - type: ConnectionCheckedOut connectionId: 1 - address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin.json b/test/spec/connection-monitoring-and-pooling/pool-checkin.json index 7073895ad2a..5e93c207a9e 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin.json @@ -15,8 +15,7 @@ "events": [ { "type": "ConnectionCheckedIn", - "connectionId": 42, - "address": 42 + "connectionId": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin.yml index c2560a5cd3b..da78c34c8e6 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin.yml @@ -9,7 +9,6 @@ operations: events: - type: ConnectionCheckedIn connectionId: 42 - address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json index 0343fa75568..e6e108ce58e 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json @@ -9,13 +9,11 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCheckedOut", - "connectionId": 1, - "address": 42 + "connectionId": 1 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml index b0f61a275d6..34e9ae493e3 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml @@ -5,10 +5,8 @@ operations: - name: checkOut events: - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckedOut connectionId: 1 - address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json index 3823c23a780..4b32ecb55d8 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json @@ -28,37 +28,23 @@ "address": 42, "options": 42 }, - { - "type": "ConnectionCheckOutStarted", - "address": 42 - }, { "type": "ConnectionCheckedOut", - "address": 42, "connectionId": 42 }, { "type": "ConnectionCheckedIn", - "address": 42, "connectionId": 42 }, { "type": "ConnectionPoolClosed", "address": 42 - }, - { - "type": "ConnectionCheckOutStarted", - "address": 42 - }, - { - "type": 
"ConnectionCheckOutFailed", - "address": 42, - "reason": "poolClosed" } ], "ignore": [ "ConnectionCreated", "ConnectionReady", - "ConnectionClosed" + "ConnectionClosed", + "ConnectionCheckOutStarted" ] } diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml index 6621685545a..3a8d85e8e20 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml @@ -15,22 +15,14 @@ events: - type: ConnectionPoolCreated address: 42 options: 42 - - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckedOut - address: 42 connectionId: 42 - type: ConnectionCheckedIn - address: 42 connectionId: 42 - type: ConnectionPoolClosed address: 42 - - type: ConnectionCheckOutStarted - address: 42 - - type: ConnectionCheckOutFailed - address: 42 - reason: poolClosed ignore: - ConnectionCreated - ConnectionReady - ConnectionClosed + - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json index fee0d076cf1..f3ecdb9be90 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json @@ -43,18 +43,15 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml index 714506ef7fe..1ac3236588c 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml @@ -23,13 +23,10 @@ operations: events: - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 ignore: - ConnectionCreated - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json index 74325d655d3..77ce40deacf 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json @@ -30,24 +30,20 @@ }, { "type": "ConnectionCheckedOut", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionCheckedIn", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "idle", - "address": 42 + "reason": "idle" }, { "type": "ConnectionCheckedOut", - "connectionId": 2, - "address": 42 + "connectionId": 2 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml index 415906bb576..77f36b19583 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml @@ -17,18 +17,14 @@ events: options: 42 - type: 
ConnectionCheckedOut connectionId: 1 - address: 42 - type: ConnectionCheckedIn connectionId: 1 - address: 42 # In between these, wait so connection becomes idle - type: ConnectionClosed connectionId: 1 reason: idle - address: 42 - type: ConnectionCheckedOut connectionId: 2 - address: 42 ignore: - ConnectionReady - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json index 67ee507fe88..e5ebedfbe52 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json @@ -26,13 +26,11 @@ }, { "type": "ConnectionCheckedOut", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionCheckedIn", - "connectionId": 1, - "address": 42 + "connectionId": 1 }, { "type": "ConnectionPoolCleared", @@ -41,13 +39,11 @@ { "type": "ConnectionClosed", "connectionId": 1, - "reason": "stale", - "address": 42 + "reason": "stale" }, { "type": "ConnectionCheckedOut", - "connectionId": 2, - "address": 42 + "connectionId": 2 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml index c434f4b0656..a4389b81ef3 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml @@ -14,19 +14,15 @@ events: options: 42 - type: ConnectionCheckedOut connectionId: 1 - address: 42 - type: ConnectionCheckedIn connectionId: 1 - address: 42 - type: ConnectionPoolCleared address: 42 - type: ConnectionClosed connectionId: 1 reason: stale - address: 42 - type: ConnectionCheckedOut connectionId: 2 - address: 42 ignore: - ConnectionReady - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json index e1fb9d07837..2bc50419b47 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json +++ b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json @@ -24,14 +24,12 @@ "events": [ { "type": "ConnectionCheckedIn", - "connectionId": 2, - "address": 42 + "connectionId": 2 }, { "type": "ConnectionClosed", "connectionId": 2, - "reason": "poolClosed", - "address": 42 + "reason": "poolClosed" }, { "type": "ConnectionPoolClosed", diff --git a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml index 65b13a6d51b..ddfd1fad1bb 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml @@ -12,11 +12,9 @@ operations: events: - type: ConnectionCheckedIn connectionId: 2 - address: 42 - type: ConnectionClosed connectionId: 2 reason: poolClosed - address: 42 - type: ConnectionPoolClosed address: 42 ignore: diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json index b585d0daec7..2ba7bdf62bf 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json +++ b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json @@ -53,74 +53,59 @@ "options": 42 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + 
"type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCreated", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCreated", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCreated", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCheckedIn", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml index 64e521c7ec3..534917bc755 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml @@ -28,44 +28,29 @@ events: address: 42 options: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCreated connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCreated connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCreated connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckedIn connectionId: 42 - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckedIn connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 ignore: - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json index 7b5cf202b31..470988043f3 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json +++ b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json @@ -23,23 +23,19 @@ }, { "type": "ConnectionCreated", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCreated", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCreated", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml 
index d87f7feec34..848de835ddf 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml @@ -14,17 +14,13 @@ events: options: 42 - type: ConnectionCreated connectionId: 42 - address: 42 - type: ConnectionCreated connectionId: 42 - address: 42 - type: ConnectionCreated connectionId: 42 - address: 42 # Ensures that by the time pool is closed, there are at least 3 connections - type: ConnectionCheckedOut connectionId: 42 - address: 42 ignore: - ConnectionReady - ConnectionClosed diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json index 4e8223f91e3..ab689448f4e 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json +++ b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json @@ -26,7 +26,6 @@ } ], "ignore": [ - "ConnectionCreated", - "ConnectionReady" + "ConnectionCreated" ] } diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml index 32c8d0e54c8..2915eb60017 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml @@ -18,4 +18,3 @@ events: maxIdleTimeMS: 100 ignore: - ConnectionCreated - - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json index c58fbadcff2..36c8a6dc1ba 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json @@ -4,7 +4,7 @@ "description": "must issue Connections to threads in the order that the threads entered the queue", "poolOptions": { "maxPoolSize": 1, - "waitQueueTimeoutMS": 5000 + "waitQueueTimeoutMS": 1000 }, "operations": [ { @@ -25,10 +25,6 @@ "event": "ConnectionCheckOutStarted", "count": 2 }, - { - "name": "wait", - "ms": 100 - }, { "name": "start", "target": "thread2" @@ -43,10 +39,6 @@ "event": "ConnectionCheckOutStarted", "count": 3 }, - { - "name": "wait", - "ms": 100 - }, { "name": "start", "target": "thread3" @@ -61,10 +53,6 @@ "event": "ConnectionCheckOutStarted", "count": 4 }, - { - "name": "wait", - "ms": 100 - }, { "name": "start", "target": "thread4" @@ -79,10 +67,6 @@ "event": "ConnectionCheckOutStarted", "count": 5 }, - { - "name": "wait", - "ms": 100 - }, { "name": "checkIn", "connection": "conn0" @@ -118,69 +102,55 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCheckedIn", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42, - "address": 42 + 
"connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml index 024ec69316a..564c010bab9 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml @@ -3,7 +3,7 @@ style: unit description: must issue Connections to threads in the order that the threads entered the queue poolOptions: maxPoolSize: 1 - waitQueueTimeoutMS: 5000 + waitQueueTimeoutMS: 1000 operations: # Check out sole connection in pool - name: checkOut @@ -21,10 +21,6 @@ operations: - name: waitForEvent event: ConnectionCheckOutStarted count: 2 - # Give thread1 some time to actually enter the wait queue since the - # ConnectionCheckOutStarted event is publish beforehand. - - name: wait - ms: 100 - name: start target: thread2 - name: checkOut @@ -33,10 +29,6 @@ operations: - name: waitForEvent event: ConnectionCheckOutStarted count: 3 - # Give thread2 some time to actually enter the wait queue since the - # ConnectionCheckOutStarted event is publish beforehand. - - name: wait - ms: 100 - name: start target: thread3 - name: checkOut @@ -45,10 +37,6 @@ operations: - name: waitForEvent event: ConnectionCheckOutStarted count: 4 - # Give thread3 some time to actually enter the wait queue since the - # ConnectionCheckOutStarted event is publish beforehand. - - name: wait - ms: 100 - name: start target: thread4 - name: checkOut @@ -57,10 +45,6 @@ operations: - name: waitForEvent event: ConnectionCheckOutStarted count: 5 - # Give thread4 some time to actually enter the wait queue since the - # ConnectionCheckOutStarted event is publish beforehand. 
- - name: wait - ms: 100 # From main thread, keep checking in connection and then wait for appropriate thread # Test will timeout if threads are not enqueued in proper order - name: checkIn @@ -81,42 +65,28 @@ operations: target: thread4 events: - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckedIn connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckedIn connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckedIn connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckedIn connectionId: 42 - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 ignore: - ConnectionCreated - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json index ee7cf279552..90ec2f62d95 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json @@ -39,27 +39,22 @@ }, "events": [ { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCheckedOut", - "connectionId": 42, - "address": 42 + "connectionId": 42 }, { - "type": "ConnectionCheckOutStarted", - "address": 42 + "type": "ConnectionCheckOutStarted" }, { "type": "ConnectionCheckOutFailed", - "reason": "timeout", - "address": 42 + "reason": "timeout" }, { "type": "ConnectionCheckedIn", - "connectionId": 42, - "address": 42 + "connectionId": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml index eba4ab638da..49c18df4cd7 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml @@ -27,18 +27,13 @@ error: message: Timed out while checking out a connection from connection pool events: - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckedOut connectionId: 42 - address: 42 - type: ConnectionCheckOutStarted - address: 42 - type: ConnectionCheckOutFailed reason: timeout - address: 42 - type: ConnectionCheckedIn connectionId: 42 - address: 42 ignore: - ConnectionCreated - ConnectionReady diff --git a/test/core/unit/pool_spec_tests.js b/test/unit/cmap/spec_tests.js similarity index 94% rename from test/core/unit/pool_spec_tests.js rename to test/unit/cmap/spec_tests.js index 8ffd7c58884..b9758a8d188 100644 --- a/test/core/unit/pool_spec_tests.js +++ b/test/unit/cmap/spec_tests.js @@ -5,7 +5,7 @@ const path = require('path'); const expect = require('chai').expect; require('chai').use(require('../../match_spec').default); -const Pool = require('../../../lib/pool').Pool; +const ConnectionPool = require('../../../lib/core/cmap').ConnectionPool; const EventEmitter = require('events').EventEmitter; class Connection { @@ -68,7 +68,7 @@ class Connection { destroy() {} } -const events = require('../../../lib/pool/events'); +const events = require('../../../lib/core/cmap/events'); const ALL_EVENTS = Object.keys(events) .map(key 
=> events[key]) @@ -91,10 +91,10 @@ function promisify(fn) { } const PROMISIFIED_POOL_FUNCTIONS = { - checkOut: promisify(Pool.prototype.checkOut), - checkIn: promisify(Pool.prototype.checkIn), - clear: promisify(Pool.prototype.clear), - close: promisify(Pool.prototype.close) + checkOut: promisify(ConnectionPool.prototype.checkOut), + checkIn: promisify(ConnectionPool.prototype.checkIn), + clear: promisify(ConnectionPool.prototype.clear), + close: promisify(ConnectionPool.prototype.close) }; function destroyPool(pool) { @@ -103,7 +103,7 @@ function destroyPool(pool) { }); } -describe('Pool Spec Tests', function() { +describe('Connection Pool (spec)', function() { const threads = new Map(); const connections = new Map(); const poolEvents = []; @@ -125,7 +125,7 @@ describe('Pool Spec Tests', function() { const address = 'localhost:27017'; options = Object.assign({}, options, { Connection, address }); - pool = new Pool(options); + pool = new ConnectionPool(options); ALL_EVENTS.forEach(ev => { pool.on(ev, x => { poolEvents.push(x); @@ -249,7 +249,7 @@ describe('Pool Spec Tests', function() { } } - const specPath = path.join(__dirname, '../spec', 'connection-monitoring-and-pooling'); + const specPath = path.join(__dirname, '../../spec/connection-monitoring-and-pooling'); const testFiles = fs .readdirSync(specPath) .filter(x => x.indexOf('.json') !== -1) From 56e58f78a0dcd82cb3d2b30445cbc85ec1e7ba09 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 23 Nov 2019 17:52:17 -0500 Subject: [PATCH 006/130] chore: relocate CMAP code to `lib/cmap` --- lib/{core => }/cmap/connection.js | 13 ++++----- lib/{core => }/cmap/connection_manager.js | 0 .../cmap/pool.js => cmap/connection_pool.js} | 0 lib/{core => }/cmap/counter.js | 0 lib/{core => }/cmap/errors.js | 0 lib/{core => }/cmap/events.js | 0 lib/cmap/index.js | 5 ++++ lib/{core => }/cmap/message_stream.js | 28 +++++++++---------- lib/{core => }/cmap/wait_queue.js | 0 lib/core/cmap/index.js | 5 ---- test/functional/cmap/connection.test.js | 2 +- test/unit/cmap/message_stream.test.js | 2 +- test/unit/cmap/spec_tests.js | 4 +-- 13 files changed, 29 insertions(+), 30 deletions(-) rename lib/{core => }/cmap/connection.js (95%) rename lib/{core => }/cmap/connection_manager.js (100%) rename lib/{core/cmap/pool.js => cmap/connection_pool.js} (100%) rename lib/{core => }/cmap/counter.js (100%) rename lib/{core => }/cmap/errors.js (100%) rename lib/{core => }/cmap/events.js (100%) create mode 100644 lib/cmap/index.js rename lib/{core => }/cmap/message_stream.js (84%) rename lib/{core => }/cmap/wait_queue.js (100%) delete mode 100644 lib/core/cmap/index.js diff --git a/lib/core/cmap/connection.js b/lib/cmap/connection.js similarity index 95% rename from lib/core/cmap/connection.js rename to lib/cmap/connection.js index c07dbfd4882..546e8b08f9e 100644 --- a/lib/core/cmap/connection.js +++ b/lib/cmap/connection.js @@ -2,12 +2,12 @@ const EventEmitter = require('events'); const MessageStream = require('./message_stream'); -const MongoError = require('../error').MongoError; -const MongoWriteConcernError = require('../error').MongoWriteConcernError; -const wp = require('../wireprotocol'); -const apm = require('../connection/apm'); -const updateSessionFromResponse = require('../sessions').updateSessionFromResponse; -const uuidV4 = require('../utils').uuidV4; +const MongoError = require('../core/error').MongoError; +const MongoWriteConcernError = require('../core/error').MongoWriteConcernError; +const wp = require('../core/wireprotocol'); +const apm = 
require('../core/connection/apm'); +const updateSessionFromResponse = require('../core/sessions').updateSessionFromResponse; +const uuidV4 = require('../core/utils').uuidV4; const kStream = Symbol('stream'); const kQueue = Symbol('queue'); @@ -277,7 +277,6 @@ class CMAPConnection { destroy() {} } - module.exports = { Connection, CMAPConnection diff --git a/lib/core/cmap/connection_manager.js b/lib/cmap/connection_manager.js similarity index 100% rename from lib/core/cmap/connection_manager.js rename to lib/cmap/connection_manager.js diff --git a/lib/core/cmap/pool.js b/lib/cmap/connection_pool.js similarity index 100% rename from lib/core/cmap/pool.js rename to lib/cmap/connection_pool.js diff --git a/lib/core/cmap/counter.js b/lib/cmap/counter.js similarity index 100% rename from lib/core/cmap/counter.js rename to lib/cmap/counter.js diff --git a/lib/core/cmap/errors.js b/lib/cmap/errors.js similarity index 100% rename from lib/core/cmap/errors.js rename to lib/cmap/errors.js diff --git a/lib/core/cmap/events.js b/lib/cmap/events.js similarity index 100% rename from lib/core/cmap/events.js rename to lib/cmap/events.js diff --git a/lib/cmap/index.js b/lib/cmap/index.js new file mode 100644 index 00000000000..35e961382ee --- /dev/null +++ b/lib/cmap/index.js @@ -0,0 +1,5 @@ +'use strict'; + +const ConnectionPool = require('./connection_pool').ConnectionPool; + +module.exports = { ConnectionPool }; diff --git a/lib/core/cmap/message_stream.js b/lib/cmap/message_stream.js similarity index 84% rename from lib/core/cmap/message_stream.js rename to lib/cmap/message_stream.js index 7195e3f7457..8bab9250f74 100644 --- a/lib/core/cmap/message_stream.js +++ b/lib/cmap/message_stream.js @@ -2,20 +2,20 @@ const Duplex = require('stream').Duplex; const BufferList = require('bl'); -const MongoParseError = require('../error').MongoParseError; -const decompress = require('../wireprotocol/compression').decompress; -const Response = require('../connection/commands').Response; -const BinMsg = require('../connection/msg').BinMsg; -const MongoError = require('../error').MongoError; -const OP_COMPRESSED = require('../wireprotocol/shared').opcodes.OP_COMPRESSED; -const OP_MSG = require('../wireprotocol/shared').opcodes.OP_MSG; -const MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE; -const COMPRESSION_DETAILS_SIZE = require('../wireprotocol/shared').COMPRESSION_DETAILS_SIZE; -const opcodes = require('../wireprotocol/shared').opcodes; -const compress = require('../wireprotocol/compression').compress; -const compressorIDs = require('../wireprotocol/compression').compressorIDs; -const uncompressibleCommands = require('../wireprotocol/compression').uncompressibleCommands; -const Msg = require('../connection/msg').Msg; +const MongoParseError = require('../core/error').MongoParseError; +const decompress = require('../core/wireprotocol/compression').decompress; +const Response = require('../core/connection/commands').Response; +const BinMsg = require('../core/connection/msg').BinMsg; +const MongoError = require('../core/error').MongoError; +const OP_COMPRESSED = require('../core/wireprotocol/shared').opcodes.OP_COMPRESSED; +const OP_MSG = require('../core/wireprotocol/shared').opcodes.OP_MSG; +const MESSAGE_HEADER_SIZE = require('../core/wireprotocol/shared').MESSAGE_HEADER_SIZE; +const COMPRESSION_DETAILS_SIZE = require('../core/wireprotocol/shared').COMPRESSION_DETAILS_SIZE; +const opcodes = require('../core/wireprotocol/shared').opcodes; +const compress = 
require('../core/wireprotocol/compression').compress; +const compressorIDs = require('../core/wireprotocol/compression').compressorIDs; +const uncompressibleCommands = require('../core/wireprotocol/compression').uncompressibleCommands; +const Msg = require('../core/connection/msg').Msg; const kDefaultMaxBsonMessageSize = 1024 * 1024 * 16 * 4; const kBuffer = Symbol('buffer'); diff --git a/lib/core/cmap/wait_queue.js b/lib/cmap/wait_queue.js similarity index 100% rename from lib/core/cmap/wait_queue.js rename to lib/cmap/wait_queue.js diff --git a/lib/core/cmap/index.js b/lib/core/cmap/index.js deleted file mode 100644 index 0662f610e2d..00000000000 --- a/lib/core/cmap/index.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; - -const ConnectionPool = require('./pool').ConnectionPool; - -module.exports = { ConnectionPool }; diff --git a/test/functional/cmap/connection.test.js b/test/functional/cmap/connection.test.js index e90c0eea0c5..05f45112ec8 100644 --- a/test/functional/cmap/connection.test.js +++ b/test/functional/cmap/connection.test.js @@ -1,6 +1,6 @@ 'use strict'; -const Connection = require('../../../lib/core/cmap/connection').Connection; +const Connection = require('../../../lib/cmap/connection').Connection; const connect = require('../../../lib/core/connection/connect'); const expect = require('chai').expect; const BSON = require('bson'); diff --git a/test/unit/cmap/message_stream.test.js b/test/unit/cmap/message_stream.test.js index da0bea1b2ef..6487dcf57e7 100644 --- a/test/unit/cmap/message_stream.test.js +++ b/test/unit/cmap/message_stream.test.js @@ -2,7 +2,7 @@ const BSON = require('bson'); const Readable = require('stream').Readable; const Writable = require('stream').Writable; -const MessageStream = require('../../../lib/core/cmap/message_stream'); +const MessageStream = require('../../../lib/cmap/message_stream'); const Msg = require('../../../lib/core/connection/msg').Msg; const expect = require('chai').expect; diff --git a/test/unit/cmap/spec_tests.js b/test/unit/cmap/spec_tests.js index b9758a8d188..abc92f75dc9 100644 --- a/test/unit/cmap/spec_tests.js +++ b/test/unit/cmap/spec_tests.js @@ -5,7 +5,7 @@ const path = require('path'); const expect = require('chai').expect; require('chai').use(require('../../match_spec').default); -const ConnectionPool = require('../../../lib/core/cmap').ConnectionPool; +const ConnectionPool = require('../../../lib/cmap').ConnectionPool; const EventEmitter = require('events').EventEmitter; class Connection { @@ -68,7 +68,7 @@ class Connection { destroy() {} } -const events = require('../../../lib/core/cmap/events'); +const events = require('../../../lib/cmap/events'); const ALL_EVENTS = Object.keys(events) .map(key => events[key]) From 4f2c0a0078e27fd1584c28758c7f54d5674ce4c5 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 23 Nov 2019 18:05:12 -0500 Subject: [PATCH 007/130] refactor(cmap): don't embed eventType on events --- lib/cmap/connection_pool.js | 41 +++++++++++------------------------- lib/cmap/events.js | 10 --------- test/unit/cmap/spec_tests.js | 18 ++++++++++------ 3 files changed, 24 insertions(+), 45 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 3eccd481cf7..2df25117642 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -83,17 +83,17 @@ class ConnectionPool extends EventEmitter { }; process.nextTick(() => { - this._emitMonitoringEvent(PoolCreatedEvent); + this.emit('connectionPoolCreated', new PoolCreatedEvent(this)); this._satisfyMinPoolSize(); 
}); } // Public API checkOut(callback) { - this._emitMonitoringEvent(ConnectionCheckOutStarted); + this.emit('connectionCheckOutStarted', new ConnectionCheckOutStarted(this)); if (this.s.isClosed) { - this._emitMonitoringEvent(ConnectionCheckOutFailed, 'poolClosed'); + this.emit('connectionCheckOutFailed', new ConnectionCheckOutFailed(this, 'poolClosed')); return callback(new PoolClosedError(this)); } @@ -125,7 +125,7 @@ class ConnectionPool extends EventEmitter { this.s.connections.makeAvailable(connection); } - this._emitMonitoringEvent(ConnectionCheckedInEvent, connection); + this.emit('connectionCheckedIn', new ConnectionCheckedInEvent(this, connection)); if (willDestroy) { const reason = force ? 'force' : closed ? 'poolClosed' : 'stale'; @@ -137,7 +137,7 @@ class ConnectionPool extends EventEmitter { clear(callback) { this.s.generation += 1; - this._emitMonitoringEvent(PoolClearedEvent); + this.emit('connectionPoolCleared', new PoolClearedEvent(this)); callback(); } @@ -151,7 +151,8 @@ class ConnectionPool extends EventEmitter { while (this.availableConnectionCount) { this._destroyConnection(this.s.connections.getAvailable(), 'poolClosed'); } - this._emitMonitoringEvent(PoolClosedEvent); + + this.emit('connectionPoolClosed', new PoolClosedEvent(this)); callback(); } @@ -181,18 +182,16 @@ class ConnectionPool extends EventEmitter { _acquisitionHandler(callback, err, connection) { if (!err) { this.s.connections.markInUse(connection); - this._emitMonitoringEvent(ConnectionCheckedOutEvent, connection); + this.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(this, connection)); return callback(null, connection); } let reason = 'unknown'; - if (err instanceof WaitQueueTimeoutError) { reason = 'timeout'; } - this._emitMonitoringEvent(ConnectionCheckOutFailed, reason); - + this.emit('connectionCheckOutFailed', new ConnectionCheckOutFailed(this, reason)); return callback(err, connection); } @@ -217,7 +216,7 @@ class ConnectionPool extends EventEmitter { this.s.connections.add(connection); this.s.connections.makeAvailable(connection); - this._emitMonitoringEvent(ConnectionCreatedEvent, connection); + this.emit('connectionCreated', new ConnectionCreatedEvent(this, connection)); connection.connect(err => { if (err) { @@ -226,8 +225,7 @@ class ConnectionPool extends EventEmitter { } connection.makeReadyToUse(); - - this._emitMonitoringEvent(ConnectionReadyEvent, connection); + this.emit('connectionReady', new ConnectionReadyEvent(this, connection)); }); if (callback) { @@ -237,7 +235,7 @@ class ConnectionPool extends EventEmitter { _destroyConnection(connection, reason) { this.s.connections.remove(connection); - this._emitMonitoringEvent(ConnectionClosedEvent, connection, reason); + this.emit('connectionClosed', new ConnectionClosedEvent(this, connection, reason)); setTimeout(() => connection.destroy()); } @@ -269,21 +267,6 @@ class ConnectionPool extends EventEmitter { _connectionIsIdle(connection) { return !!(this.s.maxIdleTimeMS && connection.timeIdle() > this.s.maxIdleTimeMS); } - - _emitMonitoringEvent() { - // Node >=6 impl: - // _emitMonitoringEvent(Ctor, ...args) { - // const ev = new Ctor(this, ...args); - // this.emit(Ctor.eventType, ev); - // } - const args = Array.from(arguments); - const Ctor = args.shift(); - args.unshift(null, this); - const BoundCtor = Function.bind.apply(Ctor, args); - - const ev = new BoundCtor(); - this.emit(Ctor.eventType, ev); - } } module.exports = { diff --git a/lib/cmap/events.js b/lib/cmap/events.js index 4abe591f4a8..a6f99c5f9c6 100644 --- 
a/lib/cmap/events.js +++ b/lib/cmap/events.js @@ -14,14 +14,12 @@ class PoolCreatedEvent extends PoolMonitoringEvent { this.options = pool.options; } } -PoolCreatedEvent.eventType = 'connectionPoolCreated'; class PoolClosedEvent extends PoolMonitoringEvent { constructor(pool) { super('ConnectionPoolClosed', pool); } } -PoolClosedEvent.eventType = 'connectionPoolClosed'; class ConnectionCreatedEvent extends PoolMonitoringEvent { constructor(pool, connection) { @@ -29,7 +27,6 @@ class ConnectionCreatedEvent extends PoolMonitoringEvent { this.connectionId = connection.id; } } -ConnectionCreatedEvent.eventType = 'connectionCreated'; class ConnectionReadyEvent extends PoolMonitoringEvent { constructor(pool, connection) { @@ -37,7 +34,6 @@ class ConnectionReadyEvent extends PoolMonitoringEvent { this.connectionId = connection.id; } } -ConnectionReadyEvent.eventType = 'connectionReady'; class ConnectionClosedEvent extends PoolMonitoringEvent { constructor(pool, connection, reason) { @@ -46,14 +42,12 @@ class ConnectionClosedEvent extends PoolMonitoringEvent { this.reason = reason || 'unknown'; } } -ConnectionClosedEvent.eventType = 'connectionClosed'; class ConnectionCheckOutStarted extends PoolMonitoringEvent { constructor(pool) { super('ConnectionCheckOutStarted', pool); } } -ConnectionCheckOutStarted.eventType = 'connectionCheckOutStarted'; class ConnectionCheckOutFailed extends PoolMonitoringEvent { constructor(pool, reason) { @@ -61,7 +55,6 @@ class ConnectionCheckOutFailed extends PoolMonitoringEvent { this.reason = reason; } } -ConnectionCheckOutFailed.eventType = 'connectionCheckOutFailed'; class ConnectionCheckedOutEvent extends PoolMonitoringEvent { constructor(pool, connection) { @@ -69,7 +62,6 @@ class ConnectionCheckedOutEvent extends PoolMonitoringEvent { this.connectionId = connection.id; } } -ConnectionCheckedOutEvent.eventType = 'connectionCheckedOut'; class ConnectionCheckedInEvent extends PoolMonitoringEvent { constructor(pool, connection) { @@ -77,14 +69,12 @@ class ConnectionCheckedInEvent extends PoolMonitoringEvent { this.connectionId = connection.id; } } -ConnectionCheckedInEvent.eventType = 'connectionCheckedIn'; class PoolClearedEvent extends PoolMonitoringEvent { constructor(pool) { super('ConnectionPoolCleared', pool); } } -PoolClearedEvent.eventType = 'connectionPoolCleared'; module.exports = { PoolCreatedEvent, diff --git a/test/unit/cmap/spec_tests.js b/test/unit/cmap/spec_tests.js index abc92f75dc9..e78d612f588 100644 --- a/test/unit/cmap/spec_tests.js +++ b/test/unit/cmap/spec_tests.js @@ -68,12 +68,18 @@ class Connection { destroy() {} } -const events = require('../../../lib/cmap/events'); - -const ALL_EVENTS = Object.keys(events) - .map(key => events[key]) - .filter(Ctor => Ctor.eventType) - .map(Ctor => Ctor.eventType); +const ALL_EVENTS = new Set([ + 'connectionPoolCreated', + 'connectionPoolClosed', + 'connectionCreated', + 'connectionReady', + 'connectionClosed', + 'connectionCheckOutStarted', + 'connectionCheckOutFailed', + 'connectionCheckedOut', + 'connectionCheckedIn', + 'connectionPoolCleared' +]); function promisify(fn) { return function() { From 02a4f89fed7c6f83015c929192edaefcbb01eaac Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 25 Nov 2019 08:33:36 -0500 Subject: [PATCH 008/130] chore: move `makeCounter` to utils for reuse --- lib/cmap/connection_pool.js | 2 +- lib/cmap/counter.js | 12 ------------ lib/utils.js | 12 +++++++++++- 3 files changed, 12 insertions(+), 14 deletions(-) delete mode 100644 lib/cmap/counter.js diff --git 
a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 2df25117642..1830bbcb739 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -1,7 +1,7 @@ 'use strict'; const EventEmitter = require('events').EventEmitter; -const makeCounter = require('./counter').makeCounter; +const makeCounter = require('../util').makeCounter; const Connection = require('./connection').CMAPConnection; const WaitQueue = require('./wait_queue').WaitQueue; const ConnectionManager = require('./connection_manager').ConnectionManager; diff --git a/lib/cmap/counter.js b/lib/cmap/counter.js deleted file mode 100644 index 9a2033257ea..00000000000 --- a/lib/cmap/counter.js +++ /dev/null @@ -1,12 +0,0 @@ -'use strict'; - -function* makeCounter(seed) { - let count = seed || 0; - while (true) { - const newCount = count; - count += 1; - yield newCount; - } -} - -module.exports = { makeCounter }; diff --git a/lib/utils.js b/lib/utils.js index 7140e9ba0b1..052bcabdade 100644 --- a/lib/utils.js +++ b/lib/utils.js @@ -685,6 +685,15 @@ class MongoDBNamespace { } } +function* makeCounter(seed) { + let count = seed || 0; + while (true) { + const newCount = count; + count += 1; + yield newCount; + } +} + module.exports = { filterOptions, mergeOptions, @@ -713,5 +722,6 @@ module.exports = { SUPPORTS, MongoDBNamespace, resolveReadPreference, - emitDeprecationWarning + emitDeprecationWarning, + makeCounter }; From a89fe2a2b63b29b5a0e79274c4cbb3af60a86ab3 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 9 Dec 2019 21:57:12 -0500 Subject: [PATCH 009/130] chore: rename connection pool tests --- test/unit/cmap/{spec_tests.js => connection_pool.test.js} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test/unit/cmap/{spec_tests.js => connection_pool.test.js} (100%) diff --git a/test/unit/cmap/spec_tests.js b/test/unit/cmap/connection_pool.test.js similarity index 100% rename from test/unit/cmap/spec_tests.js rename to test/unit/cmap/connection_pool.test.js From 9fe2eb111bf17c803605cd73a1316061ee632927 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 9 Dec 2019 21:57:49 -0500 Subject: [PATCH 010/130] test: reuse `loadSpecTests` helper --- test/unit/cmap/connection_pool.test.js | 397 ++++++++++++------------- 1 file changed, 187 insertions(+), 210 deletions(-) diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index e78d612f588..006aa0a4052 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -1,13 +1,14 @@ 'use strict'; -const fs = require('fs'); -const path = require('path'); -const expect = require('chai').expect; - -require('chai').use(require('../../match_spec').default); -const ConnectionPool = require('../../../lib/cmap').ConnectionPool; +const Promise = require('bluebird'); +const loadSpecTests = require('../../spec').loadSpecTests; +const ConnectionPool = require('../../../lib/cmap/connection_pool').ConnectionPool; const EventEmitter = require('events').EventEmitter; +const chai = require('chai'); +chai.use(require('../../functional/spec-runner/matcher').default); +const expect = chai.expect; + class Connection { constructor(options) { options = options || {}; @@ -68,7 +69,7 @@ class Connection { destroy() {} } -const ALL_EVENTS = new Set([ +const ALL_POOL_EVENTS = new Set([ 'connectionPoolCreated', 'connectionPoolClosed', 'connectionCreated', @@ -81,250 +82,226 @@ const ALL_EVENTS = new Set([ 'connectionPoolCleared' ]); -function promisify(fn) { - return function() { - const args = 
Array.from(arguments); - return new Promise((resolve, reject) => { - const cb = (err, value) => { - if (err) { - return reject(err); - } - return resolve(value); - }; - fn.apply(this, args.concat([cb])); - }); - }; -} - const PROMISIFIED_POOL_FUNCTIONS = { - checkOut: promisify(ConnectionPool.prototype.checkOut), - checkIn: promisify(ConnectionPool.prototype.checkIn), - clear: promisify(ConnectionPool.prototype.clear), - close: promisify(ConnectionPool.prototype.close) + checkOut: Promise.promisify(ConnectionPool.prototype.checkOut), + checkIn: Promise.promisify(ConnectionPool.prototype.checkIn), + clear: Promise.promisify(ConnectionPool.prototype.clear), + close: Promise.promisify(ConnectionPool.prototype.close) }; function destroyPool(pool) { - return new Promise(r => pool.destroy(r)).then(() => { - ALL_EVENTS.forEach(ev => pool.removeAllListeners(ev)); + return new Promise(resolve => { + ALL_POOL_EVENTS.forEach(ev => pool.removeAllListeners(ev)); + resolve(); }); } -describe('Connection Pool (spec)', function() { - const threads = new Map(); - const connections = new Map(); - const poolEvents = []; - const poolEventsEventEmitter = new EventEmitter(); - let pool = undefined; - - afterEach(() => { - const p = pool ? destroyPool(pool) : Promise.resolve(); - return p.then(() => { - pool = undefined; - threads.clear(); - connections.clear(); - poolEvents.length = 0; - poolEventsEventEmitter.removeAllListeners(); - }); - }); - - function createPool(options) { - const address = 'localhost:27017'; - options = Object.assign({}, options, { Connection, address }); - - pool = new ConnectionPool(options); - ALL_EVENTS.forEach(ev => { - pool.on(ev, x => { - poolEvents.push(x); - poolEventsEventEmitter.emit('poolEvent'); +describe('Connection Pool', function() { + describe('spec tests', function() { + const threads = new Map(); + const connections = new Map(); + const poolEvents = []; + const poolEventsEventEmitter = new EventEmitter(); + let pool = undefined; + + function createPool(options) { + const address = 'localhost:27017'; + options = Object.assign({}, options, { Connection, address }); + + pool = new ConnectionPool(options); + ALL_POOL_EVENTS.forEach(ev => { + pool.on(ev, x => { + poolEvents.push(x); + poolEventsEventEmitter.emit('poolEvent'); + }); }); - }); - } + } - function getThread(name) { - let thread = threads.get(name); - if (!thread) { - thread = new Thread(); - threads.set(name, thread); + function getThread(name) { + let thread = threads.get(name); + if (!thread) { + thread = new Thread(); + threads.set(name, thread); + } + + return thread; } - return thread; - } + const OPERATION_FUNCTIONS = { + checkOut: function(op) { + return PROMISIFIED_POOL_FUNCTIONS.checkOut.call(pool).then(connection => { + if (op.label != null) { + connections.set(op.label, connection); + } + }); + }, + checkIn: function(op) { + const connection = connections.get(op.connection); + const force = op.force; - const OPERATION_FUNCTIONS = { - checkOut: function(op) { - return PROMISIFIED_POOL_FUNCTIONS.checkOut.call(pool).then(connection => { - if (op.label != null) { - connections.set(op.label, connection); + if (!connection) { + throw new Error(`Attempted to release non-existient connection ${op.connection}`); + } + + return PROMISIFIED_POOL_FUNCTIONS.checkIn.call(pool, connection, force); + }, + clear: function() { + return PROMISIFIED_POOL_FUNCTIONS.clear.call(pool); + }, + close: function() { + return PROMISIFIED_POOL_FUNCTIONS.close.call(pool); + }, + wait: function(options) { + const ms = options.ms; + 
return new Promise(r => setTimeout(r, ms)); + }, + start: function(options) { + const target = options.target; + const thread = getThread(target); + thread.start(); + }, + waitForThread: function(options) { + const name = options.name; + const target = options.target; + const suppressError = options.suppressError; + + const threadObj = threads.get(target); + + if (!threadObj) { + throw new Error(`Attempted to run op ${name} on non-existent thread ${target}`); } - }); - }, - checkIn: function(op) { - const connection = connections.get(op.connection); - const force = op.force; - if (!connection) { - throw new Error(`Attempted to release non-existient connection ${op.connection}`); + return threadObj.finish().catch(e => { + if (!suppressError) { + throw e; + } + }); + }, + waitForEvent: function(options) { + const event = options.event; + const count = options.count; + return new Promise(resolve => { + function run() { + if (poolEvents.filter(ev => ev.type === event).length >= count) { + return resolve(); + } + + poolEventsEventEmitter.once('poolEvent', run); + } + run(); + }); } + }; - return PROMISIFIED_POOL_FUNCTIONS.checkIn.call(pool, connection, force); - }, - clear: function() { - return PROMISIFIED_POOL_FUNCTIONS.clear.call(pool); - }, - close: function() { - return PROMISIFIED_POOL_FUNCTIONS.close.call(pool); - }, - wait: function(options) { - const ms = options.ms; - return new Promise(r => setTimeout(r, ms)); - }, - start: function(options) { - const target = options.target; - const thread = getThread(target); - thread.start(); - }, - waitForThread: function(options) { - const name = options.name; - const target = options.target; - const suppressError = options.suppressError; - - const threadObj = threads.get(target); - - if (!threadObj) { - throw new Error(`Attempted to run op ${name} on non-existent thread ${target}`); + class Thread { + constructor() { + this._killed = false; + this._error = undefined; + this._promise = new Promise(resolve => { + this.start = () => setTimeout(resolve); + }); } - return threadObj.finish().catch(e => { - if (!suppressError) { - throw e; + run(op) { + if (this._killed || this._error) { + return; } - }); - }, - waitForEvent: function(options) { - const event = options.event; - const count = options.count; - return new Promise(resolve => { - function run() { - if (poolEvents.filter(ev => ev.type === event).length >= count) { - return resolve(); - } + this._promise = this._promise + .then(() => this._runOperation(op)) + .catch(e => (this._error = e)); + } - poolEventsEventEmitter.once('poolEvent', run); + _runOperation(op) { + const operationFn = OPERATION_FUNCTIONS[op.name]; + if (!operationFn) { + throw new Error(`Invalid command ${op.name}`); } - run(); - }); - } - }; - - class Thread { - constructor() { - this._killed = false; - this._error = undefined; - this._promise = new Promise(resolve => { - this.start = () => setTimeout(resolve); - }); - } - run(op) { - if (this._killed || this._error) { - return; + return Promise.resolve() + .then(() => operationFn(op, this)) + .then(() => new Promise(r => setTimeout(r))); } - this._promise = this._promise - .then(() => this._runOperation(op)) - .catch(e => (this._error = e)); - } - _runOperation(op) { - const operationFn = OPERATION_FUNCTIONS[op.name]; - if (!operationFn) { - throw new Error(`Invalid command ${op.name}`); + finish() { + this._killed = true; + return this._promise.then(() => { + if (this._error) { + throw this._error; + } + }); } - - return Promise.resolve() - .then(() => operationFn(op, 
this)) - .then(() => new Promise(r => setTimeout(r))); } - finish() { - this._killed = true; - return this._promise.then(() => { - if (this._error) { - throw this._error; - } + afterEach(() => { + const p = pool ? destroyPool(pool) : Promise.resolve(); + return p.then(() => { + pool = undefined; + threads.clear(); + connections.clear(); + poolEvents.length = 0; + poolEventsEventEmitter.removeAllListeners(); }); - } - } - - const specPath = path.join(__dirname, '../../spec/connection-monitoring-and-pooling'); - const testFiles = fs - .readdirSync(specPath) - .filter(x => x.indexOf('.json') !== -1) - .map(x => [x, fs.readFileSync(path.join(specPath, x), 'utf8')]) - .map(x => [path.basename(x[0], '.json'), JSON.parse(x[1])]) - .filter(testFile => testFile[1].style === 'unit') - .filter(testFile => testFile[1].version === 1); - - testFiles.forEach(testFile => { - const singleTest = testFile[1]; - const itFn = singleTest.only ? it.only : it; + }); - itFn(singleTest.description, function() { - const operations = singleTest.operations; - const expectedEvents = singleTest.events || []; - const ignoreEvents = singleTest.ignore || []; - const expectedError = singleTest.error; - const poolOptions = singleTest.poolOptions || {}; + loadSpecTests('connection-monitoring-and-pooling').forEach(test => { + it(test.description, function() { + const operations = test.operations; + const expectedEvents = test.events || []; + const ignoreEvents = test.ignore || []; + const expectedError = test.error; + const poolOptions = test.poolOptions || {}; - let actualError; + let actualError; - const MAIN_THREAD_KEY = Symbol('Main Thread'); - const mainThread = new Thread(); - threads.set(MAIN_THREAD_KEY, mainThread); - mainThread.start(); + const MAIN_THREAD_KEY = Symbol('Main Thread'); + const mainThread = new Thread(); + threads.set(MAIN_THREAD_KEY, mainThread); + mainThread.start(); - createPool(poolOptions); + createPool(poolOptions); - let basePromise = Promise.resolve(); + let basePromise = Promise.resolve(); - for (let idx in operations) { - const op = operations[idx]; + for (let idx in operations) { + const op = operations[idx]; - const threadKey = op.thread || MAIN_THREAD_KEY; - const thread = getThread(threadKey); + const threadKey = op.thread || MAIN_THREAD_KEY; + const thread = getThread(threadKey); - basePromise = basePromise.then(() => { - if (!thread) { - throw new Error(`Invalid thread ${threadKey}`); - } + basePromise = basePromise.then(() => { + if (!thread) { + throw new Error(`Invalid thread ${threadKey}`); + } - return Promise.resolve() - .then(() => thread.run(op)) - .then(() => new Promise(r => setTimeout(r))); - }); - } + return Promise.resolve() + .then(() => thread.run(op)) + .then(() => new Promise(r => setTimeout(r))); + }); + } - return basePromise - .then(() => mainThread.finish()) - .catch(e => (actualError = e)) - .then(() => { - const actualEvents = poolEvents.filter(ev => ignoreEvents.indexOf(ev.type) < 0); - - if (expectedError) { - if (!actualError) { - expect(actualError).to.matchSpec(expectedError); - } else { - const ae = Object.assign({}, actualError, { message: actualError.message }); - expect(ae).to.matchSpec(expectedError); + return basePromise + .then(() => mainThread.finish()) + .catch(e => (actualError = e)) + .then(() => { + const actualEvents = poolEvents.filter(ev => ignoreEvents.indexOf(ev.type) < 0); + + if (expectedError) { + if (!actualError) { + expect(actualError).to.matchMongoSpec(expectedError); + } else { + const ae = Object.assign({}, actualError, { message: 
actualError.message }); + expect(ae).to.matchMongoSpec(expectedError); + } + } else if (actualError) { + throw actualError; } - } else if (actualError) { - throw actualError; - } - expectedEvents.forEach((expected, index) => { - const actual = actualEvents[index]; - expect(actual).to.matchSpec(expected); + expectedEvents.forEach((expected, index) => { + const actual = actualEvents[index]; + expect(actual).to.matchMongoSpec(expected); + }); }); - }); + }); }); }); }); From 7e3f58e4888da5db89b2fe94657a427b432b8cad Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 9 Dec 2019 22:18:44 -0500 Subject: [PATCH 011/130] refactor: move most private implementation outside of pool class --- lib/cmap/connection_pool.js | 118 ++++++++++++++++++------------------ 1 file changed, 59 insertions(+), 59 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 1830bbcb739..243c1c8690f 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -1,7 +1,7 @@ 'use strict'; const EventEmitter = require('events').EventEmitter; -const makeCounter = require('../util').makeCounter; +const makeCounter = require('../utils').makeCounter; const Connection = require('./connection').CMAPConnection; const WaitQueue = require('./wait_queue').WaitQueue; const ConnectionManager = require('./connection_manager').ConnectionManager; @@ -23,19 +23,20 @@ const ConnectionCheckedOutEvent = events.ConnectionCheckedOutEvent; const ConnectionCheckedInEvent = events.ConnectionCheckedInEvent; const PoolClearedEvent = events.PoolClearedEvent; -const VALID_OPTIONS = [ +const VALID_POOL_OPTIONS = new Set([ 'maxPoolSize', 'minPoolSize', 'maxIdleTimeMS', 'waitQueueTimeoutMS', 'enableConnectionMonitoring' -]; +]); -function getSpecOptions(options) { - const newOptions = VALID_OPTIONS.reduce((obj, key) => { +function pluckSpecOptions(options) { + const newOptions = Array.from(VALID_POOL_OPTIONS).reduce((obj, key) => { if (options.hasOwnProperty(key)) { obj[key] = options[key]; } + return obj; }, {}); @@ -47,7 +48,7 @@ class ConnectionPool extends EventEmitter { super(); options = options || {}; - this.options = getSpecOptions(options); + this.options = pluckSpecOptions(options); const counter = makeCounter(1); const connections = new ConnectionManager(); @@ -84,7 +85,7 @@ class ConnectionPool extends EventEmitter { process.nextTick(() => { this.emit('connectionPoolCreated', new PoolCreatedEvent(this)); - this._satisfyMinPoolSize(); + satisfyMinPoolSize(this); }); } @@ -98,7 +99,6 @@ class ConnectionPool extends EventEmitter { } const self = this; - this.s.waitQueue.enter(function() { const args = [callback].concat(Array.from(arguments)); self._acquisitionHandler.apply(self, args); @@ -116,7 +116,7 @@ class ConnectionPool extends EventEmitter { } const closed = this.s.isClosed; - const stale = this._connectionIsStale(connection); + const stale = connectionIsStale(this, connection); const willDestroy = !!(force || closed || stale); // Properly adjust state of connection @@ -129,7 +129,7 @@ class ConnectionPool extends EventEmitter { if (willDestroy) { const reason = force ? 'force' : closed ? 
'poolClosed' : 'stale'; - this._destroyConnection(connection, reason); + destroyConnection(this, connection, reason); } callback(null); @@ -149,7 +149,7 @@ class ConnectionPool extends EventEmitter { this.s.isClosed = true; this.s.waitQueue.destroy(); while (this.availableConnectionCount) { - this._destroyConnection(this.s.connections.getAvailable(), 'poolClosed'); + destroyConnection(this, this.s.connections.getAvailable(), 'poolClosed'); } this.emit('connectionPoolClosed', new PoolClosedEvent(this)); @@ -195,58 +195,18 @@ class ConnectionPool extends EventEmitter { return callback(err, connection); } - _satisfyMinPoolSize() { - const minPoolSize = this.s.minPoolSize; - if (this.totalConnectionCount < minPoolSize) { - this._createConnection(() => this._satisfyMinPoolSize()); - } - } - _propagateError() { return; } - _createConnection(callback) { - const connection = new this.s.Connection({ - id: this.s.counter.next().value, - generation: this.s.generation, - maxIdleTimeMS: this.s.maxIdleTimeMS, - address: this.s.address - }); - - this.s.connections.add(connection); - this.s.connections.makeAvailable(connection); - this.emit('connectionCreated', new ConnectionCreatedEvent(this, connection)); - - connection.connect(err => { - if (err) { - this.s.connections.remove(connection); - return this._propagateError(err); - } - - connection.makeReadyToUse(); - this.emit('connectionReady', new ConnectionReadyEvent(this, connection)); - }); - - if (callback) { - callback(null, connection); - } - } - - _destroyConnection(connection, reason) { - this.s.connections.remove(connection); - this.emit('connectionClosed', new ConnectionClosedEvent(this, connection, reason)); - setTimeout(() => connection.destroy()); - } - _tryToGetConnection(callback) { const maxPoolSize = this.s.maxPoolSize; if (this.availableConnectionCount) { const connection = this.s.connections.getAvailable(); - const isStale = this._connectionIsStale(connection); - const isIdle = this._connectionIsIdle(connection); + const isStale = connectionIsStale(this, connection); + const isIdle = connectionIsIdle(this, connection); if (isStale || isIdle) { - this._destroyConnection(connection, isStale ? 'stale' : 'idle'); + destroyConnection(this, connection, isStale ? 
'stale' : 'idle'); return setTimeout(() => this._tryToGetConnection(callback)); } @@ -254,21 +214,61 @@ class ConnectionPool extends EventEmitter { } if (maxPoolSize <= 0 || this.totalConnectionCount < maxPoolSize) { - return this._createConnection(() => this._tryToGetConnection(callback)); + return createConnection(this, () => this._tryToGetConnection(callback)); } return callback(null, null); } +} - _connectionIsStale(connection) { - return connection.generation !== this.s.generation; +function satisfyMinPoolSize(pool) { + const minPoolSize = pool.s.minPoolSize; + if (pool.totalConnectionCount < minPoolSize) { + createConnection(pool, () => satisfyMinPoolSize(pool)); } +} + +function connectionIsStale(pool, connection) { + return connection.generation !== pool.s.generation; +} - _connectionIsIdle(connection) { - return !!(this.s.maxIdleTimeMS && connection.timeIdle() > this.s.maxIdleTimeMS); +function connectionIsIdle(pool, connection) { + return !!(pool.s.maxIdleTimeMS && connection.timeIdle() > pool.s.maxIdleTimeMS); +} + +function createConnection(pool, callback) { + const connection = new pool.s.Connection({ + id: pool.s.counter.next().value, + generation: pool.s.generation, + maxIdleTimeMS: pool.s.maxIdleTimeMS, + address: pool.s.address + }); + + pool.s.connections.add(connection); + pool.s.connections.makeAvailable(connection); + pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); + + connection.connect(err => { + if (err) { + pool.s.connections.remove(connection); + return pool._propagateError(err); + } + + connection.makeReadyToUse(); + pool.emit('connectionReady', new ConnectionReadyEvent(pool, connection)); + }); + + if (callback) { + callback(null, connection); } } +function destroyConnection(pool, connection, reason) { + pool.s.connections.remove(connection); + pool.emit('connectionClosed', new ConnectionClosedEvent(pool, connection, reason)); + setTimeout(() => connection.destroy()); +} + module.exports = { ConnectionPool }; From 43e7d1d5486d3efb2cf23f04b8c3b45b9240ed76 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 9 Dec 2019 22:19:25 -0500 Subject: [PATCH 012/130] chore: remove index for cmap module --- lib/cmap/index.js | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 lib/cmap/index.js diff --git a/lib/cmap/index.js b/lib/cmap/index.js deleted file mode 100644 index 35e961382ee..00000000000 --- a/lib/cmap/index.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; - -const ConnectionPool = require('./connection_pool').ConnectionPool; - -module.exports = { ConnectionPool }; From 32710792a7c1d4a71aebc0700f161892d7e46635 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 10 Dec 2019 12:19:41 -0500 Subject: [PATCH 013/130] refactor: merge `ConnectionManager` into `ConnectionPool` --- lib/cmap/connection_manager.js | 45 ------------------------------- lib/cmap/connection_pool.js | 48 ++++++++++++++++++++-------------- 2 files changed, 29 insertions(+), 64 deletions(-) delete mode 100644 lib/cmap/connection_manager.js diff --git a/lib/cmap/connection_manager.js b/lib/cmap/connection_manager.js deleted file mode 100644 index 1fed9433ed6..00000000000 --- a/lib/cmap/connection_manager.js +++ /dev/null @@ -1,45 +0,0 @@ -'use strict'; - -class ConnectionManager { - constructor() { - this._totalConnections = new Set(); - this._availableConnections = new Set(); - } - - get totalConnectionCount() { - return this._totalConnections.size; - } - - get availableConnectionCount() { - return this._availableConnections.size; - } - - 
add(connection) { - this._totalConnections.add(connection); - } - - has(connection) { - return this._totalConnections.has(connection); - } - - remove(connections) { - this._availableConnections.delete(connections); - this._totalConnections.delete(connections); - } - - makeAvailable(connection) { - this._availableConnections.add(connection); - } - - markInUse(connection) { - this._availableConnections.delete(connection); - } - - getAvailable() { - const connection = this._availableConnections.values().next().value; - this._availableConnections.delete(connection); - return connection; - } -} - -module.exports = { ConnectionManager }; diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 243c1c8690f..e249da60d93 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -4,7 +4,6 @@ const EventEmitter = require('events').EventEmitter; const makeCounter = require('../utils').makeCounter; const Connection = require('./connection').CMAPConnection; const WaitQueue = require('./wait_queue').WaitQueue; -const ConnectionManager = require('./connection_manager').ConnectionManager; const errors = require('./errors'); const PoolClosedError = errors.PoolClosedError; @@ -23,6 +22,9 @@ const ConnectionCheckedOutEvent = events.ConnectionCheckedOutEvent; const ConnectionCheckedInEvent = events.ConnectionCheckedInEvent; const PoolClearedEvent = events.PoolClearedEvent; +const kAvailable = Symbol('available'); +const kConnections = Symbol('connections'); + const VALID_POOL_OPTIONS = new Set([ 'maxPoolSize', 'minPoolSize', @@ -31,6 +33,12 @@ const VALID_POOL_OPTIONS = new Set([ 'enableConnectionMonitoring' ]); +function take(set) { + const connection = set.values().next().value; + set.delete(connection); + return connection; +} + function pluckSpecOptions(options) { const newOptions = Array.from(VALID_POOL_OPTIONS).reduce((obj, key) => { if (options.hasOwnProperty(key)) { @@ -51,20 +59,19 @@ class ConnectionPool extends EventEmitter { this.options = pluckSpecOptions(options); const counter = makeCounter(1); - const connections = new ConnectionManager(); const waitQueue = new WaitQueue({ pool: this, waitQueueTimeoutMS: typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 0 }); + this[kConnections] = new Set(); + this[kAvailable] = new Set(); + this.s = { // Wait queue that handles queueing for connections waitQueue, - // Connection Manager that handles state of various connections - connections, - // Counter that increments for each new connection. 
counter, @@ -111,7 +118,7 @@ class ConnectionPool extends EventEmitter { force = false; } - if (!this.s.connections.has(connection)) { + if (!this[kConnections].has(connection)) { return callback(new PoolReleaseForeignConnectionError(this, connection)); } @@ -122,7 +129,7 @@ class ConnectionPool extends EventEmitter { // Properly adjust state of connection if (!willDestroy) { connection.makeReadyToUse(); - this.s.connections.makeAvailable(connection); + this[kAvailable].add(connection); } this.emit('connectionCheckedIn', new ConnectionCheckedInEvent(this, connection)); @@ -148,9 +155,11 @@ class ConnectionPool extends EventEmitter { this.s.isClosed = true; this.s.waitQueue.destroy(); - while (this.availableConnectionCount) { - destroyConnection(this, this.s.connections.getAvailable(), 'poolClosed'); - } + + this[kAvailable].forEach(conn => { + destroyConnection(this, conn, 'poolClosed'); + }); + this[kAvailable].clear(); this.emit('connectionPoolClosed', new PoolClosedEvent(this)); callback(); @@ -167,11 +176,11 @@ class ConnectionPool extends EventEmitter { // Accessors required by spec get totalConnectionCount() { - return this.s.connections.totalConnectionCount; + return this[kConnections].size; } get availableConnectionCount() { - return this.s.connections.availableConnectionCount; + return this[kAvailable].size; } get address() { @@ -181,7 +190,7 @@ class ConnectionPool extends EventEmitter { // Private Helpers _acquisitionHandler(callback, err, connection) { if (!err) { - this.s.connections.markInUse(connection); + this[kAvailable].delete(connection); this.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(this, connection)); return callback(null, connection); } @@ -202,7 +211,7 @@ class ConnectionPool extends EventEmitter { _tryToGetConnection(callback) { const maxPoolSize = this.s.maxPoolSize; if (this.availableConnectionCount) { - const connection = this.s.connections.getAvailable(); + const connection = take(this[kAvailable]); const isStale = connectionIsStale(this, connection); const isIdle = connectionIsIdle(this, connection); if (isStale || isIdle) { @@ -244,13 +253,13 @@ function createConnection(pool, callback) { address: pool.s.address }); - pool.s.connections.add(connection); - pool.s.connections.makeAvailable(connection); + pool[kConnections].add(connection); + pool[kAvailable].add(connection); pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); connection.connect(err => { if (err) { - pool.s.connections.remove(connection); + pool[kConnections].delete(connection); return pool._propagateError(err); } @@ -264,9 +273,10 @@ function createConnection(pool, callback) { } function destroyConnection(pool, connection, reason) { - pool.s.connections.remove(connection); + pool[kConnections].delete(connection); pool.emit('connectionClosed', new ConnectionClosedEvent(pool, connection, reason)); - setTimeout(() => connection.destroy()); + + process.nextTick(() => connection.destroy()); } module.exports = { From 40821ad1c69d1c196a963d2cba007ba04c7eabdb Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 10 Dec 2019 19:18:50 -0500 Subject: [PATCH 014/130] refactor: run ensureMinPoolSize in a background loop Ensuring the minimum pool size should be run periodically to guarantee there are always a minimum number of connections to work with. 
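As an illustrative sketch only (not part of the patch itself; the names mirror the diff below, and `createConnection` plus the `kMinPoolSizeTimer` symbol are the pool's own internals), the maintenance loop looks roughly like:

    function ensureMinPoolSize(pool) {
      // open connections until the configured floor is reached
      for (let i = pool.totalConnectionCount; i < pool.s.minPoolSize; ++i) {
        createConnection(pool);
      }

      // re-arm the check; storing the timer handle lets close() cancel the loop
      pool[kMinPoolSizeTimer] = setTimeout(() => ensureMinPoolSize(pool), 1000);
    }

Because the timer handle is kept on the pool, closing the pool can clear it and stop the background loop.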
--- lib/cmap/connection_pool.js | 17 +++++++++++++---- test/unit/cmap/connection_pool.test.js | 1 + 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index e249da60d93..2a5da1f774a 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -24,6 +24,7 @@ const PoolClearedEvent = events.PoolClearedEvent; const kAvailable = Symbol('available'); const kConnections = Symbol('connections'); +const kMinPoolSizeTimer = Symbol('minPoolSizeTimer'); const VALID_POOL_OPTIONS = new Set([ 'maxPoolSize', @@ -67,6 +68,7 @@ class ConnectionPool extends EventEmitter { this[kConnections] = new Set(); this[kAvailable] = new Set(); + this[kMinPoolSizeTimer] = undefined; this.s = { // Wait queue that handles queueing for connections @@ -92,7 +94,7 @@ class ConnectionPool extends EventEmitter { process.nextTick(() => { this.emit('connectionPoolCreated', new PoolCreatedEvent(this)); - satisfyMinPoolSize(this); + ensureMinPoolSize(this); }); } @@ -153,6 +155,10 @@ class ConnectionPool extends EventEmitter { return callback(); } + if (this[kMinPoolSizeTimer]) { + clearTimeout(this[kMinPoolSizeTimer]); + } + this.s.isClosed = true; this.s.waitQueue.destroy(); @@ -170,6 +176,7 @@ class ConnectionPool extends EventEmitter { if (typeof this.s.counter.return === 'function') { this.s.counter.return(); } + callback(); }); } @@ -230,11 +237,13 @@ class ConnectionPool extends EventEmitter { } } -function satisfyMinPoolSize(pool) { +function ensureMinPoolSize(pool) { const minPoolSize = pool.s.minPoolSize; - if (pool.totalConnectionCount < minPoolSize) { - createConnection(pool, () => satisfyMinPoolSize(pool)); + for (let i = pool.totalConnectionCount; i < minPoolSize; ++i) { + createConnection(pool); } + + pool[kMinPoolSizeTimer] = setTimeout(() => ensureMinPoolSize(pool), 1000); } function connectionIsStale(pool, connection) { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 006aa0a4052..669c76037f4 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -92,6 +92,7 @@ const PROMISIFIED_POOL_FUNCTIONS = { function destroyPool(pool) { return new Promise(resolve => { ALL_POOL_EVENTS.forEach(ev => pool.removeAllListeners(ev)); + pool.destroy(resolve); resolve(); }); } From a6e0ceedef51c3410f8b683e4eb0907a439f06f0 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 10 Dec 2019 19:26:53 -0500 Subject: [PATCH 015/130] refactor: `s.isClosed` => `closed` --- lib/cmap/connection_pool.js | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 2a5da1f774a..125e3925790 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -69,6 +69,7 @@ class ConnectionPool extends EventEmitter { this[kConnections] = new Set(); this[kAvailable] = new Set(); this[kMinPoolSizeTimer] = undefined; + this.closed = false; this.s = { // Wait queue that handles queueing for connections @@ -88,7 +89,6 @@ class ConnectionPool extends EventEmitter { // State variables that do not fall into any other category pid: process.pid, generation: 0, - isClosed: false, address: options.address }; @@ -102,7 +102,7 @@ class ConnectionPool extends EventEmitter { checkOut(callback) { this.emit('connectionCheckOutStarted', new ConnectionCheckOutStarted(this)); - if (this.s.isClosed) { + if (this.closed) { this.emit('connectionCheckOutFailed', new ConnectionCheckOutFailed(this, 
'poolClosed')); return callback(new PoolClosedError(this)); } @@ -124,7 +124,7 @@ class ConnectionPool extends EventEmitter { return callback(new PoolReleaseForeignConnectionError(this, connection)); } - const closed = this.s.isClosed; + const closed = this.closed; const stale = connectionIsStale(this, connection); const willDestroy = !!(force || closed || stale); @@ -151,7 +151,7 @@ class ConnectionPool extends EventEmitter { } close(callback) { - if (this.s.isClosed) { + if (this.closed) { return callback(); } @@ -159,7 +159,7 @@ class ConnectionPool extends EventEmitter { clearTimeout(this[kMinPoolSizeTimer]); } - this.s.isClosed = true; + this.closed = true; this.s.waitQueue.destroy(); this[kAvailable].forEach(conn => { From 43eb2dbbc600e86632e73a57c8ea19ff0933bde7 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 09:38:19 -0500 Subject: [PATCH 016/130] refactor: use events instead of custom wait queue implementation --- lib/cmap/connection_pool.js | 119 +++++++++++++++++---------------- lib/cmap/wait_queue.js | 130 ------------------------------------ 2 files changed, 63 insertions(+), 186 deletions(-) delete mode 100644 lib/cmap/wait_queue.js diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 125e3925790..c8ad0906c2c 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -3,7 +3,11 @@ const EventEmitter = require('events').EventEmitter; const makeCounter = require('../utils').makeCounter; const Connection = require('./connection').CMAPConnection; -const WaitQueue = require('./wait_queue').WaitQueue; +const calculateDurationInMs = require('../core/utils').calculateDurationInMs; + +const common = require('../core/sdam/common'); +const drainTimerQueue = common.drainTimerQueue; +const clearAndRemoveTimerFrom = common.clearAndRemoveTimerFrom; const errors = require('./errors'); const PoolClosedError = errors.PoolClosedError; @@ -25,6 +29,7 @@ const PoolClearedEvent = events.PoolClearedEvent; const kAvailable = Symbol('available'); const kConnections = Symbol('connections'); const kMinPoolSizeTimer = Symbol('minPoolSizeTimer'); +const kAcquireTimers = Symbol('acquireTimers'); const VALID_POOL_OPTIONS = new Set([ 'maxPoolSize', @@ -60,21 +65,13 @@ class ConnectionPool extends EventEmitter { this.options = pluckSpecOptions(options); const counter = makeCounter(1); - const waitQueue = new WaitQueue({ - pool: this, - waitQueueTimeoutMS: - typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 0 - }); - this[kConnections] = new Set(); this[kAvailable] = new Set(); this[kMinPoolSizeTimer] = undefined; + this[kAcquireTimers] = new Set(); this.closed = false; this.s = { - // Wait queue that handles queueing for connections - waitQueue, - // Counter that increments for each new connection. counter, @@ -82,6 +79,8 @@ class ConnectionPool extends EventEmitter { maxPoolSize: typeof options.maxPoolSize === 'number' ? options.maxPoolSize : 100, minPoolSize: typeof options.minPoolSize === 'number' ? options.minPoolSize : 0, maxIdleTimeMS: typeof options.maxIdleTimeMS === 'number' ? options.maxIdleTimeMS : 0, + waitQueueTimeoutMS: + typeof options.waitQueueTimeoutMS === 'number' ? 
options.waitQueueTimeoutMS : 10000, // Allows us to override the Connection constructor for testing purposes Connection: options.Connection || Connection, @@ -104,14 +103,60 @@ class ConnectionPool extends EventEmitter { if (this.closed) { this.emit('connectionCheckOutFailed', new ConnectionCheckOutFailed(this, 'poolClosed')); - return callback(new PoolClosedError(this)); + callback(new PoolClosedError(this)); + return; } - const self = this; - this.s.waitQueue.enter(function() { - const args = [callback].concat(Array.from(arguments)); - self._acquisitionHandler.apply(self, args); - }); + const pool = this; + const maxPoolSize = this.s.maxPoolSize; + const waitQueueTimeoutMS = this.s.waitQueueTimeoutMS; + + function attemptAcquire(start) { + const duration = calculateDurationInMs(start); + if (duration >= waitQueueTimeoutMS) { + callback(new WaitQueueTimeoutError(pool)); + return; + } + + while (pool.availableConnectionCount > 0) { + const connection = take(pool[kAvailable]); + const isStale = connectionIsStale(pool, connection); + const isIdle = connectionIsIdle(pool, connection); + if (!isStale && !isIdle) { + pool.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(pool, connection)); + callback(null, connection); + return; + } + + destroyConnection(pool, connection, isStale ? 'stale' : 'idle'); + } + + if (maxPoolSize <= 0 || pool.totalConnectionCount < maxPoolSize) { + createConnection(pool); + } + + const retryAcquire = () => { + pool.removeListener('connectionReady', retryAcquire); + pool.removeListener('connectionCheckedIn', retryAcquire); + + clearAndRemoveTimerFrom(acquireTimer, pool[kAcquireTimers]); + attemptAcquire(start); + }; + + const acquireTimer = setTimeout(() => { + pool.removeListener('connectionReady', retryAcquire); + pool.removeListener('connectionCheckedIn', retryAcquire); + + pool.emit('connectionCheckOutFailed', new ConnectionCheckOutFailed(pool, 'timeout')); + callback(new WaitQueueTimeoutError(pool)); + }, waitQueueTimeoutMS - duration); + + pool[kAcquireTimers].add(acquireTimer); + pool.once('connectionReady', retryAcquire); + pool.once('connectionCheckedIn', retryAcquire); + } + + attemptAcquire(process.hrtime()); } checkIn(connection, force, callback) { @@ -155,13 +200,12 @@ class ConnectionPool extends EventEmitter { return callback(); } + drainTimerQueue(this[kAcquireTimers]); if (this[kMinPoolSizeTimer]) { clearTimeout(this[kMinPoolSizeTimer]); } this.closed = true; - this.s.waitQueue.destroy(); - this[kAvailable].forEach(conn => { destroyConnection(this, conn, 'poolClosed'); }); @@ -195,46 +239,9 @@ class ConnectionPool extends EventEmitter { } // Private Helpers - _acquisitionHandler(callback, err, connection) { - if (!err) { - this[kAvailable].delete(connection); - this.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(this, connection)); - return callback(null, connection); - } - - let reason = 'unknown'; - if (err instanceof WaitQueueTimeoutError) { - reason = 'timeout'; - } - - this.emit('connectionCheckOutFailed', new ConnectionCheckOutFailed(this, reason)); - return callback(err, connection); - } - _propagateError() { return; } - - _tryToGetConnection(callback) { - const maxPoolSize = this.s.maxPoolSize; - if (this.availableConnectionCount) { - const connection = take(this[kAvailable]); - const isStale = connectionIsStale(this, connection); - const isIdle = connectionIsIdle(this, connection); - if (isStale || isIdle) { - destroyConnection(this, connection, isStale ? 
'stale' : 'idle'); - return setTimeout(() => this._tryToGetConnection(callback)); - } - - return callback(null, connection); - } - - if (maxPoolSize <= 0 || this.totalConnectionCount < maxPoolSize) { - return createConnection(this, () => this._tryToGetConnection(callback)); - } - - return callback(null, null); - } } function ensureMinPoolSize(pool) { @@ -243,7 +250,7 @@ function ensureMinPoolSize(pool) { createConnection(pool); } - pool[kMinPoolSizeTimer] = setTimeout(() => ensureMinPoolSize(pool), 1000); + pool[kMinPoolSizeTimer] = setTimeout(() => ensureMinPoolSize(pool), 10); } function connectionIsStale(pool, connection) { diff --git a/lib/cmap/wait_queue.js b/lib/cmap/wait_queue.js deleted file mode 100644 index 742feb0e67c..00000000000 --- a/lib/cmap/wait_queue.js +++ /dev/null @@ -1,130 +0,0 @@ -'use strict'; - -const errors = require('./errors'); -const PoolClosedError = errors.PoolClosedError; -const WaitQueueTimeoutError = errors.WaitQueueTimeoutError; - -class WaitQueueMember { - constructor(callback) { - this.callback = callback; - this.finished = false; - this.timeout = null; - } - - _finish(err, ret) { - if (!this.finished) { - this.finished = true; - process.nextTick(() => this.callback.call(null, err, ret)); - } - - if (this.timeout) { - clearTimeout(this.timeout); - } - } - - success(connection) { - this._finish(null, connection); - } - - failure(err) { - this._finish(err); - } - - setTimeout(cb, ms) { - this.timeout = setTimeout(cb, ms); - } -} - -class WaitQueue { - constructor(options) { - this._destroyed = false; - - this.timeoutMS = - typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 0; - this.periodMS = options.waitQueuePeriodMS || 10; - - this._pool = options.pool; - this._queue = []; - this._timeout = null; - } - - // Returns true if managed to enter wait queue - enter(callback) { - const item = new WaitQueueMember(callback); - this._queue.push(item); - if (this.timeoutMS > 0) { - item.setTimeout(() => this._timeoutHandler(item), this.timeoutMS); - } - - this._start(); - - return true; - } - - destroy() { - this._destroyed = true; - this._stop(); - this._clear(); - this._queue = undefined; - this._pool = undefined; - } - - _timeoutHandler(item) { - if (!item.finished) { - this._queue.splice(this._queue.indexOf(item), 1); - item.failure(new WaitQueueTimeoutError(this._pool)); - } - } - - _clear() { - while (this._queue && this._queue.length) { - const item = this._queue.shift(); - item.failure(new PoolClosedError(this._pool)); - } - } - - _start() { - if (!this._timeout) { - this._timeout = setTimeout(() => this._run()); - } - } - - _stop() { - if (this._timeout) { - clearTimeout(this._timeout); - this._timeout = undefined; - } - } - - _run() { - // If we're closed, destroy entire wait queue - if (this._destroyed) { - this._clear(); - } - - if (!(this._queue && this._queue.length)) { - return this._stop(); - } - - const item = this._queue.shift(); - if (item.finished) { - return setTimeout(() => this._run()); - } - - this._pool._tryToGetConnection((err, connection) => { - setTimeout(() => this._run()); - if (connection) { - connection.waitUntilConnect(err => { - if (err) { - return item.failure(connection); - } - item.success(connection); - }); - } else { - this._queue.unshift(item); - } - }); - } -} - -module.exports = { WaitQueue }; From 379c869ae688eda4b3011c9a5caa296b1eedaec6 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 10:56:57 -0500 Subject: [PATCH 017/130] refactor: use a single deque for managing pool 
connections --- lib/cmap/connection_pool.js | 38 +++++++++++++------------------------ 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index c8ad0906c2c..f109777e11c 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -1,5 +1,6 @@ 'use strict'; +const Denque = require('denque'); const EventEmitter = require('events').EventEmitter; const makeCounter = require('../utils').makeCounter; const Connection = require('./connection').CMAPConnection; @@ -12,7 +13,6 @@ const clearAndRemoveTimerFrom = common.clearAndRemoveTimerFrom; const errors = require('./errors'); const PoolClosedError = errors.PoolClosedError; const WaitQueueTimeoutError = errors.WaitQueueTimeoutError; -const PoolReleaseForeignConnectionError = errors.PoolReleaseForeignConnectionError; const events = require('./events'); const PoolCreatedEvent = events.PoolCreatedEvent; @@ -26,8 +26,8 @@ const ConnectionCheckedOutEvent = events.ConnectionCheckedOutEvent; const ConnectionCheckedInEvent = events.ConnectionCheckedInEvent; const PoolClearedEvent = events.PoolClearedEvent; -const kAvailable = Symbol('available'); const kConnections = Symbol('connections'); +const kPermits = Symbol('permits'); const kMinPoolSizeTimer = Symbol('minPoolSizeTimer'); const kAcquireTimers = Symbol('acquireTimers'); @@ -39,12 +39,6 @@ const VALID_POOL_OPTIONS = new Set([ 'enableConnectionMonitoring' ]); -function take(set) { - const connection = set.values().next().value; - set.delete(connection); - return connection; -} - function pluckSpecOptions(options) { const newOptions = Array.from(VALID_POOL_OPTIONS).reduce((obj, key) => { if (options.hasOwnProperty(key)) { @@ -65,8 +59,8 @@ class ConnectionPool extends EventEmitter { this.options = pluckSpecOptions(options); const counter = makeCounter(1); - this[kConnections] = new Set(); - this[kAvailable] = new Set(); + this[kConnections] = new Denque(); + this[kPermits] = typeof options.maxPoolSize === 'number' ? 
options.maxPoolSize : 100; this[kMinPoolSizeTimer] = undefined; this[kAcquireTimers] = new Set(); this.closed = false; @@ -119,7 +113,7 @@ class ConnectionPool extends EventEmitter { } while (pool.availableConnectionCount > 0) { - const connection = take(pool[kAvailable]); + const connection = pool[kConnections].pop(); const isStale = connectionIsStale(pool, connection); const isIdle = connectionIsIdle(pool, connection); if (!isStale && !isIdle) { @@ -165,10 +159,6 @@ class ConnectionPool extends EventEmitter { force = false; } - if (!this[kConnections].has(connection)) { - return callback(new PoolReleaseForeignConnectionError(this, connection)); - } - const closed = this.closed; const stale = connectionIsStale(this, connection); const willDestroy = !!(force || closed || stale); @@ -176,7 +166,7 @@ class ConnectionPool extends EventEmitter { // Properly adjust state of connection if (!willDestroy) { connection.makeReadyToUse(); - this[kAvailable].add(connection); + this[kConnections].push(connection); } this.emit('connectionCheckedIn', new ConnectionCheckedInEvent(this, connection)); @@ -206,10 +196,10 @@ class ConnectionPool extends EventEmitter { } this.closed = true; - this[kAvailable].forEach(conn => { + this[kConnections].toArray().forEach(conn => { destroyConnection(this, conn, 'poolClosed'); }); - this[kAvailable].clear(); + this[kConnections].clear(); this.emit('connectionPoolClosed', new PoolClosedEvent(this)); callback(); @@ -227,11 +217,11 @@ class ConnectionPool extends EventEmitter { // Accessors required by spec get totalConnectionCount() { - return this[kConnections].size; + return this[kConnections].length + (this.s.maxPoolSize - this[kPermits]); } get availableConnectionCount() { - return this[kAvailable].size; + return this[kConnections].length; } get address() { @@ -269,16 +259,16 @@ function createConnection(pool, callback) { address: pool.s.address }); - pool[kConnections].add(connection); - pool[kAvailable].add(connection); + pool[kPermits]--; pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); connection.connect(err => { if (err) { - pool[kConnections].delete(connection); + pool[kPermits]++; return pool._propagateError(err); } + pool[kConnections].push(connection); connection.makeReadyToUse(); pool.emit('connectionReady', new ConnectionReadyEvent(pool, connection)); }); @@ -289,9 +279,7 @@ function createConnection(pool, callback) { } function destroyConnection(pool, connection, reason) { - pool[kConnections].delete(connection); pool.emit('connectionClosed', new ConnectionClosedEvent(pool, connection, reason)); - process.nextTick(() => connection.destroy()); } From d48f85f1fbda1b75e1ed67495a873969e284c6fe Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 11:57:15 -0500 Subject: [PATCH 018/130] refactor: use `eachAsync` for pool close --- lib/cmap/connection_pool.js | 27 +++++++++++++++++++------- test/unit/cmap/connection_pool.test.js | 11 ++++++++++- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index f109777e11c..514dd7a45c2 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -5,6 +5,7 @@ const EventEmitter = require('events').EventEmitter; const makeCounter = require('../utils').makeCounter; const Connection = require('./connection').CMAPConnection; const calculateDurationInMs = require('../core/utils').calculateDurationInMs; +const eachAsync = require('../core/utils').eachAsync; const common = 
require('../core/sdam/common'); const drainTimerQueue = common.drainTimerQueue; @@ -185,7 +186,12 @@ class ConnectionPool extends EventEmitter { callback(); } - close(callback) { + close(options, callback) { + if (typeof options === 'function') { + callback = options; + } + + options = Object.assign({ force: false }, options); if (this.closed) { return callback(); } @@ -195,14 +201,21 @@ class ConnectionPool extends EventEmitter { clearTimeout(this[kMinPoolSizeTimer]); } + // mark the pool as closed immediately this.closed = true; - this[kConnections].toArray().forEach(conn => { - destroyConnection(this, conn, 'poolClosed'); - }); - this[kConnections].clear(); - this.emit('connectionPoolClosed', new PoolClosedEvent(this)); - callback(); + eachAsync( + this[kConnections].toArray(), + (conn, cb) => { + this.emit('connectionClosed', new ConnectionClosedEvent(this, conn, 'poolClosed')); + conn.destroy(options, cb); + }, + err => { + this[kConnections].clear(); + this.emit('connectionPoolClosed', new PoolClosedEvent(this)); + callback(err); + } + ); } destroy(callback) { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 669c76037f4..84a6aa6af30 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -66,7 +66,16 @@ class Connection { }); } - destroy() {} + destroy(options, callback) { + if (typeof options === 'function') { + callback = options; + options = {}; + } + + if (typeof callback === 'function') { + callback(); + } + } } const ALL_POOL_EVENTS = new Set([ From db3411804b21455a5cb38856aa21d408d5f6de71 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 12:00:21 -0500 Subject: [PATCH 019/130] chore: remove crufty `CMAPConnection` --- lib/cmap/connection.js | 63 +----------------------------------------- 1 file changed, 1 insertion(+), 62 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index 546e8b08f9e..b2adf6e5570 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -217,67 +217,6 @@ function write(command, options, callback) { this[kMessageStream].writeCommand(command, operationDescription); } -class CMAPConnection { - constructor(options) { - options = options || {}; - this.generation = options.generation; - this.id = options.id; - this.maxIdleTimeMS = options.maxIdleTimeMS; - this.poolId = options.poolId; - this.address = options.address; - this.readyToUse = false; - this.lastMadeAvailable = undefined; - this.callbacks = []; - } - - get metadata() { - return { - id: this.id, - generation: this.generation, - poolId: this.poolId, - address: this.adress - }; - } - - timeIdle() { - return this.readyToUse ? 
Date.now() - this.lastMadeAvailable : 0; - } - - write(callback) { - setTimeout(() => callback()); - } - - makeReadyToUse() { - this.readyToUse = true; - this.lastMadeAvailable = Date.now(); - } - - makeInUse() { - this.readyToUse = false; - this.lastMadeAvailable = undefined; - } - - waitUntilConnect(callback) { - if (this.readyToUse) { - return callback(null, this); - } - - this.callbacks.push(callback); - } - - connect(callback) { - this.callbacks.push(callback); - setTimeout(() => { - this.makeReadyToUse(); - this.callbacks.forEach(c => c(null, this)); - this.callbacks = []; - }); - } - - destroy() {} -} - module.exports = { - Connection, - CMAPConnection + Connection }; From d0fbe81f65eeceffec54b5fc83456d4db91c6418 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 12:16:56 -0500 Subject: [PATCH 020/130] refactor: simplify required api of `Connection` type --- test/unit/cmap/connection_pool.test.js | 40 ++------------------------ 1 file changed, 3 insertions(+), 37 deletions(-) diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 84a6aa6af30..af9f87277a7 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -12,58 +12,24 @@ const expect = chai.expect; class Connection { constructor(options) { options = options || {}; - this.generation = options.generation; + this.id = options.id; - this.maxIdleTimeMS = options.maxIdleTimeMS; - this.poolId = options.poolId; - this.address = options.address; + this.generation = options.generation; this.readyToUse = false; this.lastMadeAvailable = undefined; - this.callbacks = []; - } - - get metadata() { - return { - id: this.id, - generation: this.generation, - poolId: this.poolId, - address: this.adress - }; } timeIdle() { return this.readyToUse ? 
Date.now() - this.lastMadeAvailable : 0; } - write(callback) { - setTimeout(() => callback()); - } - makeReadyToUse() { this.readyToUse = true; this.lastMadeAvailable = Date.now(); } - makeInUse() { - this.readyToUse = false; - this.lastMadeAvailable = undefined; - } - - waitUntilConnect(callback) { - if (this.readyToUse) { - return callback(null, this); - } - - this.callbacks.push(callback); - } - connect(callback) { - this.callbacks.push(callback); - setTimeout(() => { - this.makeReadyToUse(); - this.callbacks.forEach(c => c(null, this)); - this.callbacks = []; - }); + setTimeout(() => callback()); } destroy(options, callback) { From 612d6bb815f3b246eac7620ab493342331f987d7 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 12:56:18 -0500 Subject: [PATCH 021/130] refactor: run CMAP spec tests against a mock server, use `connect` --- lib/cmap/connection_pool.js | 57 +++++++++++++++----------- test/unit/cmap/connection_pool.test.js | 49 +++++++++++++--------- 2 files changed, 63 insertions(+), 43 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 514dd7a45c2..9e5c4a203e9 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -6,6 +6,7 @@ const makeCounter = require('../utils').makeCounter; const Connection = require('./connection').CMAPConnection; const calculateDurationInMs = require('../core/utils').calculateDurationInMs; const eachAsync = require('../core/utils').eachAsync; +const connect = require('../core/connection/connect'); const common = require('../core/sdam/common'); const drainTimerQueue = common.drainTimerQueue; @@ -31,13 +32,20 @@ const kConnections = Symbol('connections'); const kPermits = Symbol('permits'); const kMinPoolSizeTimer = Symbol('minPoolSizeTimer'); const kAcquireTimers = Symbol('acquireTimers'); +const kGeneration = Symbol('generation'); const VALID_POOL_OPTIONS = new Set([ + // `connect` options + 'host', + 'port', + 'bson', + 'connectionType', + + // spec options 'maxPoolSize', 'minPoolSize', 'maxIdleTimeMS', - 'waitQueueTimeoutMS', - 'enableConnectionMonitoring' + 'waitQueueTimeoutMS' ]); function pluckSpecOptions(options) { @@ -64,6 +72,7 @@ class ConnectionPool extends EventEmitter { this[kPermits] = typeof options.maxPoolSize === 'number' ? options.maxPoolSize : 100; this[kMinPoolSizeTimer] = undefined; this[kAcquireTimers] = new Set(); + this[kGeneration] = 0; this.closed = false; this.s = { @@ -77,12 +86,7 @@ class ConnectionPool extends EventEmitter { waitQueueTimeoutMS: typeof options.waitQueueTimeoutMS === 'number' ? 
options.waitQueueTimeoutMS : 10000, - // Allows us to override the Connection constructor for testing purposes - Connection: options.Connection || Connection, - // State variables that do not fall into any other category - pid: process.pid, - generation: 0, address: options.address }; @@ -181,7 +185,7 @@ class ConnectionPool extends EventEmitter { } clear(callback) { - this.s.generation += 1; + this[kGeneration] += 1; this.emit('connectionPoolCleared', new PoolClearedEvent(this)); callback(); } @@ -257,7 +261,7 @@ function ensureMinPoolSize(pool) { } function connectionIsStale(pool, connection) { - return connection.generation !== pool.s.generation; + return connection.generation !== pool[kGeneration]; } function connectionIsIdle(pool, connection) { @@ -265,30 +269,37 @@ function connectionIsIdle(pool, connection) { } function createConnection(pool, callback) { - const connection = new pool.s.Connection({ - id: pool.s.counter.next().value, - generation: pool.s.generation, - maxIdleTimeMS: pool.s.maxIdleTimeMS, - address: pool.s.address - }); + const connectOptions = Object.assign( + { + id: pool.s.counter.next().value, + generation: pool[kGeneration], + connectionType: pool.s.Connection + }, + pool.options + ); pool[kPermits]--; - pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); - - connection.connect(err => { + connect(connectOptions, (err, connection) => { if (err) { pool[kPermits]++; - return pool._propagateError(err); + pool._propagateError(err); + if (typeof callback === 'function') { + callback(err); + } + + return; } + pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); + pool[kConnections].push(connection); connection.makeReadyToUse(); pool.emit('connectionReady', new ConnectionReadyEvent(pool, connection)); - }); - if (callback) { - callback(null, connection); - } + if (typeof callback === 'function') { + callback(null, connection); + } + }); } function destroyConnection(pool, connection, reason) { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index af9f87277a7..7a109f0fa29 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -3,15 +3,18 @@ const Promise = require('bluebird'); const loadSpecTests = require('../../spec').loadSpecTests; const ConnectionPool = require('../../../lib/cmap/connection_pool').ConnectionPool; +const Connection = require('../../../lib/cmap/connection').Connection; const EventEmitter = require('events').EventEmitter; +const mock = require('mongodb-mock-server'); +const BSON = require('bson'); const chai = require('chai'); chai.use(require('../../functional/spec-runner/matcher').default); const expect = chai.expect; -class Connection { - constructor(options) { - options = options || {}; +class MockConnection extends Connection { + constructor(stream, options) { + super(stream, options); this.id = options.id; this.generation = options.generation; @@ -27,21 +30,6 @@ class Connection { this.readyToUse = true; this.lastMadeAvailable = Date.now(); } - - connect(callback) { - setTimeout(() => callback()); - } - - destroy(options, callback) { - if (typeof options === 'function') { - callback = options; - options = {}; - } - - if (typeof callback === 'function') { - callback(); - } - } } const ALL_POOL_EVENTS = new Set([ @@ -73,6 +61,12 @@ function destroyPool(pool) { } describe('Connection Pool', function() { + let server; + after(() => mock.cleanup()); + before(() => { + mock.createServer().then(s => (server = s)); + 
}); + describe('spec tests', function() { const threads = new Map(); const connections = new Map(); @@ -81,8 +75,12 @@ describe('Connection Pool', function() { let pool = undefined; function createPool(options) { - const address = 'localhost:27017'; - options = Object.assign({}, options, { Connection, address }); + options = Object.assign( + {}, + options, + { connectionType: MockConnection, bson: new BSON() }, + server.address() + ); pool = new ConnectionPool(options); ALL_POOL_EVENTS.forEach(ev => { @@ -208,6 +206,17 @@ describe('Connection Pool', function() { } } + before(() => { + // we aren't testing errors yet, so it's fine for the mock server to just accept + // and establish valid connections + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + }); + afterEach(() => { const p = pool ? destroyPool(pool) : Promise.resolve(); return p.then(() => { From f325a07594c46af7003a14fd8668b5efa8015772 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 13:08:20 -0500 Subject: [PATCH 022/130] refator: remove `s` member, transition all to `option` + symbols --- lib/cmap/connection_pool.js | 68 ++++++++++---------------- test/unit/cmap/connection_pool.test.js | 7 ++- 2 files changed, 30 insertions(+), 45 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 9e5c4a203e9..bcc88cd5981 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -3,7 +3,7 @@ const Denque = require('denque'); const EventEmitter = require('events').EventEmitter; const makeCounter = require('../utils').makeCounter; -const Connection = require('./connection').CMAPConnection; +const Connection = require('./connection').Connection; const calculateDurationInMs = require('../core/utils').calculateDurationInMs; const eachAsync = require('../core/utils').eachAsync; const connect = require('../core/connection/connect'); @@ -33,6 +33,7 @@ const kPermits = Symbol('permits'); const kMinPoolSizeTimer = Symbol('minPoolSizeTimer'); const kAcquireTimers = Symbol('acquireTimers'); const kGeneration = Symbol('generation'); +const kConnectionCounter = Symbol('connectionCounter'); const VALID_POOL_OPTIONS = new Set([ // `connect` options @@ -48,7 +49,7 @@ const VALID_POOL_OPTIONS = new Set([ 'waitQueueTimeoutMS' ]); -function pluckSpecOptions(options) { +function resolveOptions(options, defaults) { const newOptions = Array.from(VALID_POOL_OPTIONS).reduce((obj, key) => { if (options.hasOwnProperty(key)) { obj[key] = options[key]; @@ -57,7 +58,7 @@ function pluckSpecOptions(options) { return obj; }, {}); - return Object.freeze(newOptions); + return Object.freeze(Object.assign({}, defaults, newOptions)); } class ConnectionPool extends EventEmitter { @@ -65,30 +66,22 @@ class ConnectionPool extends EventEmitter { super(); options = options || {}; - this.options = pluckSpecOptions(options); - - const counter = makeCounter(1); - this[kConnections] = new Denque(); - this[kPermits] = typeof options.maxPoolSize === 'number' ? options.maxPoolSize : 100; - this[kMinPoolSizeTimer] = undefined; - this[kAcquireTimers] = new Set(); - this[kGeneration] = 0; this.closed = false; - - this.s = { - // Counter that increments for each new connection. - counter, - - // Spec mandated fields + this.options = resolveOptions(options, { + connectionType: Connection, maxPoolSize: typeof options.maxPoolSize === 'number' ? options.maxPoolSize : 100, minPoolSize: typeof options.minPoolSize === 'number' ? 
options.minPoolSize : 0, maxIdleTimeMS: typeof options.maxIdleTimeMS === 'number' ? options.maxIdleTimeMS : 0, waitQueueTimeoutMS: - typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 10000, + typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 10000 + }); - // State variables that do not fall into any other category - address: options.address - }; + this[kConnections] = new Denque(); + this[kPermits] = this.options.maxPoolSize; + this[kMinPoolSizeTimer] = undefined; + this[kAcquireTimers] = new Set(); + this[kGeneration] = 0; + this[kConnectionCounter] = makeCounter(1); process.nextTick(() => { this.emit('connectionPoolCreated', new PoolCreatedEvent(this)); @@ -107,8 +100,8 @@ class ConnectionPool extends EventEmitter { } const pool = this; - const maxPoolSize = this.s.maxPoolSize; - const waitQueueTimeoutMS = this.s.waitQueueTimeoutMS; + const maxPoolSize = this.options.maxPoolSize; + const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; function attemptAcquire(start) { const duration = calculateDurationInMs(start); @@ -205,6 +198,11 @@ class ConnectionPool extends EventEmitter { clearTimeout(this[kMinPoolSizeTimer]); } + // end the connection counter + if (typeof this[kConnectionCounter].return === 'function') { + this[kConnectionCounter].return(); + } + // mark the pool as closed immediately this.closed = true; @@ -222,19 +220,8 @@ class ConnectionPool extends EventEmitter { ); } - destroy(callback) { - this.close(() => { - if (typeof this.s.counter.return === 'function') { - this.s.counter.return(); - } - - callback(); - }); - } - - // Accessors required by spec get totalConnectionCount() { - return this[kConnections].length + (this.s.maxPoolSize - this[kPermits]); + return this[kConnections].length + (this.options.maxPoolSize - this[kPermits]); } get availableConnectionCount() { @@ -242,7 +229,7 @@ class ConnectionPool extends EventEmitter { } get address() { - return this.s.address; + return `${this.options.host}:${this.options.port}`; } // Private Helpers @@ -252,7 +239,7 @@ class ConnectionPool extends EventEmitter { } function ensureMinPoolSize(pool) { - const minPoolSize = pool.s.minPoolSize; + const minPoolSize = pool.options.minPoolSize; for (let i = pool.totalConnectionCount; i < minPoolSize; ++i) { createConnection(pool); } @@ -265,15 +252,14 @@ function connectionIsStale(pool, connection) { } function connectionIsIdle(pool, connection) { - return !!(pool.s.maxIdleTimeMS && connection.timeIdle() > pool.s.maxIdleTimeMS); + return !!(pool.options.maxIdleTimeMS && connection.timeIdle() > pool.options.maxIdleTimeMS); } function createConnection(pool, callback) { const connectOptions = Object.assign( { - id: pool.s.counter.next().value, - generation: pool[kGeneration], - connectionType: pool.s.Connection + id: pool[kConnectionCounter].next().value, + generation: pool[kGeneration] }, pool.options ); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 7a109f0fa29..98b70eb3fa6 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -52,11 +52,10 @@ const PROMISIFIED_POOL_FUNCTIONS = { close: Promise.promisify(ConnectionPool.prototype.close) }; -function destroyPool(pool) { +function closePool(pool) { return new Promise(resolve => { ALL_POOL_EVENTS.forEach(ev => pool.removeAllListeners(ev)); - pool.destroy(resolve); - resolve(); + pool.close(resolve); }); } @@ -218,7 +217,7 @@ describe('Connection Pool', function() { }); afterEach(() => 
{ - const p = pool ? destroyPool(pool) : Promise.resolve(); + const p = pool ? closePool(pool) : Promise.resolve(); return p.then(() => { pool = undefined; threads.clear(); From cb259a17f6f867f2f88f195e25402a1b1bc78db5 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 13:13:03 -0500 Subject: [PATCH 023/130] chore: add `deqnue` to dependencies --- package-lock.json | 5 +++++ package.json | 1 + 2 files changed, 6 insertions(+) diff --git a/package-lock.json b/package-lock.json index b638f7e271d..657dd0b1d13 100644 --- a/package-lock.json +++ b/package-lock.json @@ -982,6 +982,11 @@ "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", "dev": true }, + "denque": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/denque/-/denque-1.4.1.tgz", + "integrity": "sha512-OfzPuSZKGcgr96rf1oODnfjqBFmr1DVoc/TrItj3Ohe0Ah1C5WX5Baquw/9U9KovnQ88EqmJbD66rKYUQYN1tQ==" + }, "detect-libc": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", diff --git a/package.json b/package.json index 2e1ed6342e0..c7f4874e1e3 100644 --- a/package.json +++ b/package.json @@ -25,6 +25,7 @@ }, "dependencies": { "bson": "^1.1.1", + "denque": "^1.4.1", "require_optional": "^1.0.1", "safe-buffer": "^5.1.2" }, From 09333533baeb34edb24262a4c8b8096f47fc5ed4 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 13:30:47 -0500 Subject: [PATCH 024/130] refactor: use a cancellation token for immediate cleanup --- lib/cmap/connection_pool.js | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index bcc88cd5981..057ae6dbde9 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -34,6 +34,7 @@ const kMinPoolSizeTimer = Symbol('minPoolSizeTimer'); const kAcquireTimers = Symbol('acquireTimers'); const kGeneration = Symbol('generation'); const kConnectionCounter = Symbol('connectionCounter'); +const kCancellationToken = Symbol('cancellationToken'); const VALID_POOL_OPTIONS = new Set([ // `connect` options @@ -82,6 +83,8 @@ class ConnectionPool extends EventEmitter { this[kAcquireTimers] = new Set(); this[kGeneration] = 0; this[kConnectionCounter] = makeCounter(1); + this[kCancellationToken] = new EventEmitter(); + this[kCancellationToken].setMaxListeners(Infinity); process.nextTick(() => { this.emit('connectionPoolCreated', new PoolCreatedEvent(this)); @@ -193,6 +196,10 @@ class ConnectionPool extends EventEmitter { return callback(); } + // immediately cancel any in-flight connections + this[kCancellationToken].emit('cancel'); + + // drain and clear all timers drainTimerQueue(this[kAcquireTimers]); if (this[kMinPoolSizeTimer]) { clearTimeout(this[kMinPoolSizeTimer]); @@ -265,9 +272,11 @@ function createConnection(pool, callback) { ); pool[kPermits]--; - connect(connectOptions, (err, connection) => { + connect(connectOptions, pool[kCancellationToken], (err, connection) => { if (err) { pool[kPermits]++; + + // NOTE: integrate logger here pool._propagateError(err); if (typeof callback === 'function') { callback(err); From 12843a71725555a16d9fb6d655a740a0a92569a4 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 13:31:13 -0500 Subject: [PATCH 025/130] test: cleanup orphaned connections in CMAP tests Certain CMAP tests may check connections out to explore pool behavior, and fail to instruct the runner to check them back in, leaving leaked connections in the wild. 
Since we already track named connection, we will clean those up if they haven't been checked back in, and start tracking unnamed checked out connections --- test/unit/cmap/connection_pool.test.js | 31 ++++++++++++++++++++------ 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 98b70eb3fa6..d70c50acf8a 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -69,6 +69,7 @@ describe('Connection Pool', function() { describe('spec tests', function() { const threads = new Map(); const connections = new Map(); + const orphans = new Set(); const poolEvents = []; const poolEventsEventEmitter = new EventEmitter(); let pool = undefined; @@ -105,11 +106,14 @@ describe('Connection Pool', function() { return PROMISIFIED_POOL_FUNCTIONS.checkOut.call(pool).then(connection => { if (op.label != null) { connections.set(op.label, connection); + } else { + orphans.add(connection); } }); }, checkIn: function(op) { const connection = connections.get(op.connection); + connections.delete(op.connection); const force = op.force; if (!connection) { @@ -218,13 +222,26 @@ describe('Connection Pool', function() { afterEach(() => { const p = pool ? closePool(pool) : Promise.resolve(); - return p.then(() => { - pool = undefined; - threads.clear(); - connections.clear(); - poolEvents.length = 0; - poolEventsEventEmitter.removeAllListeners(); - }); + return p + .then(() => { + const connectionsToDestroy = Array.from(orphans).concat(Array.from(connections.values())); + return Promise.each(connectionsToDestroy, conn => { + return new Promise((resolve, reject) => + conn.destroy({ force: true }, err => { + if (err) return reject(err); + resolve(); + }) + ); + }); + }) + .then(() => { + pool = undefined; + threads.clear(); + connections.clear(); + orphans.clear(); + poolEvents.length = 0; + poolEventsEventEmitter.removeAllListeners(); + }); }); loadSpecTests('connection-monitoring-and-pooling').forEach(test => { From 9be9b97e13ffda1dd898ebd09f1e1fac79a9a3bd Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 18:00:51 -0500 Subject: [PATCH 026/130] refactor: use `Connection` for unit tests, drop `MockConnection` Since we are testing this against the mock server, we don't need to mock the connection out at all anymore. --- lib/cmap/connection.js | 24 ++++++++++++++++++++- lib/cmap/connection_pool.js | 6 +++--- test/unit/cmap/connection_pool.test.js | 29 +------------------------- 3 files changed, 27 insertions(+), 32 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index b2adf6e5570..ddcc5bc531d 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -12,18 +12,28 @@ const uuidV4 = require('../core/utils').uuidV4; const kStream = Symbol('stream'); const kQueue = Symbol('queue'); const kMessageStream = Symbol('messageStream'); +const kGeneration = Symbol('generation'); +const kLastUseTime = Symbol('lastUseTime'); class Connection extends EventEmitter { constructor(stream, options) { super(options); - this.id = streamIdentifier(stream); + if (typeof options.generation === 'undefined') { + throw new TypeError('connection requires a pool generation'); + } + + this.id = options.id; + this.address = streamIdentifier(stream); this.bson = options.bson; this.description = null; this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; this.monitorCommands = typeof options.monitorCommands === 'boolean' ? 
options.monitorCommands : false; + this[kGeneration] = options.generation; + this[kLastUseTime] = Date.now(); + // setup parser stream and message handling this[kQueue] = new Map(); this[kMessageStream] = new MessageStream(options); @@ -50,6 +60,18 @@ class Connection extends EventEmitter { this.description = response; } + get generation() { + return this[kGeneration]; + } + + get idleTime() { + return Date.now() - this[kLastUseTime]; + } + + markAvailable() { + this[kLastUseTime] = Date.now(); + } + destroy(options, callback) { if (typeof options === 'function') { callback = options; diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 057ae6dbde9..973791a2f43 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -166,7 +166,7 @@ class ConnectionPool extends EventEmitter { // Properly adjust state of connection if (!willDestroy) { - connection.makeReadyToUse(); + connection.markAvailable(); this[kConnections].push(connection); } @@ -259,7 +259,7 @@ function connectionIsStale(pool, connection) { } function connectionIsIdle(pool, connection) { - return !!(pool.options.maxIdleTimeMS && connection.timeIdle() > pool.options.maxIdleTimeMS); + return !!(pool.options.maxIdleTimeMS && connection.idleTime > pool.options.maxIdleTimeMS); } function createConnection(pool, callback) { @@ -288,7 +288,7 @@ function createConnection(pool, callback) { pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); pool[kConnections].push(connection); - connection.makeReadyToUse(); + connection.markAvailable(); pool.emit('connectionReady', new ConnectionReadyEvent(pool, connection)); if (typeof callback === 'function') { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index d70c50acf8a..e42f4a80c47 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -3,7 +3,6 @@ const Promise = require('bluebird'); const loadSpecTests = require('../../spec').loadSpecTests; const ConnectionPool = require('../../../lib/cmap/connection_pool').ConnectionPool; -const Connection = require('../../../lib/cmap/connection').Connection; const EventEmitter = require('events').EventEmitter; const mock = require('mongodb-mock-server'); const BSON = require('bson'); @@ -12,26 +11,6 @@ const chai = require('chai'); chai.use(require('../../functional/spec-runner/matcher').default); const expect = chai.expect; -class MockConnection extends Connection { - constructor(stream, options) { - super(stream, options); - - this.id = options.id; - this.generation = options.generation; - this.readyToUse = false; - this.lastMadeAvailable = undefined; - } - - timeIdle() { - return this.readyToUse ? 
Date.now() - this.lastMadeAvailable : 0; - } - - makeReadyToUse() { - this.readyToUse = true; - this.lastMadeAvailable = Date.now(); - } -} - const ALL_POOL_EVENTS = new Set([ 'connectionPoolCreated', 'connectionPoolClosed', @@ -75,13 +54,7 @@ describe('Connection Pool', function() { let pool = undefined; function createPool(options) { - options = Object.assign( - {}, - options, - { connectionType: MockConnection, bson: new BSON() }, - server.address() - ); - + options = Object.assign({}, options, { bson: new BSON() }, server.address()); pool = new ConnectionPool(options); ALL_POOL_EVENTS.forEach(ev => { pool.on(ev, x => { From 7b62d79bbd6be9fd33b48cf9201b62b7e8a6150c Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 11 Dec 2019 18:01:51 -0500 Subject: [PATCH 027/130] refactor: add all wire protocol methods to the connection class --- lib/cmap/connection.js | 54 ++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index ddcc5bc531d..ce108beea93 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -19,10 +19,6 @@ class Connection extends EventEmitter { constructor(stream, options) { super(options); - if (typeof options.generation === 'undefined') { - throw new TypeError('connection requires a pool generation'); - } - this.id = options.id; this.address = streamIdentifier(stream); this.bson = options.bson; @@ -61,7 +57,7 @@ class Connection extends EventEmitter { } get generation() { - return this[kGeneration]; + return this[kGeneration] || 0; } get idleTime() { @@ -102,19 +98,47 @@ class Connection extends EventEmitter { }); } + // Wire protocol methods command(ns, cmd, options, callback) { - // NOTE: The wire protocol methods will eventually be migrated to this class, but for now - // we need to pretend we _are_ a server. - const server = { - description: this.description, - s: { - bson: this.bson, - pool: { write: write.bind(this) } - } - }; + wp.command(makeServerTrampoline(this), ns, cmd, options, callback); + } - wp.command(server, ns, cmd, options, callback); + query(ns, cmd, cursorState, options, callback) { + wp.query(makeServerTrampoline(this), ns, cmd, cursorState, options, callback); } + + getMore(ns, cursorState, batchSize, options, callback) { + wp.getMore(makeServerTrampoline(this), ns, cursorState, batchSize, options, callback); + } + + killCursors(ns, cursorState, callback) { + wp.killCursors(makeServerTrampoline(this), ns, cursorState, callback); + } + + insert(ns, ops, options, callback) { + wp.insert(makeServerTrampoline(this), ns, ops, options, callback); + } + + update(ns, ops, options, callback) { + wp.update(makeServerTrampoline(this), ns, ops, options, callback); + } + + remove(ns, ops, options, callback) { + wp.remove(makeServerTrampoline(this), ns, ops, options, callback); + } +} + +/// This lets us emulate a legacy `Server` instance so we can work with the existing wire +/// protocol methods. Eventually, the operation executor will return a `Connection` to execute +/// against. 
+function makeServerTrampoline(server) { + return { + description: server.description, + s: { + bson: server.bson, + pool: { write: write.bind(server) } + } + }; } function messageHandler(conn) { From d59dced25180a50eb4a62c0f30949d2087566f66 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 12 Dec 2019 08:16:19 -0500 Subject: [PATCH 028/130] feat: add a `withConnection` helper to the connection pool This helper allows you to easily manage the "checkedoutness" of a connection. It will automatically check out a connection, pass it to the function you provide, and check it back in for you when your function calls back. --- lib/cmap/connection.js | 2 +- lib/cmap/connection_pool.js | 41 +++++++++++++- test/unit/cmap/connection_pool.test.js | 74 ++++++++++++++++++++++++++ 3 files changed, 115 insertions(+), 2 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index ce108beea93..e86b6f342f0 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -40,7 +40,7 @@ class Connection extends EventEmitter { }); stream.on('close', () => { - this[kQueue].forEach(op => op.callback(new MongoError('Connection closed'))); + this[kQueue].forEach(op => op.cb(new MongoError('Connection closed'))); this[kQueue].clear(); this.emit('close'); diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 973791a2f43..6c85ffa1be0 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -177,7 +177,9 @@ class ConnectionPool extends EventEmitter { destroyConnection(this, connection, reason); } - callback(null); + if (typeof callback === 'function') { + callback(); + } } clear(callback) { @@ -227,6 +229,34 @@ class ConnectionPool extends EventEmitter { ); } + /** + * Runs a lambda with an implicitly checked out connection, checking that connection back in when the lambda + * has completed by calling back. + * + * NOTE: please note the required signature of `fn` + * + * @param {ConnectionPool~withConnectionCallback} fn A function which operates on a managed connection + * @param {Function} callback The original callback + * @return {Promise} + */ + withConnection(fn, callback) { + this.checkOut((err, conn) => { + // don't callback with `err` here, we might want to act upon it inside `fn` + + fn(err, conn, (fnErr, result) => { + if (fnErr) { + callback(fnErr); + } else { + callback(undefined, result); + } + + if (conn) { + this.checkIn(conn); + } + }); + }); + } + get totalConnectionCount() { return this[kConnections].length + (this.options.maxPoolSize - this[kPermits]); } @@ -302,6 +332,15 @@ function destroyConnection(pool, connection, reason) { process.nextTick(() => connection.destroy()); } +/** + * A callback provided to `withConnection` + * + * @callback ConnectionPool~withConnectionCallback + * @param {MongoError} error An error instance representing the error during the execution. + * @param {Connection} connection The managed connection which was checked out of the pool. 
+ * @param {Function} callback A function to call back after connection management is complete + */ + module.exports = { ConnectionPool }; diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index e42f4a80c47..8872839cd42 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -45,6 +45,80 @@ describe('Connection Pool', function() { mock.createServer().then(s => (server = s)); }); + describe('withConnection', function() { + it('should manage a connection for a successful operation', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + + const pool = new ConnectionPool(Object.assign({ bson: new BSON() }, server.address())); + const callback = (err, result) => { + expect(err).to.not.exist; + expect(result).to.exist; + pool.close(done); + }; + + pool.withConnection((err, conn, cb) => { + expect(err).to.not.exist; + + conn.command('$admin.cmd', { ismaster: 1 }, (cmdErr, ismaster) => { + expect(cmdErr).to.not.exist; + cb(undefined, ismaster); + }); + }, callback); + }); + + it('should allow user interaction with an error', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.connection.destroy(); + } + }); + + const pool = new ConnectionPool( + Object.assign({ bson: new BSON(), waitQueueTimeoutMS: 250 }, server.address()) + ); + + const callback = err => { + expect(err).to.exist; + expect(err).to.match(/Timed out/); + pool.close(done); + }; + + pool.withConnection((err, conn, cb) => { + expect(err).to.exist; + expect(err).to.match(/Timed out/); + cb(err); + }, callback); + }); + + it('should return an error to the original callback', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + + const pool = new ConnectionPool(Object.assign({ bson: new BSON() }, server.address())); + const callback = (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + expect(err).to.match(/my great error/); + pool.close(done); + }; + + pool.withConnection((err, conn, cb) => { + expect(err).to.not.exist; + cb(new Error('my great error')); + }, callback); + }); + }); + describe('spec tests', function() { const threads = new Map(); const connections = new Map(); From cf7a45cd1593dc33cff3a5211839afa79b462a96 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 12 Dec 2019 10:11:51 -0500 Subject: [PATCH 029/130] refactor: checking in is not an asynchronous operation --- lib/cmap/connection_pool.js | 16 +++------------- test/unit/cmap/connection_pool.test.js | 4 +--- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 6c85ffa1be0..3d177b3fcbc 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -154,15 +154,10 @@ class ConnectionPool extends EventEmitter { attemptAcquire(process.hrtime()); } - checkIn(connection, force, callback) { - if (typeof force === 'function' && typeof callback !== 'function') { - callback = force; - force = false; - } - + checkIn(connection) { const closed = this.closed; const stale = connectionIsStale(this, connection); - const willDestroy = !!(force || closed || stale); + const willDestroy = !!(closed || stale); // Properly adjust state of connection if (!willDestroy) { @@ -173,12 +168,7 @@ class 
ConnectionPool extends EventEmitter { this.emit('connectionCheckedIn', new ConnectionCheckedInEvent(this, connection)); if (willDestroy) { - const reason = force ? 'force' : closed ? 'poolClosed' : 'stale'; - destroyConnection(this, connection, reason); - } - - if (typeof callback === 'function') { - callback(); + destroyConnection(this, connection, closed ? 'poolClosed' : 'stale'); } } diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 8872839cd42..0dc1b80ae81 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -26,7 +26,6 @@ const ALL_POOL_EVENTS = new Set([ const PROMISIFIED_POOL_FUNCTIONS = { checkOut: Promise.promisify(ConnectionPool.prototype.checkOut), - checkIn: Promise.promisify(ConnectionPool.prototype.checkIn), clear: Promise.promisify(ConnectionPool.prototype.clear), close: Promise.promisify(ConnectionPool.prototype.close) }; @@ -161,13 +160,12 @@ describe('Connection Pool', function() { checkIn: function(op) { const connection = connections.get(op.connection); connections.delete(op.connection); - const force = op.force; if (!connection) { throw new Error(`Attempted to release non-existient connection ${op.connection}`); } - return PROMISIFIED_POOL_FUNCTIONS.checkIn.call(pool, connection, force); + return pool.checkIn(connection); }, clear: function() { return PROMISIFIED_POOL_FUNCTIONS.clear.call(pool); From 04d4afdcb6ad15f9a9b007baf3e2e79e21862e90 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 12 Dec 2019 12:04:12 -0500 Subject: [PATCH 030/130] doc: add documentation for CMAP-based connection pool --- lib/cmap/connection_pool.js | 163 +++++++++++++++++++++++-- lib/cmap/events.js | 8 +- test/unit/cmap/connection_pool.test.js | 3 +- 3 files changed, 160 insertions(+), 14 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 3d177b3fcbc..d2c24c853d1 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -22,8 +22,8 @@ const PoolClosedEvent = events.PoolClosedEvent; const ConnectionCreatedEvent = events.ConnectionCreatedEvent; const ConnectionReadyEvent = events.ConnectionReadyEvent; const ConnectionClosedEvent = events.ConnectionClosedEvent; -const ConnectionCheckOutStarted = events.ConnectionCheckOutStarted; -const ConnectionCheckOutFailed = events.ConnectionCheckOutFailed; +const ConnectionCheckOutStartedEvent = events.ConnectionCheckOutStartedEvent; +const ConnectionCheckOutFailedEvent = events.ConnectionCheckOutFailedEvent; const ConnectionCheckedOutEvent = events.ConnectionCheckedOutEvent; const ConnectionCheckedInEvent = events.ConnectionCheckedInEvent; const PoolClearedEvent = events.PoolClearedEvent; @@ -62,7 +62,45 @@ function resolveOptions(options, defaults) { return Object.freeze(Object.assign({}, defaults, newOptions)); } +/** + * Configuration options for drivers wrapping the node driver. + * + * @typedef {Object} ConnectionPoolOptions + * @property + * @property {string} [host] The host to connect to + * @property {number} [port] The port to connect to + * @property {bson} [bson] The BSON instance to use for new connections + * @property {number} [maxPoolSize=100] The maximum number of connections that may be associated with a pool at a given time. This includes in use and available connections. + * @property {number} [minPoolSize=0] The minimum number of connections that MUST exist at any moment in a single connection pool. 
+ * @property {number} [maxIdleTimeMS] The maximum amount of time a connection should remain idle in the connection pool before being marked idle. + * @property {number} [waitQueueTimeoutMS=10000] The maximum amount of time operation execution should wait for a connection to become available. + */ + +/** + * A pool of connections which dynamically resizes, and emit events related to pool activity + * + * @property {number} generation An integer representing the SDAM generation of the pool + * @property {number} totalConnectionCount An integer expressing how many total connections (active + in use) the pool currently has + * @property {number} availableConnectionCount An integer expressing how many connections are currently available in the pool. + * @property {string} address The address of the endpoint the pool is connected to + * + * @emits ConnectionPool#connectionPoolCreated + * @emits ConnectionPool#connectionPoolClosed + * @emits ConnectionPool#connectionCreated + * @emits ConnectionPool#connectionReady + * @emits ConnectionPool#connectionClosed + * @emits ConnectionPool#connectionCheckOutStarted + * @emits ConnectionPool#connectionCheckOutFailed + * @emits ConnectionPool#connectionCheckedOut + * @emits ConnectionPool#connectionCheckedIn + * @emits ConnectionPool#connectionPoolCleared + */ class ConnectionPool extends EventEmitter { + /** + * Create a new Connection Pool + * + * @param {ConnectionPoolOptions} options + */ constructor(options) { super(); options = options || {}; @@ -77,6 +115,10 @@ class ConnectionPool extends EventEmitter { typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 10000 }); + if (options.minSize > options.maxSize) { + throw new TypeError('Pool minimum size must not be greater than maxiumum pool size'); + } + this[kConnections] = new Denque(); this[kPermits] = this.options.maxPoolSize; this[kMinPoolSizeTimer] = undefined; @@ -92,12 +134,18 @@ class ConnectionPool extends EventEmitter { }); } - // Public API + /** + * Check a connection out of this pool. The connection will continue to be tracked, but no reference to it + * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or + * explicitly destroyed by the new owner. + * + * @param {ConnectionPool~checkOutCallback} callback + */ checkOut(callback) { - this.emit('connectionCheckOutStarted', new ConnectionCheckOutStarted(this)); + this.emit('connectionCheckOutStarted', new ConnectionCheckOutStartedEvent(this)); if (this.closed) { - this.emit('connectionCheckOutFailed', new ConnectionCheckOutFailed(this, 'poolClosed')); + this.emit('connectionCheckOutFailed', new ConnectionCheckOutFailedEvent(this, 'poolClosed')); callback(new PoolClosedError(this)); return; } @@ -142,7 +190,7 @@ class ConnectionPool extends EventEmitter { pool.removeListener('connectionReady', retryAcquire); pool.removeListener('connectionCheckedIn', retryAcquire); - pool.emit('connectionCheckOutFailed', new ConnectionCheckOutFailed(pool, 'timeout')); + pool.emit('connectionCheckOutFailed', new ConnectionCheckOutFailedEvent(pool, 'timeout')); callback(new WaitQueueTimeoutError(pool)); }, waitQueueTimeoutMS - duration); @@ -154,6 +202,11 @@ class ConnectionPool extends EventEmitter { attemptAcquire(process.hrtime()); } + /** + * Check a connection into the pool. 
+ * + * @param {Connection} connection The connection to check in + */ checkIn(connection) { const closed = this.closed; const stale = connectionIsStale(this, connection); @@ -172,12 +225,24 @@ class ConnectionPool extends EventEmitter { } } - clear(callback) { + /** + * Clear the pool + * + * Pool reset is handled by incrementing the pool's generation count. Any existing connection of a + * previous generation will eventually be pruned during subsequent checkouts. + */ + clear() { this[kGeneration] += 1; this.emit('connectionPoolCleared', new PoolClearedEvent(this)); - callback(); } + /** + * Close the pool + * + * @param {object} [options] Optional settings + * @param {boolean} [options.force] Force close connections + * @param {Function} callback + */ close(options, callback) { if (typeof options === 'function') { callback = options; @@ -247,6 +312,10 @@ class ConnectionPool extends EventEmitter { }); } + get generation() { + return this[kGeneration]; + } + get totalConnectionCount() { return this[kConnections].length + (this.options.maxPoolSize - this[kPermits]); } @@ -331,6 +400,84 @@ function destroyConnection(pool, connection, reason) { * @param {Function} callback A function to call back after connection management is complete */ +/** + * A callback provided to `checkOut` + * + * @callback ConnectionPool~checkOutCallback + * @param {MongoError} error An error instance representing the error during checkout + * @param {Connection} connection A connection from the pool + */ + +/** + * Emitted once when the connection pool is created + * + * @event ConnectionPool#connectionPoolCreated + * @type {PoolCreatedEvent} + */ + +/** + * Emitted once when the connection pool is closed + * + * @event ConnectionPool#connectionPoolClosed + * @type {PoolClosedEvent} + */ + +/** + * Emitted each time a connection is created + * + * @event ConnectionPool#connectionCreated + * @type {ConnectionCreatedEvent} + */ + +/** + * Emitted when a connection becomes established, and is ready to use + * + * @event ConnectionPool#connectionReady + * @type {ConnectionReadyEvent} + */ + +/** + * Emitted when a connection is closed + * + * @event ConnectionPool#connectionClosed + * @type {ConnectionClosedEvent} + */ + +/** + * Emitted when an attempt to check out a connection begins + * + * @event ConnectionPool#connectionCheckOutStarted + * @type {ConnectionCheckOutStartedEvent} + */ + +/** + * Emitted when an attempt to check out a connection fails + * + * @event ConnectionPool#connectionCheckOutFailed + * @type {ConnectionCheckOutFailedEvent} + */ + +/** + * Emitted each time a connection is successfully checked out of the connection pool + * + * @event ConnectionPool#connectionCheckedOut + * @type {ConnectionCheckedOutEvent} + */ + +/** + * Emitted each time a connection is successfully checked into the connection pool + * + * @event ConnectionPool#connectionCheckedIn + * @type {ConnectionCheckedInEvent} + */ + +/** + * Emitted each time the connection pool is cleared and it's generation incremented + * + * @event ConnectionPool#connectionPoolCleared + * @type {PoolClearedEvent} + */ + module.exports = { ConnectionPool }; diff --git a/lib/cmap/events.js b/lib/cmap/events.js index a6f99c5f9c6..dd9f28a4693 100644 --- a/lib/cmap/events.js +++ b/lib/cmap/events.js @@ -43,13 +43,13 @@ class ConnectionClosedEvent extends PoolMonitoringEvent { } } -class ConnectionCheckOutStarted extends PoolMonitoringEvent { +class ConnectionCheckOutStartedEvent extends PoolMonitoringEvent { constructor(pool) { 
super('ConnectionCheckOutStarted', pool); } } -class ConnectionCheckOutFailed extends PoolMonitoringEvent { +class ConnectionCheckOutFailedEvent extends PoolMonitoringEvent { constructor(pool, reason) { super('ConnectionCheckOutFailed', pool); this.reason = reason; @@ -82,8 +82,8 @@ module.exports = { ConnectionCreatedEvent, ConnectionReadyEvent, ConnectionClosedEvent, - ConnectionCheckOutStarted, - ConnectionCheckOutFailed, + ConnectionCheckOutStartedEvent, + ConnectionCheckOutFailedEvent, ConnectionCheckedOutEvent, ConnectionCheckedInEvent, PoolClearedEvent diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 0dc1b80ae81..abb6eee39bd 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -26,7 +26,6 @@ const ALL_POOL_EVENTS = new Set([ const PROMISIFIED_POOL_FUNCTIONS = { checkOut: Promise.promisify(ConnectionPool.prototype.checkOut), - clear: Promise.promisify(ConnectionPool.prototype.clear), close: Promise.promisify(ConnectionPool.prototype.close) }; @@ -168,7 +167,7 @@ describe('Connection Pool', function() { return pool.checkIn(connection); }, clear: function() { - return PROMISIFIED_POOL_FUNCTIONS.clear.call(pool); + return pool.clear(); }, close: function() { return PROMISIFIED_POOL_FUNCTIONS.close.call(pool); From 73a4fdc425a012eef1c9ae22d2b8d8f96813d1d5 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 12 Dec 2019 12:06:58 -0500 Subject: [PATCH 031/130] refactor: ensure pool errors are subclasses of MongoError --- lib/cmap/errors.js | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/lib/cmap/errors.js b/lib/cmap/errors.js index 71c4a33215c..d589699d8f0 100644 --- a/lib/cmap/errors.js +++ b/lib/cmap/errors.js @@ -1,6 +1,7 @@ 'use strict'; +const MongoError = require('../core/error').MongoError; -class PoolClosedError extends Error { +class PoolClosedError extends MongoError { constructor(pool) { super('Attempted to check out a connection from closed connection pool'); Error.captureStackTrace(this, this.constructor); @@ -9,7 +10,7 @@ class PoolClosedError extends Error { } } -class WaitQueueTimeoutError extends Error { +class WaitQueueTimeoutError extends MongoError { constructor(pool) { super('Timed out while checking out a connection from connection pool'); Error.captureStackTrace(this, this.constructor); @@ -18,18 +19,7 @@ class WaitQueueTimeoutError extends Error { } } -// Technically not part of the spec. 
-class PoolReleaseForeignConnectionError extends Error { - constructor(pool) { - super('Attempted to check in a connection created by a different pool'); - Error.captureStackTrace(this, this.constructor); - this.errorType = 'poolReleaseForeignConnectionError'; - this.address = pool.address; - } -} - module.exports = { PoolClosedError, - WaitQueueTimeoutError, - PoolReleaseForeignConnectionError + WaitQueueTimeoutError }; From e1e0decd712c208b646d393688db883d08cdfc2c Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 12 Dec 2019 13:23:20 -0500 Subject: [PATCH 032/130] chore: resync CMAP tests --- .../README.rst | 2 + .../connection-must-have-id.json | 18 ++++-- .../connection-must-have-id.yml | 6 ++ .../connection-must-order-ids.json | 18 ++++-- .../connection-must-order-ids.yml | 6 ++ .../pool-checkin-destroy-closed.json | 9 ++- .../pool-checkin-destroy-closed.yml | 3 + .../pool-checkin-destroy-stale.json | 9 ++- .../pool-checkin-destroy-stale.yml | 3 + .../pool-checkin-make-available.json | 9 ++- .../pool-checkin-make-available.yml | 3 + .../pool-checkin.json | 3 +- .../pool-checkin.yml | 1 + .../pool-checkout-connection.json | 6 +- .../pool-checkout-connection.yml | 2 + .../pool-checkout-error-closed.json | 18 +++++- .../pool-checkout-error-closed.yml | 10 +++- .../pool-checkout-multiple.json | 9 ++- .../pool-checkout-multiple.yml | 3 + .../pool-checkout-no-idle.json | 12 ++-- .../pool-checkout-no-idle.yml | 4 ++ .../pool-checkout-no-stale.json | 12 ++-- .../pool-checkout-no-stale.yml | 4 ++ .../pool-close-destroy-conns.json | 6 +- .../pool-close-destroy-conns.yml | 2 + .../pool-create-max-size.json | 45 +++++++++----- .../pool-create-max-size.yml | 15 +++++ .../pool-create-min-size.json | 12 ++-- .../pool-create-min-size.yml | 4 ++ .../pool-create-with-options.json | 3 +- .../pool-create-with-options.yml | 1 + .../wait-queue-fairness.json | 60 ++++++++++++++----- .../wait-queue-fairness.yml | 32 +++++++++- .../wait-queue-timeout.json | 15 +++-- .../wait-queue-timeout.yml | 5 ++ 35 files changed, 289 insertions(+), 81 deletions(-) diff --git a/test/spec/connection-monitoring-and-pooling/README.rst b/test/spec/connection-monitoring-and-pooling/README.rst index b1605c14f0e..6480d7f43b7 100644 --- a/test/spec/connection-monitoring-and-pooling/README.rst +++ b/test/spec/connection-monitoring-and-pooling/README.rst @@ -154,3 +154,5 @@ The following tests have not yet been automated, but MUST still be tested #. All ConnectionPoolOptions MUST be the same for all pools created by a MongoClient #. A user MUST be able to specify all ConnectionPoolOptions via a URI string #. A user MUST be able to subscribe to Connection Monitoring Events in a manner idiomatic to their language and driver +#. When a check out attempt fails because connection set up throws an error, + assert that a ConnectionCheckOutFailedEvent with reason="connectionError" is emitted. 
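The new prose test added to the README above has no YAML form, so each driver implements it by hand. One possible shape for it in this repository's mocha/mock-server style is sketched below; it reuses only helpers already visible in test/unit/cmap/connection_pool.test.js (mongodb-mock-server, bson, chai, the ConnectionPool events and checkOut/close API), and it assumes the pool reports connection-establishment failures with reason "connectionError" as the spec requires — at this point in the patch series the checkout path may still surface a wait-queue timeout instead, so the final assertion is aspirational rather than a description of current behavior::

    'use strict';
    // Hypothetical prose-test sketch -- not part of this patch series.
    const mock = require('mongodb-mock-server');
    const BSON = require('bson');
    const expect = require('chai').expect;
    const ConnectionPool = require('../../../lib/cmap/connection_pool').ConnectionPool;

    describe('CMAP prose test: check out fails on connection error', function() {
      let server;
      before(() => mock.createServer().then(s => (server = s)));
      after(() => mock.cleanup());

      it('emits ConnectionCheckOutFailed with reason "connectionError"', function(done) {
        // destroy every incoming socket so connection establishment always fails
        server.setMessageHandler(request => request.connection.destroy());

        const pool = new ConnectionPool(
          Object.assign({ bson: new BSON(), waitQueueTimeoutMS: 250 }, server.address())
        );

        pool.on('connectionCheckOutFailed', event => {
          // ASSUMPTION: connect errors are mapped to reason 'connectionError';
          // the implementation above may still report 'timeout' here instead.
          expect(event).to.have.property('reason', 'connectionError');
          pool.close(done);
        });

        // the checkout callback should also receive an error in this scenario
        pool.checkOut(err => {
          expect(err).to.exist;
        });
      });
    });
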
diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json index 487a5979d08..7ed67902285 100644 --- a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json +++ b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.json @@ -12,26 +12,32 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml index 16d7fc2d8fd..5b7b660e54a 100644 --- a/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml +++ b/test/spec/connection-monitoring-and-pooling/connection-must-have-id.yml @@ -6,15 +6,21 @@ operations: - name: checkOut events: - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCreated connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCreated connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 ignore: - ConnectionPoolCreated - ConnectionPoolClosed diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json index dda515c1a91..9b839e8f060 100644 --- a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json +++ b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.json @@ -12,26 +12,32 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 2 + "connectionId": 2, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml index c554fd2717e..162acfa7975 100644 --- a/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml +++ b/test/spec/connection-monitoring-and-pooling/connection-must-order-ids.yml @@ -6,15 +6,21 @@ operations: - name: checkOut events: - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCreated connectionId: 1 + address: 42 - type: ConnectionCheckedOut connectionId: 1 + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCreated connectionId: 2 + address: 42 - type: ConnectionCheckedOut connectionId: 2 + address: 42 ignore: - ConnectionPoolCreated - ConnectionPoolClosed diff 
--git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json index 3b6f1d24840..a73afbf752b 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.json @@ -18,7 +18,8 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolClosed", @@ -26,12 +27,14 @@ }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "poolClosed" + "reason": "poolClosed", + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml index f1eaaae14db..cf9bdfc1d70 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-closed.yml @@ -10,13 +10,16 @@ operations: events: - type: ConnectionCheckedOut connectionId: 1 + address: 42 - type: ConnectionPoolClosed address: 42 - type: ConnectionCheckedIn connectionId: 1 + address: 42 - type: ConnectionClosed connectionId: 1 reason: poolClosed + address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json index 7faa44d33cb..600c0520719 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.json @@ -18,7 +18,8 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolCleared", @@ -26,12 +27,14 @@ }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "stale" + "reason": "stale", + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml index a1851101f0e..2c95d5c03b6 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-destroy-stale.yml @@ -10,13 +10,16 @@ operations: events: - type: ConnectionCheckedOut connectionId: 1 + address: 42 - type: ConnectionPoolCleared address: 42 - type: ConnectionCheckedIn connectionId: 1 + address: 42 - type: ConnectionClosed connectionId: 1 reason: stale + address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json index 838194fe8eb..015928c50d3 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.json @@ -18,15 +18,18 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedOut", - 
"connectionId": 1 + "connectionId": 1, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml index 44272ebf42d..bebc035f702 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin-make-available.yml @@ -10,10 +10,13 @@ operations: events: - type: ConnectionCheckedOut connectionId: 1 + address: 42 - type: ConnectionCheckedIn connectionId: 1 + address: 42 - type: ConnectionCheckedOut connectionId: 1 + address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin.json b/test/spec/connection-monitoring-and-pooling/pool-checkin.json index 5e93c207a9e..7073895ad2a 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin.json @@ -15,7 +15,8 @@ "events": [ { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkin.yml b/test/spec/connection-monitoring-and-pooling/pool-checkin.yml index da78c34c8e6..c2560a5cd3b 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkin.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkin.yml @@ -9,6 +9,7 @@ operations: events: - type: ConnectionCheckedIn connectionId: 42 + address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json index e6e108ce58e..0343fa75568 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.json @@ -9,11 +9,13 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml index 34e9ae493e3..b0f61a275d6 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-connection.yml @@ -5,8 +5,10 @@ operations: - name: checkOut events: - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckedOut connectionId: 1 + address: 42 ignore: - ConnectionPoolCreated - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json index 4b32ecb55d8..3823c23a780 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.json @@ -28,23 +28,37 @@ "address": 42, "options": 42 }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, { "type": "ConnectionCheckedOut", + "address": 42, "connectionId": 42 }, { "type": "ConnectionCheckedIn", + "address": 42, "connectionId": 42 }, { "type": "ConnectionPoolClosed", "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": 
"ConnectionCheckOutFailed", + "address": 42, + "reason": "poolClosed" } ], "ignore": [ "ConnectionCreated", "ConnectionReady", - "ConnectionClosed", - "ConnectionCheckOutStarted" + "ConnectionClosed" ] } diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml index 3a8d85e8e20..6621685545a 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-error-closed.yml @@ -15,14 +15,22 @@ events: - type: ConnectionPoolCreated address: 42 options: 42 + - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckedOut + address: 42 connectionId: 42 - type: ConnectionCheckedIn + address: 42 connectionId: 42 - type: ConnectionPoolClosed address: 42 + - type: ConnectionCheckOutStarted + address: 42 + - type: ConnectionCheckOutFailed + address: 42 + reason: poolClosed ignore: - ConnectionCreated - ConnectionReady - ConnectionClosed - - ConnectionCheckOutStarted diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json index f3ecdb9be90..fee0d076cf1 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.json @@ -43,15 +43,18 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml index 1ac3236588c..714506ef7fe 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-multiple.yml @@ -23,10 +23,13 @@ operations: events: - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 ignore: - ConnectionCreated - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json index 77ce40deacf..74325d655d3 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.json @@ -30,20 +30,24 @@ }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "idle" + "reason": "idle", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml index 77f36b19583..415906bb576 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-idle.yml @@ -17,14 +17,18 @@ events: options: 42 - type: 
ConnectionCheckedOut connectionId: 1 + address: 42 - type: ConnectionCheckedIn connectionId: 1 + address: 42 # In between these, wait so connection becomes idle - type: ConnectionClosed connectionId: 1 reason: idle + address: 42 - type: ConnectionCheckedOut connectionId: 2 + address: 42 ignore: - ConnectionReady - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json index e5ebedfbe52..67ee507fe88 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.json @@ -26,11 +26,13 @@ }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolCleared", @@ -39,11 +41,13 @@ { "type": "ConnectionClosed", "connectionId": 1, - "reason": "stale" + "reason": "stale", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml index a4389b81ef3..c434f4b0656 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-checkout-no-stale.yml @@ -14,15 +14,19 @@ events: options: 42 - type: ConnectionCheckedOut connectionId: 1 + address: 42 - type: ConnectionCheckedIn connectionId: 1 + address: 42 - type: ConnectionPoolCleared address: 42 - type: ConnectionClosed connectionId: 1 reason: stale + address: 42 - type: ConnectionCheckedOut connectionId: 2 + address: 42 ignore: - ConnectionReady - ConnectionCreated diff --git a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json index 2bc50419b47..e1fb9d07837 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json +++ b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.json @@ -24,12 +24,14 @@ "events": [ { "type": "ConnectionCheckedIn", - "connectionId": 2 + "connectionId": 2, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 2, - "reason": "poolClosed" + "reason": "poolClosed", + "address": 42 }, { "type": "ConnectionPoolClosed", diff --git a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml index ddfd1fad1bb..65b13a6d51b 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-close-destroy-conns.yml @@ -12,9 +12,11 @@ operations: events: - type: ConnectionCheckedIn connectionId: 2 + address: 42 - type: ConnectionClosed connectionId: 2 reason: poolClosed + address: 42 - type: ConnectionPoolClosed address: 42 ignore: diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json index 2ba7bdf62bf..b585d0daec7 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json +++ b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.json @@ -53,59 +53,74 @@ "options": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": 
"ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml index 534917bc755..64e521c7ec3 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-create-max-size.yml @@ -28,29 +28,44 @@ events: address: 42 options: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCreated connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCreated connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCreated connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckedIn connectionId: 42 + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckedIn connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 ignore: - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json index 470988043f3..7b5cf202b31 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json +++ b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.json @@ -23,19 +23,23 @@ }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml 
b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml index 848de835ddf..d87f7feec34 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-create-min-size.yml @@ -14,13 +14,17 @@ events: options: 42 - type: ConnectionCreated connectionId: 42 + address: 42 - type: ConnectionCreated connectionId: 42 + address: 42 - type: ConnectionCreated connectionId: 42 + address: 42 # Ensures that by the time pool is closed, there are at least 3 connections - type: ConnectionCheckedOut connectionId: 42 + address: 42 ignore: - ConnectionReady - ConnectionClosed diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json index ab689448f4e..4e8223f91e3 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json +++ b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.json @@ -26,6 +26,7 @@ } ], "ignore": [ - "ConnectionCreated" + "ConnectionCreated", + "ConnectionReady" ] } diff --git a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml index 2915eb60017..32c8d0e54c8 100644 --- a/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml +++ b/test/spec/connection-monitoring-and-pooling/pool-create-with-options.yml @@ -18,3 +18,4 @@ events: maxIdleTimeMS: 100 ignore: - ConnectionCreated + - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json index 36c8a6dc1ba..c58fbadcff2 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.json @@ -4,7 +4,7 @@ "description": "must issue Connections to threads in the order that the threads entered the queue", "poolOptions": { "maxPoolSize": 1, - "waitQueueTimeoutMS": 1000 + "waitQueueTimeoutMS": 5000 }, "operations": [ { @@ -25,6 +25,10 @@ "event": "ConnectionCheckOutStarted", "count": 2 }, + { + "name": "wait", + "ms": 100 + }, { "name": "start", "target": "thread2" @@ -39,6 +43,10 @@ "event": "ConnectionCheckOutStarted", "count": 3 }, + { + "name": "wait", + "ms": 100 + }, { "name": "start", "target": "thread3" @@ -53,6 +61,10 @@ "event": "ConnectionCheckOutStarted", "count": 4 }, + { + "name": "wait", + "ms": 100 + }, { "name": "start", "target": "thread4" @@ -67,6 +79,10 @@ "event": "ConnectionCheckOutStarted", "count": 5 }, + { + "name": "wait", + "ms": 100 + }, { "name": "checkIn", "connection": "conn0" @@ -102,55 +118,69 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { 
"type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml index 564c010bab9..024ec69316a 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-fairness.yml @@ -3,7 +3,7 @@ style: unit description: must issue Connections to threads in the order that the threads entered the queue poolOptions: maxPoolSize: 1 - waitQueueTimeoutMS: 1000 + waitQueueTimeoutMS: 5000 operations: # Check out sole connection in pool - name: checkOut @@ -21,6 +21,10 @@ operations: - name: waitForEvent event: ConnectionCheckOutStarted count: 2 + # Give thread1 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 - name: start target: thread2 - name: checkOut @@ -29,6 +33,10 @@ operations: - name: waitForEvent event: ConnectionCheckOutStarted count: 3 + # Give thread2 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 - name: start target: thread3 - name: checkOut @@ -37,6 +45,10 @@ operations: - name: waitForEvent event: ConnectionCheckOutStarted count: 4 + # Give thread3 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. + - name: wait + ms: 100 - name: start target: thread4 - name: checkOut @@ -45,6 +57,10 @@ operations: - name: waitForEvent event: ConnectionCheckOutStarted count: 5 + # Give thread4 some time to actually enter the wait queue since the + # ConnectionCheckOutStarted event is publish beforehand. 
+ - name: wait + ms: 100 # From main thread, keep checking in connection and then wait for appropriate thread # Test will timeout if threads are not enqueued in proper order - name: checkIn @@ -65,28 +81,42 @@ operations: target: thread4 events: - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckedIn connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckedIn connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckedIn connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckedIn connectionId: 42 + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 ignore: - ConnectionCreated - ConnectionReady diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json index 90ec2f62d95..ee7cf279552 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json @@ -39,22 +39,27 @@ }, "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckOutFailed", - "reason": "timeout" + "reason": "timeout", + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml index 49c18df4cd7..eba4ab638da 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml @@ -27,13 +27,18 @@ error: message: Timed out while checking out a connection from connection pool events: - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckedOut connectionId: 42 + address: 42 - type: ConnectionCheckOutStarted + address: 42 - type: ConnectionCheckOutFailed reason: timeout + address: 42 - type: ConnectionCheckedIn connectionId: 42 + address: 42 ignore: - ConnectionCreated - ConnectionReady From bc5c24142b0396a6387e805eeca60bfbd6744b07 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 12 Dec 2019 13:28:57 -0500 Subject: [PATCH 033/130] chore: remove unused spec matcher duplicate --- test/match_spec.js | 76 ---------------------------------------------- 1 file changed, 76 deletions(-) delete mode 100644 test/match_spec.js diff --git a/test/match_spec.js b/test/match_spec.js deleted file mode 100644 index 68d46fc98b9..00000000000 --- a/test/match_spec.js +++ /dev/null @@ -1,76 +0,0 @@ -'use strict'; - -const SYMBOL_ANY = Symbol('[[any]]'); - -function transformSpecCompare(obj) { - if (obj === 42 || obj === '42') { - return SYMBOL_ANY; - } - - if (typeof obj !== 'object' || obj === null) { - return obj; - } - - if (obj instanceof Date) { - return obj; - } - - if (Array.isArray(obj)) { - return 
obj.map(transformSpecCompare); - } - - return Object.keys(obj).reduce((memo, key) => { - memo[key] = transformSpecCompare(obj[key]); - return memo; - }, {}); -} - -function matchSpecCompare(expected, actual) { - const typeOfExpected = typeof expected; - - if (expected === 42 || expected === '42') { - return actual != null; - } - - if (typeOfExpected !== typeof actual) { - return false; - } - - if (typeOfExpected !== 'object' || expected == null) { - return expected === actual; - } - - if (Array.isArray(expected)) { - if (!Array.isArray(actual)) { - return false; - } - - return expected.every((val, idx) => matchSpecCompare(val, actual[idx])); - } else if (expected instanceof Date) { - return actual instanceof Date ? expected.getTime() === actual.getTime() : false; - } - - return Object.keys(expected).every(key => matchSpecCompare(expected[key], actual[key])); -} - -function matchSpec(chai, utils) { - chai.Assertion.addMethod('matchSpec', function(expected) { - const actual = utils.flag(this, 'object'); - - chai.Assertion.prototype.assert.call( - this, - matchSpecCompare(expected, actual), - 'expected #{act} to match spec #{exp}', - 'expected #{act} to not match spec #{exp}', - transformSpecCompare(expected), - actual, - chai.config.showDiff - ); - }); - - chai.assert.matchSpec = function(val, exp, msg) { - new chai.Assertion(val, msg).to.matchSpec(exp); - }; -} - -module.exports.default = matchSpec; From 56aeb521e7273abc649c78b953ee1d389a7e757a Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 14 Dec 2019 07:53:22 -0500 Subject: [PATCH 034/130] refactor: don't require a final callback for `withConnection` --- lib/cmap/connection_pool.js | 10 +++++---- test/unit/cmap/connection_pool.test.js | 30 ++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index d2c24c853d1..77475b8a8c2 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -299,10 +299,12 @@ class ConnectionPool extends EventEmitter { // don't callback with `err` here, we might want to act upon it inside `fn` fn(err, conn, (fnErr, result) => { - if (fnErr) { - callback(fnErr); - } else { - callback(undefined, result); + if (typeof callback === 'function') { + if (fnErr) { + callback(fnErr); + } else { + callback(undefined, result); + } } if (conn) { diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index abb6eee39bd..27ca2a91efd 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -6,6 +6,7 @@ const ConnectionPool = require('../../../lib/cmap/connection_pool').ConnectionPo const EventEmitter = require('events').EventEmitter; const mock = require('mongodb-mock-server'); const BSON = require('bson'); +const cmapEvents = require('../../../lib/cmap/events'); const chai = require('chai'); chai.use(require('../../functional/spec-runner/matcher').default); @@ -115,6 +116,35 @@ describe('Connection Pool', function() { cb(new Error('my great error')); }, callback); }); + + it('should still manage a connection if no callback is provided', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + + const pool = new ConnectionPool( + Object.assign({ bson: new BSON(), maxPoolSize: 1 }, server.address()) + ); + + const events = []; + pool.on('connectionCheckedOut', event => events.push(event)); + pool.on('connectionCheckedIn', 
event => { + events.push(event); + + expect(events).to.have.length(2); + expect(events[0]).to.be.instanceOf(cmapEvents.ConnectionCheckedOutEvent); + expect(events[1]).to.be.instanceOf(cmapEvents.ConnectionCheckedInEvent); + pool.close(done); + }); + + pool.withConnection((err, conn, cb) => { + expect(err).to.not.exist; + cb(); + }); + }); }); describe('spec tests', function() { From 2bd17a6cb201fe6fcee557a4dc1c485d925d8f8d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 14 Dec 2019 10:02:41 -0500 Subject: [PATCH 035/130] fix: destroy connections marked as closed on checkIn / checkOut Connections which receive a `close` event are now marked as being in a `closed` state. The ConnectionPool will now check for this state, and destroy the connections accordingly. --- lib/cmap/connection.js | 7 ++++- lib/cmap/connection_pool.js | 12 ++++--- test/unit/cmap/connection_pool.test.js | 43 ++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 6 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index e86b6f342f0..fad92980694 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -3,6 +3,7 @@ const EventEmitter = require('events'); const MessageStream = require('./message_stream'); const MongoError = require('../core/error').MongoError; +const MongoNetworkError = require('../core/error').MongoNetworkError; const MongoWriteConcernError = require('../core/error').MongoWriteConcernError; const wp = require('../core/wireprotocol'); const apm = require('../core/connection/apm'); @@ -26,6 +27,7 @@ class Connection extends EventEmitter { this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; this.monitorCommands = typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false; + this.closed = false; this[kGeneration] = options.generation; this[kLastUseTime] = Date.now(); @@ -40,7 +42,10 @@ class Connection extends EventEmitter { }); stream.on('close', () => { - this[kQueue].forEach(op => op.cb(new MongoError('Connection closed'))); + this.closed = true; + this[kQueue].forEach(op => + op.cb(new MongoNetworkError(`connection ${this.id} to ${this.address} closed`)) + ); this[kQueue].clear(); this.emit('close'); diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 77475b8a8c2..1f363f5d63b 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -165,13 +165,14 @@ class ConnectionPool extends EventEmitter { const connection = pool[kConnections].pop(); const isStale = connectionIsStale(pool, connection); const isIdle = connectionIsIdle(pool, connection); - if (!isStale && !isIdle) { + if (!isStale && !isIdle && !connection.closed) { pool.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(pool, connection)); callback(null, connection); return; } - destroyConnection(pool, connection, isStale ? 'stale' : 'idle'); + const reason = connection.closed ? 'error' : isStale ? 
'stale' : 'idle'; + destroyConnection(pool, connection, reason); } if (maxPoolSize <= 0 || pool.totalConnectionCount < maxPoolSize) { @@ -208,9 +209,9 @@ class ConnectionPool extends EventEmitter { * @param {Connection} connection The connection to check in */ checkIn(connection) { - const closed = this.closed; + const poolClosed = this.closed; const stale = connectionIsStale(this, connection); - const willDestroy = !!(closed || stale); + const willDestroy = !!(poolClosed || stale || connection.closed); // Properly adjust state of connection if (!willDestroy) { @@ -221,7 +222,8 @@ class ConnectionPool extends EventEmitter { this.emit('connectionCheckedIn', new ConnectionCheckedInEvent(this, connection)); if (willDestroy) { - destroyConnection(this, connection, closed ? 'poolClosed' : 'stale'); + const reason = connection.closed ? 'error' : poolClosed ? 'poolClosed' : 'stale'; + destroyConnection(this, connection, reason); } } diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 27ca2a91efd..e824b4204cb 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -44,6 +44,49 @@ describe('Connection Pool', function() { mock.createServer().then(s => (server = s)); }); + it('should destroy connections which have been closed', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } else { + // destroy on any other command + request.connection.destroy(); + } + }); + + const pool = new ConnectionPool( + Object.assign({ bson: new BSON(), maxPoolSize: 1 }, server.address()) + ); + + const events = []; + pool.on('connectionClosed', event => events.push(event)); + + pool.checkOut((err, conn) => { + expect(err).to.not.exist; + + conn.command('admin.$cmd', { ping: 1 }, (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + + pool.checkIn(conn); + + expect(events).to.have.length(1); + const closeEvent = events[0]; + expect(closeEvent) + .have.property('reason') + .equal('error'); + + pool.close(done); + }); + }); + + pool.withConnection((err, conn, cb) => { + expect(err).to.not.exist; + cb(); + }); + }); + describe('withConnection', function() { it('should manage a connection for a successful operation', function(done) { server.setMessageHandler(request => { From 93e8ad0cf86e1bb7e472164e7590f69dccf11bb7 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 14 Dec 2019 10:34:32 -0500 Subject: [PATCH 036/130] feat: support socket timeouts on a per-connection level --- lib/cmap/connection.js | 14 ++++++++++++-- lib/cmap/connection_pool.js | 2 ++ test/functional/cmap/connection.test.js | 15 +++++++++++++++ test/unit/cmap/connection_pool.test.js | 25 +++++++++++++++++++++++++ 4 files changed, 54 insertions(+), 2 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index fad92980694..51ebb090f8a 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -51,6 +51,16 @@ class Connection extends EventEmitter { this.emit('close'); }); + stream.on('timeout', () => { + this.closed = true; + this[kQueue].forEach(op => + op.cb(new MongoNetworkError(`connection ${this.id} to ${this.address} timed out`)) + ); + this[kQueue].clear(); + + this.emit('close'); + }); + // hook the message stream up to the passed in stream stream.pipe(this[kMessageStream]); this[kMessageStream].pipe(stream); @@ -159,7 +169,7 @@ function messageHandler(conn) { const callback = operationDescription.cb; 
if (operationDescription.socketTimeoutOverride) { - this[kStream].setSocketTimeout(this.socketTimeout); + conn[kStream].setTimeout(conn.socketTimeout); } try { @@ -230,7 +240,7 @@ function write(command, options, callback) { if (typeof options.socketTimeout === 'number') { operationDescription.socketTimeoutOverride = true; - this[kStream].setSocketTimeout(options.socketTimeout); + this[kStream].setTimeout(options.socketTimeout); } // if command monitoring is enabled we need to modify the callback here diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 1f363f5d63b..5f878260b50 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -42,6 +42,8 @@ const VALID_POOL_OPTIONS = new Set([ 'port', 'bson', 'connectionType', + 'monitorCommands', + 'socketTimeout', // spec options 'maxPoolSize', diff --git a/test/functional/cmap/connection.test.js b/test/functional/cmap/connection.test.js index 05f45112ec8..52dce4d4184 100644 --- a/test/functional/cmap/connection.test.js +++ b/test/functional/cmap/connection.test.js @@ -49,4 +49,19 @@ describe('Connection', function() { }); }); }); + + it('should support socket timeouts', function(done) { + const connectOptions = Object.assign({ + host: '240.0.0.1', + connectionType: Connection, + bson: new BSON(), + connectionTimeout: 500 + }); + + connect(connectOptions, err => { + expect(err).to.exist; + expect(err).to.match(/timed out/); + done(); + }); + }); }); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index e824b4204cb..1ed8a10a043 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -87,6 +87,31 @@ describe('Connection Pool', function() { }); }); + it('should propagate socket timeouts to connections', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } else { + // blackhole other requests + } + }); + + const pool = new ConnectionPool( + Object.assign({ bson: new BSON(), maxPoolSize: 1, socketTimeout: 500 }, server.address()) + ); + + pool.withConnection((err, conn, cb) => { + conn.command('admin.$cmd', { ping: 1 }, (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + expect(err).to.match(/timed out/); + cb(); + }); + }, () => pool.close(done)); + }); + + describe('withConnection', function() { it('should manage a connection for a successful operation', function(done) { server.setMessageHandler(request => { From 92b6fe5c12cc5e2d98eedd487083da1b5a1ee5dd Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 16 Dec 2019 08:02:27 -0500 Subject: [PATCH 037/130] style: clean up linting in connection pool unit tests --- lib/cmap/connection.js | 8 ++++++-- test/unit/cmap/connection_pool.test.js | 21 ++++++++++++--------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index 51ebb090f8a..8f4a0cae278 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -92,6 +92,10 @@ class Connection extends EventEmitter { options = Object.assign({ force: false }, options); if (this[kStream] == null || this.destroyed) { this.destroyed = true; + if (typeof callback === 'function') { + callback(); + } + return; } @@ -99,7 +103,7 @@ class Connection extends EventEmitter { this[kStream].destroy(); this.destroyed = true; if (typeof callback === 'function') { - callback(null, null); + callback(); } return; @@ -108,7 +112,7 @@ class 
Connection extends EventEmitter { this[kStream].end(err => { this.destroyed = true; if (typeof callback === 'function') { - callback(err, null); + callback(err); } }); } diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 1ed8a10a043..081d94a1a64 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -101,17 +101,20 @@ describe('Connection Pool', function() { Object.assign({ bson: new BSON(), maxPoolSize: 1, socketTimeout: 500 }, server.address()) ); - pool.withConnection((err, conn, cb) => { - conn.command('admin.$cmd', { ping: 1 }, (err, result) => { - expect(err).to.exist; - expect(result).to.not.exist; - expect(err).to.match(/timed out/); - cb(); - }); - }, () => pool.close(done)); + pool.withConnection( + (err, conn, cb) => { + expect(err).to.not.exist; + conn.command('admin.$cmd', { ping: 1 }, (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + expect(err).to.match(/timed out/); + cb(); + }); + }, + () => pool.close(done) + ); }); - describe('withConnection', function() { it('should manage a connection for a successful operation', function(done) { server.setMessageHandler(request => { From f02b41d67846e77b078af41d56ce51a024fa87af Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 13:36:40 -0500 Subject: [PATCH 038/130] refactor: use symbole for connction description field --- lib/cmap/connection.js | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index 8f4a0cae278..6d6e57f95c9 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -15,6 +15,7 @@ const kQueue = Symbol('queue'); const kMessageStream = Symbol('messageStream'); const kGeneration = Symbol('generation'); const kLastUseTime = Symbol('lastUseTime'); +const kDescription = Symbol('description'); class Connection extends EventEmitter { constructor(stream, options) { @@ -23,7 +24,6 @@ class Connection extends EventEmitter { this.id = options.id; this.address = streamIdentifier(stream); this.bson = options.bson; - this.description = null; this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; this.monitorCommands = typeof options.monitorCommands === 'boolean' ? 
options.monitorCommands : false; @@ -64,11 +64,33 @@ class Connection extends EventEmitter { // hook the message stream up to the passed in stream stream.pipe(this[kMessageStream]); this[kMessageStream].pipe(stream); + + if (options.compression) { + this[kDescription] = { compression: options.compression }; + } else { + this[kDescription] = undefined; + } + } + + get description() { + return this[kDescription]; } // the `connect` method stores the result of the handshake ismaster on the connection set ismaster(response) { - this.description = response; + if (response.compression) { + const compression = this[kDescription].compression; + const compressors = compression.compressors; + response.compression = { + compressor: compressors.filter(c => response.compression.indexOf(c) !== -1)[0] + }; + + if (compression.zlibCompressionLevel) { + response.compression.zlibCompressionLevel = compression.zlibCompressionLevel; + } + } + + this[kDescription] = response; } get generation() { From 2e685a67db5ba018f6092a3817dfaa7546d0a52a Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 13:39:03 -0500 Subject: [PATCH 039/130] refactor: store cluster time on connection directly --- lib/cmap/connection.js | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index 6d6e57f95c9..c3d2c20307d 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -15,6 +15,7 @@ const kQueue = Symbol('queue'); const kMessageStream = Symbol('messageStream'); const kGeneration = Symbol('generation'); const kLastUseTime = Symbol('lastUseTime'); +const kClusterTime = Symbol('clusterTime'); const kDescription = Symbol('description'); class Connection extends EventEmitter { @@ -101,6 +102,10 @@ class Connection extends EventEmitter { return Date.now() - this[kLastUseTime]; } + get clusterTime() { + return this[kClusterTime]; + } + markAvailable() { this[kLastUseTime] = Date.now(); } @@ -172,12 +177,13 @@ class Connection extends EventEmitter { /// This lets us emulate a legacy `Server` instance so we can work with the existing wire /// protocol methods. Eventually, the operation executor will return a `Connection` to execute /// against. -function makeServerTrampoline(server) { +function makeServerTrampoline(connection) { return { - description: server.description, + description: connection.description, + clusterTime: connection[kClusterTime], s: { - bson: server.bson, - pool: { write: write.bind(server) } + bson: connection.bson, + pool: { write: write.bind(connection) } } }; } @@ -214,7 +220,8 @@ function messageHandler(conn) { } if (document.$clusterTime) { - this.emit('clusterTimeReceived', document.$clusterTime); + conn[kClusterTime] = document.$clusterTime; + conn.emit('clusterTimeReceived', document.$clusterTime); } if (document.writeConcernError) { From 046b46cfbf8e684cf6a9f6d899d521d24f16813d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 13:45:37 -0500 Subject: [PATCH 040/130] refactor: always reply with a `CommandResult` Until we can migrate away from legacy topology types, we make many assumptions in the codebase about always receiving a `CommandResult`. 
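For illustration, a minimal sketch of what a caller of the new CMAP connection sees after this change, assuming a connected `Connection` instance named `conn`; it mirrors the functional test update below, which unwraps the raw reply via `result.result`:

    // The command callback now always receives a CommandResult wrapper,
    // so the raw server reply document is reached through `.result`.
    conn.command('admin.$cmd', { ismaster: 1 }, (err, result) => {
      if (err) throw err;

      // NODE-2382: drop this unwrapping once commands return raw responses again
      const ismaster = result.result;
      console.log(ismaster.ok); // 1 on success
    });

The same unwrapping is applied inside `runCommand` in `connect.js` below, so legacy callers continue to receive the plain reply document.
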
--- lib/cmap/connection.js | 14 +++++++++++++- lib/core/connection/connect.js | 20 ++++++++++++++++++-- test/functional/cmap/connection.test.js | 10 ++++++++-- 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index c3d2c20307d..fe5b5332e8c 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -5,6 +5,7 @@ const MessageStream = require('./message_stream'); const MongoError = require('../core/error').MongoError; const MongoNetworkError = require('../core/error').MongoNetworkError; const MongoWriteConcernError = require('../core/error').MongoWriteConcernError; +const CommandResult = require('../core/connection/command_result'); const wp = require('../core/wireprotocol'); const apm = require('../core/connection/apm'); const updateSessionFromResponse = require('../core/sessions').updateSessionFromResponse; @@ -29,6 +30,7 @@ class Connection extends EventEmitter { this.monitorCommands = typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false; this.closed = false; + this.destroyed = false; this[kGeneration] = options.generation; this[kLastUseTime] = Date.now(); @@ -235,7 +237,17 @@ function messageHandler(conn) { } } - callback(null, operationDescription.fullResult ? message : message.documents[0]); + // NODE-2382: reenable in our glorious non-leaky abstraction future + // callback(null, operationDescription.fullResult ? message : message.documents[0]); + + callback( + undefined, + new CommandResult( + operationDescription.fullResult ? message : message.documents[0], + conn, + message + ) + ); }; } diff --git a/lib/core/connection/connect.js b/lib/core/connection/connect.js index 3fd8be6f8a0..5ead433c676 100644 --- a/lib/core/connection/connect.js +++ b/lib/core/connection/connect.js @@ -36,6 +36,10 @@ function connect(options, cancellationToken, callback) { }); } +function isModernConnectionType(conn) { + return typeof conn.command === 'function'; +} + function getSaslSupportedMechs(options) { if (!(options && options.credentials)) { return {}; @@ -302,8 +306,20 @@ function makeConnection(family, options, cancellationToken, _callback) { const CONNECTION_ERROR_EVENTS = ['error', 'close', 'timeout', 'parseError']; function runCommand(conn, ns, command, options, callback) { - if (typeof conn.command === 'function') { - conn.command(ns, command, options, callback); + if (typeof options === 'function') (callback = options), (options = {}); + + // are we using the new connection type? 
if so, no need to simulate a rpc `command` method + if (isModernConnectionType(conn)) { + conn.command(ns, command, options, (err, result) => { + if (err) { + callback(err); + return; + } + + // NODE-2382: raw wire protocol messages, or command results should not be used anymore + callback(undefined, result.result); + }); + return; } diff --git a/test/functional/cmap/connection.test.js b/test/functional/cmap/connection.test.js index 52dce4d4184..35bb90747bf 100644 --- a/test/functional/cmap/connection.test.js +++ b/test/functional/cmap/connection.test.js @@ -16,7 +16,10 @@ describe('Connection', function() { expect(err).to.not.exist; this.defer(_done => conn.destroy(_done)); - conn.command('admin.$cmd', { ismaster: 1 }, (err, ismaster) => { + conn.command('admin.$cmd', { ismaster: 1 }, (err, result) => { + // NODE-2382: remove `result.result` when command returns just a raw response + const ismaster = result.result; + expect(err).to.not.exist; expect(ismaster).to.exist; expect(ismaster.ok).to.equal(1); @@ -40,7 +43,10 @@ describe('Connection', function() { conn.on('commandSucceeded', event => events.push(event)); conn.on('commandFailed', event => events.push(event)); - conn.command('admin.$cmd', { ismaster: 1 }, (err, ismaster) => { + conn.command('admin.$cmd', { ismaster: 1 }, (err, result) => { + // NODE-2382: remove `result.result` when command returns just a raw response + const ismaster = result.result; + expect(err).to.not.exist; expect(ismaster).to.exist; expect(ismaster.ok).to.equal(1); From 13b7892851cb2bc20c127b2ce91c79798f7893df Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 14:09:56 -0500 Subject: [PATCH 041/130] refactor: support fire-and-forget messages --- lib/cmap/connection.js | 34 ++++++++++++++++++------- test/unit/cmap/connection.test.js | 41 +++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 9 deletions(-) create mode 100644 test/unit/cmap/connection.test.js diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index fe5b5332e8c..97ad4f28718 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -191,7 +191,7 @@ function makeServerTrampoline(connection) { } function messageHandler(conn) { - return function(message) { + return function messageHandler(message) { // always emit the message, in case we are streaming conn.emit('message', message); if (!conn[kQueue].has(message.responseTo)) { @@ -269,20 +269,21 @@ function write(command, options, callback) { const operationDescription = { requestId: command.requestId, cb: callback, - fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false, session: options.session, + fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false, + noResponse: typeof options.noResponse === 'boolean' ? options.noResponse : false, // For BSON parsing promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true, promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true, promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false, - raw: typeof options.raw === 'boolean' ? options.raw : false, - - // NOTE: This property is set on the connection as part of `connect`, but should - // eventually live in the `StreamDescription` attached to this connection. - agreedCompressor: this.agreedCompressor + raw: typeof options.raw === 'boolean' ? 
options.raw : false }; + if (this[kDescription] && this[kDescription].compression) { + operationDescription.agreedCompressor = this[kDescription].compression.compressor; + } + if (typeof options.socketTimeout === 'number') { operationDescription.socketTimeoutOverride = true; this[kStream].setTimeout(options.socketTimeout); @@ -319,8 +320,23 @@ function write(command, options, callback) { }; } - this[kQueue].set(operationDescription.requestId, operationDescription); - this[kMessageStream].writeCommand(command, operationDescription); + if (!operationDescription.noResponse) { + this[kQueue].set(operationDescription.requestId, operationDescription); + } + + try { + this[kMessageStream].writeCommand(command, operationDescription); + } catch (e) { + if (!operationDescription.noResponse) { + this[kQueue].delete(operationDescription.requestId); + operationDescription.cb(e); + return; + } + } + + if (operationDescription.noResponse) { + operationDescription.cb(); + } } module.exports = { diff --git a/test/unit/cmap/connection.test.js b/test/unit/cmap/connection.test.js new file mode 100644 index 00000000000..0fb35cd0d40 --- /dev/null +++ b/test/unit/cmap/connection.test.js @@ -0,0 +1,41 @@ +'use strict'; + +const BSON = require('bson'); +const mock = require('mongodb-mock-server'); +const connect = require('../../../lib/core/connection/connect'); +const Connection = require('../../../lib/cmap/connection').Connection; +const expect = require('chai').expect; + +describe('Connection', function() { + let server; + after(() => mock.cleanup()); + before(() => { + mock.createServer().then(s => (server = s)); + }); + + it('should support fire-and-forget messages', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + + // blackhole all other requests + }); + + connect( + Object.assign({ bson: new BSON(), connectionType: Connection }, server.address()), + (err, conn) => { + expect(err).to.not.exist; + expect(conn).to.exist; + + conn.command('$admin.cmd', { ping: 1 }, { noResponse: true }, (err, result) => { + expect(err).to.not.exist; + expect(result).to.not.exist; + + done(); + }); + } + ); + }); +}); From 8826bf58f80914d066752f53dfdddcac86370133 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 19 Dec 2019 07:10:31 -0500 Subject: [PATCH 042/130] refactor: drop timers and event listeners in favor of wait queue The wait queue is now implemented as another linked list, and it has become the primary focus of checking out connections. This allows us to avoid having to set up temporary event listeners to check when connections become available. 
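For illustration, a simplified sketch of the queue-based checkout pattern introduced here, assuming plain arrays and simplified names (`waitQueue`, `availableConnections`, `waitQueueTimeoutMS`); the actual patch below uses a Denque, symbol-keyed fields, and additionally handles stale, idle, and closed connections as well as on-demand connection creation:

    // Each checkOut request becomes a wait queue member with its own timeout;
    // check-in and connection creation drain the queue in FIFO order, so no
    // per-request event listeners are needed.
    function checkOut(pool, callback) {
      const member = { callback, cancelled: false };
      member.timer = setTimeout(() => {
        member.cancelled = true;
        callback(new Error('timed out while checking out a connection'));
      }, pool.waitQueueTimeoutMS);

      pool.waitQueue.push(member);
      processWaitQueue(pool);
    }

    function processWaitQueue(pool) {
      while (pool.waitQueue.length && pool.availableConnections.length) {
        const member = pool.waitQueue.shift();
        if (member.cancelled) continue; // its timeout already fired

        clearTimeout(member.timer);
        member.callback(undefined, pool.availableConnections.shift());
      }
    }

Draining the queue in FIFO order is also what preserves the ordering asserted by the wait-queue-fairness spec test above.
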
--- lib/cmap/connection_pool.js | 132 ++++++++++-------- .../wait-queue-timeout.json | 2 +- .../wait-queue-timeout.yml | 2 +- test/unit/cmap/connection_pool.test.js | 20 +-- 4 files changed, 90 insertions(+), 66 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 5f878260b50..3f8405fda99 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -3,15 +3,11 @@ const Denque = require('denque'); const EventEmitter = require('events').EventEmitter; const makeCounter = require('../utils').makeCounter; +const MongoError = require('../core/error').MongoError; const Connection = require('./connection').Connection; -const calculateDurationInMs = require('../core/utils').calculateDurationInMs; const eachAsync = require('../core/utils').eachAsync; const connect = require('../core/connection/connect'); -const common = require('../core/sdam/common'); -const drainTimerQueue = common.drainTimerQueue; -const clearAndRemoveTimerFrom = common.clearAndRemoveTimerFrom; - const errors = require('./errors'); const PoolClosedError = errors.PoolClosedError; const WaitQueueTimeoutError = errors.WaitQueueTimeoutError; @@ -31,10 +27,11 @@ const PoolClearedEvent = events.PoolClearedEvent; const kConnections = Symbol('connections'); const kPermits = Symbol('permits'); const kMinPoolSizeTimer = Symbol('minPoolSizeTimer'); -const kAcquireTimers = Symbol('acquireTimers'); const kGeneration = Symbol('generation'); const kConnectionCounter = Symbol('connectionCounter'); const kCancellationToken = Symbol('cancellationToken'); +const kWaitQueue = Symbol('waitQueue'); +const kCancelled = Symbol('cancelled'); const VALID_POOL_OPTIONS = new Set([ // `connect` options @@ -124,11 +121,11 @@ class ConnectionPool extends EventEmitter { this[kConnections] = new Denque(); this[kPermits] = this.options.maxPoolSize; this[kMinPoolSizeTimer] = undefined; - this[kAcquireTimers] = new Set(); this[kGeneration] = 0; this[kConnectionCounter] = makeCounter(1); this[kCancellationToken] = new EventEmitter(); this[kCancellationToken].setMaxListeners(Infinity); + this[kWaitQueue] = new Denque(); process.nextTick(() => { this.emit('connectionPoolCreated', new PoolCreatedEvent(this)); @@ -153,56 +150,23 @@ class ConnectionPool extends EventEmitter { } const pool = this; - const maxPoolSize = this.options.maxPoolSize; const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; - function attemptAcquire(start) { - const duration = calculateDurationInMs(start); - if (duration >= waitQueueTimeoutMS) { - callback(new WaitQueueTimeoutError(pool)); - return; - } - - while (pool.availableConnectionCount > 0) { - const connection = pool[kConnections].pop(); - const isStale = connectionIsStale(pool, connection); - const isIdle = connectionIsIdle(pool, connection); - if (!isStale && !isIdle && !connection.closed) { - pool.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(pool, connection)); - callback(null, connection); - return; - } - - const reason = connection.closed ? 'error' : isStale ? 
'stale' : 'idle'; - destroyConnection(pool, connection, reason); - } - - if (maxPoolSize <= 0 || pool.totalConnectionCount < maxPoolSize) { - createConnection(pool); - } - - const retryAcquire = () => { - pool.removeListener('connectionReady', retryAcquire); - pool.removeListener('connectionCheckedIn', retryAcquire); - - clearAndRemoveTimerFrom(acquireTimer, pool[kAcquireTimers]); - attemptAcquire(start); - }; + // add this request to the wait queue + const waitQueueMember = { callback }; + waitQueueMember.timer = setTimeout(() => { + waitQueueMember[kCancelled] = true; + waitQueueMember.timer = undefined; - const acquireTimer = setTimeout(() => { - pool.removeListener('connectionReady', retryAcquire); - pool.removeListener('connectionCheckedIn', retryAcquire); + pool.emit('connectionCheckOutFailed', new ConnectionCheckOutFailedEvent(pool, 'timeout')); + waitQueueMember.callback(new WaitQueueTimeoutError(pool)); + }, waitQueueTimeoutMS); - pool.emit('connectionCheckOutFailed', new ConnectionCheckOutFailedEvent(pool, 'timeout')); - callback(new WaitQueueTimeoutError(pool)); - }, waitQueueTimeoutMS - duration); + // place the member at the end of the wait queue + this[kWaitQueue].push(waitQueueMember); - pool[kAcquireTimers].add(acquireTimer); - pool.once('connectionReady', retryAcquire); - pool.once('connectionCheckedIn', retryAcquire); - } - - attemptAcquire(process.hrtime()); + // process the wait queue + processWaitQueue(this); } /** @@ -218,6 +182,7 @@ class ConnectionPool extends EventEmitter { // Properly adjust state of connection if (!willDestroy) { connection.markAvailable(); + this[kConnections].push(connection); } @@ -227,6 +192,8 @@ class ConnectionPool extends EventEmitter { const reason = connection.closed ? 'error' : poolClosed ? 'poolClosed' : 'stale'; destroyConnection(this, connection, reason); } + + processWaitQueue(this); } /** @@ -260,8 +227,16 @@ class ConnectionPool extends EventEmitter { // immediately cancel any in-flight connections this[kCancellationToken].emit('cancel'); - // drain and clear all timers - drainTimerQueue(this[kAcquireTimers]); + // drain the wait queue + while (this[kWaitQueue].length) { + const waitQueueMember = this[kWaitQueue].pop(); + clearTimeout(waitQueueMember.timer); + if (!waitQueueMember[kCancelled]) { + waitQueueMember.callback(new MongoError('connection pool closed')); + } + } + + // clear the min pool size timer if (this[kMinPoolSizeTimer]) { clearTimeout(this[kMinPoolSizeTimer]); } @@ -382,21 +357,66 @@ function createConnection(pool, callback) { pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); - pool[kConnections].push(connection); connection.markAvailable(); pool.emit('connectionReady', new ConnectionReadyEvent(pool, connection)); + // if a callback has been provided, check out the connection immediately if (typeof callback === 'function') { - callback(null, connection); + pool.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(pool, connection)); + callback(undefined, connection); + return; } + + // otherwise add it to the pool for later acquisition, and try to process the wait queue + pool[kConnections].push(connection); + processWaitQueue(pool); }); } function destroyConnection(pool, connection, reason) { pool.emit('connectionClosed', new ConnectionClosedEvent(pool, connection, reason)); + + // allow more connections to be created + pool[kPermits]++; + + // destroy the connection process.nextTick(() => connection.destroy()); } +function processWaitQueue(pool) { + if (pool.closed) { + return; 
+ } + + while (pool[kWaitQueue].length && pool.availableConnectionCount) { + const waitQueueMember = pool[kWaitQueue].peekFront(); + if (waitQueueMember[kCancelled]) { + pool[kWaitQueue].shift(); + continue; + } + + const connection = pool[kConnections].shift(); + const isStale = connectionIsStale(pool, connection); + const isIdle = connectionIsIdle(pool, connection); + if (!isStale && !isIdle && !connection.closed) { + pool.emit('connectionCheckedOut', new ConnectionCheckedOutEvent(pool, connection)); + clearTimeout(waitQueueMember.timer); + pool[kWaitQueue].shift(); + waitQueueMember.callback(undefined, connection); + return; + } + + const reason = connection.closed ? 'error' : isStale ? 'stale' : 'idle'; + destroyConnection(pool, connection, reason); + } + + const maxPoolSize = pool.options.maxPoolSize; + if (pool[kWaitQueue].length && (maxPoolSize <= 0 || pool.totalConnectionCount < maxPoolSize)) { + createConnection(pool); + return; + } +} + /** * A callback provided to `withConnection` * diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json index ee7cf279552..c489be36a19 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json @@ -4,7 +4,7 @@ "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", "poolOptions": { "maxPoolSize": 1, - "waitQueueTimeoutMS": 20 + "waitQueueTimeoutMS": 50 }, "operations": [ { diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml index eba4ab638da..bc3fd20c21d 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml @@ -3,7 +3,7 @@ style: unit description: must aggressively timeout threads enqueued longer than waitQueueTimeoutMS poolOptions: maxPoolSize: 1 - waitQueueTimeoutMS: 20 + waitQueueTimeoutMS: 50 operations: # Check out only possible connection - name: checkOut diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 081d94a1a64..2a67f31089d 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -76,15 +76,18 @@ describe('Connection Pool', function() { expect(closeEvent) .have.property('reason') .equal('error'); - - pool.close(done); }); }); - pool.withConnection((err, conn, cb) => { - expect(err).to.not.exist; - cb(); - }); + pool.withConnection( + (err, conn, cb) => { + expect(err).to.not.exist; + cb(); + }, + () => { + pool.close(done); + } + ); }); it('should propagate socket timeouts to connections', function(done) { @@ -98,7 +101,7 @@ describe('Connection Pool', function() { }); const pool = new ConnectionPool( - Object.assign({ bson: new BSON(), maxPoolSize: 1, socketTimeout: 500 }, server.address()) + Object.assign({ bson: new BSON(), maxPoolSize: 1, socketTimeout: 50 }, server.address()) ); pool.withConnection( @@ -150,7 +153,7 @@ describe('Connection Pool', function() { }); const pool = new ConnectionPool( - Object.assign({ bson: new BSON(), waitQueueTimeoutMS: 250 }, server.address()) + Object.assign({ bson: new BSON(), waitQueueTimeoutMS: 200 }, server.address()) ); const callback = err => { @@ -328,6 +331,7 @@ describe('Connection Pool', function() { if (this._killed || this._error) { return; } + this._promise = this._promise .then(() 
=> this._runOperation(op)) .catch(e => (this._error = e)); From cca5b490ae152dd2e00f900b7476ca49502ebd95 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 26 Dec 2019 11:19:41 -0500 Subject: [PATCH 043/130] fix: ensure sync errors are thrown, and don't callback twice --- lib/core/utils.js | 31 +++++++++++++++---------------- test/unit/utils.test.js | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 16 deletions(-) create mode 100644 test/unit/utils.test.js diff --git a/lib/core/utils.js b/lib/core/utils.js index 5abd467a3ec..e32fc501d40 100644 --- a/lib/core/utils.js +++ b/lib/core/utils.js @@ -131,30 +131,29 @@ function isPromiseLike(maybePromise) { * @param {function} callback The callback called after every item has been iterated */ function eachAsync(arr, eachFn, callback) { - if (arr.length === 0) { - callback(null); + arr = arr || []; + + let idx = 0; + let awaiting = 0; + for (idx = 0; idx < arr.length; ++idx) { + awaiting++; + eachFn(arr[idx], eachCallback); + } + + if (awaiting === 0) { + callback(); return; } - const length = arr.length; - let completed = 0; function eachCallback(err) { + awaiting--; if (err) { - callback(err, null); + callback(err); return; } - if (++completed === length) { - callback(null); - } - } - - for (let idx = 0; idx < length; ++idx) { - try { - eachFn(arr[idx], eachCallback); - } catch (err) { - callback(err); - return; + if (idx === arr.length && awaiting <= 0) { + callback(); } } } diff --git a/test/unit/utils.test.js b/test/unit/utils.test.js new file mode 100644 index 00000000000..367e43625b7 --- /dev/null +++ b/test/unit/utils.test.js @@ -0,0 +1,36 @@ +'use strict'; +const eachAsync = require('../../lib/core/utils').eachAsync; +const expect = require('chai').expect; + +describe('utils', function() { + describe('eachAsync', function() { + it('should callback with an error', function(done) { + eachAsync( + [{ error: false }, { error: true }], + (item, cb) => { + cb(item.error ? new Error('error requested') : null); + }, + err => { + expect(err).to.exist; + done(); + } + ); + }); + + it('should propagate a synchronously thrown error', function(done) { + expect(() => + eachAsync( + [{}], + () => { + throw new Error('something wicked'); + }, + err => { + expect(err).to.not.exist; + done(err); + } + ) + ).to.throw(/something wicked/); + done(); + }); + }); +}); From 0a2d4e9e62c689942c91e16797f9d5570351c65d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 26 Dec 2019 16:07:11 -0500 Subject: [PATCH 044/130] fix(execute-operation): don't swallow synchronous errors This simplifies the logic for `executeOperation` such that errors are not swallowed accidentally when they are thrown synchronously. 
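As a rough illustration of the pattern this patch moves toward, the following is a minimal sketch — not the driver's actual `executeOperation` — of a helper that serves both callback and Promise callers while letting synchronously thrown errors surface to the caller instead of being converted into a callback invocation. The names `execute` and `runOperation` are hypothetical stand-ins introduced only for this example.

```js
// Minimal sketch (assumed names, not the driver's API): adapt a missing
// callback to a Promise, but let synchronous throws propagate.
function execute(runOperation, callback) {
  let result;
  if (typeof callback !== 'function') {
    // no callback provided: settle a Promise through a synthesized callback
    result = new Promise((resolve, reject) => {
      callback = (err, res) => (err ? reject(err) : resolve(res));
    });
  }

  try {
    // asynchronous failures still flow through the callback/Promise
    runOperation(callback);
  } catch (e) {
    // ...any cleanup (e.g. ending a session) would happen here...
    // re-throw so a synchronous failure reaches the caller rather than
    // being swallowed or reported twice through the callback
    throw e;
  }

  return result;
}
```

Used as `execute(cb => cb(null, 42)).then(console.log)`, the Promise path behaves as before, while `execute(() => { throw new Error('boom'); })` now throws at the call site — the behaviour the commit message describes.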
NODE-2400 --- lib/operations/execute_operation.js | 71 ++++++++++------------- lib/operations/find_one.js | 22 ++++--- test/functional/operation_example.test.js | 5 +- 3 files changed, 46 insertions(+), 52 deletions(-) diff --git a/lib/operations/execute_operation.js b/lib/operations/execute_operation.js index 77aa903a966..da487279e24 100644 --- a/lib/operations/execute_operation.js +++ b/lib/operations/execute_operation.js @@ -53,54 +53,45 @@ function executeOperation(topology, operation, callback) { } } - const makeExecuteCallback = (resolve, reject) => - function executeCallback(err, result) { - if (session && session.owner === owner) { - session.endSession(() => { - if (operation.session === session) { - operation.clearSession(); - } - if (err) return reject(err); - resolve(result); - }); - } else { + let result; + if (typeof callback !== 'function') { + result = new Promise((resolve, reject) => { + callback = (err, res) => { if (err) return reject(err); - resolve(result); - } - }; - - // Execute using callback - if (typeof callback === 'function') { - const handler = makeExecuteCallback( - result => callback(null, result), - err => callback(err, null) - ); + resolve(res); + }; + }); + } - try { - if (operation.hasAspect(Aspect.EXECUTE_WITH_SELECTION)) { - return executeWithServerSelection(topology, operation, handler); - } else { - return operation.execute(handler); + function executeCallback(err, result) { + if (session && session.owner === owner) { + session.endSession(); + if (operation.session === session) { + operation.clearSession(); } - } catch (e) { - handler(e); - throw e; } - } - return new Promise(function(resolve, reject) { - const handler = makeExecuteCallback(resolve, reject); + callback(err, result); + } - try { - if (operation.hasAspect(Aspect.EXECUTE_WITH_SELECTION)) { - return executeWithServerSelection(topology, operation, handler); - } else { - return operation.execute(handler); + try { + if (operation.hasAspect(Aspect.EXECUTE_WITH_SELECTION)) { + executeWithServerSelection(topology, operation, executeCallback); + } else { + operation.execute(executeCallback); + } + } catch (e) { + if (session && session.owner === owner) { + session.endSession(); + if (operation.session === session) { + operation.clearSession(); } - } catch (e) { - handler(e); } - }); + + throw e; + } + + return result; } function supportsRetryableReads(server) { diff --git a/lib/operations/find_one.js b/lib/operations/find_one.js index d3037a6dbfa..b584db643d9 100644 --- a/lib/operations/find_one.js +++ b/lib/operations/find_one.js @@ -17,16 +17,20 @@ class FindOneOperation extends OperationBase { const query = this.query; const options = this.options; - const cursor = coll - .find(query, options) - .limit(-1) - .batchSize(1); + try { + const cursor = coll + .find(query, options) + .limit(-1) + .batchSize(1); - // Return the item - cursor.next((err, item) => { - if (err != null) return handleCallback(callback, toError(err), null); - handleCallback(callback, null, item); - }); + // Return the item + cursor.next((err, item) => { + if (err != null) return handleCallback(callback, toError(err), null); + handleCallback(callback, null, item); + }); + } catch (e) { + callback(e); + } } } diff --git a/test/functional/operation_example.test.js b/test/functional/operation_example.test.js index 95939260ff2..45b89d3b5b2 100644 --- a/test/functional/operation_example.test.js +++ b/test/functional/operation_example.test.js @@ -3718,7 +3718,7 @@ describe('Operation Examples', function() { var configuration = 
this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { - test.equal(null, err); + expect(err).to.not.exist; // LINE var MongoClient = require('mongodb').MongoClient, // LINE test = require('assert'); @@ -3732,8 +3732,7 @@ describe('Operation Examples', function() { // BEGIN // Close the connection with a callback that is optional client.close(function(err) { - test.equal(null, err); - + expect(err).to.not.exist; done(); }); }); From 7d26a3348c2005be0c53530408e51c3d3ee2186d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 29 Dec 2019 09:08:48 -0500 Subject: [PATCH 045/130] refactor: ensure `endSessions` is sent before topology is closed While its true that we don't want to take action based on the response sent for an `endSessions` command, we do still want to ensure that the command has been sent before the topology begins its close process. --- lib/core/sdam/topology.js | 42 +++++++++++++++++++-------------------- lib/core/sessions.js | 19 +++++++++++++++--- lib/operations/connect.js | 8 ++++---- 3 files changed, 41 insertions(+), 28 deletions(-) diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index 0863c7c164e..1c958ddcdfe 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -24,6 +24,7 @@ const getMMAPError = require('../topologies/shared').getMMAPError; const makeStateMachine = require('../utils').makeStateMachine; const eachAsync = require('../utils').eachAsync; const emitDeprecationWarning = require('../../utils').emitDeprecationWarning; +const ServerSessionPool = require('../sessions').ServerSessionPool; const common = require('./common'); const drainTimerQueue = common.drainTimerQueue; @@ -182,7 +183,7 @@ class Topology extends EventEmitter { // a map of server instances to normalized addresses servers: new Map(), // Server Session Pool - sessionPool: null, + sessionPool: new ServerSessionPool(this), // Active client sessions sessions: new Set(), // Promise library @@ -352,11 +353,6 @@ class Topology extends EventEmitter { drainTimerQueue(this.s.iterationTimers); drainTimerQueue(this.s.connectionTimers); - if (this.s.sessionPool) { - this.s.sessions.forEach(session => session.endSession()); - this.s.sessionPool.endAllPooledSessions(); - } - if (this.s.srvPoller) { this.s.srvPoller.stop(); if (this.s.handleSrvPolling) { @@ -370,26 +366,28 @@ class Topology extends EventEmitter { delete this.s.detectTopologyDescriptionChange; } - // defer state transition because we may need to send an `endSessions` command above stateTransition(this, STATE_CLOSING); - eachAsync( - Array.from(this.s.servers.values()), - (server, cb) => destroyServer(server, this, options, cb), - () => { - this.s.servers.clear(); + this.s.sessions.forEach(session => session.endSession()); + this.s.sessionPool.endAllPooledSessions(() => { + eachAsync( + Array.from(this.s.servers.values()), + (server, cb) => destroyServer(server, this, options, cb), + err => { + this.s.servers.clear(); - // emit an event for close - this.emit('topologyClosed', new monitoring.TopologyClosedEvent(this.s.id)); + // emit an event for close + this.emit('topologyClosed', new monitoring.TopologyClosedEvent(this.s.id)); - stateTransition(this, STATE_CLOSED); - this.emit('close'); + stateTransition(this, STATE_CLOSED); + this.emit('close'); - if (typeof callback === 'function') { - callback(); + if (typeof callback === 'function') { + callback(err); + } } - } - ); + ); + }); } /** @@ -791,7 +789,9 @@ 
function destroyServer(server, topology, options, callback) { new monitoring.ServerClosedEvent(topology.s.id, server.description.address) ); - if (typeof callback === 'function') callback(null, null); + if (typeof callback === 'function') { + callback(); + } }); } diff --git a/lib/core/sessions.js b/lib/core/sessions.js index 13576a46cc9..fcd3384645e 100644 --- a/lib/core/sessions.js +++ b/lib/core/sessions.js @@ -588,10 +588,23 @@ class ServerSessionPool { * Ends all sessions in the session pool. * @ignore */ - endAllPooledSessions() { + endAllPooledSessions(callback) { if (this.sessions.length) { - this.topology.endSessions(this.sessions.map(session => session.id)); - this.sessions = []; + this.topology.endSessions( + this.sessions.map(session => session.id), + () => { + this.sessions = []; + if (typeof callback === 'function') { + callback(); + } + } + ); + + return; + } + + if (typeof callback === 'function') { + callback(); } } diff --git a/lib/operations/connect.js b/lib/operations/connect.js index 172c099102c..2f6f6f14415 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -221,10 +221,10 @@ function addListeners(mongoClient, topology) { function assignTopology(client, topology) { client.topology = topology; - topology.s.sessionPool = - topology instanceof NativeTopology - ? new ServerSessionPool(topology) - : new ServerSessionPool(topology.s.coreTopology); + + if (!(topology instanceof NativeTopology)) { + topology.s.sessionPool = new ServerSessionPool(topology.s.coreTopology); + } } // Clear out all events From 81e8897973f4562c96d6da2dfac44823ad237b5d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 08:48:06 -0500 Subject: [PATCH 046/130] test: fix flakey change stream example, only listen for event once --- test/examples/change_streams.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/examples/change_streams.js b/test/examples/change_streams.js index 95221f4afa2..2e1eed9ff02 100644 --- a/test/examples/change_streams.js +++ b/test/examples/change_streams.js @@ -129,7 +129,7 @@ describe('examples(change-stream):', function() { const changeStream = collection.watch(); let newChangeStream; - changeStream.on('change', next => { + changeStream.once('change', next => { const resumeToken = changeStream.resumeToken; changeStream.close(); From cd0e09c44f50c337f57eae63137351a0ccf95633 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 08:48:31 -0500 Subject: [PATCH 047/130] test: ensure output db exists for sharded clusters --- test/functional/mapreduce.test.js | 74 ++++++++++++++++++------------- 1 file changed, 42 insertions(+), 32 deletions(-) diff --git a/test/functional/mapreduce.test.js b/test/functional/mapreduce.test.js index 1991d3e96c4..402dc088aff 100644 --- a/test/functional/mapreduce.test.js +++ b/test/functional/mapreduce.test.js @@ -364,44 +364,54 @@ describe('MapReduce', function() { var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); + const outDb = client.db('outputCollectionDb'); // Create a test collection db.createCollection('test_map_reduce_functions', function(err, collection) { - // Insert some documents to perform map reduce over - collection.insert( - [{ user_id: 1 }, { user_id: 2 }], - configuration.writeConcernMax(), - function(err) { - test.equal(null, err); - // Map function - var map = function() { - emit(this.user_id, 1); // eslint-disable-line - }; - // Reduce 
function - var reduce = function() { - return 1; - }; - - // Perform the map reduce - collection.mapReduce( - map, - reduce, - { out: { replace: 'tempCollection', db: 'outputCollectionDb' } }, - function(err, collection) { - // Mapreduce returns the temporary collection with the results - collection.findOne({ _id: 1 }, function(err, result) { - test.equal(1, result.value); - - collection.findOne({ _id: 2 }, function(err, result) { + // create the output collection + outDb.createCollection('tempCollection', err => { + test.equal(null, err); + + // Insert some documents to perform map reduce over + collection.insert( + [{ user_id: 1 }, { user_id: 2 }], + configuration.writeConcernMax(), + function(err) { + test.equal(null, err); + // Map function + var map = function() { + emit(this.user_id, 1); // eslint-disable-line + }; + // Reduce function + var reduce = function() { + return 1; + }; + + // Perform the map reduce + collection.mapReduce( + map, + reduce, + { out: { replace: 'tempCollection', db: 'outputCollectionDb' } }, + function(err, collection) { + test.equal(null, err); + + // Mapreduce returns the temporary collection with the results + collection.findOne({ _id: 1 }, function(err, result) { + test.equal(null, err); test.equal(1, result.value); - client.close(done); + collection.findOne({ _id: 2 }, function(err, result) { + test.equal(null, err); + test.equal(1, result.value); + + client.close(done); + }); }); - }); - } - ); - } - ); + } + ); + } + ); + }); }); }); } From 8a456db1456b9029793ef1573e67162d766af59e Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 08:48:46 -0500 Subject: [PATCH 048/130] test: increase wait queue timeout due to mock server latency --- .../connection-monitoring-and-pooling/wait-queue-timeout.json | 2 +- .../connection-monitoring-and-pooling/wait-queue-timeout.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json index c489be36a19..f28d69f61c3 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.json @@ -4,7 +4,7 @@ "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", "poolOptions": { "maxPoolSize": 1, - "waitQueueTimeoutMS": 50 + "waitQueueTimeoutMS": 100 }, "operations": [ { diff --git a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml index bc3fd20c21d..1a98cba8359 100644 --- a/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml +++ b/test/spec/connection-monitoring-and-pooling/wait-queue-timeout.yml @@ -3,7 +3,7 @@ style: unit description: must aggressively timeout threads enqueued longer than waitQueueTimeoutMS poolOptions: maxPoolSize: 1 - waitQueueTimeoutMS: 50 + waitQueueTimeoutMS: 100 operations: # Check out only possible connection - name: checkOut From b8805dc045392cd1193b556a695dabd7b81d4ba4 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 09:31:53 -0500 Subject: [PATCH 049/130] fix: ignore connection errors during pool destruction There are some issues with older versions of node where errors can be emitted after `destroy` is called (e.g. EUNREACH, ECONNRESET). We want to ignore these during pool close. 
--- lib/core/connection/pool.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/core/connection/pool.js b/lib/core/connection/pool.js index 000dbeb01ed..3fcec67ed7a 100644 --- a/lib/core/connection/pool.js +++ b/lib/core/connection/pool.js @@ -641,6 +641,9 @@ function destroy(self, connections, options, callback) { conn.removeAllListeners(eventName); } + // ignore any errors during destruction + conn.on('error', () => {}); + conn.destroy(options, cb); }, err => { From abe0ff974857263a429398b033a671514fad5c35 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 10:35:04 -0500 Subject: [PATCH 050/130] refactor: do stricter equality checks for server errors The old ServerDescription comparison operator was checking whether incoming errors were the same object as the existing one, a condition that would always be false. This change introduces a method for more strictly comparing two errors. --- lib/core/sdam/server_description.js | 3 +- lib/core/utils.js | 21 +++++++++++ test/unit/sdam/server_description.test.js | 44 +++++++++++++++++++++++ 3 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 test/unit/sdam/server_description.test.js diff --git a/lib/core/sdam/server_description.js b/lib/core/sdam/server_description.js index fe528814a09..cffed504ece 100644 --- a/lib/core/sdam/server_description.js +++ b/lib/core/sdam/server_description.js @@ -2,6 +2,7 @@ const arrayStrictEqual = require('../utils').arrayStrictEqual; const tagsStrictEqual = require('../utils').tagsStrictEqual; +const errorStrictEqual = require('../utils').errorStrictEqual; const ServerType = require('./common').ServerType; const WRITABLE_SERVER_TYPES = new Set([ @@ -121,7 +122,7 @@ class ServerDescription { equals(other) { return ( other != null && - this.error === other.error && + errorStrictEqual(this.error, other.error) && this.type === other.type && this.minWireVersion === other.minWireVersion && this.me === other.me && diff --git a/lib/core/utils.js b/lib/core/utils.js index e32fc501d40..9e71f09f473 100644 --- a/lib/core/utils.js +++ b/lib/core/utils.js @@ -176,6 +176,26 @@ function tagsStrictEqual(tags, tags2) { return tagsKeys.length === tags2Keys.length && tagsKeys.every(key => tags2[key] === tags[key]); } +function errorStrictEqual(lhs, rhs) { + if (lhs === rhs) { + return true; + } + + if ((lhs == null && rhs != null) || (lhs != null && rhs == null)) { + return false; + } + + if (lhs.constructor.name !== rhs.constructor.name) { + return false; + } + + if (lhs.message !== rhs.message) { + return false; + } + + return true; +} + function makeStateMachine(stateTable) { return function stateTransition(target, newState) { const legalStates = stateTable[target.s.state]; @@ -203,5 +223,6 @@ module.exports = { isUnifiedTopology, arrayStrictEqual, tagsStrictEqual, + errorStrictEqual, makeStateMachine }; diff --git a/test/unit/sdam/server_description.test.js b/test/unit/sdam/server_description.test.js new file mode 100644 index 00000000000..30de1592691 --- /dev/null +++ b/test/unit/sdam/server_description.test.js @@ -0,0 +1,44 @@ +'use strict'; +const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription; +const expect = require('chai').expect; + +describe('ServerDescription', function() { + describe('error equality', function() { + [ + { + description: 'equal error types and messages', + lhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }), + rhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') 
}), + equal: true + }, + { + description: 'equal error types and unequal messages', + lhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }), + rhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('blah') }), + equal: false + }, + { + description: 'unequal error types and equal messages', + lhs: new ServerDescription('127.0.0.1:27017', null, { error: new TypeError('test') }), + rhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }), + equal: false + }, + { + description: 'null lhs', + lhs: new ServerDescription('127.0.0.1:27017', null, { error: null }), + rhs: new ServerDescription('127.0.0.1:27017', null, { error: new Error('test') }), + equal: false + }, + { + description: 'null rhs', + lhs: new ServerDescription('127.0.0.1:27017', null, { error: new TypeError('test') }), + rhs: new ServerDescription('127.0.0.1:27017', null, { error: undefined }), + equal: false + } + ].forEach(test => { + it(test.description, function() { + expect(test.lhs.equals(test.rhs)).to.equal(test.equal); + }); + }); + }); +}); From 8c89b891bd1db7570547edffa7e30e092577d130 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 10:50:41 -0500 Subject: [PATCH 051/130] refactor: don't store errors directly on `TopologyDescription` Propagating and storing an error on the `TopologyDescription` has the unintended side-effect of never removing said error, thus effectively poisoning the description in the event of failure recovery. --- lib/core/sdam/topology_description.js | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/core/sdam/topology_description.js b/lib/core/sdam/topology_description.js index 51b4ecde9ad..ba6a2507ee6 100644 --- a/lib/core/sdam/topology_description.js +++ b/lib/core/sdam/topology_description.js @@ -28,8 +28,7 @@ class TopologyDescription { maxSetVersion, maxElectionId, commonWireVersion, - options, - error + options ) { options = options || {}; @@ -47,7 +46,6 @@ class TopologyDescription { this.logicalSessionTimeoutMinutes = null; this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 0; this.localThresholdMS = options.localThresholdMS || 0; - this.error = error; this.commonWireVersion = commonWireVersion || null; // save this locally, but don't display when printing the instance out @@ -133,7 +131,6 @@ class TopologyDescription { let maxSetVersion = this.maxSetVersion; let maxElectionId = this.maxElectionId; let commonWireVersion = this.commonWireVersion; - let error = serverDescription.error || this.error; const serverType = serverDescription.type; let serverDescriptions = new Map(this.servers); @@ -159,8 +156,7 @@ class TopologyDescription { maxSetVersion, maxElectionId, commonWireVersion, - this.options, - error + this.options ); } @@ -241,11 +237,17 @@ class TopologyDescription { maxSetVersion, maxElectionId, commonWireVersion, - this.options, - error + this.options ); } + get error() { + const descriptionsWithError = Array.from(this.servers.values()).filter(sd => sd.error); + if (descriptionsWithError.length > 0) { + return descriptionsWithError[0].error; + } + } + /** * Determines if the topology description has any known servers */ From aee8f57bc219391c627a4e8d7189be4649580e57 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 12:50:26 -0500 Subject: [PATCH 052/130] refactor: improve error message for MongoTimeoutErrors We will now surface the first encountered error in the set of known server descriptions as the primary error message for a 
timeout error. The `reason` field for server selection errors will now include the `TopologyDescription` for better traceability NODE-2397 --- lib/core/error.js | 9 +++++++-- lib/core/sdam/server_selection.js | 4 ++-- test/functional/core/server.test.js | 3 +-- test/functional/scram_sha_256.test.js | 10 ++-------- test/unit/sdam/server_selection/select_servers.test.js | 4 ++-- 5 files changed, 14 insertions(+), 16 deletions(-) diff --git a/lib/core/error.js b/lib/core/error.js index 200d8a11267..e894a80bef4 100644 --- a/lib/core/error.js +++ b/lib/core/error.js @@ -95,9 +95,14 @@ class MongoParseError extends MongoError { */ class MongoTimeoutError extends MongoError { constructor(message, reason) { - super(message); + if (reason && reason.error) { + super(reason.error); + } else { + super(message); + } + this.name = 'MongoTimeoutError'; - if (reason != null) { + if (reason) { this.reason = reason; } } diff --git a/lib/core/sdam/server_selection.js b/lib/core/sdam/server_selection.js index bb01b1d6a98..996aca00fd2 100644 --- a/lib/core/sdam/server_selection.js +++ b/lib/core/sdam/server_selection.js @@ -261,7 +261,7 @@ function selectServers(topology, selector, timeout, start, callback) { if (duration >= timeout) { return callback( new MongoTimeoutError(`Server selection timed out after ${timeout} ms`), - topology.description.error + topology.description ); } @@ -308,7 +308,7 @@ function selectServers(topology, selector, timeout, start, callback) { callback( new MongoTimeoutError( `Server selection timed out after ${timeout} ms`, - topology.description.error + topology.description ) ); }, timeout - duration); diff --git a/test/functional/core/server.test.js b/test/functional/core/server.test.js index 0c2a8e2aeb7..c758ad4279f 100644 --- a/test/functional/core/server.test.js +++ b/test/functional/core/server.test.js @@ -1002,8 +1002,7 @@ describe('Server tests', function() { let err; try { expect(error).to.be.an.instanceOf(Error); - const errorMessage = error.reason ? error.reason.message : error.message; - expect(errorMessage).to.match(/but this version of the Node.js Driver requires/); + expect(error).to.match(/but this version of the Node.js Driver requires/); } catch (e) { err = e; } diff --git a/test/functional/scram_sha_256.test.js b/test/functional/scram_sha_256.test.js index 36d3d41eb7b..76edef5a503 100644 --- a/test/functional/scram_sha_256.test.js +++ b/test/functional/scram_sha_256.test.js @@ -186,10 +186,7 @@ describe('SCRAM-SHA-256 auth', function() { return withClient( this.configuration.newClient({}, options), () => Promise.reject(new Error('This request should have failed to authenticate')), - err => { - const errMessage = err.reason ? err.reason.message : err; - expect(errMessage).to.match(/Authentication failed/); - } + err => expect(err).to.match(/Authentication failed/) ); } }); @@ -223,10 +220,7 @@ describe('SCRAM-SHA-256 auth', function() { withClient( this.configuration.newClient({}, options), () => Promise.reject(new Error('This request should have failed to authenticate')), - err => { - const errMessage = err.reason ? 
err.reason.message : err; - expect(errMessage).to.match(/Authentication failed/); - } + err => expect(err).to.match(/Authentication failed/) ); return Promise.all([getErrorMsg(noUsernameOptions), getErrorMsg(badPasswordOptions)]); diff --git a/test/unit/sdam/server_selection/select_servers.test.js b/test/unit/sdam/server_selection/select_servers.test.js index fe55235545b..9561b389604 100644 --- a/test/unit/sdam/server_selection/select_servers.test.js +++ b/test/unit/sdam/server_selection/select_servers.test.js @@ -35,7 +35,7 @@ describe('selectServers', function() { selectServers(topology, ReadPreference.primary, 500, process.hrtime(), err => { expect(err).to.exist; expect(err).to.match(/Server selection timed out/); - expect(err).to.not.have.property('reason'); + expect(err).to.have.property('reason'); done(); }); @@ -60,7 +60,7 @@ describe('selectServers', function() { selectServers(topology, ReadPreference.primary, 1000, process.hrtime(), err => { expect(err).to.exist; expect(err).to.match(/Server selection timed out/); - expect(err).to.not.have.property('reason'); + expect(err).to.have.property('reason'); // expect a call to monitor for initial server creation, and another for the server selection expect(serverMonitor) From 0cf7ec9811ffa9004122f6e1128aed5aba0ad2c4 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 10:13:57 -0500 Subject: [PATCH 053/130] feat: introduce `MongoServerSelectionError` This error type subclasses the `MongoTimeout` error, but provides more context that the error was specific to an issue with server selection. --- index.js | 1 + lib/core/error.js | 19 ++++++++++++++++++- lib/core/index.js | 1 + lib/core/sdam/server_selection.js | 6 +++--- test/unit/sdam/server_selection/spec.test.js | 6 +++--- 5 files changed, 26 insertions(+), 7 deletions(-) diff --git a/index.js b/index.js index 51d06a765e8..4e9e6359e86 100644 --- a/index.js +++ b/index.js @@ -11,6 +11,7 @@ const connect = require('./lib/mongo_client').connect; connect.MongoError = core.MongoError; connect.MongoNetworkError = core.MongoNetworkError; connect.MongoTimeoutError = core.MongoTimeoutError; +connect.MongoServerSelectionError = core.MongoServerSelectionError; connect.MongoParseError = core.MongoParseError; connect.MongoWriteConcernError = core.MongoWriteConcernError; connect.MongoBulkWriteError = require('./lib/bulk/common').BulkWriteError; diff --git a/lib/core/error.js b/lib/core/error.js index e894a80bef4..fd1eaf7b4c1 100644 --- a/lib/core/error.js +++ b/lib/core/error.js @@ -96,7 +96,7 @@ class MongoParseError extends MongoError { class MongoTimeoutError extends MongoError { constructor(message, reason) { if (reason && reason.error) { - super(reason.error); + super(reason.error.message || reason.error); } else { super(message); } @@ -108,6 +108,22 @@ class MongoTimeoutError extends MongoError { } } +/** + * An error signifying a client-side server selection error + * + * @param {Error|string|object} message The error message + * @param {string|object} [reason] The reason the timeout occured + * @property {string} message The error message + * @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers + * @extends MongoError + */ +class MongoServerSelectionError extends MongoTimeoutError { + constructor(message, reason) { + super(message, reason); + this.name = 'MongoServerSelectionError'; + } +} + function makeWriteConcernResultObject(input) { const output = Object.assign({}, input); @@ 
-246,6 +262,7 @@ module.exports = { MongoNetworkError, MongoParseError, MongoTimeoutError, + MongoServerSelectionError, MongoWriteConcernError, mongoErrorContextSymbol, isRetryableError, diff --git a/lib/core/index.js b/lib/core/index.js index a7f80738d9c..2da5573a47c 100644 --- a/lib/core/index.js +++ b/lib/core/index.js @@ -20,6 +20,7 @@ module.exports = { MongoNetworkError: require('./error').MongoNetworkError, MongoParseError: require('./error').MongoParseError, MongoTimeoutError: require('./error').MongoTimeoutError, + MongoServerSelectionError: require('./error').MongoServerSelectionError, MongoWriteConcernError: require('./error').MongoWriteConcernError, mongoErrorContextSymbol: require('./error').mongoErrorContextSymbol, // Core diff --git a/lib/core/sdam/server_selection.js b/lib/core/sdam/server_selection.js index 996aca00fd2..d658fc58769 100644 --- a/lib/core/sdam/server_selection.js +++ b/lib/core/sdam/server_selection.js @@ -4,7 +4,7 @@ const TopologyType = require('./common').TopologyType; const ReadPreference = require('../topologies/read_preference'); const MongoError = require('../error').MongoError; const calculateDurationInMs = require('../utils').calculateDurationInMs; -const MongoTimeoutError = require('../error').MongoTimeoutError; +const MongoServerSelectionError = require('../error').MongoServerSelectionError; const common = require('./common'); const STATE_CLOSED = common.STATE_CLOSED; @@ -260,7 +260,7 @@ function selectServers(topology, selector, timeout, start, callback) { const duration = calculateDurationInMs(start); if (duration >= timeout) { return callback( - new MongoTimeoutError(`Server selection timed out after ${timeout} ms`), + new MongoServerSelectionError(`Server selection timed out after ${timeout} ms`), topology.description ); } @@ -306,7 +306,7 @@ function selectServers(topology, selector, timeout, start, callback) { const iterationTimer = setTimeout(() => { topology.removeListener('topologyDescriptionChanged', descriptionChangedHandler); callback( - new MongoTimeoutError( + new MongoServerSelectionError( `Server selection timed out after ${timeout} ms`, topology.description ) diff --git a/test/unit/sdam/server_selection/spec.test.js b/test/unit/sdam/server_selection/spec.test.js index a80fa2b11b6..5d5fb621eb1 100644 --- a/test/unit/sdam/server_selection/spec.test.js +++ b/test/unit/sdam/server_selection/spec.test.js @@ -3,7 +3,7 @@ const path = require('path'); const fs = require('fs'); const core = require('../../../../lib/core'); const Topology = core.Topology; -const MongoTimeoutError = core.MongoTimeoutError; +const MongoServerSelectionError = core.MongoServerSelectionError; const ReadPreference = core.ReadPreference; // TODO: these should be from `core` when legacy topologies are removed @@ -275,7 +275,7 @@ function executeServerSelectionTest(testDefinition, options, testDone) { } // default to serverSelectionTimeoutMS of `100` for unit tests - topology.selectServer(selector, { serverSelectionTimeoutMS: 100 }, (err, server) => { + topology.selectServer(selector, { serverSelectionTimeoutMS: 50 }, (err, server) => { // are we expecting an error? 
if (testDefinition.error) { if (!err) { @@ -287,7 +287,7 @@ function executeServerSelectionTest(testDefinition, options, testDone) { if (err) { // this is another expected error case - if (expectedServers.length === 0 && err instanceof MongoTimeoutError) return done(); + if (expectedServers.length === 0 && err instanceof MongoServerSelectionError) return done(); return done(err); } From b286e7d95178fd70de2a9c2c74461a5371a8e73d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 16 Dec 2019 08:48:46 -0500 Subject: [PATCH 054/130] refactor: move all SDAM events into a common file --- lib/core/sdam/events.js | 124 +++++++++++++++ lib/core/sdam/monitor.js | 126 +++++++++++++++ lib/core/sdam/monitoring.js | 241 ----------------------------- lib/core/sdam/topology.js | 18 +-- test/unit/core/sdam_spec.test.js | 4 +- test/unit/sdam/srv_polling.test.js | 4 +- 6 files changed, 263 insertions(+), 254 deletions(-) create mode 100644 lib/core/sdam/events.js create mode 100644 lib/core/sdam/monitor.js delete mode 100644 lib/core/sdam/monitoring.js diff --git a/lib/core/sdam/events.js b/lib/core/sdam/events.js new file mode 100644 index 00000000000..08a14adca54 --- /dev/null +++ b/lib/core/sdam/events.js @@ -0,0 +1,124 @@ +'use strict'; + +/** + * Published when server description changes, but does NOT include changes to the RTT. + * + * @property {Object} topologyId A unique identifier for the topology + * @property {ServerAddress} address The address (host/port pair) of the server + * @property {ServerDescription} previousDescription The previous server description + * @property {ServerDescription} newDescription The new server description + */ +class ServerDescriptionChangedEvent { + constructor(topologyId, address, previousDescription, newDescription) { + Object.assign(this, { topologyId, address, previousDescription, newDescription }); + } +} + +/** + * Published when server is initialized. + * + * @property {Object} topologyId A unique identifier for the topology + * @property {ServerAddress} address The address (host/port pair) of the server + */ +class ServerOpeningEvent { + constructor(topologyId, address) { + Object.assign(this, { topologyId, address }); + } +} + +/** + * Published when server is closed. + * + * @property {ServerAddress} address The address (host/port pair) of the server + * @property {Object} topologyId A unique identifier for the topology + */ +class ServerClosedEvent { + constructor(topologyId, address) { + Object.assign(this, { topologyId, address }); + } +} + +/** + * Published when topology description changes. + * + * @property {Object} topologyId + * @property {TopologyDescription} previousDescription The old topology description + * @property {TopologyDescription} newDescription The new topology description + */ +class TopologyDescriptionChangedEvent { + constructor(topologyId, previousDescription, newDescription) { + Object.assign(this, { topologyId, previousDescription, newDescription }); + } +} + +/** + * Published when topology is initialized. + * + * @param {Object} topologyId A unique identifier for the topology + */ +class TopologyOpeningEvent { + constructor(topologyId) { + Object.assign(this, { topologyId }); + } +} + +/** + * Published when topology is closed. 
+ * + * @param {Object} topologyId A unique identifier for the topology + */ +class TopologyClosedEvent { + constructor(topologyId) { + Object.assign(this, { topologyId }); + } +} + +/** + * Fired when the server monitor’s ismaster command is started - immediately before + * the ismaster command is serialized into raw BSON and written to the socket. + * + * @property {Object} connectionId The connection id for the command + */ +class ServerHeartbeatStartedEvent { + constructor(connectionId) { + Object.assign(this, { connectionId }); + } +} + +/** + * Fired when the server monitor’s ismaster succeeds. + * + * @param {Number} duration The execution time of the event in ms + * @param {Object} reply The command reply + * @param {Object} connectionId The connection id for the command + */ +class ServerHeartbeatSucceededEvent { + constructor(duration, reply, connectionId) { + Object.assign(this, { connectionId, duration, reply }); + } +} + +/** + * Fired when the server monitor’s ismaster fails, either with an “ok: 0” or a socket exception. + * + * @param {Number} duration The execution time of the event in ms + * @param {MongoError|Object} failure The command failure + * @param {Object} connectionId The connection id for the command + */ +class ServerHeartbeatFailedEvent { + constructor(duration, failure, connectionId) { + Object.assign(this, { connectionId, duration, failure }); + } +} + +module.exports = { + ServerDescriptionChangedEvent, + ServerOpeningEvent, + ServerClosedEvent, + TopologyDescriptionChangedEvent, + TopologyOpeningEvent, + TopologyClosedEvent, + ServerHeartbeatStartedEvent, + ServerHeartbeatSucceededEvent, + ServerHeartbeatFailedEvent +}; diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js new file mode 100644 index 00000000000..0b19234ffee --- /dev/null +++ b/lib/core/sdam/monitor.js @@ -0,0 +1,126 @@ +'use strict'; + +const ServerDescription = require('./server_description').ServerDescription; +const calculateDurationInMs = require('../utils').calculateDurationInMs; + +const sdamEvents = require('./events'); +const ServerHeartbeatStartedEvent = sdamEvents.ServerHeartbeatStartedEvent; +const ServerHeartbeatSucceededEvent = sdamEvents.ServerHeartbeatSucceededEvent; +const ServerHeartbeatFailedEvent = sdamEvents.ServerHeartbeatFailedEvent; + +// pulled from `Server` implementation +const STATE_CLOSED = 'closed'; +const STATE_CLOSING = 'closing'; + +/** + * Performs a server check as described by the SDAM spec. 
+ * + * NOTE: This method automatically reschedules itself, so that there is always an active + * monitoring process + * + * @param {Server} server The server to monitor + */ +function monitorServer(server, options) { + options = options || {}; + const heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; + + if (options.initial === true) { + server.s.monitorId = setTimeout(() => monitorServer(server), heartbeatFrequencyMS); + return; + } + + const rescheduleMonitoring = () => { + server.s.monitoring = false; + server.s.monitorId = setTimeout(() => { + server.s.monitorId = undefined; + server.monitor(); + }, heartbeatFrequencyMS); + }; + + // executes a single check of a server + const checkServer = callback => { + let start = process.hrtime(); + + // emit a signal indicating we have started the heartbeat + server.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(server.name)); + + // NOTE: legacy monitoring event + process.nextTick(() => server.emit('monitoring', server)); + + server.command( + 'admin.$cmd', + { ismaster: true }, + { + monitoring: true, + socketTimeout: server.s.options.connectionTimeout || 2000 + }, + (err, result) => { + let duration = calculateDurationInMs(start); + + if (err) { + server.emit( + 'serverHeartbeatFailed', + new ServerHeartbeatFailedEvent(duration, err, server.name) + ); + + return callback(err, null); + } + + // save round trip time + server.description.roundTripTime = duration; + + const isMaster = result.result; + server.emit( + 'serverHeartbeatSucceeded', + new ServerHeartbeatSucceededEvent(duration, isMaster, server.name) + ); + + return callback(null, isMaster); + } + ); + }; + + const successHandler = isMaster => { + // emit an event indicating that our description has changed + server.emit('descriptionReceived', new ServerDescription(server.description.address, isMaster)); + if (server.s.state === STATE_CLOSED || server.s.state === STATE_CLOSING) { + return; + } + + rescheduleMonitoring(); + }; + + // run the actual monitoring loop + server.s.monitoring = true; + checkServer((err, isMaster) => { + if (!err) { + successHandler(isMaster); + return; + } + + // According to the SDAM specification's "Network error during server check" section, if + // an ismaster call fails we reset the server's pool. If a server was once connected, + // change its type to `Unknown` only after retrying once. + server.s.pool.reset(() => { + // otherwise re-attempt monitoring once + checkServer((error, isMaster) => { + if (error) { + // we revert to an `Unknown` by emitting a default description with no isMaster + server.emit( + 'descriptionReceived', + new ServerDescription(server.description.address, null, { error }) + ); + + rescheduleMonitoring(); + return; + } + + successHandler(isMaster); + }); + }); + }); +} + +module.exports = { + monitorServer +}; diff --git a/lib/core/sdam/monitoring.js b/lib/core/sdam/monitoring.js deleted file mode 100644 index 4cbf2f1730f..00000000000 --- a/lib/core/sdam/monitoring.js +++ /dev/null @@ -1,241 +0,0 @@ -'use strict'; - -const ServerDescription = require('./server_description').ServerDescription; -const calculateDurationInMs = require('../utils').calculateDurationInMs; - -// pulled from `Server` implementation -const STATE_CLOSED = 'closed'; -const STATE_CLOSING = 'closing'; - -/** - * Published when server description changes, but does NOT include changes to the RTT. 
- * - * @property {Object} topologyId A unique identifier for the topology - * @property {ServerAddress} address The address (host/port pair) of the server - * @property {ServerDescription} previousDescription The previous server description - * @property {ServerDescription} newDescription The new server description - */ -class ServerDescriptionChangedEvent { - constructor(topologyId, address, previousDescription, newDescription) { - Object.assign(this, { topologyId, address, previousDescription, newDescription }); - } -} - -/** - * Published when server is initialized. - * - * @property {Object} topologyId A unique identifier for the topology - * @property {ServerAddress} address The address (host/port pair) of the server - */ -class ServerOpeningEvent { - constructor(topologyId, address) { - Object.assign(this, { topologyId, address }); - } -} - -/** - * Published when server is closed. - * - * @property {ServerAddress} address The address (host/port pair) of the server - * @property {Object} topologyId A unique identifier for the topology - */ -class ServerClosedEvent { - constructor(topologyId, address) { - Object.assign(this, { topologyId, address }); - } -} - -/** - * Published when topology description changes. - * - * @property {Object} topologyId - * @property {TopologyDescription} previousDescription The old topology description - * @property {TopologyDescription} newDescription The new topology description - */ -class TopologyDescriptionChangedEvent { - constructor(topologyId, previousDescription, newDescription) { - Object.assign(this, { topologyId, previousDescription, newDescription }); - } -} - -/** - * Published when topology is initialized. - * - * @param {Object} topologyId A unique identifier for the topology - */ -class TopologyOpeningEvent { - constructor(topologyId) { - Object.assign(this, { topologyId }); - } -} - -/** - * Published when topology is closed. - * - * @param {Object} topologyId A unique identifier for the topology - */ -class TopologyClosedEvent { - constructor(topologyId) { - Object.assign(this, { topologyId }); - } -} - -/** - * Fired when the server monitor’s ismaster command is started - immediately before - * the ismaster command is serialized into raw BSON and written to the socket. - * - * @property {Object} connectionId The connection id for the command - */ -class ServerHeartbeatStartedEvent { - constructor(connectionId) { - Object.assign(this, { connectionId }); - } -} - -/** - * Fired when the server monitor’s ismaster succeeds. - * - * @param {Number} duration The execution time of the event in ms - * @param {Object} reply The command reply - * @param {Object} connectionId The connection id for the command - */ -class ServerHeartbeatSucceededEvent { - constructor(duration, reply, connectionId) { - Object.assign(this, { duration, reply, connectionId }); - } -} - -/** - * Fired when the server monitor’s ismaster fails, either with an “ok: 0” or a socket exception. - * - * @param {Number} duration The execution time of the event in ms - * @param {MongoError|Object} failure The command failure - * @param {Object} connectionId The connection id for the command - */ -class ServerHeartbeatFailedEvent { - constructor(duration, failure, connectionId) { - Object.assign(this, { duration, failure, connectionId }); - } -} - -/** - * Performs a server check as described by the SDAM spec. 
- * - * NOTE: This method automatically reschedules itself, so that there is always an active - * monitoring process - * - * @param {Server} server The server to monitor - */ -function monitorServer(server, options) { - options = options || {}; - const heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; - - if (options.initial === true) { - server.s.monitorId = setTimeout(() => monitorServer(server), heartbeatFrequencyMS); - return; - } - - const rescheduleMonitoring = () => { - server.s.monitoring = false; - server.s.monitorId = setTimeout(() => { - server.s.monitorId = undefined; - server.monitor(); - }, heartbeatFrequencyMS); - }; - - // executes a single check of a server - const checkServer = callback => { - let start = process.hrtime(); - - // emit a signal indicating we have started the heartbeat - server.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(server.name)); - - // NOTE: legacy monitoring event - process.nextTick(() => server.emit('monitoring', server)); - - server.command( - 'admin.$cmd', - { ismaster: true }, - { - monitoring: true, - socketTimeout: server.s.options.connectionTimeout || 2000 - }, - (err, result) => { - let duration = calculateDurationInMs(start); - - if (err) { - server.emit( - 'serverHeartbeatFailed', - new ServerHeartbeatFailedEvent(duration, err, server.name) - ); - - return callback(err, null); - } - - // save round trip time - server.description.roundTripTime = duration; - - const isMaster = result.result; - server.emit( - 'serverHeartbeatSucceeded', - new ServerHeartbeatSucceededEvent(duration, isMaster, server.name) - ); - - return callback(null, isMaster); - } - ); - }; - - const successHandler = isMaster => { - // emit an event indicating that our description has changed - server.emit('descriptionReceived', new ServerDescription(server.description.address, isMaster)); - if (server.s.state === STATE_CLOSED || server.s.state === STATE_CLOSING) { - return; - } - - rescheduleMonitoring(); - }; - - // run the actual monitoring loop - server.s.monitoring = true; - checkServer((err, isMaster) => { - if (!err) { - successHandler(isMaster); - return; - } - - // According to the SDAM specification's "Network error during server check" section, if - // an ismaster call fails we reset the server's pool. If a server was once connected, - // change its type to `Unknown` only after retrying once. 
- server.s.pool.reset(() => { - // otherwise re-attempt monitoring once - checkServer((error, isMaster) => { - if (error) { - // we revert to an `Unknown` by emitting a default description with no isMaster - server.emit( - 'descriptionReceived', - new ServerDescription(server.description.address, null, { error }) - ); - - rescheduleMonitoring(); - return; - } - - successHandler(isMaster); - }); - }); - }); -} - -module.exports = { - ServerDescriptionChangedEvent, - ServerOpeningEvent, - ServerClosedEvent, - TopologyDescriptionChangedEvent, - TopologyOpeningEvent, - TopologyClosedEvent, - ServerHeartbeatStartedEvent, - ServerHeartbeatSucceededEvent, - ServerHeartbeatFailedEvent, - monitorServer -}; diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index 1c958ddcdfe..80b61974dc4 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -4,7 +4,7 @@ const ServerDescription = require('./server_description').ServerDescription; const ServerType = require('./common').ServerType; const TopologyDescription = require('./topology_description').TopologyDescription; const TopologyType = require('./common').TopologyType; -const monitoring = require('./monitoring'); +const events = require('./events'); const Server = require('./server').Server; const relayEvents = require('../utils').relayEvents; const ReadPreference = require('../topologies/read_preference'); @@ -272,12 +272,12 @@ class Topology extends EventEmitter { stateTransition(this, STATE_CONNECTING); // emit SDAM monitoring events - this.emit('topologyOpening', new monitoring.TopologyOpeningEvent(this.s.id)); + this.emit('topologyOpening', new events.TopologyOpeningEvent(this.s.id)); // emit an event for the topology change this.emit( 'topologyDescriptionChanged', - new monitoring.TopologyDescriptionChangedEvent( + new events.TopologyDescriptionChangedEvent( this.s.id, new TopologyDescription(TopologyType.Unknown), // initial is always Unknown this.s.description @@ -377,7 +377,7 @@ class Topology extends EventEmitter { this.s.servers.clear(); // emit an event for close - this.emit('topologyClosed', new monitoring.TopologyClosedEvent(this.s.id)); + this.emit('topologyClosed', new events.TopologyClosedEvent(this.s.id)); stateTransition(this, STATE_CLOSED); this.emit('close'); @@ -548,7 +548,7 @@ class Topology extends EventEmitter { // emit monitoring events for this change this.emit( 'serverDescriptionChanged', - new monitoring.ServerDescriptionChangedEvent( + new events.ServerDescriptionChangedEvent( this.s.id, serverDescription.address, previousServerDescription, @@ -561,7 +561,7 @@ class Topology extends EventEmitter { this.emit( 'topologyDescriptionChanged', - new monitoring.TopologyDescriptionChangedEvent( + new events.TopologyDescriptionChangedEvent( this.s.id, previousTopologyDescription, this.s.description @@ -786,7 +786,7 @@ function destroyServer(server, topology, options, callback) { server.destroy(options, () => { topology.emit( 'serverClosed', - new monitoring.ServerClosedEvent(topology.s.id, server.description.address) + new events.ServerClosedEvent(topology.s.id, server.description.address) ); if (typeof callback === 'function') { @@ -821,7 +821,7 @@ function randomSelection(array) { function createAndConnectServer(topology, serverDescription, connectDelay) { topology.emit( 'serverOpening', - new monitoring.ServerOpeningEvent(topology.s.id, serverDescription.address) + new events.ServerOpeningEvent(topology.s.id, serverDescription.address) ); const server = new Server(serverDescription, 
topology.s.options, topology); @@ -1061,7 +1061,7 @@ function srvPollingHandler(topology) { topology.emit( 'topologyDescriptionChanged', - new monitoring.TopologyDescriptionChangedEvent( + new events.TopologyDescriptionChangedEvent( topology.s.id, previousTopologyDescription, topology.s.description diff --git a/test/unit/core/sdam_spec.test.js b/test/unit/core/sdam_spec.test.js index 2cfbe63fd8c..285ee3010d9 100644 --- a/test/unit/core/sdam_spec.test.js +++ b/test/unit/core/sdam_spec.test.js @@ -4,7 +4,7 @@ const path = require('path'); const Topology = require('../../../lib/core/sdam/topology').Topology; const Server = require('../../../lib/core/sdam/server').Server; const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription; -const monitoring = require('../../../lib/core/sdam/monitoring'); +const sdamEvents = require('../../../lib/core/sdam/events'); const parse = require('../../../lib/core/uri_parser'); const sinon = require('sinon'); @@ -97,7 +97,7 @@ function convertOutcomeEvents(events) { let eventClass = eventType.replace(/_\w/g, c => c[1].toUpperCase()); eventClass = eventClass.charAt(0).toUpperCase() + eventClass.slice(1); args.unshift(null); - const eventConstructor = monitoring[eventClass]; + const eventConstructor = sdamEvents[eventClass]; const eventInstance = new (Function.prototype.bind.apply(eventConstructor, args))(); return eventInstance; }); diff --git a/test/unit/sdam/srv_polling.test.js b/test/unit/sdam/srv_polling.test.js index 5c06712177d..e81340b11ed 100644 --- a/test/unit/sdam/srv_polling.test.js +++ b/test/unit/sdam/srv_polling.test.js @@ -4,7 +4,7 @@ const Topology = require('../../../lib/core/sdam/topology').Topology; const TopologyDescription = require('../../../lib/core/sdam/topology_description') .TopologyDescription; const TopologyType = require('../../../lib/core/sdam/common').TopologyType; -const monitoring = require('../../../lib/core/sdam/monitoring'); +const sdamEvents = require('../../../lib/core/sdam/events'); const SrvPoller = require('../../../lib/core/sdam/srv_polling').SrvPoller; const SrvPollingEvent = require('../../../lib/core/sdam/srv_polling').SrvPollingEvent; @@ -283,7 +283,7 @@ describe('Mongos SRV Polling', function() { function emit(prev, current) { topology.emit( 'topologyDescriptionChanged', - new monitoring.TopologyDescriptionChangedEvent(topology.s.id, prev, current) + new sdamEvents.TopologyDescriptionChangedEvent(topology.s.id, prev, current) ); } From c15c359270632f1a8d037ebb64167a9d585eee2c Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 16 Dec 2019 09:03:24 -0500 Subject: [PATCH 055/130] chore: move `monitoring` => `monitor` --- lib/core/sdam/server.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index 32cea9b2278..3453904511f 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -9,7 +9,7 @@ const createClientInfo = require('../topologies/shared').createClientInfo; const Logger = require('../connection/logger'); const ServerDescription = require('./server_description').ServerDescription; const ReadPreference = require('../topologies/read_preference'); -const monitorServer = require('./monitoring').monitorServer; +const monitorServer = require('./monitor').monitorServer; const MongoParseError = require('../error').MongoParseError; const MongoNetworkError = require('../error').MongoNetworkError; const collationNotSupported = require('../utils').collationNotSupported; From 
2bfe2a1841417b34131d6d5ebd21a6d042f82f41 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 16 Dec 2019 10:47:40 -0500 Subject: [PATCH 056/130] feat: introduce a new `Monitor` type for server monitoring The `Monitor` replaces the legacy `monitorServer` function and completely manages the process of the server monitoring component of SDAM. NODE-2386 --- lib/core/sdam/monitor.js | 293 +++++++++++++++++++++--------- test/unit/sdam/monitoring.test.js | 184 ++++++++++++++++++- 2 files changed, 387 insertions(+), 90 deletions(-) diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index 0b19234ffee..10d45ba73f9 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -1,126 +1,247 @@ 'use strict'; -const ServerDescription = require('./server_description').ServerDescription; +const ServerType = require('./common').ServerType; const calculateDurationInMs = require('../utils').calculateDurationInMs; +const EventEmitter = require('events'); +const connect = require('../connection/connect'); +const Connection = require('../../cmap/connection').Connection; +const common = require('./common'); +const makeStateMachine = require('../utils').makeStateMachine; +const MongoError = require('../error').MongoError; const sdamEvents = require('./events'); const ServerHeartbeatStartedEvent = sdamEvents.ServerHeartbeatStartedEvent; const ServerHeartbeatSucceededEvent = sdamEvents.ServerHeartbeatSucceededEvent; const ServerHeartbeatFailedEvent = sdamEvents.ServerHeartbeatFailedEvent; -// pulled from `Server` implementation -const STATE_CLOSED = 'closed'; -const STATE_CLOSING = 'closing'; - -/** - * Performs a server check as described by the SDAM spec. - * - * NOTE: This method automatically reschedules itself, so that there is always an active - * monitoring process - * - * @param {Server} server The server to monitor - */ -function monitorServer(server, options) { - options = options || {}; - const heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; - - if (options.initial === true) { - server.s.monitorId = setTimeout(() => monitorServer(server), heartbeatFrequencyMS); - return; +const kServer = Symbol('server'); +const kMonitorId = Symbol('monitorId'); +const kConnection = Symbol('connection'); +const kCancellationToken = Symbol('cancellationToken'); +const kLastCheckTime = Symbol('lastCheckTime'); + +const STATE_CLOSED = common.STATE_CLOSED; +const STATE_CLOSING = common.STATE_CLOSING; +const STATE_IDLE = 'idle'; +const STATE_MONITORING = 'monitoring'; +const stateTransition = makeStateMachine({ + [STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED], + [STATE_CLOSED]: [STATE_CLOSED, STATE_MONITORING], + [STATE_IDLE]: [STATE_IDLE, STATE_MONITORING, STATE_CLOSING], + [STATE_MONITORING]: [STATE_MONITORING, STATE_IDLE, STATE_CLOSING] +}); + +const INVALID_REQUEST_CHECK_STATES = new Set([STATE_CLOSING, STATE_CLOSED, STATE_MONITORING]); + +class Monitor extends EventEmitter { + constructor(server, options) { + super(options); + + this[kServer] = server; + this[kConnection] = undefined; + this[kCancellationToken] = new EventEmitter(); + this[kCancellationToken].setMaxListeners(Infinity); + this.s = { + state: STATE_CLOSED + }; + + this.address = server.description.address; + this.options = Object.freeze({ + connectTimeoutMS: + typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 10000, + heartbeatFrequencyMS: + typeof options.heartbeatFrequencyMS === 'number' ? 
options.heartbeatFrequencyMS : 10000, + minHeartbeatFrequencyMS: + typeof options.minHeartbeatFrequencyMS === 'number' ? options.minHeartbeatFrequencyMS : 500 + }); + + // TODO: refactor this to pull it directly from the pool, requires new ConnectionPool integration + const addressParts = server.description.address.split(':'); + this.connectOptions = Object.freeze( + Object.assign( + { + host: addressParts[0], + port: parseInt(addressParts[1], 10), + bson: server.s.bson, + connectionType: Connection + }, + server.s.options, + + // force BSON serialization options + { + raw: false, + promoteLongs: true, + promoteValues: true, + promoteBuffers: true + } + ) + ); } - const rescheduleMonitoring = () => { - server.s.monitoring = false; - server.s.monitorId = setTimeout(() => { - server.s.monitorId = undefined; - server.monitor(); - }, heartbeatFrequencyMS); - }; + connect() { + if (this.s.state !== STATE_CLOSED) { + return; + } - // executes a single check of a server - const checkServer = callback => { - let start = process.hrtime(); + monitorServer(this); + } - // emit a signal indicating we have started the heartbeat - server.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(server.name)); + requestCheck() { + if (INVALID_REQUEST_CHECK_STATES.has(this.s.state)) { + return; + } - // NOTE: legacy monitoring event - process.nextTick(() => server.emit('monitoring', server)); + const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS; + const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS; + const remainingTime = heartbeatFrequencyMS - calculateDurationInMs(this[kLastCheckTime]); + if (remainingTime > minHeartbeatFrequencyMS && this[kMonitorId]) { + clearTimeout(this[kMonitorId]); + rescheduleMonitoring(this, minHeartbeatFrequencyMS); + return; + } - server.command( - 'admin.$cmd', - { ismaster: true }, - { - monitoring: true, - socketTimeout: server.s.options.connectionTimeout || 2000 - }, - (err, result) => { - let duration = calculateDurationInMs(start); + if (this[kMonitorId]) { + clearTimeout(this[kMonitorId]); + } - if (err) { - server.emit( - 'serverHeartbeatFailed', - new ServerHeartbeatFailedEvent(duration, err, server.name) - ); + monitorServer(this); + } - return callback(err, null); - } + close() { + if (this.s.state === STATE_CLOSED || this.s.state === STATE_CLOSING) { + return; + } - // save round trip time - server.description.roundTripTime = duration; + stateTransition(this, STATE_CLOSING); + this[kCancellationToken].emit('cancel'); + if (this[kMonitorId]) { + clearTimeout(this[kMonitorId]); + } - const isMaster = result.result; - server.emit( - 'serverHeartbeatSucceeded', - new ServerHeartbeatSucceededEvent(duration, isMaster, server.name) - ); + if (this[kConnection]) { + this[kConnection].destroy({ force: true }); + } - return callback(null, isMaster); + this.emit('close'); + stateTransition(this, STATE_CLOSED); + } +} + +function checkServer(monitor, callback) { + if (monitor[kConnection] && monitor[kConnection].closed) { + monitor[kConnection] = undefined; + } + + monitor.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(monitor.address)); + + if (monitor[kConnection] != null) { + const connectTimeoutMS = monitor.options.connectTimeoutMS; + monitor[kConnection].command( + 'admin.$cmd', + { ismaster: true }, + { socketTimeout: connectTimeoutMS }, + (err, isMaster) => { + if (err) { + return callback(err); + } + + return callback(undefined, isMaster); } ); - }; - const successHandler = isMaster => { - // emit an event indicating 
that our description has changed - server.emit('descriptionReceived', new ServerDescription(server.description.address, isMaster)); - if (server.s.state === STATE_CLOSED || server.s.state === STATE_CLOSING) { + return; + } + + // connecting does an implicit `ismaster` + connect(monitor.connectOptions, monitor[kCancellationToken], (err, conn) => { + if (err) { + monitor[kConnection] = undefined; + callback(err); + return; + } + + if (monitor.s.state === STATE_CLOSING || monitor.s.state === STATE_CLOSED) { + conn.destroy({ force: true }); + callback(new MongoError('monitor was destroyed')); return; } - rescheduleMonitoring(); - }; + monitor[kConnection] = conn; + callback(undefined, conn.description); + }); +} + +function monitorServer(monitor) { + const start = process.hrtime(); + stateTransition(monitor, STATE_MONITORING); + + // TODO: the next line is a legacy event, remove in v4 + process.nextTick(() => monitor.emit('monitoring', monitor[kServer])); + + checkServer(monitor, (err, isMaster) => { + if (isMaster) { + successHandler(monitor, start, isMaster); + return; + } - // run the actual monitoring loop - server.s.monitoring = true; - checkServer((err, isMaster) => { - if (!err) { - successHandler(isMaster); + // otherwise an error occured on initial discovery, also bail + if (monitor[kServer].description.type === ServerType.Unknown) { + failureHandler(monitor, start, err); return; } // According to the SDAM specification's "Network error during server check" section, if // an ismaster call fails we reset the server's pool. If a server was once connected, // change its type to `Unknown` only after retrying once. - server.s.pool.reset(() => { - // otherwise re-attempt monitoring once - checkServer((error, isMaster) => { - if (error) { - // we revert to an `Unknown` by emitting a default description with no isMaster - server.emit( - 'descriptionReceived', - new ServerDescription(server.description.address, null, { error }) - ); - - rescheduleMonitoring(); - return; - } + monitor.emit('resetConnectionPool'); + + checkServer(monitor, (error, isMaster) => { + if (error) { + // NOTE: using the _first_ error encountered here + failureHandler(monitor, start, err); + return; + } - successHandler(isMaster); - }); + successHandler(monitor, start, isMaster); }); }); } +function rescheduleMonitoring(monitor, ms) { + const heartbeatFrequencyMS = monitor.options.heartbeatFrequencyMS; + if (monitor.s.state === STATE_CLOSING || monitor.s.state === STATE_CLOSED) { + return; + } + + monitor[kLastCheckTime] = process.hrtime(); + monitor[kMonitorId] = setTimeout(() => { + monitor[kMonitorId] = undefined; + monitor.requestCheck(); + }, ms || heartbeatFrequencyMS); + + stateTransition(monitor, STATE_IDLE); +} + +function successHandler(monitor, start, isMaster) { + process.nextTick(() => + monitor.emit( + 'serverHeartbeatSucceeded', + new ServerHeartbeatSucceededEvent(calculateDurationInMs(start), isMaster, monitor.address) + ) + ); + + rescheduleMonitoring(monitor); +} + +function failureHandler(monitor, start, err) { + monitor.emit( + 'serverHeartbeatFailed', + new ServerHeartbeatFailedEvent(calculateDurationInMs(start), err, monitor.address) + ); + + rescheduleMonitoring(monitor); +} + module.exports = { - monitorServer + Monitor }; diff --git a/test/unit/sdam/monitoring.test.js b/test/unit/sdam/monitoring.test.js index c80e1638057..b134e69cbaa 100644 --- a/test/unit/sdam/monitoring.test.js +++ b/test/unit/sdam/monitoring.test.js @@ -1,18 +1,34 @@ 'use strict'; const mock = require('mongodb-mock-server'); 
+const BSON = require('bson'); const Topology = require('../../../lib/core/sdam/topology').Topology; +const Monitor = require('../../../lib/core/sdam/monitor').Monitor; +const ServerType = require('../../../lib/core/sdam/common').ServerType; const expect = require('chai').expect; +class MockServer { + constructor(options) { + this.s = { + bson: new BSON() + }; + + this.description = { + type: ServerType.Unknown, + address: `${options.host}:${options.port}` + }; + } +} + describe('monitoring', function() { - let server; + let mockServer; after(() => mock.cleanup()); beforeEach(function() { - return mock.createServer().then(_server => (server = _server)); + return mock.createServer().then(server => (mockServer = server)); }); it('should record roundTripTime', function(done) { - server.setMessageHandler(request => { + mockServer.setMessageHandler(request => { const doc = request.document; if (doc.ismaster) { request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); @@ -22,7 +38,7 @@ describe('monitoring', function() { }); // set `heartbeatFrequencyMS` to 250ms to force a quick monitoring check, and wait 500ms to validate below - const topology = new Topology(server.uri(), { heartbeatFrequencyMS: 250 }); + const topology = new Topology(mockServer.uri(), { heartbeatFrequencyMS: 250 }); topology.connect(err => { expect(err).to.not.exist; @@ -41,4 +57,164 @@ describe('monitoring', function() { }, 500); }); }); + + describe('Monitor', function() { + it('should connect and issue an initial server check', function(done) { + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, {}); + this.defer(() => monitor.close()); + + monitor.on('serverHeartbeatFailed', () => done(new Error('unexpected heartbeat failure'))); + monitor.on('serverHeartbeatSucceeded', () => done()); + monitor.connect(); + }); + + it('should ignore attempts to connect when not already closed', function(done) { + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, {}); + this.defer(() => monitor.close()); + + monitor.on('serverHeartbeatFailed', () => done(new Error('unexpected heartbeat failure'))); + monitor.on('serverHeartbeatSucceeded', () => done()); + monitor.connect(); + monitor.connect(); + }); + + it('should not initiate another check if one is in progress', function(done) { + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + setTimeout(() => request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)), 250); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, {}); + + const startedEvents = []; + monitor.on('serverHeartbeatStarted', event => startedEvents.push(event)); + monitor.on('close', () => { + expect(startedEvents).to.have.length(2); + done(); + }); + + monitor.requestCheck(); + monitor.once('serverHeartbeatSucceeded', () => { + monitor.requestCheck(); + monitor.requestCheck(); + monitor.requestCheck(); + monitor.requestCheck(); + monitor.requestCheck(); + + const minHeartbeatFrequencyMS = 500; + setTimeout(() => { + // wait for minHeartbeatFrequencyMS, then request a check and verify another check occurred + 
monitor.once('serverHeartbeatSucceeded', () => { + monitor.close(); + }); + + monitor.requestCheck(); + }, minHeartbeatFrequencyMS); + }); + }); + + it('should not close the monitor on a failed heartbeat', function(done) { + let isMasterCount = 0; + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + isMasterCount++; + if (isMasterCount === 2) { + request.reply({ ok: 0, errmsg: 'forced from mock server' }); + return; + } + + if (isMasterCount === 3) { + request.connection.destroy(); + return; + } + + request.reply(mock.DEFAULT_ISMASTER_36); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, { + heartbeatFrequencyMS: 250, + minHeartbeatFrequencyMS: 50 + }); + + const events = []; + monitor.on('serverHeartbeatFailed', event => events.push(event)); + + let successCount = 0; + monitor.on('serverHeartbeatSucceeded', () => { + if (successCount++ === 2) { + monitor.close(); + } + }); + + monitor.on('close', () => { + expect(events).to.have.length(2); + done(); + }); + + monitor.connect(); + }); + + it('should signal to reset the connection pool after first failed heartbeat', function(done) { + let isMasterCount = 0; + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + isMasterCount++; + request.reply( + isMasterCount === 2 + ? { ok: 0, errmsg: 'forced from mock server' } + : mock.DEFAULT_ISMASTER_36 + ); + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, { + heartbeatFrequencyMS: 250, + minHeartbeatFrequencyMS: 50 + }); + this.defer(() => monitor.close()); + + monitor.on('serverHeartbeatFailed', () => done(new Error('unexpected heartbeat failure'))); + + let resetRequested = false; + monitor.on('resetConnectionPool', () => (resetRequested = true)); + monitor.on('serverHeartbeatSucceeded', () => { + if (server.description.type === ServerType.Unknown) { + // this is the first successful heartbeat, set the server type + server.description.type = ServerType.Standalone; + return; + } + + // otherwise, this is the second heartbeat success and we should verify + // a reset was requested + expect(resetRequested).to.be.true; + done(); + }); + + monitor.connect(); + }); + }); }); From 0076b38e17626befe47a6fa41a955d8126f80a87 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 17 Dec 2019 08:29:47 -0500 Subject: [PATCH 057/130] refactor: integrate new monitor into unified topology --- lib/core/connection/pool.js | 16 --------- lib/core/sdam/server.js | 60 ++++++++++++++++++++++++------- lib/core/sdam/server_selection.js | 4 +-- lib/core/sdam/topology.js | 42 ++-------------------- lib/operations/connect.js | 4 ++- 5 files changed, 53 insertions(+), 73 deletions(-) diff --git a/lib/core/connection/pool.js b/lib/core/connection/pool.js index 3fcec67ed7a..56d427e99c0 100644 --- a/lib/core/connection/pool.js +++ b/lib/core/connection/pool.js @@ -255,22 +255,6 @@ function connectionFailureHandler(pool, event, err, conn) { // Remove the connection removeConnection(pool, conn); - if ( - pool.state !== DRAINING && - pool.state !== DESTROYED && - pool.options.legacyCompatMode === false - ) { - // since an error/close/timeout means pool invalidation in a - // pre-CMAP world, we will issue a custom `drain` event here to - // signal that the server should be recycled - stateTransition(pool, DRAINING); - pool.emit('drain', err); - - // wait to flush work items so this server isn't selected again immediately - 
process.nextTick(() => conn.flush(err)); - return; - } - // flush remaining work items conn.flush(err); } diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index 3453904511f..d37c008026f 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -9,7 +9,7 @@ const createClientInfo = require('../topologies/shared').createClientInfo; const Logger = require('../connection/logger'); const ServerDescription = require('./server_description').ServerDescription; const ReadPreference = require('../topologies/read_preference'); -const monitorServer = require('./monitor').monitorServer; +const Monitor = require('./monitor').Monitor; const MongoParseError = require('../error').MongoParseError; const MongoNetworkError = require('../error').MongoNetworkError; const collationNotSupported = require('../utils').collationNotSupported; @@ -57,6 +57,8 @@ const stateTransition = makeStateMachine({ [STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED] }); +const kMonitor = Symbol('monitor'); + /** * * @fires Server#serverHeartbeatStarted @@ -101,10 +103,6 @@ class Server extends EventEmitter { ]), // client metadata for the initial handshake clientInfo: createClientInfo(options), - // state variable to determine if there is an active server check in progress - monitoring: false, - // the implementation of the monitoring method - monitorFunction: options.monitorFunction || monitorServer, // the connection pool pool: null, // the server state @@ -112,6 +110,41 @@ class Server extends EventEmitter { credentials: options.credentials, topology }; + + this[kMonitor] = new Monitor(this, this.s.options); + relayEvents(this[kMonitor], this, [ + 'serverHeartbeatStarted', + 'serverHeartbeatSucceeded', + 'serverHeartbeatFailed', + + // legacy events + 'monitoring' + ]); + + this[kMonitor].on('resetConnectionPool', () => { + this.s.pool.reset(); + }); + + this[kMonitor].on('serverHeartbeatFailed', event => { + // Revert to an `Unknown` state by emitting a default description with no isMaster, and the + // error from the heartbeat attempt + this.emit( + 'descriptionReceived', + new ServerDescription(this.description.address, null, { + roundTripTime: event.duration, + error: event.failure + }) + ); + }); + + this[kMonitor].on('serverHeartbeatSucceeded', event => { + this.emit( + 'descriptionReceived', + new ServerDescription(this.description.address, event.reply, { + roundTripTime: event.duration + }) + ); + }); } get description() { @@ -201,6 +234,9 @@ class Server extends EventEmitter { } }; + // close the monitor + this[kMonitor].close(); + if (!this.s.pool) { return done(); } @@ -220,11 +256,8 @@ class Server extends EventEmitter { * Immediately schedule monitoring of this server. If there already an attempt being made * this will be a no-op. 
*/ - monitor(options) { - options = options || {}; - if (this.s.state !== STATE_CONNECTED || this.s.monitoring) return; - if (this.s.monitorId) clearTimeout(this.s.monitorId); - this.s.monitorFunction(this, options); + requestCheck() { + this[kMonitor].requestCheck(); } /** @@ -533,12 +566,13 @@ function connectEventHandler(server) { ); } + // start up the server monitor + // TODO: move this to `connect` when new connection pool is installed + server[kMonitor].connect(); + // we are connected and handshaked (guaranteed by the pool) stateTransition(server, STATE_CONNECTED); server.emit('connect', server); - - // emit an event indicating that our description has changed - server.emit('descriptionReceived', new ServerDescription(server.description.address, ismaster)); }; } diff --git a/lib/core/sdam/server_selection.js b/lib/core/sdam/server_selection.js index d658fc58769..7c66ab12b77 100644 --- a/lib/core/sdam/server_selection.js +++ b/lib/core/sdam/server_selection.js @@ -299,9 +299,7 @@ function selectServers(topology, selector, timeout, start, callback) { const retrySelection = () => { // ensure all server monitors attempt monitoring soon - topology.s.servers.forEach(server => - server.monitor({ heartbeatFrequencyMS: topology.description.heartbeatFrequencyMS }) - ); + topology.s.servers.forEach(server => server.requestCheck()); const iterationTimer = setTimeout(() => { topology.removeListener('topologyDescriptionChanged', descriptionChangedHandler); diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index 80b61974dc4..f50c9a3d4b9 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -827,7 +827,6 @@ function createAndConnectServer(topology, serverDescription, connectDelay) { const server = new Server(serverDescription, topology.s.options, topology); relayEvents(server, topology, SERVER_RELAY_EVENTS); - server.once('connect', serverConnectEventHandler(server, topology)); server.on('descriptionReceived', topology.serverUpdateHandler.bind(topology)); server.on('error', serverErrorEventHandler(server, topology)); @@ -845,25 +844,6 @@ function createAndConnectServer(topology, serverDescription, connectDelay) { return server; } -function resetServer(topology, serverDescription) { - if (!topology.s.servers.has(serverDescription.address)) { - return; - } - - // first remove the old server - const server = topology.s.servers.get(serverDescription.address); - destroyServer(server, topology); - - // add the new server, and attempt connection after a delay - const newServer = createAndConnectServer( - topology, - serverDescription, - topology.s.minHeartbeatFrequencyMS - ); - - topology.s.servers.set(serverDescription.address, newServer); -} - /** * Create `Server` instances for all initially known servers, connect them, and assign * them to the passed in `Topology`. @@ -880,15 +860,6 @@ function connectServers(topology, serverDescriptions) { } function updateServers(topology, incomingServerDescription) { - // if the server was reset internally because of an error, we need to replace the - // `Server` instance for it so we can attempt reconnect. 
- // - // TODO: this logical can change once CMAP is put in place - if (incomingServerDescription && incomingServerDescription.error) { - resetServer(topology, incomingServerDescription); - return; - } - // update the internal server's description if (incomingServerDescription && topology.s.servers.has(incomingServerDescription.address)) { const server = topology.s.servers.get(incomingServerDescription.address); @@ -918,15 +889,6 @@ function updateServers(topology, incomingServerDescription) { } } -function serverConnectEventHandler(server, topology) { - return function(/* isMaster, err */) { - server.monitor({ - initial: true, - heartbeatFrequencyMS: topology.description.heartbeatFrequencyMS - }); - }; -} - function serverErrorEventHandler(server, topology) { return function(err) { if (topology.s.state === STATE_CLOSING || topology.s.state === STATE_CLOSED) { @@ -936,7 +898,7 @@ function serverErrorEventHandler(server, topology) { if (isSDAMUnrecoverableError(err, server)) { // NOTE: this must be commented out until we switch to the new CMAP pool because // we presently _always_ clear the pool on error. - resetServerState(server, err /*, { clearPool: true } */); + resetServerState(server, err, { clearPool: true }); return; } @@ -1015,7 +977,7 @@ function resetServerState(server, error, options) { new ServerDescription(server.description.address, null, { error }) ); - process.nextTick(() => server.monitor()); + process.nextTick(() => server.requestCheck()); } if (options.clearPool && server.s.pool) { diff --git a/lib/operations/connect.js b/lib/operations/connect.js index 2f6f6f14415..15e0a6abdb1 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -148,7 +148,9 @@ const validOptionNames = [ 'tlsCAFile', 'tlsCertificateFile', 'tlsCertificateKeyFile', - 'tlsCertificateKeyFilePassword' + 'tlsCertificateKeyFilePassword', + 'minHeartbeatFrequencyMS', + 'heartbeatFrequencyMS' ]; const ignoreOptionNames = ['native_parser']; From 1e64ed8c026af299f0ab83df01c976e3188e47d3 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 17 Dec 2019 08:29:59 -0500 Subject: [PATCH 058/130] test: minor modifications to support expectations of new monitor Generally, the changes introduced here are things like adding a `minHeartbeatFrequencyMS` value to speed up tests (where we used to use `haInterval`), as well as cases where we are now sending the correct amount of monitoring requests (compression tests for example) --- test/functional/connection.test.js | 2 +- .../core/single_mocks/compression.test.js | 40 +++++++++++++++---- test/tools/runner/config.js | 3 +- test/unit/sdam/monitoring.test.js | 2 +- .../server_selection/select_servers.test.js | 9 +++-- test/unit/sdam/topology.test.js | 2 +- 6 files changed, 42 insertions(+), 16 deletions(-) diff --git a/test/functional/connection.test.js b/test/functional/connection.test.js index dfc16d9c473..6d1ea8c8b7e 100644 --- a/test/functional/connection.test.js +++ b/test/functional/connection.test.js @@ -19,7 +19,7 @@ describe('Connection', function() { var configuration = this.configuration; var client = configuration.newClient( { w: 1 }, - { poolSize: 1, host: '/tmp/mongodb-27017.sock' } + { poolSize: 1, host: '/tmp/mongodb-27017.sock', heartbeatFrequencyMS: 250 } ); client.connect(function(err, client) { diff --git a/test/functional/core/single_mocks/compression.test.js b/test/functional/core/single_mocks/compression.test.js index 513d008b6aa..8a94866a520 100644 --- a/test/functional/core/single_mocks/compression.test.js +++ 
b/test/functional/core/single_mocks/compression.test.js @@ -68,13 +68,18 @@ describe('Single Compression (mocks)', function() { server.setMessageHandler(request => { var doc = request.document; - if (currentStep === 0) { + if (doc.ismaster) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with no compression request.reply(serverResponse, { compression: { compressor: 'no_compression' } }); - } else if (currentStep === 1) { + currentStep = 1; + return; + } + + if (currentStep === 1) { expect(server.isCompressed).to.be.false; + // Acknowledge insertion using OP_COMPRESSED with no compression request.reply( { ok: 1, n: doc.documents.length, lastOp: new Date() }, @@ -161,12 +166,16 @@ describe('Single Compression (mocks)', function() { server.setMessageHandler(request => { var doc = request.document; - if (currentStep === 0) { + if (doc.ismaster) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with snappy request.reply(serverResponse, { compression: { compressor: 'snappy' } }); - } else if (currentStep === 1) { + currentStep = 1; + return; + } + + if (currentStep === 1) { expect(server.isCompressed).to.be.true; // Acknowledge insertion using OP_COMPRESSED with snappy request.reply( @@ -254,12 +263,16 @@ describe('Single Compression (mocks)', function() { server.setMessageHandler(request => { var doc = request.document; - if (currentStep === 0) { + if (doc.ismaster) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with zlib request.reply(serverResponse, { compression: { compressor: 'zlib' } }); - } else if (currentStep === 1) { + currentStep = 1; + return; + } + + if (currentStep === 1) { expect(server.isCompressed).to.be.true; // Acknowledge insertion using OP_COMPRESSED with zlib request.reply( @@ -345,12 +358,23 @@ describe('Single Compression (mocks)', function() { server = yield mock.createServer(); server.setMessageHandler(request => { - if (currentStep === 0) { + const doc = request.document; + if (doc.ismaster) { + if (doc.compression == null) { + expect(server.isCompressed).to.be.false; + request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + return; + } + expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with snappy request.reply(serverResponse, { compression: { compressor: 'snappy' } }); - } else if (currentStep === 1) { + currentStep = 1; + return; + } + + if (currentStep === 1) { expect(server.isCompressed).to.be.true; // Acknowledge ping using OP_COMPRESSED with snappy request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); diff --git a/test/tools/runner/config.js b/test/tools/runner/config.js index 808c50e098f..faca173b6fb 100644 --- a/test/tools/runner/config.js +++ b/test/tools/runner/config.js @@ -62,7 +62,7 @@ class NativeConfiguration { return new MongoClient( dbOptions, this.usingUnifiedTopology() - ? Object.assign({ useUnifiedTopology: true }, serverOptions) + ? 
Object.assign({ useUnifiedTopology: true, minHeartbeatFrequencyMS: 100 }, serverOptions) : serverOptions ); } @@ -71,6 +71,7 @@ class NativeConfiguration { serverOptions = Object.assign({}, { haInterval: 100 }, serverOptions); if (this.usingUnifiedTopology()) { serverOptions.useUnifiedTopology = true; + serverOptions.minHeartbeatFrequencyMS = 100; } // Fall back diff --git a/test/unit/sdam/monitoring.test.js b/test/unit/sdam/monitoring.test.js index b134e69cbaa..3cb20954a10 100644 --- a/test/unit/sdam/monitoring.test.js +++ b/test/unit/sdam/monitoring.test.js @@ -112,7 +112,7 @@ describe('monitoring', function() { done(); }); - monitor.requestCheck(); + monitor.connect(); monitor.once('serverHeartbeatSucceeded', () => { monitor.requestCheck(); monitor.requestCheck(); diff --git a/test/unit/sdam/server_selection/select_servers.test.js b/test/unit/sdam/server_selection/select_servers.test.js index 9561b389604..142336efa75 100644 --- a/test/unit/sdam/server_selection/select_servers.test.js +++ b/test/unit/sdam/server_selection/select_servers.test.js @@ -43,7 +43,7 @@ describe('selectServers', function() { it('should schedule monitoring if no suitable server is found', function(done) { const topology = new Topology('someserver:27019'); - const serverMonitor = this.sinon.stub(Server.prototype, 'monitor'); + const requestCheck = this.sinon.stub(Server.prototype, 'requestCheck'); this.sinon .stub(Topology.prototype, 'selectServer') @@ -62,10 +62,11 @@ describe('selectServers', function() { expect(err).to.match(/Server selection timed out/); expect(err).to.have.property('reason'); - // expect a call to monitor for initial server creation, and another for the server selection - expect(serverMonitor) + // When server is created `connect` is called on the monitor. When server selection + // occurs `requestCheck` will be called for an immediate check. 
+ expect(requestCheck) .property('callCount') - .to.equal(2); + .to.equal(1); topology.close(done); }); diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js index 445bdaed9df..fcb688397de 100644 --- a/test/unit/sdam/topology.test.js +++ b/test/unit/sdam/topology.test.js @@ -12,7 +12,7 @@ describe('Topology (unit)', function() { this.sinon = sinon.sandbox.create(); // these are mocks we want across all tests - this.sinon.stub(Server.prototype, 'monitor'); + this.sinon.stub(Server.prototype, 'requestCheck'); this.sinon .stub(Topology.prototype, 'selectServer') .callsFake(function(selector, options, callback) { From ff9f7501427617d33d106c7bf2cf5c19e650db05 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 16:10:24 -0500 Subject: [PATCH 059/130] refactor: monitor checks should use `CommandResult` form --- lib/core/connection/connect.js | 27 ++++++++++++++------------- lib/core/sdam/monitor.js | 4 ++-- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/lib/core/connection/connect.js b/lib/core/connection/connect.js index 5ead433c676..6b197c2c01e 100644 --- a/lib/core/connection/connect.js +++ b/lib/core/connection/connect.js @@ -129,18 +129,20 @@ function performInitialHandshake(conn, options, _callback) { return; } - // resolve compression - if (ismaster.compression) { - const agreedCompressors = compressors.filter( - compressor => ismaster.compression.indexOf(compressor) !== -1 - ); - - if (agreedCompressors.length) { - conn.agreedCompressor = agreedCompressors[0]; - } - - if (options.compression && options.compression.zlibCompressionLevel) { - conn.zlibCompressionLevel = options.compression.zlibCompressionLevel; + if (!isModernConnectionType(conn)) { + // resolve compression + if (ismaster.compression) { + const agreedCompressors = compressors.filter( + compressor => ismaster.compression.indexOf(compressor) !== -1 + ); + + if (agreedCompressors.length) { + conn.agreedCompressor = agreedCompressors[0]; + } + + if (options.compression && options.compression.zlibCompressionLevel) { + conn.zlibCompressionLevel = options.compression.zlibCompressionLevel; + } } } @@ -323,7 +325,6 @@ function runCommand(conn, ns, command, options, callback) { return; } - if (typeof options === 'function') (callback = options), (options = {}); const socketTimeout = typeof options.socketTimeout === 'number' ? 
options.socketTimeout : 360000; const bson = conn.options.bson; const query = new Query(bson, ns, command, { diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index 10d45ba73f9..bbf2ec8229f 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -140,12 +140,12 @@ function checkServer(monitor, callback) { 'admin.$cmd', { ismaster: true }, { socketTimeout: connectTimeoutMS }, - (err, isMaster) => { + (err, result) => { if (err) { return callback(err); } - return callback(undefined, isMaster); + return callback(undefined, result.result); } ); From 804ea87ee4baa8c5a265ea8b31af74e8dfc9c3dc Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 16:54:50 -0500 Subject: [PATCH 060/130] refactor: support more connection options in connection pool --- lib/cmap/connection_pool.js | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 3f8405fda99..0834aed5fac 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -41,6 +41,23 @@ const VALID_POOL_OPTIONS = new Set([ 'connectionType', 'monitorCommands', 'socketTimeout', + 'credentials', + 'compression', + + // node Net options + 'localAddress', + 'localPort', + 'family', + 'hints', + 'lookup', + 'checkServerIdentity', + 'rejectUnauthorized', + 'ALPNProtocols', + 'servername', + 'checkServerIdentity', + 'session', + 'minDHSize', + 'secureContext', // spec options 'maxPoolSize', From 10f07b03eee2160501accd8e0c9602ae3e068f77 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 16:56:26 -0500 Subject: [PATCH 061/130] refactor: relay all monitoring events from connection to pool --- lib/cmap/connection_pool.js | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 0834aed5fac..d3d22191618 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -7,6 +7,7 @@ const MongoError = require('../core/error').MongoError; const Connection = require('./connection').Connection; const eachAsync = require('../core/utils').eachAsync; const connect = require('../core/connection/connect'); +const relayEvents = require('../core/utils').relayEvents; const errors = require('./errors'); const PoolClosedError = errors.PoolClosedError; @@ -372,6 +373,14 @@ function createConnection(pool, callback) { return; } + // forward all events from the connection to the pool + relayEvents(connection, pool, [ + 'commandStarted', + 'commandFailed', + 'commandSucceeded', + 'clusterTimeReceived' + ]); + pool.emit('connectionCreated', new ConnectionCreatedEvent(pool, connection)); connection.markAvailable(); From 4b2b81fcf8baecb897072ab2fdbf95a158334a73 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 16:58:09 -0500 Subject: [PATCH 062/130] refactor: don't use `null` for missing callback values --- lib/core/connection/connect.js | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/core/connection/connect.js b/lib/core/connection/connect.js index 6b197c2c01e..f9fa06c2397 100644 --- a/lib/core/connection/connect.js +++ b/lib/core/connection/connect.js @@ -114,18 +114,18 @@ function performInitialHandshake(conn, options, _callback) { const start = new Date().getTime(); runCommand(conn, 'admin.$cmd', handshakeDoc, options, (err, ismaster) => { if (err) { - callback(err, null); + callback(err); return; } if (ismaster.ok === 0) { - callback(new MongoError(ismaster), null); + callback(new 
MongoError(ismaster)); return; } const supportedServerErr = checkSupportedServer(ismaster, options); if (supportedServerErr) { - callback(supportedServerErr, null); + callback(supportedServerErr); return; } @@ -159,7 +159,7 @@ function performInitialHandshake(conn, options, _callback) { return; } - callback(null, conn); + callback(undefined, conn); }); } @@ -350,7 +350,7 @@ function runCommand(conn, ns, command, options, callback) { // ignore all future errors conn.on('error', noop); - _callback(err, null); + _callback(err); } function messageHandler(msg) { @@ -363,7 +363,7 @@ function runCommand(conn, ns, command, options, callback) { conn.removeListener('message', messageHandler); msg.parse({ promoteValues: true }); - _callback(null, msg.documents[0]); + _callback(undefined, msg.documents[0]); } conn.setSocketTimeout(socketTimeout); @@ -382,7 +382,7 @@ function authenticate(conn, credentials, callback) { const provider = AUTH_PROVIDERS[mechanism]; provider.auth(runCommand, [conn], credentials, err => { if (err) return callback(err); - callback(null, conn); + callback(undefined, conn); }); } From e4df5f4f102231dea2054fae3b9fe7bbbf256c20 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 18:13:14 -0500 Subject: [PATCH 063/130] fix: not all message payloads are arrays of Buffer --- lib/cmap/message_stream.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/cmap/message_stream.js b/lib/cmap/message_stream.js index 8bab9250f74..15a4c75a6a4 100644 --- a/lib/cmap/message_stream.js +++ b/lib/cmap/message_stream.js @@ -76,7 +76,8 @@ class MessageStream extends Duplex { // TODO: agreed compressor should live in `StreamDescription` const shouldCompress = operationDescription && !!operationDescription.agreedCompressor; if (!shouldCompress || !canCompress(command)) { - this.push(Buffer.concat(command.toBin())); + const data = command.toBin(); + this.push(Array.isArray(data) ? 
Buffer.concat(data) : data); return; } From 5600af9227815a1ecd20508b3501236b2a8e2aa7 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 18:14:10 -0500 Subject: [PATCH 064/130] refactor: simplify resetServerState since reset is synchronous now --- lib/core/sdam/topology.js | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index f50c9a3d4b9..7da456f7b35 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -971,21 +971,16 @@ function executeWriteOperation(args, options, callback) { function resetServerState(server, error, options) { options = Object.assign({}, { clearPool: false }, options); - function resetState() { - server.emit( - 'descriptionReceived', - new ServerDescription(server.description.address, null, { error }) - ); - - process.nextTick(() => server.requestCheck()); - } - if (options.clearPool && server.s.pool) { - server.s.pool.reset(() => resetState()); - return; + server.s.pool.clear(); } - resetState(); + server.emit( + 'descriptionReceived', + new ServerDescription(server.description.address, null, { error }) + ); + + process.nextTick(() => server.requestCheck()); } function translateReadPreference(options) { From 29a4cd6fd852d1675ddd1539c85565855608e050 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 18 Dec 2019 18:14:30 -0500 Subject: [PATCH 065/130] refactor: use spec-compliant variable names for pool settings --- lib/topologies/native_topology.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/topologies/native_topology.js b/lib/topologies/native_topology.js index 51574878d74..ce53f7f3e96 100644 --- a/lib/topologies/native_topology.js +++ b/lib/topologies/native_topology.js @@ -15,7 +15,8 @@ class NativeTopology extends Topology { cursorFactory: Cursor, reconnect: false, emitError: typeof options.emitError === 'boolean' ? options.emitError : true, - size: typeof options.poolSize === 'number' ? options.poolSize : 5, + maxPoolSize: typeof options.poolSize === 'number' ? options.poolSize : 5, + minPoolSize: typeof options.minSize === 'number' ? options.minSize : 0, monitorCommands: typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false } From cc02f838d2c6c71071e62f31def8479fe42fb751 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 21 Dec 2019 07:57:45 -0500 Subject: [PATCH 066/130] refactor: store full ismaster on connection until StreamDescription Storing a local `ServerDescription` is not adequate for the needs of wire protocol determination, in particular when a monitor connection is initially connected. Until then, we will store the full `ismaster` directly on the connection, like we did with the previous connection type. 
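For a sense of why the raw reply is needed, here is a minimal sketch of the kind of wire-protocol check that reads it directly; `supportsOpMsg` is a hypothetical helper used only for illustration, not part of this patch:

    // maxWireVersion >= 6 (MongoDB 3.6+) is the OP_MSG cutoff the wire protocol
    // layer cares about, and it lives on the raw `ismaster` handshake document
    function supportsOpMsg(conn) {
      const ismaster = conn.ismaster; // raw handshake response cached on the connection
      return ismaster != null && ismaster.maxWireVersion >= 6;
    }

Once a `StreamDescription` exists, this information can move there.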
--- lib/cmap/connection.js | 12 +++++++++++- lib/core/sdam/monitor.js | 2 +- output | Bin 0 -> 1214 bytes 3 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 output diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index 97ad4f28718..e54eff90641 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -6,6 +6,7 @@ const MongoError = require('../core/error').MongoError; const MongoNetworkError = require('../core/error').MongoNetworkError; const MongoWriteConcernError = require('../core/error').MongoWriteConcernError; const CommandResult = require('../core/connection/command_result'); +const ServerDescription = require('../core/sdam/server_description').ServerDescription; const wp = require('../core/wireprotocol'); const apm = require('../core/connection/apm'); const updateSessionFromResponse = require('../core/sessions').updateSessionFromResponse; @@ -18,6 +19,7 @@ const kGeneration = Symbol('generation'); const kLastUseTime = Symbol('lastUseTime'); const kClusterTime = Symbol('clusterTime'); const kDescription = Symbol('description'); +const kIsMaster = Symbol('ismaster'); class Connection extends EventEmitter { constructor(stream, options) { @@ -79,6 +81,10 @@ class Connection extends EventEmitter { return this[kDescription]; } + get ismaster() { + return this[kIsMaster]; + } + // the `connect` method stores the result of the handshake ismaster on the connection set ismaster(response) { if (response.compression) { @@ -93,7 +99,11 @@ class Connection extends EventEmitter { } } - this[kDescription] = response; + // TODO: This should be using a `StreamDescription` + this[kDescription] = new ServerDescription(this.address, response); + + // TODO: remove this, and only use the `StreamDescription` in the future + this[kIsMaster] = response; } get generation() { diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index bbf2ec8229f..35ce6b4e157 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -167,7 +167,7 @@ function checkServer(monitor, callback) { } monitor[kConnection] = conn; - callback(undefined, conn.description); + callback(undefined, conn.ismaster); }); } diff --git a/output b/output new file mode 100644 index 0000000000000000000000000000000000000000..e041d078a51d87c891c7e8c0def7ca5c2491fe6f GIT binary patch literal 1214 VcmZQz7zLvtFd71*Autp}000EO00961 literal 0 HcmV?d00001 From ace5c9d9fca9394b13a303ce259552fc027d66dd Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 21 Dec 2019 07:59:26 -0500 Subject: [PATCH 067/130] refactor: show the correct message length on `BinMsg` instances --- lib/cmap/message_stream.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cmap/message_stream.js b/lib/cmap/message_stream.js index 15a4c75a6a4..689cc81a47d 100644 --- a/lib/cmap/message_stream.js +++ b/lib/cmap/message_stream.js @@ -126,7 +126,7 @@ function canCompress(command) { function processMessage(stream, message, callback) { const messageHeader = { - messageLength: message.readInt32LE(0), + length: message.readInt32LE(0), requestId: message.readInt32LE(4), responseTo: message.readInt32LE(8), opCode: message.readInt32LE(12) From b24e33e70ca9925cb7c43f4743addf458ab72168 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 21 Dec 2019 08:03:46 -0500 Subject: [PATCH 068/130] refactor: remove `connections` accessor on unified topology class This accessor is private API, and predominantly used for poorly constructed tests which could track connections another way (by using a `ConnectionSpy` for 
instance). Since the new connection pool doesn't track all connections, it's no longer even possible to provide this functionality this way. --- lib/core/sdam/topology.js | 11 ------ test/functional/connection.test.js | 2 +- test/functional/mongo_client.test.js | 16 +++++---- test/functional/promises_collection.test.js | 1 - test/functional/promises_cursor.test.js | 40 --------------------- test/functional/promises_db.test.js | 8 ++--- test/functional/uri.test.js | 2 +- 7 files changed, 13 insertions(+), 67 deletions(-) delete mode 100644 test/functional/promises_cursor.test.js diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index 7da456f7b35..dc80d2ad110 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -240,17 +240,6 @@ class Topology extends EventEmitter { return BSON.native ? 'c++' : 'js'; } - /** - * All raw connections - * @method - * @return {Connection[]} - */ - connections() { - return Array.from(this.s.servers.values()).reduce((result, server) => { - return result.concat(server.s.pool.allConnections()); - }, []); - } - /** * Initiate server connect * diff --git a/test/functional/connection.test.js b/test/functional/connection.test.js index 6d1ea8c8b7e..3b6084275c3 100644 --- a/test/functional/connection.test.js +++ b/test/functional/connection.test.js @@ -459,7 +459,7 @@ describe('Connection', function() { * @ignore */ it('should correctly reconnect and finish query operation', { - metadata: { requires: { topology: 'single' } }, + metadata: { requires: { topology: 'single', unifiedTopology: false } }, // The actual test we wish to run test: function(done) { diff --git a/test/functional/mongo_client.test.js b/test/functional/mongo_client.test.js index 42f52cb4541..e27fd9c06a1 100644 --- a/test/functional/mongo_client.test.js +++ b/test/functional/mongo_client.test.js @@ -525,7 +525,10 @@ describe('MongoClient', function() { */ it('correctly connect setting keepAlive to 100', { metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + requires: { + topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], + unifiedTopology: false + } }, // The actual test we wish to run @@ -566,7 +569,10 @@ describe('MongoClient', function() { */ it('default keepAlive behavior', { metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + requires: { + topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], + unifiedTopology: false + } }, // The actual test we wish to run @@ -806,11 +812,7 @@ describe('MongoClient', function() { }); it('Should use compression from URI', { - metadata: { - requires: { - topology: ['single'] - } - }, + metadata: { requires: { topology: ['single'], unifiedTopology: false } }, // The actual test we wish to run test: function(done) { diff --git a/test/functional/promises_collection.test.js b/test/functional/promises_collection.test.js index 94f5b9de827..eb92f7f8ec8 100644 --- a/test/functional/promises_collection.test.js +++ b/test/functional/promises_collection.test.js @@ -26,7 +26,6 @@ describe('Promises (Collection)', function() { const client = configuration.newClient(url); client.connect().then(function(client) { - test.equal(1, client.topology.connections().length); var db = client.db(configuration.db); db.collection('insertOne') diff --git a/test/functional/promises_cursor.test.js b/test/functional/promises_cursor.test.js deleted file mode 100644 index b8c613055f3..00000000000 --- 
a/test/functional/promises_cursor.test.js +++ /dev/null @@ -1,40 +0,0 @@ -'use strict'; -var test = require('./shared').assert; -var setupDatabase = require('./shared').setupDatabase; -var f = require('util').format; - -describe('Promises (Cursor)', function() { - before(function() { - return setupDatabase(this.configuration); - }); - - it('Should correctly execute Collection.prototype.insertOne as promise', { - metadata: { - requires: { - topology: ['single'] - } - }, - - // The actual test we wish to run - test: function(done) { - var configuration = this.configuration; - var url = configuration.url(); - url = - url.indexOf('?') !== -1 - ? f('%s&%s', url, 'maxPoolSize=100') - : f('%s?%s', url, 'maxPoolSize=100'); - - const client = configuration.newClient(url); - client.connect().then(function(client) { - var db = client.db(configuration.db); - test.equal(1, client.topology.connections().length); - - db.collection('insertOne') - .insertOne({ a: 1 }) - .then(function() { - client.close(done); - }); - }); - } - }); -}); diff --git a/test/functional/promises_db.test.js b/test/functional/promises_db.test.js index a5f45c9b687..e06a9e706f4 100644 --- a/test/functional/promises_db.test.js +++ b/test/functional/promises_db.test.js @@ -16,7 +16,7 @@ describe('Promises (Db)', function() { }, // The actual test we wish to run - test: function(done) { + test: function() { var configuration = this.configuration; var url = configuration.url(); url = @@ -25,11 +25,7 @@ describe('Promises (Db)', function() { : f('%s?%s', url, 'maxPoolSize=100'); const client = configuration.newClient(url); - client.connect().then(function(client) { - test.equal(1, client.topology.connections().length); - - client.close(done); - }); + return client.connect().then(() => client.close()); } }); diff --git a/test/functional/uri.test.js b/test/functional/uri.test.js index c99c2014bc6..a98b77e0375 100644 --- a/test/functional/uri.test.js +++ b/test/functional/uri.test.js @@ -14,7 +14,7 @@ describe('URI', function() { { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { requires: { topology: 'single' } }, + metadata: { requires: { topology: 'single', unifiedTopology: false } }, // The actual test we wish to run test: function(done) { From b4ec5b8f1d728d73d0e5449360a3352f5887b707 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 21 Dec 2019 08:28:34 -0500 Subject: [PATCH 069/130] fix(gridfs): make a copy of chunk before writing to server A shared buffer is used for gridfs uploads, but there is a race where the buffer could be changed before the write has a chance to execute. Instead, we cut a copy of the buffer for use during the write operation. 
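A standalone illustration of the snapshot semantics relied on here (not code from this patch, just Node's `Buffer.from` copy behaviour):

    // a staging buffer that gets refilled for every chunk
    const staging = Buffer.alloc(16);
    staging.write('chunk one');

    const enqueued = Buffer.from(staging); // copy taken at enqueue time
    staging.write('chunk two');            // the next chunk arrives before the write runs

    console.log(enqueued.toString('utf8', 0, 9)); // still prints "chunk one"

Passing `staging` itself to the write would have raced with the second `write` call, which is exactly the bug being fixed.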
--- lib/gridfs-stream/upload.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/gridfs-stream/upload.js b/lib/gridfs-stream/upload.js index 40e1eab3ffe..578949a53d7 100644 --- a/lib/gridfs-stream/upload.js +++ b/lib/gridfs-stream/upload.js @@ -422,7 +422,7 @@ function doWrite(_this, chunk, encoding, callback) { if (_this.md5) { _this.md5.update(_this.bufToStore); } - var doc = createChunkDoc(_this.id, _this.n, _this.bufToStore); + var doc = createChunkDoc(_this.id, _this.n, Buffer.from(_this.bufToStore)); ++_this.state.outstandingRequests; ++outstandingRequests; From 0fe028a967663106f836a01cee89ce6563af339d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 29 Dec 2019 08:16:42 -0500 Subject: [PATCH 070/130] test: account for new connection pool in unit test setup The legacy pool had no background task for keeping the minimum pool size up. Now, we need to ensure that the tests set the correct state to ensure that the pool is cleanly shutdown at test completion. --- test/functional/core/server.test.js | 2 +- test/functional/db.test.js | 22 +++++++----- test/functional/mongo_client.test.js | 23 ------------ test/unit/core/sdam_spec.test.js | 4 ++- .../server_selection/select_servers.test.js | 2 ++ test/unit/sdam/server_selection/spec.test.js | 8 +++-- test/unit/sdam/topology.test.js | 35 ++++++++++++------- 7 files changed, 48 insertions(+), 48 deletions(-) diff --git a/test/functional/core/server.test.js b/test/functional/core/server.test.js index c758ad4279f..75328cbb68c 100644 --- a/test/functional/core/server.test.js +++ b/test/functional/core/server.test.js @@ -1007,7 +1007,7 @@ describe('Server tests', function() { err = e; } - client.destroy(err2 => done(err || err2)); + client.close(err2 => done(err || err2)); }); client.on('connect', () => { diff --git a/test/functional/db.test.js b/test/functional/db.test.js index 4f003d3ee4a..e222e71141d 100644 --- a/test/functional/db.test.js +++ b/test/functional/db.test.js @@ -674,18 +674,24 @@ describe('Db', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); + client.connect(function(err, client) { - test.equal(null, err); - var items = []; + expect(err).to.not.exist; + + // run one command to ensure connections exist, otherwise `close` is near immediate + client.db('admin').command({ ping: 1 }, err => { + expect(err).to.not.exist; + + var items = []; + items.push(1); + client.close(function() { + expect(items).to.have.length(2); + done(); + }); - items.push(1); - client.close(function() { - test.equal(2, items.length); - done(); + items.push(2); }); - items.push(2); }); } }); diff --git a/test/functional/mongo_client.test.js b/test/functional/mongo_client.test.js index e27fd9c06a1..463ac3380ac 100644 --- a/test/functional/mongo_client.test.js +++ b/test/functional/mongo_client.test.js @@ -497,29 +497,6 @@ describe('MongoClient', function() { } }); - /** - * @ignore - */ - it('correctly error out when no socket available on MongoClient `connect` with domain', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - // The actual test we wish to run - test: function(done) { - var configuration = this.configuration; - const client = configuration.newClient('mongodb://test.does.not.exist.com:80/test', { - serverSelectionTimeoutMS: 10 - }); - - client.connect(function(err) { - test.ok(err != null); - - done(); - 
}); - } - }); - /** * @ignore */ diff --git a/test/unit/core/sdam_spec.test.js b/test/unit/core/sdam_spec.test.js index 285ee3010d9..b7a0efbc6f3 100644 --- a/test/unit/core/sdam_spec.test.js +++ b/test/unit/core/sdam_spec.test.js @@ -37,7 +37,9 @@ function collectTests() { describe('Server Discovery and Monitoring (spec)', function() { let serverConnect; before(() => { - serverConnect = sinon.stub(Server.prototype, 'connect'); + serverConnect = sinon.stub(Server.prototype, 'connect').callsFake(function() { + this.s.state = 'connected'; + }); }); after(() => { diff --git a/test/unit/sdam/server_selection/select_servers.test.js b/test/unit/sdam/server_selection/select_servers.test.js index 142336efa75..e20d34f6785 100644 --- a/test/unit/sdam/server_selection/select_servers.test.js +++ b/test/unit/sdam/server_selection/select_servers.test.js @@ -53,6 +53,7 @@ describe('selectServers', function() { }); this.sinon.stub(Server.prototype, 'connect').callsFake(function() { + this.s.state = 'connected'; this.emit('connect'); }); @@ -76,6 +77,7 @@ describe('selectServers', function() { it('should disallow selection when the topology is explicitly closed', function(done) { const topology = new Topology('someserver:27019'); this.sinon.stub(Server.prototype, 'connect').callsFake(function() { + this.s.state = 'connected'; this.emit('connect'); }); diff --git a/test/unit/sdam/server_selection/spec.test.js b/test/unit/sdam/server_selection/spec.test.js index 5d5fb621eb1..dddae2501d4 100644 --- a/test/unit/sdam/server_selection/spec.test.js +++ b/test/unit/sdam/server_selection/spec.test.js @@ -55,7 +55,9 @@ function collectSelectionTests(specDir) { describe('Server Selection (spec)', function() { let serverConnect; before(() => { - serverConnect = sinon.stub(Server.prototype, 'connect'); + serverConnect = sinon.stub(Server.prototype, 'connect').callsFake(function() { + this.s.state = 'connected'; + }); }); after(() => { @@ -119,7 +121,9 @@ function collectStalenessTests(specDir) { describe('Max Staleness (spec)', function() { let serverConnect; before(() => { - serverConnect = sinon.stub(Server.prototype, 'connect'); + serverConnect = sinon.stub(Server.prototype, 'connect').callsFake(function() { + this.s.state = 'connected'; + }); }); after(() => { diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js index fcb688397de..e9755511626 100644 --- a/test/unit/sdam/topology.test.js +++ b/test/unit/sdam/topology.test.js @@ -16,8 +16,10 @@ describe('Topology (unit)', function() { this.sinon .stub(Topology.prototype, 'selectServer') .callsFake(function(selector, options, callback) { - const server = Array.from(this.s.servers.values())[0]; - callback(null, server); + setTimeout(() => { + const server = Array.from(this.s.servers.values())[0]; + callback(null, server); + }, 50); }); }); @@ -28,6 +30,7 @@ describe('Topology (unit)', function() { it('should check for sessions if connected to a single server and has no known servers', function(done) { const topology = new Topology('someserver:27019'); this.sinon.stub(Server.prototype, 'connect').callsFake(function() { + this.s.state = 'connected'; this.emit('connect'); }); @@ -40,12 +43,15 @@ describe('Topology (unit)', function() { it('should not check for sessions if connected to a single server', function(done) { const topology = new Topology('someserver:27019'); this.sinon.stub(Server.prototype, 'connect').callsFake(function() { - this.emit( - 'descriptionReceived', - new ServerDescription('someserver:27019', { ok: 1, maxWireVersion: 5 }) - 
); - + this.s.state = 'connected'; this.emit('connect'); + + setTimeout(() => { + this.emit( + 'descriptionReceived', + new ServerDescription('someserver:27019', { ok: 1, maxWireVersion: 5 }) + ); + }, 20); }); topology.connect(() => { @@ -57,12 +63,15 @@ describe('Topology (unit)', function() { it('should check for sessions if there are no data-bearing nodes', function(done) { const topology = new Topology('mongos:27019,mongos:27018,mongos:27017'); this.sinon.stub(Server.prototype, 'connect').callsFake(function() { - this.emit( - 'descriptionReceived', - new ServerDescription(this.name, { ok: 1, msg: 'isdbgrid', maxWireVersion: 5 }) - ); - + this.s.state = 'connected'; this.emit('connect'); + + setTimeout(() => { + this.emit( + 'descriptionReceived', + new ServerDescription(this.name, { ok: 1, msg: 'isdbgrid', maxWireVersion: 5 }) + ); + }, 20); }); topology.connect(() => { @@ -94,7 +103,7 @@ describe('Topology (unit)', function() { topology.connect(err => { expect(err).to.not.exist; - topology.command('admin.$cmd', { ping: 1 }, { socketTimeout: 1500 }, (err, result) => { + topology.command('admin.$cmd', { ping: 1 }, { socketTimeout: 250 }, (err, result) => { expect(result).to.not.exist; expect(err).to.exist; expect(err).to.match(/timed out/); From e6c9b0900dce3947d169ce5a3c50768340e6e57a Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 29 Dec 2019 09:03:20 -0500 Subject: [PATCH 071/130] chore: use correct test suffix for `run_each_test` --- test/tools/run_each_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/tools/run_each_test.sh b/test/tools/run_each_test.sh index f7722b8f6c0..78921c8b331 100755 --- a/test/tools/run_each_test.sh +++ b/test/tools/run_each_test.sh @@ -6,4 +6,4 @@ if [ "$#" -ne 1 ]; then fi TEST_PATH=$1 -find $TEST_PATH -type f \( -iname "*_tests.js" ! -iname "*atlas*" ! -path "*node-next*" \) -exec npx mocha {} \; +find $TEST_PATH -type f \( -iname "*.test.js" ! -iname "*atlas*" ! -path "*node-next*" \) -exec npx mocha {} \; From bc1aa3755e1cb5f0e31469894c96aa3698d53b95 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 29 Dec 2019 09:05:15 -0500 Subject: [PATCH 072/130] refactor: track where documents are returned in server response --- lib/cmap/connection.js | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index e54eff90641..fbc80dc78e4 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -282,6 +282,7 @@ function write(command, options, callback) { session: options.session, fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false, noResponse: typeof options.noResponse === 'boolean' ? options.noResponse : false, + documentsReturnedIn: options.documentsReturnedIn, // For BSON parsing promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true, From 6a41cb6a31797a5e6cd5ccf575549848efe7fe40 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 29 Dec 2019 09:05:44 -0500 Subject: [PATCH 073/130] refactor: ensure connections are destroyed if pool was closed If a pool was closed sometime before a cancellation token could be called, and `connect` successfully calls back, we need to ensure that those connections are destroyed as well. 
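A minimal, self-contained sketch of the race being guarded against (the `connect` parameter, `availableConnections`, and the timing here are illustrative only; the actual change adds `pool.closed` checks to `ensureMinPoolSize` and to the connect callback in `createConnection`, shown in the hunks below):

    'use strict';

    // Sketch: the pool is closed while an async connect is still in flight,
    // so the freshly created connection must be destroyed rather than pooled.
    function createConnection(pool, connect, callback) {
      connect((err, connection) => {
        if (err) {
          if (typeof callback === 'function') callback(err);
          return;
        }

        // the pool might have closed since we started trying to create a connection
        if (pool.closed) {
          connection.destroy({ force: true });
          return;
        }

        pool.availableConnections.push(connection);
        if (typeof callback === 'function') callback(null, connection);
      });
    }

    // usage: the pool closes before the (slow) connect completes,
    // so the connection is destroyed instead of being checked in
    const pool = { closed: false, availableConnections: [] };
    const slowConnect = cb =>
      setTimeout(() => cb(null, { destroy: opts => console.log('destroyed', opts) }), 10);

    createConnection(pool, slowConnect, () => {});
    pool.closed = true;

The same guard also short-circuits `ensureMinPoolSize`, so background population of the pool stops as soon as the pool is closed.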
--- lib/cmap/connection_pool.js | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index d3d22191618..6bbda63b0a3 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -334,6 +334,10 @@ class ConnectionPool extends EventEmitter { } function ensureMinPoolSize(pool) { + if (pool.closed) { + return; + } + const minPoolSize = pool.options.minPoolSize; for (let i = pool.totalConnectionCount; i < minPoolSize; ++i) { createConnection(pool); @@ -373,6 +377,12 @@ function createConnection(pool, callback) { return; } + // The pool might have closed since we started trying to create a connection + if (pool.closed) { + connection.destroy({ force: true }); + return; + } + // forward all events from the connection to the pool relayEvents(connection, pool, [ 'commandStarted', From 0cac1254db4a521a81931c14213ae9b2e7fcc6c1 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 29 Dec 2019 09:06:47 -0500 Subject: [PATCH 074/130] refactor: process message after the buffer has been consumed --- lib/cmap/message_stream.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/cmap/message_stream.js b/lib/cmap/message_stream.js index 689cc81a47d..4c648957243 100644 --- a/lib/cmap/message_stream.js +++ b/lib/cmap/message_stream.js @@ -61,8 +61,9 @@ class MessageStream extends Duplex { } const messageBuffer = buffer.slice(0, sizeOfMessage); - processMessage(this, messageBuffer, callback); buffer.consume(sizeOfMessage); + + processMessage(this, messageBuffer, callback); } } From f6bf82ce4c135df1e79559550e0e6f1ca5a07c4f Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 29 Dec 2019 11:07:12 -0500 Subject: [PATCH 075/130] feat: introduce a class for tracking stream specific attributes Similar to the `ServerDescription` we now have a `StreamDescription` which describes details specific to a single stream connected to a MongoDB topology --- lib/cmap/connection.js | 34 +++++++--------------- lib/cmap/stream_description.js | 45 +++++++++++++++++++++++++++++ lib/core/sdam/server_description.js | 3 +- 3 files changed, 57 insertions(+), 25 deletions(-) create mode 100644 lib/cmap/stream_description.js diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index fbc80dc78e4..f2e26666b05 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -6,7 +6,7 @@ const MongoError = require('../core/error').MongoError; const MongoNetworkError = require('../core/error').MongoNetworkError; const MongoWriteConcernError = require('../core/error').MongoWriteConcernError; const CommandResult = require('../core/connection/command_result'); -const ServerDescription = require('../core/sdam/server_description').ServerDescription; +const StreamDescription = require('./stream_description').StreamDescription; const wp = require('../core/wireprotocol'); const apm = require('../core/connection/apm'); const updateSessionFromResponse = require('../core/sessions').updateSessionFromResponse; @@ -34,6 +34,7 @@ class Connection extends EventEmitter { this.closed = false; this.destroyed = false; + this[kDescription] = new StreamDescription(this.address, options); this[kGeneration] = options.generation; this[kLastUseTime] = Date.now(); @@ -69,12 +70,6 @@ class Connection extends EventEmitter { // hook the message stream up to the passed in stream stream.pipe(this[kMessageStream]); this[kMessageStream].pipe(stream); - - if (options.compression) { - this[kDescription] = { compression: options.compression }; - } else { - 
this[kDescription] = undefined; - } } get description() { @@ -87,20 +82,7 @@ class Connection extends EventEmitter { // the `connect` method stores the result of the handshake ismaster on the connection set ismaster(response) { - if (response.compression) { - const compression = this[kDescription].compression; - const compressors = compression.compressors; - response.compression = { - compressor: compressors.filter(c => response.compression.indexOf(c) !== -1)[0] - }; - - if (compression.zlibCompressionLevel) { - response.compression.zlibCompressionLevel = compression.zlibCompressionLevel; - } - } - - // TODO: This should be using a `StreamDescription` - this[kDescription] = new ServerDescription(this.address, response); + this[kDescription].receiveResponse(response); // TODO: remove this, and only use the `StreamDescription` in the future this[kIsMaster] = response; @@ -284,15 +266,19 @@ function write(command, options, callback) { noResponse: typeof options.noResponse === 'boolean' ? options.noResponse : false, documentsReturnedIn: options.documentsReturnedIn, - // For BSON parsing + // for BSON parsing promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true, promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true, promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false, raw: typeof options.raw === 'boolean' ? options.raw : false }; - if (this[kDescription] && this[kDescription].compression) { - operationDescription.agreedCompressor = this[kDescription].compression.compressor; + if (this[kDescription] && this[kDescription].compressor) { + operationDescription.agreedCompressor = this[kDescription].compressor; + + if (this[kDescription].zlibCompressionLevel) { + operationDescription.zlibCompressionLevel = this[kDescription].zlibCompressionLevel; + } } if (typeof options.socketTimeout === 'number') { diff --git a/lib/cmap/stream_description.js b/lib/cmap/stream_description.js new file mode 100644 index 00000000000..923a885791a --- /dev/null +++ b/lib/cmap/stream_description.js @@ -0,0 +1,45 @@ +'use strict'; +const parseServerType = require('../core/sdam/server_description').parseServerType; + +const RESPONSE_FIELDS = [ + 'minWireVersion', + 'maxWireVersion', + 'maxBsonObjectSize', + 'maxMessageSizeBytes', + 'maxWriteBatchSize', + '__nodejs_mock_server__' +]; + +class StreamDescription { + constructor(address, options) { + this.address = address; + this.type = parseServerType(null); + this.minWireVersion = 1; + this.maxWireVersion = 2; + this.maxBsonObjectSize = 16777216; + this.maxMessageSizeBytes = 48000000; + this.maxWriteBatchSize = 100000; + this.compressors = + options && options.compression && Array.isArray(options.compression.compressors) + ? 
options.compression.compressors + : []; + } + + receiveResponse(response) { + this.type = parseServerType(response); + + RESPONSE_FIELDS.forEach(field => { + if (typeof response[field] !== 'undefined') { + this[field] = response[field]; + } + }); + + if (response.compression) { + this.compressor = this.compressors.filter(c => response.compression.indexOf(c) !== -1)[0]; + } + } +} + +module.exports = { + StreamDescription +}; diff --git a/lib/core/sdam/server_description.js b/lib/core/sdam/server_description.js index cffed504ece..b6c96e20a86 100644 --- a/lib/core/sdam/server_description.js +++ b/lib/core/sdam/server_description.js @@ -176,5 +176,6 @@ function parseServerType(ismaster) { } module.exports = { - ServerDescription + ServerDescription, + parseServerType }; From 8fc0dd1f6797d7a9433fa1c2c2e245b1680b5b35 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 08:02:21 -0500 Subject: [PATCH 076/130] test: disable and rework legacy tests to work with unified topology --- .../core/single_mocks/compression.test.js | 478 +++++++++--------- test/functional/cursor.test.js | 3 +- test/functional/db.test.js | 3 +- test/functional/mongo_client.test.js | 3 +- 4 files changed, 242 insertions(+), 245 deletions(-) diff --git a/test/functional/core/single_mocks/compression.test.js b/test/functional/core/single_mocks/compression.test.js index 8a94866a520..48a29b796fe 100644 --- a/test/functional/core/single_mocks/compression.test.js +++ b/test/functional/core/single_mocks/compression.test.js @@ -1,10 +1,11 @@ 'use strict'; const expect = require('chai').expect; -const co = require('co'); const mock = require('mongodb-mock-server'); describe('Single Compression (mocks)', function() { + let server; afterEach(() => mock.cleanup()); + beforeEach(() => mock.createServer().then(s => (server = s))); it("server should recieve list of client's supported compressors in handshake", { metadata: { @@ -19,29 +20,23 @@ describe('Single Compression (mocks)', function() { var serverResponse = Object.assign({}, mock.DEFAULT_ISMASTER); const config = this.configuration; - // Boot the mock - co(function*() { - const server = yield mock.createServer(); - - server.setMessageHandler(request => { - expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); - request.reply(serverResponse); - }); - - const client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'], zlibCompressionLevel: -1 } - }); + server.setMessageHandler(request => { + expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); + request.reply(serverResponse); + }); - client.on('connect', function() { - client.destroy(); - done(); - }); + const client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'], zlibCompressionLevel: -1 } + }); - client.connect(); + client.on('connect', function() { + client.close(done); }); + + client.connect(); } }); @@ -63,80 +58,82 @@ describe('Single Compression (mocks)', function() { let serverResponse = Object.assign({}, mock.DEFAULT_ISMASTER); // Boot the mock - co(function*() { - const server = yield mock.createServer(); + let firstIsMasterSeen = false; + server.setMessageHandler(request => { + var doc = request.document; - server.setMessageHandler(request => { - var doc = request.document; - if 
(doc.ismaster) { + if (doc.ismaster) { + if (!firstIsMasterSeen) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); + expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with no compression request.reply(serverResponse, { compression: { compressor: 'no_compression' } }); currentStep = 1; - return; + firstIsMasterSeen = true; + } else { + // this is an ismaster for initial connection setup in the pool + request.reply(serverResponse); } - if (currentStep === 1) { - expect(server.isCompressed).to.be.false; + return; + } - // Acknowledge insertion using OP_COMPRESSED with no compression - request.reply( - { ok: 1, n: doc.documents.length, lastOp: new Date() }, - { compression: { compressor: 'no_compression' } } - ); - } else if (currentStep === 2 || currentStep === 3) { - expect(server.isCompressed).to.be.false; - // Acknowledge update using OP_COMPRESSED with no compression - request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'no_compression' } }); - } else if (currentStep === 4) { - expect(server.isCompressed).to.be.false; - request.reply({ ok: 1 }, { compression: { compressor: 'no_compression' } }); - } - currentStep++; - }); + if (currentStep === 1) { + expect(server.isCompressed).to.be.false; - // Attempt to connect - var client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'] } - }); + // Acknowledge insertion using OP_COMPRESSED with no compression + request.reply( + { ok: 1, n: doc.documents.length, lastOp: new Date() }, + { compression: { compressor: 'no_compression' } } + ); + } else if (currentStep === 2 || currentStep === 3) { + expect(server.isCompressed).to.be.false; + // Acknowledge update using OP_COMPRESSED with no compression + request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'no_compression' } }); + } else if (currentStep === 4) { + expect(server.isCompressed).to.be.false; + request.reply({ ok: 1 }, { compression: { compressor: 'no_compression' } }); + } + currentStep++; + }); + + // Attempt to connect + var client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'] } + }); - // Connect and try inserting, updating, and removing - // All outbound messages from the driver will be uncompressed - // Inbound messages from the server should be OP_COMPRESSED with no compression - client.on('connect', function(_server) { - _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { - expect(err).to.be.null; - expect(r.result.n).to.equal(1); - - _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function( - _err, - _r - ) { - expect(_err).to.be.null; - expect(_r.result.n).to.equal(1); - - _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { - expect(__err).to.be.null; - expect(__r.result.n).to.equal(1); - - _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { - expect(___err).to.be.null; - expect(___r.result.ok).to.equal(1); - - client.destroy(); - done(); - }); + // Connect and try inserting, updating, and removing + // All outbound messages from the driver will be uncompressed + // Inbound messages from the server should be OP_COMPRESSED with no compression + client.on('connect', function(_server) { + _server.insert('test.test', [{ a: 1, created: new Date() }], 
function(err, r) { + expect(err).to.be.null; + expect(r.result.n).to.equal(1); + + _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function(_err, _r) { + expect(_err).to.be.null; + expect(_r.result.n).to.equal(1); + + _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { + expect(__err).to.be.null; + expect(__r.result.n).to.equal(1); + + _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { + expect(___err).to.be.null; + expect(___r.result.ok).to.equal(1); + + client.close(done); }); }); }); }); - - client.connect(); }); + + client.connect(); } } ); @@ -160,79 +157,77 @@ describe('Single Compression (mocks)', function() { compression: ['snappy'] }); - // Boot the mock - co(function*() { - const server = yield mock.createServer(); - - server.setMessageHandler(request => { - var doc = request.document; - if (doc.ismaster) { + let firstIsMasterSeen = false; + server.setMessageHandler(request => { + var doc = request.document; + if (doc.ismaster) { + if (!firstIsMasterSeen) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with snappy request.reply(serverResponse, { compression: { compressor: 'snappy' } }); currentStep = 1; - return; + firstIsMasterSeen = true; + } else { + request.reply(serverResponse); } - if (currentStep === 1) { - expect(server.isCompressed).to.be.true; - // Acknowledge insertion using OP_COMPRESSED with snappy - request.reply( - { ok: 1, n: doc.documents.length, lastOp: new Date() }, - { compression: { compressor: 'snappy' } } - ); - } else if (currentStep === 2 || currentStep === 3) { - expect(server.isCompressed).to.be.true; - // Acknowledge update using OP_COMPRESSED with snappy - request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'snappy' } }); - } else if (currentStep === 4) { - expect(server.isCompressed).to.be.true; - request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); - } - currentStep++; - }); + return; + } - var client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'] } - }); + if (currentStep === 1) { + expect(server.isCompressed).to.be.true; + // Acknowledge insertion using OP_COMPRESSED with snappy + request.reply( + { ok: 1, n: doc.documents.length, lastOp: new Date() }, + { compression: { compressor: 'snappy' } } + ); + } else if (currentStep === 2 || currentStep === 3) { + expect(server.isCompressed).to.be.true; + // Acknowledge update using OP_COMPRESSED with snappy + request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'snappy' } }); + } else if (currentStep === 4) { + expect(server.isCompressed).to.be.true; + request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + } + currentStep++; + }); + + var client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'] } + }); + + // Connect and try inserting, updating, and removing + // All outbound messages from the driver (after initial connection) will be OP_COMPRESSED using snappy + // Inbound messages from the server should be OP_COMPRESSED with snappy + client.on('connect', function(_server) { + _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { + expect(err).to.be.null; + expect(r.result.n).to.equal(1); - // 
Connect and try inserting, updating, and removing - // All outbound messages from the driver (after initial connection) will be OP_COMPRESSED using snappy - // Inbound messages from the server should be OP_COMPRESSED with snappy - client.on('connect', function(_server) { - _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { - expect(err).to.be.null; - expect(r.result.n).to.equal(1); - - _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function( - _err, - _r - ) { - expect(_err).to.be.null; - expect(_r.result.n).to.equal(1); - - _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { - expect(__err).to.be.null; - expect(__r.result.n).to.equal(1); - - _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { - expect(___err).to.be.null; - expect(___r.result.ok).to.equal(1); - - client.destroy(); - done(); - }); + _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function(_err, _r) { + expect(_err).to.be.null; + expect(_r.result.n).to.equal(1); + + _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { + expect(__err).to.be.null; + expect(__r.result.n).to.equal(1); + + _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { + expect(___err).to.be.null; + expect(___r.result.ok).to.equal(1); + + client.close(done); }); }); }); }); - - client.connect(); }); + + client.connect(); } } ); @@ -249,7 +244,6 @@ describe('Single Compression (mocks)', function() { test: function(done) { const config = this.configuration; - var server = null; var currentStep = 0; // Prepare the server's response @@ -257,80 +251,80 @@ describe('Single Compression (mocks)', function() { compression: ['zlib'] }); - // Boot the mock - co(function*() { - server = yield mock.createServer(); - - server.setMessageHandler(request => { - var doc = request.document; - if (doc.ismaster) { + let firstIsMasterSeen = false; + server.setMessageHandler(request => { + var doc = request.document; + if (doc.ismaster) { + if (!firstIsMasterSeen) { expect(request.response.documents[0].compression).to.have.members(['snappy', 'zlib']); expect(server.isCompressed).to.be.false; // Acknowledge connection using OP_COMPRESSED with zlib request.reply(serverResponse, { compression: { compressor: 'zlib' } }); currentStep = 1; + firstIsMasterSeen = true; + return; + } else { + request.reply(serverResponse); return; } + } - if (currentStep === 1) { - expect(server.isCompressed).to.be.true; - // Acknowledge insertion using OP_COMPRESSED with zlib - request.reply( - { ok: 1, n: doc.documents.length, lastOp: new Date() }, - { compression: { compressor: 'zlib' } } - ); - } else if (currentStep === 2 || currentStep === 3) { - // Acknowledge update using OP_COMPRESSED with zlib - expect(server.isCompressed).to.be.true; - request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'zlib' } }); - } else if (currentStep === 4) { - expect(server.isCompressed).to.be.true; - request.reply({ ok: 1 }, { compression: { compressor: 'zlib' } }); - } - currentStep++; - }); + if (currentStep === 1) { + expect(server.isCompressed).to.be.true; - // Attempt to connect - var client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'] } - }); + // Acknowledge insertion using OP_COMPRESSED with zlib + request.reply( + { ok: 1, n: doc.documents.length, lastOp: new Date() }, + { compression: { compressor: 'zlib' } } + ); + } else if (currentStep 
=== 2 || currentStep === 3) { + // Acknowledge update using OP_COMPRESSED with zlib + expect(server.isCompressed).to.be.true; + request.reply({ ok: 1, n: 1 }, { compression: { compressor: 'zlib' } }); + } else if (currentStep === 4) { + expect(server.isCompressed).to.be.true; + request.reply({ ok: 1 }, { compression: { compressor: 'zlib' } }); + } + currentStep++; + }); + + // Attempt to connect + var client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'] } + }); + + // Connect and try inserting, updating, and removing + // All outbound messages from the driver (after initial connection) will be OP_COMPRESSED using zlib + // Inbound messages from the server should be OP_COMPRESSED with zlib + client.on('connect', function(_server) { + _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { + expect(err).to.be.null; + expect(r.result.n).to.equal(1); + + _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function(_err, _r) { + expect(_err).to.be.null; + expect(_r.result.n).to.equal(1); + + _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { + expect(__err).to.be.null; + expect(__r.result.n).to.equal(1); - // Connect and try inserting, updating, and removing - // All outbound messages from the driver (after initial connection) will be OP_COMPRESSED using zlib - // Inbound messages from the server should be OP_COMPRESSED with zlib - client.on('connect', function(_server) { - _server.insert('test.test', [{ a: 1, created: new Date() }], function(err, r) { - expect(err).to.be.null; - expect(r.result.n).to.equal(1); - - _server.update('test.test', { q: { a: 1 }, u: { $set: { b: 1 } } }, function( - _err, - _r - ) { - expect(_err).to.be.null; - expect(_r.result.n).to.equal(1); - - _server.remove('test.test', { q: { a: 1 } }, function(__err, __r) { - expect(__err).to.be.null; - expect(__r.result.n).to.equal(1); - - _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { - expect(___err).to.be.null; - expect(___r.result.ok).to.equal(1); - - client.destroy(); - done(); - }); + _server.command('system.$cmd', { ping: 1 }, function(___err, ___r) { + expect(___err).to.be.null; + expect(___r.result.ok).to.equal(1); + + client.close(); + done(); }); }); }); }); - - client.connect(); }); + + client.connect(); } } ); @@ -345,7 +339,6 @@ describe('Single Compression (mocks)', function() { test: function(done) { const config = this.configuration; - var server = null; var currentStep = 0; // Prepare the server's response @@ -353,13 +346,11 @@ describe('Single Compression (mocks)', function() { compression: ['snappy'] }); - // Boot the mock - co(function*() { - server = yield mock.createServer(); - - server.setMessageHandler(request => { - const doc = request.document; - if (doc.ismaster) { + let firstIsMasterSeen = false; + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + if (!firstIsMasterSeen) { if (doc.compression == null) { expect(server.isCompressed).to.be.false; request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); @@ -371,56 +362,59 @@ describe('Single Compression (mocks)', function() { // Acknowledge connection using OP_COMPRESSED with snappy request.reply(serverResponse, { compression: { compressor: 'snappy' } }); currentStep = 1; + firstIsMasterSeen = true; + return; + } else { + request.reply(serverResponse); return; } + } - if (currentStep === 
1) { - expect(server.isCompressed).to.be.true; - // Acknowledge ping using OP_COMPRESSED with snappy - request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); - } else if (currentStep >= 2) { - expect(server.isCompressed).to.be.false; - // Acknowledge further uncompressible commands using OP_COMPRESSED with snappy - request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); - } - currentStep++; - }); + if (currentStep === 1) { + expect(server.isCompressed).to.be.true; + // Acknowledge ping using OP_COMPRESSED with snappy + request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + } else if (currentStep >= 2) { + expect(server.isCompressed).to.be.false; + // Acknowledge further uncompressible commands using OP_COMPRESSED with snappy + request.reply({ ok: 1 }, { compression: { compressor: 'snappy' } }); + } + currentStep++; + }); - var client = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1, - compression: { compressors: ['snappy', 'zlib'] } - }); + var client = config.newTopology(server.address().host, server.address().port, { + connectionTimeout: 5000, + socketTimeout: 1000, + size: 1, + compression: { compressors: ['snappy', 'zlib'] } + }); - // Connect and try some commands, checking that uncompressible commands are indeed not compressed - client.on('connect', function(_server) { - _server.command('system.$cmd', { ping: 1 }, function(err, r) { - expect(err).to.be.null; - expect(r.result.ok).to.equal(1); + // Connect and try some commands, checking that uncompressible commands are indeed not compressed + client.on('connect', function(_server) { + _server.command('system.$cmd', { ping: 1 }, function(err, r) { + expect(err).to.be.null; + expect(r.result.ok).to.equal(1); - _server.command('system.$cmd', { ismaster: 1 }, function(_err, _r) { - expect(_err).to.be.null; - expect(_r.result.ok).to.equal(1); + _server.command('system.$cmd', { ismaster: 1 }, function(_err, _r) { + expect(_err).to.be.null; + expect(_r.result.ok).to.equal(1); - _server.command('system.$cmd', { getnonce: 1 }, function(__err, __r) { - expect(__err).to.be.null; - expect(__r.result.ok).to.equal(1); + _server.command('system.$cmd', { getnonce: 1 }, function(__err, __r) { + expect(__err).to.be.null; + expect(__r.result.ok).to.equal(1); - _server.command('system.$cmd', { ismaster: 1 }, function(___err, ___r) { - expect(___err).to.be.null; - expect(___r.result.ok).to.equal(1); + _server.command('system.$cmd', { ismaster: 1 }, function(___err, ___r) { + expect(___err).to.be.null; + expect(___r.result.ok).to.equal(1); - client.destroy(); - done(); - }); + client.close(done); }); }); }); }); - - client.connect(); }); + + client.connect(); } }); }); diff --git a/test/functional/cursor.test.js b/test/functional/cursor.test.js index 8e5c42cece8..5661376778b 100644 --- a/test/functional/cursor.test.js +++ b/test/functional/cursor.test.js @@ -2148,7 +2148,8 @@ describe('Cursor', function() { metadata: { requires: { mongodb: '<=3.5.0', // NOTE: remove this when SERVER-30576 is resolved - topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] + topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], + unifiedTopology: false } }, diff --git a/test/functional/db.test.js b/test/functional/db.test.js index e222e71141d..b9503f35034 100644 --- a/test/functional/db.test.js +++ b/test/functional/db.test.js @@ -667,7 +667,8 @@ describe('Db', function() { metadata: { requires: { topology: 
['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], - mongodb: '>= 2.8.0' + mongodb: '>= 2.8.0', + unifiedTopology: false } }, diff --git a/test/functional/mongo_client.test.js b/test/functional/mongo_client.test.js index 463ac3380ac..2bc742f2428 100644 --- a/test/functional/mongo_client.test.js +++ b/test/functional/mongo_client.test.js @@ -345,7 +345,8 @@ describe('MongoClient', function() { it('Should correctly set MaxPoolSize on replicaset server', { metadata: { requires: { - topology: ['replicaset'] + topology: ['replicaset'], + unifiedTopology: false } }, From e38fa85599917881edbd3f442ac7b3bbd31dc214 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 08:03:36 -0500 Subject: [PATCH 077/130] refactor: use the connection's address for monitoring events --- lib/core/connection/apm.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/core/connection/apm.js b/lib/core/connection/apm.js index 9bec4cec4d5..3e01bb63e8a 100644 --- a/lib/core/connection/apm.js +++ b/lib/core/connection/apm.js @@ -23,7 +23,7 @@ const namespace = command => command.ns; const databaseName = command => command.ns.split('.')[0]; const collectionName = command => command.ns.split('.')[1]; const generateConnectionId = pool => - pool.options ? `${pool.options.host}:${pool.options.port}` : pool.id; + pool.options ? `${pool.options.host}:${pool.options.port}` : pool.address; const maybeRedact = (commandName, result) => (SENSITIVE_COMMANDS.has(commandName) ? {} : result); const LEGACY_FIND_QUERY_MAP = { From 509a118309b208511367082b4e3ffb02e162db10 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 08:04:05 -0500 Subject: [PATCH 078/130] refactor: server trampoline should always indicate pool is opened --- lib/cmap/connection.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index f2e26666b05..a8a106fa477 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -177,7 +177,7 @@ function makeServerTrampoline(connection) { clusterTime: connection[kClusterTime], s: { bson: connection.bson, - pool: { write: write.bind(connection) } + pool: { write: write.bind(connection), isConnected: () => true } } }; } @@ -223,7 +223,7 @@ function messageHandler(conn) { return; } - if (document.ok === 0 || document.$err || document.errmsg || document.code) { + if (document.ok === 0 || document.$err || document.errmsg) { callback(new MongoError(document)); return; } From 9dd3939dd9c501fc5e8a9954d45eb0d74e19f15f Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 08:06:01 -0500 Subject: [PATCH 079/130] feat: integrate CMAP connection pool into unified topology NODE-2402 --- lib/core/sdam/server.js | 300 ++++++++++++++-------------------------- 1 file changed, 106 insertions(+), 194 deletions(-) diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index d37c008026f..5ce601331ac 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -1,16 +1,14 @@ 'use strict'; const EventEmitter = require('events'); +const ConnectionPool = require('../../cmap/connection_pool').ConnectionPool; const MongoError = require('../error').MongoError; -const Pool = require('../connection/pool'); const relayEvents = require('../utils').relayEvents; -const wireProtocol = require('../wireprotocol'); const BSON = require('../connection/utils').retrieveBSON(); const createClientInfo = require('../topologies/shared').createClientInfo; const Logger = require('../connection/logger'); 
const ServerDescription = require('./server_description').ServerDescription; const ReadPreference = require('../topologies/read_preference'); const Monitor = require('./monitor').Monitor; -const MongoParseError = require('../error').MongoParseError; const MongoNetworkError = require('../error').MongoNetworkError; const collationNotSupported = require('../utils').collationNotSupported; const debugOptions = require('../connection/utils').debugOptions; @@ -103,14 +101,27 @@ class Server extends EventEmitter { ]), // client metadata for the initial handshake clientInfo: createClientInfo(options), - // the connection pool - pool: null, // the server state state: STATE_CLOSED, credentials: options.credentials, topology }; + // create the connection pool + // NOTE: this used to happen in `connect`, we supported overriding pool options there + const addressParts = this.description.address.split(':'); + const poolOptions = Object.assign( + { host: addressParts[0], port: parseInt(addressParts[1], 10), bson: this.s.bson }, + options + ); + + this.s.pool = new ConnectionPool(poolOptions); + relayEvents(this.s.pool, this, ['commandStarted', 'commandSucceeded', 'commandFailed']); + this.s.pool.on('clusterTimeReceived', clusterTime => { + this.clusterTime = clusterTime; + }); + + // create the monitor this[kMonitor] = new Monitor(this, this.s.options); relayEvents(this[kMonitor], this, [ 'serverHeartbeatStarted', @@ -122,7 +133,7 @@ class Server extends EventEmitter { ]); this[kMonitor].on('resetConnectionPool', () => { - this.s.pool.reset(); + this.s.pool.clear(); }); this[kMonitor].on('serverHeartbeatFailed', event => { @@ -135,6 +146,11 @@ class Server extends EventEmitter { error: event.failure }) ); + + if (this.s.state === STATE_CONNECTING) { + this.emit('error', new MongoNetworkError(event.failure)); + this.destroy(); + } }); this[kMonitor].on('serverHeartbeatSucceeded', event => { @@ -144,6 +160,11 @@ class Server extends EventEmitter { roundTripTime: event.duration }) ); + + if (this.s.state === STATE_CONNECTING) { + stateTransition(this, STATE_CONNECTED); + this.emit('connect', this); + } }); } @@ -165,57 +186,21 @@ class Server extends EventEmitter { /** * Initiate server connect */ - connect(options) { - options = options || {}; - - // do not allow connect to be called on anything that's not disconnected - if (this.s.pool && !this.s.pool.isDisconnected() && !this.s.pool.isDestroyed()) { - throw new MongoError(`Server instance in invalid state ${this.s.pool.state}`); - } - - // create a pool - const addressParts = this.description.address.split(':'); - const poolOptions = Object.assign( - { host: addressParts[0], port: parseInt(addressParts[1], 10) }, - this.s.options, - options, - { bson: this.s.bson } - ); - - // NOTE: reconnect is explicitly false because of the server selection loop - poolOptions.reconnect = false; - poolOptions.legacyCompatMode = false; - - this.s.pool = new Pool(this, poolOptions); - - // setup listeners - this.s.pool.on('parseError', parseErrorEventHandler(this)); - - this.s.pool.on('drain', err => { - this.emit('error', err); - }); - - // it is unclear whether consumers should even know about these events - // this.s.pool.on('timeout', timeoutEventHandler(this)); - // this.s.pool.on('reconnect', reconnectEventHandler(this)); - // this.s.pool.on('reconnectFailed', errorEventHandler(this)); - - // relay all command monitoring events - relayEvents(this.s.pool, this, ['commandStarted', 'commandSucceeded', 'commandFailed']); - + connect() { stateTransition(this, 
STATE_CONNECTING); - - this.s.pool.connect(connectEventHandler(this)); + this[kMonitor].connect(); } /** * Destroy the server connection * + * @param {object} [options] Optional settings * @param {Boolean} [options.force=false] Force destroy the pool */ destroy(options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = Object.assign({}, { force: false }, options); + if (this.s.state === STATE_CLOSED) { if (typeof callback === 'function') { callback(); @@ -226,30 +211,14 @@ class Server extends EventEmitter { stateTransition(this, STATE_CLOSING); - const done = err => { + this[kMonitor].close(); + this.s.pool.close(options, err => { stateTransition(this, STATE_CLOSED); this.emit('closed'); if (typeof callback === 'function') { - callback(err, null); + callback(err); } - }; - - // close the monitor - this[kMonitor].close(); - - if (!this.s.pool) { - return done(); - } - - ['close', 'error', 'timeout', 'parseError', 'connect'].forEach(event => { - this.s.pool.removeAllListeners(event); }); - - if (this.s.monitorId) { - clearTimeout(this.s.monitorId); - } - - this.s.pool.destroy(options.force, done); } /** @@ -265,12 +234,13 @@ class Server extends EventEmitter { * * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object} cmd The command hash + * @param {object} [options] Optional settings * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document. 
- * @param {ClientSession} [options.session=null] Session to use for the operation + * @param {ClientSession} [options.session] Session to use for the operation * @param {opResultCallback} callback A callback function */ command(ns, cmd, options, callback) { @@ -285,7 +255,7 @@ class Server extends EventEmitter { const error = basicReadValidations(this, options); if (error) { - return callback(error, null); + return callback(error); } // Clone the options @@ -308,19 +278,23 @@ class Server extends EventEmitter { return; } - wireProtocol.command(this, ns, cmd, options, (err, result) => { - if (err) { - if (options.session && err instanceof MongoNetworkError) { - options.session.serverSession.isDirty = true; - } + this.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (isSDAMUnrecoverableError(err, this)) { - this.emit('error', err); + conn.command(ns, cmd, options, (err, result) => { + if (err) { + if (options.session && err instanceof MongoNetworkError) { + options.session.serverSession.isDirty = true; + } + + if (isSDAMUnrecoverableError(err, this)) { + this.emit('error', err); + } } - } - callback(err, result); - }); + cb(err, result); + }); + }, callback); } /** @@ -337,19 +311,23 @@ class Server extends EventEmitter { return; } - wireProtocol.query(this, ns, cmd, cursorState, options, (err, result) => { - if (err) { - if (options.session && err instanceof MongoNetworkError) { - options.session.serverSession.isDirty = true; - } + this.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (isSDAMUnrecoverableError(err, this)) { - this.emit('error', err); + conn.query(ns, cmd, cursorState, options, (err, result) => { + if (err) { + if (options.session && err instanceof MongoNetworkError) { + options.session.serverSession.isDirty = true; + } + + if (isSDAMUnrecoverableError(err, this)) { + this.emit('error', err); + } } - } - callback(err, result); - }); + cb(err, result); + }); + }, callback); } /** @@ -366,19 +344,23 @@ class Server extends EventEmitter { return; } - wireProtocol.getMore(this, ns, cursorState, batchSize, options, (err, result) => { - if (err) { - if (options.session && err instanceof MongoNetworkError) { - options.session.serverSession.isDirty = true; - } + this.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (isSDAMUnrecoverableError(err, this)) { - this.emit('error', err); + conn.getMore(ns, cursorState, batchSize, options, (err, result) => { + if (err) { + if (options.session && err instanceof MongoNetworkError) { + options.session.serverSession.isDirty = true; + } + + if (isSDAMUnrecoverableError(err, this)) { + this.emit('error', err); + } } - } - callback(err, result); - }); + cb(err, result); + }); + }, callback); } /** @@ -397,15 +379,17 @@ class Server extends EventEmitter { return; } - wireProtocol.killCursors(this, ns, cursorState, (err, result) => { - if (err && isSDAMUnrecoverableError(err, this)) { - this.emit('error', err); - } + this.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (typeof callback === 'function') { - callback(err, result); - } - }); + conn.killCursors(ns, cursorState, (err, result) => { + if (err && isSDAMUnrecoverableError(err, this)) { + this.emit('error', err); + } + + cb(err, result); + }); + }, callback); } /** @@ -417,7 +401,7 @@ class Server extends EventEmitter { * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should 
be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation + * @param {ClientSession} [options.session] Session to use for the operation * @param {opResultCallback} callback A callback function */ insert(ns, ops, options, callback) { @@ -433,7 +417,7 @@ class Server extends EventEmitter { * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation + * @param {ClientSession} [options.session] Session to use for the operation * @param {opResultCallback} callback A callback function */ update(ns, ops, options, callback) { @@ -449,7 +433,7 @@ class Server extends EventEmitter { * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation + * @param {ClientSession} [options.session] Session to use for the operation * @param {opResultCallback} callback A callback function */ remove(ns, ops, options, callback) { @@ -466,24 +450,7 @@ Object.defineProperty(Server.prototype, 'clusterTime', { } }); -function basicWriteValidations(server) { - if (!server.s.pool) { - return new MongoError('server instance is not connected'); - } - - if (server.s.pool.isDestroyed()) { - return new MongoError('server instance pool was destroyed'); - } - - return null; -} - function basicReadValidations(server, options) { - const error = basicWriteValidations(server, options); - if (error) { - return error; - } - if (options.readPreference && !(options.readPreference instanceof ReadPreference)) { return new MongoError('readPreference must be an instance of ReadPreference'); } @@ -504,83 +471,28 @@ function executeWriteOperation(args, options, callback) { return; } - const error = basicWriteValidations(server, options); - if (error) { - callback(error, null); - return; - } - if (collationNotSupported(server, options)) { callback(new MongoError(`server ${server.name} does not support collation`)); return; } - return wireProtocol[op](server, ns, ops, options, (err, result) => { - if (err) { - if (options.session && err instanceof MongoNetworkError) { - options.session.serverSession.isDirty = true; - } + server.s.pool.withConnection((err, conn, cb) => { + if (err) return cb(err); - if (isSDAMUnrecoverableError(err, server)) { - server.emit('error', err); - } - } - - callback(err, result); - }); -} - -function connectEventHandler(server) { - return function(err, conn) { - if (server.s.state === STATE_CLOSING || server.s.state === STATE_CLOSED) { - return; - } - - if (err) { - server.emit('error', new MongoNetworkError(err)); - - stateTransition(server, STATE_CLOSED); - server.emit('close'); - return; - } - - const ismaster = conn.ismaster; - server.s.lastIsMasterMS = conn.lastIsMasterMS; - if (conn.agreedCompressor) { - server.s.pool.options.agreedCompressor = conn.agreedCompressor; - } - - if (conn.zlibCompressionLevel) { - 
server.s.pool.options.zlibCompressionLevel = conn.zlibCompressionLevel; - } - - if (conn.ismaster.$clusterTime) { - const $clusterTime = conn.ismaster.$clusterTime; - server.s.sclusterTime = $clusterTime; - } - - // log the connection event if requested - if (server.s.logger.isInfo()) { - server.s.logger.info( - `server ${server.name} connected with ismaster [${JSON.stringify(ismaster)}]` - ); - } - - // start up the server monitor - // TODO: move this to `connect` when new connection pool is installed - server[kMonitor].connect(); + conn[op](ns, ops, options, (err, result) => { + if (err) { + if (options.session && err instanceof MongoNetworkError) { + options.session.serverSession.isDirty = true; + } - // we are connected and handshaked (guaranteed by the pool) - stateTransition(server, STATE_CONNECTED); - server.emit('connect', server); - }; -} + if (isSDAMUnrecoverableError(err, server)) { + server.emit('error', err); + } + } -function parseErrorEventHandler(server) { - return function(err) { - stateTransition(this, STATE_CLOSED); - server.emit('error', new MongoParseError(err)); - }; + cb(err, result); + }); + }, callback); } module.exports = { From 33a56cf665b80cf45fe5f19856e1a353f9eb73f6 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 08:24:16 -0500 Subject: [PATCH 080/130] test: must continue to use common `destroy` method Until the legacy topologies are removed, we must continue to use the legacy `destroy` method on them. --- test/functional/core/server.test.js | 2 +- test/functional/core/single_mocks/compression.test.js | 11 +++++------ test/functional/mongo_client.test.js | 3 ++- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/functional/core/server.test.js b/test/functional/core/server.test.js index 75328cbb68c..c758ad4279f 100644 --- a/test/functional/core/server.test.js +++ b/test/functional/core/server.test.js @@ -1007,7 +1007,7 @@ describe('Server tests', function() { err = e; } - client.close(err2 => done(err || err2)); + client.destroy(err2 => done(err || err2)); }); client.on('connect', () => { diff --git a/test/functional/core/single_mocks/compression.test.js b/test/functional/core/single_mocks/compression.test.js index 48a29b796fe..6d135c01b6a 100644 --- a/test/functional/core/single_mocks/compression.test.js +++ b/test/functional/core/single_mocks/compression.test.js @@ -33,7 +33,7 @@ describe('Single Compression (mocks)', function() { }); client.on('connect', function() { - client.close(done); + client.destroy(done); }); client.connect(); @@ -126,7 +126,7 @@ describe('Single Compression (mocks)', function() { expect(___err).to.be.null; expect(___r.result.ok).to.equal(1); - client.close(done); + client.destroy(done); }); }); }); @@ -220,7 +220,7 @@ describe('Single Compression (mocks)', function() { expect(___err).to.be.null; expect(___r.result.ok).to.equal(1); - client.close(done); + client.destroy(done); }); }); }); @@ -316,8 +316,7 @@ describe('Single Compression (mocks)', function() { expect(___err).to.be.null; expect(___r.result.ok).to.equal(1); - client.close(); - done(); + client.destroy(done); }); }); }); @@ -407,7 +406,7 @@ describe('Single Compression (mocks)', function() { expect(___err).to.be.null; expect(___r.result.ok).to.equal(1); - client.close(done); + client.destroy(done); }); }); }); diff --git a/test/functional/mongo_client.test.js b/test/functional/mongo_client.test.js index 2bc742f2428..786fe678d03 100644 --- a/test/functional/mongo_client.test.js +++ b/test/functional/mongo_client.test.js @@ -397,7 +397,8 
@@ describe('MongoClient', function() { it('Should correctly set MaxPoolSize on sharded server', { metadata: { requires: { - topology: ['sharded'] + topology: ['sharded'], + unifiedTopology: false } }, From d446be5a793f143747a5c36dc4de245894059384 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 30 Dec 2019 17:37:04 -0500 Subject: [PATCH 081/130] fix: used weighted RTT calculation for server selection --- lib/core/sdam/server.js | 9 +++++++-- lib/core/sdam/server_description.js | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index 5ce601331ac..fc5226a71c4 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -142,7 +142,7 @@ class Server extends EventEmitter { this.emit( 'descriptionReceived', new ServerDescription(this.description.address, null, { - roundTripTime: event.duration, + roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration), error: event.failure }) ); @@ -157,7 +157,7 @@ class Server extends EventEmitter { this.emit( 'descriptionReceived', new ServerDescription(this.description.address, event.reply, { - roundTripTime: event.duration + roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration) }) ); @@ -450,6 +450,11 @@ Object.defineProperty(Server.prototype, 'clusterTime', { } }); +function calculateRoundTripTime(oldRtt, duration) { + const alpha = 0.2; + return alpha * duration + (1 - alpha) * oldRtt; +} + function basicReadValidations(server, options) { if (options.readPreference && !(options.readPreference instanceof ReadPreference)) { return new MongoError('readPreference must be an instance of ReadPreference'); diff --git a/lib/core/sdam/server_description.js b/lib/core/sdam/server_description.js index b6c96e20a86..98dd460e993 100644 --- a/lib/core/sdam/server_description.js +++ b/lib/core/sdam/server_description.js @@ -69,7 +69,7 @@ class ServerDescription { this.address = address; this.error = options.error || null; - this.roundTripTime = options.roundTripTime || 0; + this.roundTripTime = options.roundTripTime || -1; this.lastUpdateTime = Date.now(); this.lastWriteDate = ismaster.lastWrite ? ismaster.lastWrite.lastWriteDate : null; this.opTime = ismaster.lastWrite ? 
ismaster.lastWrite.opTime : null; From a13dc6803209494c41745ab7f9890dac26711849 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 13:03:23 -0500 Subject: [PATCH 082/130] fix: recover on network error during initial connect --- lib/core/sdam/server.js | 9 +++---- lib/core/sdam/topology.js | 6 ++--- test/unit/sdam/monitoring.test.js | 41 +++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 8 deletions(-) diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index fc5226a71c4..f06ae35450a 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -146,11 +146,6 @@ class Server extends EventEmitter { error: event.failure }) ); - - if (this.s.state === STATE_CONNECTING) { - this.emit('error', new MongoNetworkError(event.failure)); - this.destroy(); - } }); this[kMonitor].on('serverHeartbeatSucceeded', event => { @@ -187,6 +182,10 @@ class Server extends EventEmitter { * Initiate server connect */ connect() { + if (this.s.state !== STATE_CLOSED) { + return; + } + stateTransition(this, STATE_CONNECTING); this[kMonitor].connect(); } diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index dc80d2ad110..02a65ac6b17 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -280,9 +280,10 @@ class Topology extends EventEmitter { const readPreference = options.readPreference || ReadPreference.primary; this.selectServer(readPreferenceServerSelector(readPreference), options, (err, server) => { if (err) { - stateTransition(this, STATE_CLOSED); + this.close(); + if (typeof callback === 'function') { - callback(err, null); + callback(err); } else { this.emit('error', err); } @@ -305,7 +306,6 @@ class Topology extends EventEmitter { if (typeof callback === 'function') callback(err, this); }; - const STATE_CONNECTING = 1; if (server.s.state === STATE_CONNECTING) { server.once('error', errorHandler); server.once('connect', connectHandler); diff --git a/test/unit/sdam/monitoring.test.js b/test/unit/sdam/monitoring.test.js index 3cb20954a10..23dc1155b1b 100644 --- a/test/unit/sdam/monitoring.test.js +++ b/test/unit/sdam/monitoring.test.js @@ -58,6 +58,47 @@ describe('monitoring', function() { }); }); + it('should recover on error during initial connect', function(done) { + let acceptConnections = false; + mockServer.setMessageHandler(request => { + if (!acceptConnections) { + request.connection.destroy(); + return; + } + + const doc = request.document; + if (doc.ismaster) { + request.reply(Object.assign({}, mock.DEFAULT_ISMASTER)); + } else if (doc.endSessions) { + request.reply({ ok: 1 }); + } + }); + + setTimeout(() => { + acceptConnections = true; + }, 250); + + // set `heartbeatFrequencyMS` to 250ms to force a quick monitoring check, and wait 500ms to validate below + const topology = new Topology(mockServer.uri(), { heartbeatFrequencyMS: 250 }); + topology.connect(err => { + expect(err).to.not.exist; + + setTimeout(() => { + expect(topology) + .property('description') + .property('servers') + .to.have.length(1); + + const serverDescription = Array.from(topology.description.servers.values())[0]; + expect(serverDescription) + .property('roundTripTime') + .to.be.greaterThan(0); + + topology.close(done); + }, 500); + }); + }); + describe('Monitor', function() { it('should connect and issue an initial server check', function(done) { mockServer.setMessageHandler(request => { From 7fcbeb57c6b164f654a1b0e7cbc85b568d613daf Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 19:02:27 -0500 Subject: [PATCH 083/130] 
fix: reschedule monitoring before emitting heartbeat events --- lib/core/sdam/monitor.js | 20 +++++++++++--------- test/unit/sdam/monitoring.test.js | 3 +-- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index 35ce6b4e157..095e54b3d69 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -213,33 +213,35 @@ function rescheduleMonitoring(monitor, ms) { return; } + stateTransition(monitor, STATE_IDLE); + monitor[kLastCheckTime] = process.hrtime(); monitor[kMonitorId] = setTimeout(() => { monitor[kMonitorId] = undefined; monitor.requestCheck(); }, ms || heartbeatFrequencyMS); - - stateTransition(monitor, STATE_IDLE); } function successHandler(monitor, start, isMaster) { + rescheduleMonitoring(monitor); + process.nextTick(() => monitor.emit( 'serverHeartbeatSucceeded', new ServerHeartbeatSucceededEvent(calculateDurationInMs(start), isMaster, monitor.address) ) ); - - rescheduleMonitoring(monitor); } function failureHandler(monitor, start, err) { - monitor.emit( - 'serverHeartbeatFailed', - new ServerHeartbeatFailedEvent(calculateDurationInMs(start), err, monitor.address) - ); - rescheduleMonitoring(monitor); + + process.nextTick(() => + monitor.emit( + 'serverHeartbeatFailed', + new ServerHeartbeatFailedEvent(calculateDurationInMs(start), err, monitor.address) + ) + ); } module.exports = { diff --git a/test/unit/sdam/monitoring.test.js b/test/unit/sdam/monitoring.test.js index 23dc1155b1b..8bac318883f 100644 --- a/test/unit/sdam/monitoring.test.js +++ b/test/unit/sdam/monitoring.test.js @@ -78,8 +78,7 @@ describe('monitoring', function() { acceptConnections = true; }, 250); - // set `heartbeatFrequencyMS` to 250ms to force a quick monitoring check, and wait 500ms to validate below - const topology = new Topology(mockServer.uri(), { heartbeatFrequencyMS: 250 }); + const topology = new Topology(mockServer.uri()); topology.connect(err => { expect(err).to.not.exist; From dd1e7170729d2c305e0d75673cbab38a773c4365 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 08:28:03 -0500 Subject: [PATCH 084/130] refactor: ensure StreamDescription starts with unknown wire version --- lib/cmap/stream_description.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cmap/stream_description.js b/lib/cmap/stream_description.js index 923a885791a..e806a5f6522 100644 --- a/lib/cmap/stream_description.js +++ b/lib/cmap/stream_description.js @@ -14,8 +14,8 @@ class StreamDescription { constructor(address, options) { this.address = address; this.type = parseServerType(null); - this.minWireVersion = 1; - this.maxWireVersion = 2; + this.minWireVersion = undefined; + this.maxWireVersion = undefined; this.maxBsonObjectSize = 16777216; this.maxMessageSizeBytes = 48000000; this.maxWriteBatchSize = 100000; From 3844dd8fb78444b1d8484a798b56874d40c0420f Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 08:29:45 -0500 Subject: [PATCH 085/130] refactor: support passing an `autoEncrypter` to the connection pool --- lib/cmap/connection.js | 14 +++++++++++++- lib/cmap/connection_pool.js | 3 ++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index a8a106fa477..5de1b96e7b4 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -20,6 +20,7 @@ const kLastUseTime = Symbol('lastUseTime'); const kClusterTime = Symbol('clusterTime'); const kDescription = Symbol('description'); const kIsMaster = Symbol('ismaster'); +const 
kAutoEncrypter = Symbol('autoEncrypter'); class Connection extends EventEmitter { constructor(stream, options) { @@ -38,6 +39,11 @@ class Connection extends EventEmitter { this[kGeneration] = options.generation; this[kLastUseTime] = Date.now(); + // retain a reference to an `AutoEncrypter` if present + if (options.autoEncrypter) { + this[kAutoEncrypter] = options.autoEncrypter; + } + // setup parser stream and message handling this[kQueue] = new Map(); this[kMessageStream] = new MessageStream(options); @@ -172,7 +178,7 @@ class Connection extends EventEmitter { /// protocol methods. Eventually, the operation executor will return a `Connection` to execute /// against. function makeServerTrampoline(connection) { - return { + const server = { description: connection.description, clusterTime: connection[kClusterTime], s: { @@ -180,6 +186,12 @@ function makeServerTrampoline(connection) { pool: { write: write.bind(connection), isConnected: () => true } } }; + + if (connection[kAutoEncrypter]) { + server.autoEncrypter = connection[kAutoEncrypter]; + } + + return server; } function messageHandler(conn) { diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 6bbda63b0a3..2a0125447fd 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -129,7 +129,8 @@ class ConnectionPool extends EventEmitter { minPoolSize: typeof options.minPoolSize === 'number' ? options.minPoolSize : 0, maxIdleTimeMS: typeof options.maxIdleTimeMS === 'number' ? options.maxIdleTimeMS : 0, waitQueueTimeoutMS: - typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 10000 + typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 10000, + autoEncrypter: options.autoEncrypter }); if (options.minSize > options.maxSize) { From f13c20b64bd2d453281295bc58dba2ebc5ade453 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 08:30:18 -0500 Subject: [PATCH 086/130] fix: use remote address for stream identifier --- lib/cmap/connection.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index 5de1b96e7b4..06476a05324 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -257,7 +257,7 @@ function messageHandler(conn) { function streamIdentifier(stream) { if (typeof stream.address === 'function') { - return `${stream.address().address}:${stream.address().port}`; + return `${stream.remoteAddress}:${stream.remotePort}`; } return uuidV4().toString('hex'); From 50fa70828ae91a6a20a11bff8e7dac03a8e14f2b Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 08:30:46 -0500 Subject: [PATCH 087/130] refactor: delegate `bypassAutoEncryption` to encryption module --- lib/core/wireprotocol/command.js | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/lib/core/wireprotocol/command.js b/lib/core/wireprotocol/command.js index e6af5dabb9e..214385bbc6f 100644 --- a/lib/core/wireprotocol/command.js +++ b/lib/core/wireprotocol/command.js @@ -12,7 +12,8 @@ const MongoNetworkError = require('../error').MongoNetworkError; const maxWireVersion = require('../utils').maxWireVersion; function isClientEncryptionEnabled(server) { - return server.autoEncrypter; + const wireVersion = maxWireVersion(server); + return wireVersion && server.autoEncrypter; } function command(server, ns, cmd, options, callback) { @@ -152,9 +153,6 @@ function supportsOpMsg(topologyOrServer) { } function _cryptCommand(server, ns, cmd, options, callback) { - const 
shouldBypassAutoEncryption = !!( - server.s.options.autoEncryption && server.s.options.autoEncryption.bypassAutoEncryption - ); const autoEncrypter = server.autoEncrypter; function commandResponseHandler(err, response) { if (err || response == null) { @@ -174,11 +172,6 @@ function _cryptCommand(server, ns, cmd, options, callback) { }); } - if (shouldBypassAutoEncryption) { - _command(server, ns, cmd, options, commandResponseHandler); - return; - } - autoEncrypter.encrypt(ns, cmd, options, (err, encrypted) => { if (err) { callback(err, null); From 548b63a031e8f82dc0309c30054fbfed36f9c082 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 19:12:38 -0500 Subject: [PATCH 088/130] refactor: configure autoEncrypter before topology connect --- lib/operations/connect.js | 119 ++++++++++++++++++++++++++------------ 1 file changed, 83 insertions(+), 36 deletions(-) diff --git a/lib/operations/connect.js b/lib/operations/connect.js index 15e0a6abdb1..b2ab9196f12 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -17,6 +17,7 @@ const Server = require('../topologies/server'); const ServerSessionPool = require('../core').Sessions.ServerSessionPool; const emitDeprecationWarning = require('../utils').emitDeprecationWarning; const fs = require('fs'); +const BSON = require('../core/connection/utils').retrieveBSON(); let client; function loadClient() { @@ -487,6 +488,62 @@ function createTopology(mongoClient, topologyType, options, callback) { // Set default options const servers = translateOptions(options, translationOptions); + // determine CSFLE support + if (options.autoEncryption != null) { + let AutoEncrypter; + try { + require.resolve('mongodb-client-encryption'); + } catch (err) { + callback( + new MongoError( + 'Auto-encryption requested, but the module is not installed. Please add `mongodb-client-encryption` as a dependency of your project' + ) + ); + return; + } + + try { + let mongodbClientEncryption = require('mongodb-client-encryption'); + if (typeof mongodbClientEncryption.extension !== 'function') { + callback( + new MongoError( + 'loaded version of `mongodb-client-encryption` does not have property `extension`. 
Please make sure you are loading the correct version of `mongodb-client-encryption`' + ) + ); + } + AutoEncrypter = mongodbClientEncryption.extension(require('../../index')).AutoEncrypter; + } catch (err) { + callback(err); + return; + } + + const mongoCryptOptions = Object.assign( + { + bson: + options.bson || + new BSON([ + BSON.Binary, + BSON.Code, + BSON.DBRef, + BSON.Decimal128, + BSON.Double, + BSON.Int32, + BSON.Long, + BSON.Map, + BSON.MaxKey, + BSON.MinKey, + BSON.ObjectId, + BSON.BSONRegExp, + BSON.Symbol, + BSON.Timestamp + ]) + }, + options.autoEncryption + ); + + options.autoEncrypter = new AutoEncrypter(mongoClient, mongoCryptOptions); + } + // Create the topology let topology; if (topologyType === 'mongos') { @@ -506,48 +563,38 @@ function createTopology(mongoClient, topologyType, options, callback) { // Open the connection assignTopology(mongoClient, topology); + + // initialize CSFLE if requested + if (options.autoEncrypter) { + options.autoEncrypter.init(err => { + if (err) { + callback(err); + return; + } + + topology.connect(options, err => { + if (err) { + topology.close(true); + callback(err); + return; + } + + callback(undefined, topology); + }); + }); + + return; + } + + // otherwise connect normally topology.connect(options, err => { if (err) { topology.close(true); return callback(err); } - if (options.autoEncryption == null) { - callback(null, topology); - return; - } - - // setup for client side encryption - let AutoEncrypter; - try { - require.resolve('mongodb-client-encryption'); - } catch (err) { - callback( - new MongoError( - 'Auto-encryption requested, but the module is not installed. Please add `mongodb-client-encryption` as a dependency of your project' - ) - ); - return; - } - try { - let mongodbClientEncryption = require('mongodb-client-encryption'); - if (typeof mongodbClientEncryption.extension !== 'function') { - throw new MongoError( - 'loaded version of `mongodb-client-encryption` does not have property `extension`. 
Please make sure you are loading the correct version of `mongodb-client-encryption`' - ); - } - AutoEncrypter = mongodbClientEncryption.extension(require('../../index')).AutoEncrypter; - } catch (err) { - callback(err); - return; - } - - const mongoCryptOptions = Object.assign({}, options.autoEncryption); - topology.s.options.autoEncrypter = new AutoEncrypter(mongoClient, mongoCryptOptions); - topology.s.options.autoEncrypter.init(err => { - if (err) return callback(err, null); - callback(null, topology); - }); + callback(undefined, topology); + return; }); } From 2689823951a3a3ec924209c8a9b0057eeb0851a8 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 19:12:55 -0500 Subject: [PATCH 089/130] chore: don't install explicit version of client encryption module --- .evergreen/run-tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index e22fab4c966..d031023101a 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -27,7 +27,7 @@ if [[ -z "${CLIENT_ENCRYPTION}" ]]; then unset AWS_ACCESS_KEY_ID; unset AWS_SECRET_ACCESS_KEY; else - npm install mongodb-client-encryption@1.0.0 + npm install mongodb-client-encryption fi MONGODB_UNIFIED_TOPOLOGY=${UNIFIED} MONGODB_URI=${MONGODB_URI} npm test From 6ba409a97620fce708900a49bccd5f01707ee4d6 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 31 Dec 2019 19:24:29 -0500 Subject: [PATCH 090/130] chore: reduce max test run time to a more reasonable 500s --- .evergreen/config.yml | 2 +- .evergreen/config.yml.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 12838bebba8..1a83ab81da7 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1,6 +1,6 @@ stepback: true command_type: system -exec_timeout_secs: 1800 +exec_timeout_secs: 500 timeout: - command: shell.exec params: diff --git a/.evergreen/config.yml.in b/.evergreen/config.yml.in index 64681b5adb3..82654bad400 100644 --- a/.evergreen/config.yml.in +++ b/.evergreen/config.yml.in @@ -11,7 +11,7 @@ command_type: system # Protect ourself against rogue test case, or curl gone wild, that runs forever # Good rule of thumb: the averageish length a task takes, times 5 # That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 1800 # 6 minutes is the longest we'll ever run +exec_timeout_secs: 500 # 6 minutes is the longest we'll ever run # What to do when evergreen hits the timeout (`post:` tasks are run automatically) timeout: From 9109360fb89a3b3f0199c7d13b9c7a2341372182 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Thu, 2 Jan 2020 10:02:39 -0500 Subject: [PATCH 091/130] refactor: make event names consistent with spec, remove error types --- lib/cmap/connection_pool.js | 12 +++++------ lib/cmap/errors.js | 6 ++---- lib/cmap/events.js | 28 +++++++++++++------------- test/unit/cmap/connection_pool.test.js | 7 ++++++- 4 files changed, 28 insertions(+), 25 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 2a0125447fd..7e6d4aac341 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -14,8 +14,8 @@ const PoolClosedError = errors.PoolClosedError; const WaitQueueTimeoutError = errors.WaitQueueTimeoutError; const events = require('./events'); -const PoolCreatedEvent = events.PoolCreatedEvent; -const PoolClosedEvent = events.PoolClosedEvent; +const ConnectionPoolCreatedEvent = events.ConnectionPoolCreatedEvent; 
+const ConnectionPoolClosedEvent = events.ConnectionPoolClosedEvent; const ConnectionCreatedEvent = events.ConnectionCreatedEvent; const ConnectionReadyEvent = events.ConnectionReadyEvent; const ConnectionClosedEvent = events.ConnectionClosedEvent; @@ -23,7 +23,7 @@ const ConnectionCheckOutStartedEvent = events.ConnectionCheckOutStartedEvent; const ConnectionCheckOutFailedEvent = events.ConnectionCheckOutFailedEvent; const ConnectionCheckedOutEvent = events.ConnectionCheckedOutEvent; const ConnectionCheckedInEvent = events.ConnectionCheckedInEvent; -const PoolClearedEvent = events.PoolClearedEvent; +const ConnectionPoolClearedEvent = events.ConnectionPoolClearedEvent; const kConnections = Symbol('connections'); const kPermits = Symbol('permits'); @@ -147,7 +147,7 @@ class ConnectionPool extends EventEmitter { this[kWaitQueue] = new Denque(); process.nextTick(() => { - this.emit('connectionPoolCreated', new PoolCreatedEvent(this)); + this.emit('connectionPoolCreated', new ConnectionPoolCreatedEvent(this)); ensureMinPoolSize(this); }); } @@ -223,7 +223,7 @@ class ConnectionPool extends EventEmitter { */ clear() { this[kGeneration] += 1; - this.emit('connectionPoolCleared', new PoolClearedEvent(this)); + this.emit('connectionPoolCleared', new ConnectionPoolClearedEvent(this)); } /** @@ -276,7 +276,7 @@ class ConnectionPool extends EventEmitter { }, err => { this[kConnections].clear(); - this.emit('connectionPoolClosed', new PoolClosedEvent(this)); + this.emit('connectionPoolClosed', new ConnectionPoolClosedEvent(this)); callback(err); } ); diff --git a/lib/cmap/errors.js b/lib/cmap/errors.js index d589699d8f0..87998b05ef4 100644 --- a/lib/cmap/errors.js +++ b/lib/cmap/errors.js @@ -4,8 +4,7 @@ const MongoError = require('../core/error').MongoError; class PoolClosedError extends MongoError { constructor(pool) { super('Attempted to check out a connection from closed connection pool'); - Error.captureStackTrace(this, this.constructor); - this.type = 'PoolClosedError'; + this.name = 'MongoPoolClosedError'; this.address = pool.address; } } @@ -13,8 +12,7 @@ class PoolClosedError extends MongoError { class WaitQueueTimeoutError extends MongoError { constructor(pool) { super('Timed out while checking out a connection from connection pool'); - Error.captureStackTrace(this, this.constructor); - this.type = 'WaitQueueTimeoutError'; + this.name = 'MongoWaitQueueTimeoutError'; this.address = pool.address; } } diff --git a/lib/cmap/events.js b/lib/cmap/events.js index dd9f28a4693..999a31b4366 100644 --- a/lib/cmap/events.js +++ b/lib/cmap/events.js @@ -1,6 +1,6 @@ 'use strict'; -class PoolMonitoringEvent { +class ConnectionPoolMonitoringEvent { constructor(type, pool) { this.time = new Date(); this.type = type; @@ -8,34 +8,34 @@ class PoolMonitoringEvent { } } -class PoolCreatedEvent extends PoolMonitoringEvent { +class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { super('ConnectionPoolCreated', pool); this.options = pool.options; } } -class PoolClosedEvent extends PoolMonitoringEvent { +class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { super('ConnectionPoolClosed', pool); } } -class ConnectionCreatedEvent extends PoolMonitoringEvent { +class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { super('ConnectionCreated', pool); this.connectionId = connection.id; } } -class ConnectionReadyEvent extends PoolMonitoringEvent { +class ConnectionReadyEvent extends 
ConnectionPoolMonitoringEvent { constructor(pool, connection) { super('ConnectionReady', pool); this.connectionId = connection.id; } } -class ConnectionClosedEvent extends PoolMonitoringEvent { +class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection, reason) { super('ConnectionClosed', pool); this.connectionId = connection.id; @@ -43,42 +43,42 @@ class ConnectionClosedEvent extends PoolMonitoringEvent { } } -class ConnectionCheckOutStartedEvent extends PoolMonitoringEvent { +class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { super('ConnectionCheckOutStarted', pool); } } -class ConnectionCheckOutFailedEvent extends PoolMonitoringEvent { +class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, reason) { super('ConnectionCheckOutFailed', pool); this.reason = reason; } } -class ConnectionCheckedOutEvent extends PoolMonitoringEvent { +class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { super('ConnectionCheckedOut', pool); this.connectionId = connection.id; } } -class ConnectionCheckedInEvent extends PoolMonitoringEvent { +class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { super('ConnectionCheckedIn', pool); this.connectionId = connection.id; } } -class PoolClearedEvent extends PoolMonitoringEvent { +class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { super('ConnectionPoolCleared', pool); } } module.exports = { - PoolCreatedEvent, - PoolClosedEvent, + ConnectionPoolCreatedEvent, + ConnectionPoolClosedEvent, ConnectionCreatedEvent, ConnectionReadyEvent, ConnectionClosedEvent, @@ -86,5 +86,5 @@ module.exports = { ConnectionCheckOutFailedEvent, ConnectionCheckedOutEvent, ConnectionCheckedInEvent, - PoolClearedEvent + ConnectionPoolClearedEvent }; diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 2a67f31089d..1c74737fe65 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -250,6 +250,11 @@ describe('Connection Pool', function() { return thread; } + function eventType(event) { + const eventName = event.constructor.name; + return eventName.substring(0, eventName.lastIndexOf('Event')); + } + const OPERATION_FUNCTIONS = { checkOut: function(op) { return PROMISIFIED_POOL_FUNCTIONS.checkOut.call(pool).then(connection => { @@ -307,7 +312,7 @@ describe('Connection Pool', function() { const count = options.count; return new Promise(resolve => { function run() { - if (poolEvents.filter(ev => ev.type === event).length >= count) { + if (poolEvents.filter(ev => eventType(ev) === event).length >= count) { return resolve(); } From d88ada6ee1c2261f14d6b0198186141824ccc2fa Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Fri, 3 Jan 2020 15:04:27 -0500 Subject: [PATCH 092/130] refactor: pull monitoring events visualization into reusable method --- test/tools/sdam_viz | 110 +-------------------------------------- test/tools/utils.js | 122 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 123 insertions(+), 109 deletions(-) diff --git a/test/tools/sdam_viz b/test/tools/sdam_viz index d9ad4053de1..38240a96eac 100755 --- a/test/tools/sdam_viz +++ b/test/tools/sdam_viz @@ -2,7 +2,7 @@ 'use strict'; const { MongoClient } = require('../..'); -const arrayStrictEqual = require('../../lib/core/utils').arrayStrictEqual; +const 
visualizeMonitoringEvents = require('./utils').visualizeMonitoringEvents; const chalk = require('chalk'); const argv = require('yargs') .usage('Usage: $0 [options] ') @@ -32,61 +32,7 @@ async function run() { )} topology` ); - client.on('serverHeartbeatSucceeded', event => - print( - `${chalk.yellow('heartbeat')} ${chalk.green('succeeded')} host: '${ - event.connectionId - }' ${chalk.gray(`(${event.duration} ms)`)}` - ) - ); - - client.on('serverHeartbeatFailed', event => - print( - `${chalk.yellow('heartbeat')} ${chalk.red('failed')} host: '${ - event.connectionId - }' ${chalk.gray(`(${event.duration} ms)`)}` - ) - ); - - // server information - client.on('serverOpening', event => { - print( - `${chalk.cyan('server')} [${event.address}] ${chalk.bold('opening')} in topology#${ - event.topologyId - }` - ); - }); - - client.on('serverClosed', event => { - print( - `${chalk.cyan('server')} [${event.address}] ${chalk.bold('closed')} in topology#${ - event.topologyId - }` - ); - }); - - client.on('serverDescriptionChanged', event => { - print(`${chalk.cyan('server')} [${event.address}] changed:`); - console.log(serverDescriptionDiff(event.previousDescription, event.newDescription)); - }); - - // topology information - client.on('topologyOpening', event => { - print(`${chalk.magenta('topology')} adding topology#${event.topologyId}`); - }); - - client.on('topologyClosed', event => { - print(`${chalk.magenta('topology')} removing topology#${event.topologyId}`); - }); - - client.on('topologyDescriptionChanged', event => { - const diff = topologyDescriptionDiff(event.previousDescription, event.newDescription); - if (diff !== '') { - print(`${chalk.magenta('topology')} [topology#${event.topologyId}] changed:`); - console.log(diff); - } - }); - + visualizeMonitoringEvents(client); await client.connect(); if (argv.workload) { @@ -111,58 +57,6 @@ async function run() { const wait = ms => new Promise(resolve => setTimeout(resolve, ms)); -function diff(lhs, rhs, fields, comparator) { - return fields.reduce((diff, field) => { - if (lhs[field] == null || rhs[field] == null) { - return diff; - } - - if (!comparator(lhs[field], rhs[field])) { - diff.push( - ` ${field}: ${chalk.green(`[${lhs[field]}]`)} => ${chalk.green(`[${rhs[field]}]`)}` - ); - } - - return diff; - }, []); -} - -function serverDescriptionDiff(lhs, rhs) { - const objectIdFields = ['electionId']; - const arrayFields = ['hosts', 'tags']; - const simpleFields = [ - 'type', - 'minWireVersion', - 'me', - 'setName', - 'setVersion', - 'electionId', - 'primary', - 'logicalSessionTimeoutMinutes' - ]; - - return diff(lhs, rhs, simpleFields, (x, y) => x === y) - .concat(diff(lhs, rhs, arrayFields, (x, y) => arrayStrictEqual(x, y))) - .concat(diff(lhs, rhs, objectIdFields, (x, y) => x.equals(y))) - .join(',\n'); -} - -function topologyDescriptionDiff(lhs, rhs) { - const simpleFields = [ - 'type', - 'setName', - 'maxSetVersion', - 'stale', - 'compatible', - 'compatibilityError', - 'logicalSessionTimeoutMinutes', - 'error', - 'commonWireVersion' - ]; - - return diff(lhs, rhs, simpleFields, (x, y) => x === y).join(',\n'); -} - run().catch(error => console.log('Caught', error)); process.on('SIGINT', async function() { workloadInterrupt = true; diff --git a/test/tools/utils.js b/test/tools/utils.js index ec8826d88db..2a82fb6bb37 100644 --- a/test/tools/utils.js +++ b/test/tools/utils.js @@ -2,6 +2,8 @@ const Logger = require('../../lib/core').Logger; const deprecateOptions = require('../../lib/utils').deprecateOptions; +const arrayStrictEqual = 
require('../../lib/core/utils').arrayStrictEqual; +const chalk = require('chalk'); const chai = require('chai'); const expect = chai.expect; const sinonChai = require('sinon-chai'); @@ -55,10 +57,128 @@ ClassWithUndefinedLogger.prototype.getLogger = function() { return undefined; }; +function diff(lhs, rhs, fields, comparator) { + return fields.reduce((diff, field) => { + if (lhs[field] == null || rhs[field] == null) { + return diff; + } + + if (!comparator(lhs[field], rhs[field])) { + diff.push( + ` ${field}: ${chalk.green(`[${lhs[field]}]`)} => ${chalk.green(`[${rhs[field]}]`)}` + ); + } + + return diff; + }, []); +} + +function serverDescriptionDiff(lhs, rhs) { + const objectIdFields = ['electionId']; + const arrayFields = ['hosts', 'tags']; + const simpleFields = [ + 'type', + 'minWireVersion', + 'me', + 'setName', + 'setVersion', + 'electionId', + 'primary', + 'logicalSessionTimeoutMinutes' + ]; + + return diff(lhs, rhs, simpleFields, (x, y) => x === y) + .concat(diff(lhs, rhs, arrayFields, (x, y) => arrayStrictEqual(x, y))) + .concat(diff(lhs, rhs, objectIdFields, (x, y) => x.equals(y))) + .join(',\n'); +} + +function topologyDescriptionDiff(lhs, rhs) { + const simpleFields = [ + 'type', + 'setName', + 'maxSetVersion', + 'stale', + 'compatible', + 'compatibilityError', + 'logicalSessionTimeoutMinutes', + 'error', + 'commonWireVersion' + ]; + + return diff(lhs, rhs, simpleFields, (x, y) => x === y).join(',\n'); +} + +function visualizeMonitoringEvents(client) { + function print(msg) { + console.log(`${chalk.white(new Date().toISOString())} ${msg}`); + } + + client.on('serverHeartbeatStarted', event => + print(`${chalk.yellow('heartbeat')} ${chalk.bold('started')} host: '${event.connectionId}`) + ); + + client.on('serverHeartbeatSucceeded', event => + print( + `${chalk.yellow('heartbeat')} ${chalk.green('succeeded')} host: '${ + event.connectionId + }' ${chalk.gray(`(${event.duration} ms)`)}` + ) + ); + + client.on('serverHeartbeatFailed', event => + print( + `${chalk.yellow('heartbeat')} ${chalk.red('failed')} host: '${ + event.connectionId + }' ${chalk.gray(`(${event.duration} ms)`)}` + ) + ); + + // server information + client.on('serverOpening', event => { + print( + `${chalk.cyan('server')} [${event.address}] ${chalk.bold('opening')} in topology#${ + event.topologyId + }` + ); + }); + + client.on('serverClosed', event => { + print( + `${chalk.cyan('server')} [${event.address}] ${chalk.bold('closed')} in topology#${ + event.topologyId + }` + ); + }); + + client.on('serverDescriptionChanged', event => { + print(`${chalk.cyan('server')} [${event.address}] changed:`); + console.log(serverDescriptionDiff(event.previousDescription, event.newDescription)); + }); + + // topology information + client.on('topologyOpening', event => { + print(`${chalk.magenta('topology')} adding topology#${event.topologyId}`); + }); + + client.on('topologyClosed', event => { + print(`${chalk.magenta('topology')} removing topology#${event.topologyId}`); + }); + + client.on('topologyDescriptionChanged', event => { + const diff = topologyDescriptionDiff(event.previousDescription, event.newDescription); + if (diff !== '') { + print(`${chalk.magenta('topology')} [topology#${event.topologyId}] changed:`); + console.log(diff); + } + }); +} + module.exports = { makeTestFunction, ensureCalledWith, ClassWithLogger, ClassWithoutLogger, - ClassWithUndefinedLogger + ClassWithUndefinedLogger, + visualizeMonitoringEvents }; From 8a39c1dd25134e14e90f6daccdc4faad12856078 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: 
Fri, 3 Jan 2020 16:48:38 -0500 Subject: [PATCH 093/130] test: no longer look for `type` in cmap spec test runner The CMAP spec test runner was assuming a particular shape of error reported, which included a `type` field which is only useful for testing. In order to preserve an order that would actually be useful for users, we only compare error messages now. --- test/unit/cmap/connection_pool.test.js | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 1c74737fe65..cbf24f12af2 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -441,12 +441,10 @@ describe('Connection Pool', function() { const actualEvents = poolEvents.filter(ev => ignoreEvents.indexOf(ev.type) < 0); if (expectedError) { - if (!actualError) { - expect(actualError).to.matchMongoSpec(expectedError); - } else { - const ae = Object.assign({}, actualError, { message: actualError.message }); - expect(ae).to.matchMongoSpec(expectedError); - } + expect(actualError).to.exist; + expect(actualError) + .property('message') + .to.equal(expectedError.message); } else if (actualError) { throw actualError; } From 8d9c2901d9d7c79431a74fce23a32a83f4f3ee31 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Fri, 3 Jan 2020 15:07:40 -0500 Subject: [PATCH 094/130] refactor: support modern `connectTimeoutMS` option name --- lib/core/sdam/monitor.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index 095e54b3d69..11c2e00c5f5 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -48,7 +48,11 @@ class Monitor extends EventEmitter { this.address = server.description.address; this.options = Object.freeze({ connectTimeoutMS: - typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 10000, + typeof options.connectionTimeout === 'number' + ? options.connectionTimeout + : typeof options.connectTimeoutMS === 'number' + ? options.connectTimeoutMS + : 10000, heartbeatFrequencyMS: typeof options.heartbeatFrequencyMS === 'number' ? options.heartbeatFrequencyMS : 10000, minHeartbeatFrequencyMS: From d341c01fe1024638b376b17419f33787c8a2e3d0 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Fri, 3 Jan 2020 15:23:27 -0500 Subject: [PATCH 095/130] refactor: use most recently seen error on second monitor failure --- lib/core/sdam/monitor.js | 11 +++---- test/unit/sdam/monitoring.test.js | 52 +++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 6 deletions(-) diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index 11c2e00c5f5..84bb142b66e 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -182,7 +182,7 @@ function monitorServer(monitor) { // TODO: the next line is a legacy event, remove in v4 process.nextTick(() => monitor.emit('monitoring', monitor[kServer])); - checkServer(monitor, (err, isMaster) => { + checkServer(monitor, (e0, isMaster) => { if (isMaster) { successHandler(monitor, start, isMaster); return; @@ -190,7 +190,7 @@ function monitorServer(monitor) { // otherwise an error occured on initial discovery, also bail if (monitor[kServer].description.type === ServerType.Unknown) { - failureHandler(monitor, start, err); + failureHandler(monitor, start, e0); return; } @@ -199,10 +199,9 @@ function monitorServer(monitor) { // change its type to `Unknown` only after retrying once. 
monitor.emit('resetConnectionPool'); - checkServer(monitor, (error, isMaster) => { - if (error) { - // NOTE: using the _first_ error encountered here - failureHandler(monitor, start, err); + checkServer(monitor, (e1, isMaster) => { + if (e1) { + failureHandler(monitor, start, e1); return; } diff --git a/test/unit/sdam/monitoring.test.js b/test/unit/sdam/monitoring.test.js index 8bac318883f..adfb6b9488f 100644 --- a/test/unit/sdam/monitoring.test.js +++ b/test/unit/sdam/monitoring.test.js @@ -256,5 +256,57 @@ describe('monitoring', function() { monitor.connect(); }); + + it('should report the most recent error on second monitoring failure', function(done) { + let failedCount = 0; + let initialConnectCompleted = false; + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + if (!initialConnectCompleted) { + request.reply(mock.DEFAULT_ISMASTER_36); + initialConnectCompleted = true; + return; + } + + if (failedCount === 0) { + failedCount++; + request.reply({ ok: 0, errmsg: 'first error message' }); + } else { + failedCount++; + request.reply({ ok: 0, errmsg: 'second error message' }); + } + } + }); + + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, { + heartbeatFrequencyMS: 250, + minHeartbeatFrequencyMS: 50 + }); + this.defer(() => monitor.close()); + + let resetRequested = false; + monitor.on('resetConnectionPool', () => (resetRequested = true)); + monitor.on('serverHeartbeatSucceeded', () => { + if (server.description.type === ServerType.Unknown) { + // this is the first successful heartbeat, set the server type + server.description.type = ServerType.Standalone; + return; + } + + done(new Error('unexpected heartbeat success')); + }); + + monitor.on('serverHeartbeatFailed', event => { + expect(resetRequested).to.be.true; + expect(event) + .property('failure') + .to.match(/second error message/); + done(); + }); + + monitor.connect(); + }); }); }); From 27fbe56c7ee45e1109205fe82b4547b0492a6e65 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Fri, 3 Jan 2020 16:30:14 -0500 Subject: [PATCH 096/130] refactor: always emit success/failure events after started Previously we were emitting a success or failure event for heartbeats after the entire check, which would hide an extra attempt from the user. Now we emit started => success/failure events for every individual server check. 
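For illustration only, and not part of the patch series itself: the short sketch below uses a plain Node.js EventEmitter as a stand-in for the driver's internal Monitor (the `monitor` object and the emitted sequence here are assumptions for the sake of the example, not driver API calls), showing the ordering a listener can now expect when a heartbeat check fails once and its immediate retry also fails.

    // Sketch: a plain EventEmitter standing in for the internal Monitor.
    // With per-check events, a failed-then-retried monitoring pass surfaces
    // both attempts to listeners instead of a single terminal event.
    const EventEmitter = require('events');

    const monitor = new EventEmitter();
    const observed = [];
    ['serverHeartbeatStarted', 'serverHeartbeatSucceeded', 'serverHeartbeatFailed'].forEach(name =>
      monitor.on(name, () => observed.push(name))
    );

    // simulate a pass whose first check fails and whose retry also fails
    monitor.emit('serverHeartbeatStarted');
    monitor.emit('serverHeartbeatFailed');
    monitor.emit('serverHeartbeatStarted');
    monitor.emit('serverHeartbeatFailed');

    console.log(observed);
    // [ 'serverHeartbeatStarted', 'serverHeartbeatFailed',
    //   'serverHeartbeatStarted', 'serverHeartbeatFailed' ]

Under the previous behavior the same pass would have produced one started event followed by a single success or failure covering the whole (possibly multi-attempt) check.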
--- lib/core/sdam/monitor.js | 69 ++++++++++---------- lib/core/sdam/server.js | 7 +- lib/core/sdam/server_selection.js | 2 +- test/unit/sdam/monitoring.test.js | 103 +++++++++++++++++++++++++----- 4 files changed, 124 insertions(+), 57 deletions(-) diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index 84bb142b66e..039e33ecc6b 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -136,8 +136,27 @@ function checkServer(monitor, callback) { monitor[kConnection] = undefined; } + const start = process.hrtime(); monitor.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(monitor.address)); + function failureHandler(err) { + monitor.emit( + 'serverHeartbeatFailed', + new ServerHeartbeatFailedEvent(calculateDurationInMs(start), err, monitor.address) + ); + + callback(err); + } + + function successHandler(isMaster) { + monitor.emit( + 'serverHeartbeatSucceeded', + new ServerHeartbeatSucceededEvent(calculateDurationInMs(start), isMaster, monitor.address) + ); + + return callback(undefined, isMaster); + } + if (monitor[kConnection] != null) { const connectTimeoutMS = monitor.options.connectTimeoutMS; monitor[kConnection].command( @@ -146,10 +165,11 @@ function checkServer(monitor, callback) { { socketTimeout: connectTimeoutMS }, (err, result) => { if (err) { - return callback(err); + failureHandler(err); + return; } - return callback(undefined, result.result); + successHandler(result.result); } ); @@ -160,37 +180,37 @@ function checkServer(monitor, callback) { connect(monitor.connectOptions, monitor[kCancellationToken], (err, conn) => { if (err) { monitor[kConnection] = undefined; - callback(err); + failureHandler(err); return; } if (monitor.s.state === STATE_CLOSING || monitor.s.state === STATE_CLOSED) { conn.destroy({ force: true }); - callback(new MongoError('monitor was destroyed')); + failureHandler(new MongoError('monitor was destroyed')); return; } monitor[kConnection] = conn; - callback(undefined, conn.ismaster); + successHandler(conn.ismaster); }); } function monitorServer(monitor) { - const start = process.hrtime(); stateTransition(monitor, STATE_MONITORING); // TODO: the next line is a legacy event, remove in v4 process.nextTick(() => monitor.emit('monitoring', monitor[kServer])); - checkServer(monitor, (e0, isMaster) => { - if (isMaster) { - successHandler(monitor, start, isMaster); + checkServer(monitor, e0 => { + if (e0 == null) { + rescheduleMonitoring(monitor); return; } // otherwise an error occured on initial discovery, also bail if (monitor[kServer].description.type === ServerType.Unknown) { - failureHandler(monitor, start, e0); + monitor.emit('resetServer', e0); + rescheduleMonitoring(monitor); return; } @@ -199,13 +219,12 @@ function monitorServer(monitor) { // change its type to `Unknown` only after retrying once. 
monitor.emit('resetConnectionPool'); - checkServer(monitor, (e1, isMaster) => { + checkServer(monitor, e1 => { if (e1) { - failureHandler(monitor, start, e1); - return; + monitor.emit('resetServer', e1); } - successHandler(monitor, start, isMaster); + rescheduleMonitoring(monitor); }); }); } @@ -225,28 +244,6 @@ function rescheduleMonitoring(monitor, ms) { }, ms || heartbeatFrequencyMS); } -function successHandler(monitor, start, isMaster) { - rescheduleMonitoring(monitor); - - process.nextTick(() => - monitor.emit( - 'serverHeartbeatSucceeded', - new ServerHeartbeatSucceededEvent(calculateDurationInMs(start), isMaster, monitor.address) - ) - ); -} - -function failureHandler(monitor, start, err) { - rescheduleMonitoring(monitor); - - process.nextTick(() => - monitor.emit( - 'serverHeartbeatFailed', - new ServerHeartbeatFailedEvent(calculateDurationInMs(start), err, monitor.address) - ) - ); -} - module.exports = { Monitor }; diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index f06ae35450a..1935c84dd2a 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -136,15 +136,12 @@ class Server extends EventEmitter { this.s.pool.clear(); }); - this[kMonitor].on('serverHeartbeatFailed', event => { + this[kMonitor].on('resetServer', error => { // Revert to an `Unknown` state by emitting a default description with no isMaster, and the // error from the heartbeat attempt this.emit( 'descriptionReceived', - new ServerDescription(this.description.address, null, { - roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration), - error: event.failure - }) + new ServerDescription(this.description.address, null, { error }) ); }); diff --git a/lib/core/sdam/server_selection.js b/lib/core/sdam/server_selection.js index 7c66ab12b77..483703c680a 100644 --- a/lib/core/sdam/server_selection.js +++ b/lib/core/sdam/server_selection.js @@ -299,7 +299,7 @@ function selectServers(topology, selector, timeout, start, callback) { const retrySelection = () => { // ensure all server monitors attempt monitoring soon - topology.s.servers.forEach(server => server.requestCheck()); + topology.s.servers.forEach(server => process.nextTick(() => server.requestCheck())); const iterationTimer = setTimeout(() => { topology.removeListener('topologyDescriptionChanged', descriptionChangedHandler); diff --git a/test/unit/sdam/monitoring.test.js b/test/unit/sdam/monitoring.test.js index adfb6b9488f..6554d1e6c32 100644 --- a/test/unit/sdam/monitoring.test.js +++ b/test/unit/sdam/monitoring.test.js @@ -237,9 +237,13 @@ describe('monitoring', function() { }); this.defer(() => monitor.close()); - monitor.on('serverHeartbeatFailed', () => done(new Error('unexpected heartbeat failure'))); - let resetRequested = false; + monitor.on('serverHeartbeatFailed', () => { + if (resetRequested) { + done(new Error('unexpected heartbeat failure')); + } + }); + monitor.on('resetConnectionPool', () => (resetRequested = true)); monitor.on('serverHeartbeatSucceeded', () => { if (server.description.type === ServerType.Unknown) { @@ -272,9 +276,11 @@ describe('monitoring', function() { if (failedCount === 0) { failedCount++; request.reply({ ok: 0, errmsg: 'first error message' }); - } else { + } else if (failedCount === 1) { failedCount++; request.reply({ ok: 0, errmsg: 'second error message' }); + } else { + request.reply(mock.DEFAULT_ISMASTER_36); } } }); @@ -288,22 +294,89 @@ describe('monitoring', function() { let resetRequested = false; monitor.on('resetConnectionPool', () => (resetRequested = true)); 
- monitor.on('serverHeartbeatSucceeded', () => { - if (server.description.type === ServerType.Unknown) { - // this is the first successful heartbeat, set the server type - server.description.type = ServerType.Standalone; - return; + monitor.once('serverHeartbeatSucceeded', () => { + // this is the first successful heartbeat, set the server type + server.description.type = ServerType.Standalone; + + let failureCount = 0; + monitor.on('serverHeartbeatFailed', event => { + failureCount++; + if (failureCount === 2) { + expect(resetRequested).to.be.true; + expect(event) + .property('failure') + .to.match(/second error message/); + done(); + } + }); + }); + + monitor.connect(); + }); + + it('should report events in the correct order during monitoring failure', function(done) { + let failedCount = 0; + let initialConnectCompleted = false; + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + if (!initialConnectCompleted) { + request.reply(mock.DEFAULT_ISMASTER_36); + initialConnectCompleted = true; + return; + } + + if (failedCount === 0) { + failedCount++; + request.reply({ ok: 0, errmsg: 'first error message' }); + } else { + failedCount++; + request.reply({ ok: 0, errmsg: 'second error message' }); + } } + }); - done(new Error('unexpected heartbeat success')); + const server = new MockServer(mockServer.address()); + const monitor = new Monitor(server, { + heartbeatFrequencyMS: 250, + minHeartbeatFrequencyMS: 50 }); + this.defer(() => monitor.close()); - monitor.on('serverHeartbeatFailed', event => { - expect(resetRequested).to.be.true; - expect(event) - .property('failure') - .to.match(/second error message/); - done(); + let poolResetRequested = false; + let serverResetRequested = false; + monitor.on('resetConnectionPool', () => (poolResetRequested = true)); + monitor.on('resetServer', () => (serverResetRequested = true)); + + const events = []; + monitor.once('serverHeartbeatSucceeded', () => { + // this is the first successful heartbeat, set the server type + server.description.type = ServerType.Standalone; + + monitor.on('serverHeartbeatStarted', event => events.push(event)); + monitor.on('serverHeartbeatFailed', event => events.push(event)); + monitor.once('resetServer', err => { + expect(poolResetRequested).to.be.true; + expect(serverResetRequested).to.be.true; + expect(events.map(e => e.constructor.name)).to.eql([ + 'ServerHeartbeatStartedEvent', + 'ServerHeartbeatFailedEvent', + 'ServerHeartbeatStartedEvent', + 'ServerHeartbeatFailedEvent' + ]); + + expect(events[1]) + .property('failure') + .to.match(/first error message/); + expect(events[3]) + .property('failure') + .to.match(/second error message/); + expect(events[3]) + .property('failure') + .to.eql(err); + + done(); + }); }); monitor.connect(); From c83af9a9d25b996a106dc5f612bd6c9da147fa56 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Fri, 3 Jan 2020 18:27:01 -0500 Subject: [PATCH 097/130] fix: socket timeout for handshake should be `connectTimeoutMS` --- lib/core/connection/connect.js | 15 +++++++++++++-- lib/core/sdam/monitor.js | 1 + 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/lib/core/connection/connect.js b/lib/core/connection/connect.js index f9fa06c2397..15cfad58601 100644 --- a/lib/core/connection/connect.js +++ b/lib/core/connection/connect.js @@ -111,8 +111,15 @@ function performInitialHandshake(conn, options, _callback) { getSaslSupportedMechs(options) ); + const handshakeOptions = Object.assign({}, options); + + // The handshake technically is a 
monitoring check, so its socket timeout should be connectTimeoutMS + if (options.connectTimeoutMS || options.connectionTimeout) { + handshakeOptions.socketTimeout = options.connectTimeoutMS || options.connectionTimeout; + } + const start = new Date().getTime(); - runCommand(conn, 'admin.$cmd', handshakeDoc, options, (err, ismaster) => { + runCommand(conn, 'admin.$cmd', handshakeDoc, handshakeOptions, (err, ismaster) => { if (err) { callback(err); return; @@ -235,7 +242,11 @@ function makeConnection(family, options, cancellationToken, _callback) { typeof options.keepAliveInitialDelay === 'number' ? options.keepAliveInitialDelay : 300000; const noDelay = typeof options.noDelay === 'boolean' ? options.noDelay : true; const connectionTimeout = - typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 30000; + typeof options.connectionTimeout === 'number' + ? options.connectionTimeout + : typeof options.connectTimeoutMS === 'number' + ? options.connectTimeoutMS + : 30000; const socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; const rejectUnauthorized = typeof options.rejectUnauthorized === 'boolean' ? options.rejectUnauthorized : true; diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index 039e33ecc6b..c8e3d4ad263 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -70,6 +70,7 @@ class Monitor extends EventEmitter { connectionType: Connection }, server.s.options, + this.options, // force BSON serialization options { From fdd519cef54ac3686edee7b252e3ba3f880dcec7 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 4 Jan 2020 09:56:52 -0500 Subject: [PATCH 098/130] chore: update viz utils to display changes to `error` fields --- lib/core/sdam/server_description.js | 2 +- test/tools/utils.js | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/core/sdam/server_description.js b/lib/core/sdam/server_description.js index 98dd460e993..1a26c0609fc 100644 --- a/lib/core/sdam/server_description.js +++ b/lib/core/sdam/server_description.js @@ -68,7 +68,7 @@ class ServerDescription { ); this.address = address; - this.error = options.error || null; + this.error = options.error; this.roundTripTime = options.roundTripTime || -1; this.lastUpdateTime = Date.now(); this.lastWriteDate = ismaster.lastWrite ? 
ismaster.lastWrite.lastWriteDate : null; diff --git a/test/tools/utils.js b/test/tools/utils.js index 2a82fb6bb37..a766d8cda57 100644 --- a/test/tools/utils.js +++ b/test/tools/utils.js @@ -3,6 +3,7 @@ const Logger = require('../../lib/core').Logger; const deprecateOptions = require('../../lib/utils').deprecateOptions; const arrayStrictEqual = require('../../lib/core/utils').arrayStrictEqual; +const errorStrictEqual = require('../../lib/core/utils').errorStrictEqual; const chalk = require('chalk'); const chai = require('chai'); const expect = chai.expect; @@ -59,7 +60,7 @@ ClassWithUndefinedLogger.prototype.getLogger = function() { function diff(lhs, rhs, fields, comparator) { return fields.reduce((diff, field) => { - if (lhs[field] == null || rhs[field] == null) { + if ((lhs[field] == null || rhs[field] == null) && field !== 'error') { return diff; } @@ -88,6 +89,7 @@ function serverDescriptionDiff(lhs, rhs) { ]; return diff(lhs, rhs, simpleFields, (x, y) => x === y) + .concat(diff(lhs, rhs, ['error'], (x, y) => errorStrictEqual(x, y))) .concat(diff(lhs, rhs, arrayFields, (x, y) => arrayStrictEqual(x, y))) .concat(diff(lhs, rhs, objectIdFields, (x, y) => x.equals(y))) .join(',\n'); From 9687b0673125d78737f047a37c5dd6818a430bdf Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 4 Jan 2020 10:35:14 -0500 Subject: [PATCH 099/130] refactor: default `connectTimeoutMS` to spec mandated 10s --- lib/mongo_client.js | 8 ++++---- lib/operations/connect.js | 4 ++-- lib/topologies/mongos.js | 4 ++-- lib/topologies/replset.js | 4 ++-- lib/topologies/server.js | 4 ++-- test/functional/mongo_client.test.js | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/lib/mongo_client.js b/lib/mongo_client.js index eaad74a22ef..5f406eb7e96 100644 --- a/lib/mongo_client.js +++ b/lib/mongo_client.js @@ -97,10 +97,10 @@ const CloseOperation = require('./operations/close'); * @param {boolean} [options.noDelay=true] TCP Connection no delay * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting + * @param {number} [options.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {number} [options.family] Version of IP stack. Can be 4, 6 or null (default). 
* If null, will attempt to connect with IPv6, and will fall back to IPv4 on failure - * @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries * @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies @@ -330,10 +330,10 @@ MongoClient.prototype.isConnected = function(options) { * @param {boolean} [options.noDelay=true] TCP Connection no delay * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled * @param {boolean} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting + * @param {number} [options.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {number} [options.family] Version of IP stack. Can be 4, 6 or null (default). * If null, will attempt to connect with IPv6, and will fall back to IPv4 on failure - * @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries * @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies diff --git a/lib/operations/connect.js b/lib/operations/connect.js index b2ab9196f12..8fdc9e6899f 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -303,7 +303,7 @@ function connect(mongoClient, url, options, callback) { // Check if we have connection and socket timeout set if (_finalOptions.socketTimeoutMS == null) _finalOptions.socketTimeoutMS = 360000; - if (_finalOptions.connectTimeoutMS == null) _finalOptions.connectTimeoutMS = 30000; + if (_finalOptions.connectTimeoutMS == null) _finalOptions.connectTimeoutMS = 10000; if (_finalOptions.retryWrites == null) _finalOptions.retryWrites = true; if (_finalOptions.useRecoveryToken == null) _finalOptions.useRecoveryToken = true; if (_finalOptions.readPreference == null) _finalOptions.readPreference = 'primary'; @@ -786,7 +786,7 @@ function translateOptions(options, translationOptions) { // Set the socket and connection timeouts if (options.socketTimeoutMS == null) options.socketTimeoutMS = 360000; - if (options.connectTimeoutMS == null) options.connectTimeoutMS = 30000; + if (options.connectTimeoutMS == null) options.connectTimeoutMS = 10000; if (!translationOptions.createServers) { return; diff --git a/lib/topologies/mongos.js b/lib/topologies/mongos.js index ec14f48516c..5250a8a5814 100644 --- a/lib/topologies/mongos.js +++ b/lib/topologies/mongos.js @@ -83,8 +83,8 @@ var legalOptionNames = [ * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. 
* @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting - * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting + * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology * @fires Mongos#connect diff --git a/lib/topologies/replset.js b/lib/topologies/replset.js index 44e83d11fea..d78ae13b616 100644 --- a/lib/topologies/replset.js +++ b/lib/topologies/replset.js @@ -93,8 +93,8 @@ var legalOptionNames = [ * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=10000] TCP Connection timeout setting - * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting + * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. * @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed); * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology diff --git a/lib/topologies/server.js b/lib/topologies/server.js index 9bbe4350ec7..2d6c65359dd 100644 --- a/lib/topologies/server.js +++ b/lib/topologies/server.js @@ -85,8 +85,8 @@ var legalOptionNames = [ * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. 
* @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting - * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting + * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out + * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries * @param {boolean} [options.monitoring=true] Triggers the server instance to call ismaster diff --git a/test/functional/mongo_client.test.js b/test/functional/mongo_client.test.js index 786fe678d03..a3878c1e57b 100644 --- a/test/functional/mongo_client.test.js +++ b/test/functional/mongo_client.test.js @@ -366,7 +366,7 @@ describe('MongoClient', function() { var connections = client.topology.connections(); for (var i = 0; i < connections.length; i++) { - test.equal(30000, connections[i].connectionTimeout); + test.equal(10000, connections[i].connectionTimeout); test.equal(360000, connections[i].socketTimeout); } From 34fdea45c937159b2b7e08af9ad571b878146109 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 4 Jan 2020 11:02:38 -0500 Subject: [PATCH 100/130] refactor: give the monitoring connection a useful identifier --- lib/core/sdam/monitor.js | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/core/sdam/monitor.js b/lib/core/sdam/monitor.js index c8e3d4ad263..a5ddff85a39 100644 --- a/lib/core/sdam/monitor.js +++ b/lib/core/sdam/monitor.js @@ -64,6 +64,7 @@ class Monitor extends EventEmitter { this.connectOptions = Object.freeze( Object.assign( { + id: '', host: addressParts[0], port: parseInt(addressParts[1], 10), bson: server.s.bson, From 5319ff9abe064209dec939fb125d05dcb058582d Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 4 Jan 2020 11:14:03 -0500 Subject: [PATCH 101/130] fix: timed out streams should be destroyed on `timeout` event --- lib/cmap/connection.js | 13 +++++++++++++ test/unit/cmap/connection.test.js | 30 ++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index 06476a05324..fbb4a7a5be7 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -54,6 +54,10 @@ class Connection extends EventEmitter { }); stream.on('close', () => { + if (this.closed) { + return; + } + this.closed = true; this[kQueue].forEach(op => op.cb(new MongoNetworkError(`connection ${this.id} to ${this.address} closed`)) @@ -64,6 +68,11 @@ class Connection extends EventEmitter { }); stream.on('timeout', () => { + if (this.closed) { + return; + } + + stream.destroy(); this.closed = true; this[kQueue].forEach(op => op.cb(new MongoNetworkError(`connection ${this.id} to ${this.address} timed out`)) @@ -106,6 +115,10 @@ class Connection extends EventEmitter { return this[kClusterTime]; } + get stream() { + return this[kStream]; + } + markAvailable() { this[kLastUseTime] = Date.now(); } diff --git a/test/unit/cmap/connection.test.js b/test/unit/cmap/connection.test.js index 0fb35cd0d40..d944c6c4099 100644 --- a/test/unit/cmap/connection.test.js +++ 
b/test/unit/cmap/connection.test.js @@ -38,4 +38,34 @@ describe('Connection', function() { } ); }); + + it('should destroy streams which time out', function(done) { + server.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + request.reply(mock.DEFAULT_ISMASTER_36); + } + + // blackhole all other requests + }); + + connect( + Object.assign({ bson: new BSON(), connectionType: Connection }, server.address()), + (err, conn) => { + expect(err).to.not.exist; + expect(conn).to.exist; + + conn.command('$admin.cmd', { ping: 1 }, { socketTimeout: 50 }, (err, result) => { + expect(err).to.exist; + expect(result).to.not.exist; + + expect(conn) + .property('stream') + .property('destroyed').to.be.true; + + done(); + }); + } + ); + }); }); From 611781f96e80c971c27f95cbf80d2f003654414e Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 4 Jan 2020 11:17:27 -0500 Subject: [PATCH 102/130] test: add ability to force quit `sdam_viz` --- test/tools/sdam_viz | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/test/tools/sdam_viz b/test/tools/sdam_viz index 38240a96eac..6b7432038df 100755 --- a/test/tools/sdam_viz +++ b/test/tools/sdam_viz @@ -57,8 +57,16 @@ async function run() { const wait = ms => new Promise(resolve => setTimeout(resolve, ms)); -run().catch(error => console.log('Caught', error)); +let exitRequestCount = 0; process.on('SIGINT', async function() { + exitRequestCount++; + if (exitRequestCount > 3) { + console.log('force quitting...'); + process.exit(1); + } + workloadInterrupt = true; await client.close(); }); + +run().catch(error => console.log('Caught', error)); From 2e1222e7fc2f7ef2d27bcf1cdd007251bd55cb3e Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 4 Jan 2020 11:21:05 -0500 Subject: [PATCH 103/130] test: add a socket timeout to workload generator --- test/tools/sdam_viz | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/tools/sdam_viz b/test/tools/sdam_viz index 6b7432038df..b942ab372c0 100755 --- a/test/tools/sdam_viz +++ b/test/tools/sdam_viz @@ -44,7 +44,7 @@ async function run() { const result = await client .db('test') .collection('test') - .find({}) + .find({}, { socketTimeout: 2000 }) .limit(1) .toArray(); print(`${chalk.yellow('workload')} find completed: ${JSON.stringify(result)}`); From 27a4df2eee8ea6d298317bd00bce7b14b0b177f6 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 4 Jan 2020 16:31:25 -0500 Subject: [PATCH 104/130] chore: bump max test run time in evergreen to 15min --- .evergreen/config.yml | 2 +- .evergreen/config.yml.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 1a83ab81da7..4a178259c60 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1,6 +1,6 @@ stepback: true command_type: system -exec_timeout_secs: 500 +exec_timeout_secs: 900 timeout: - command: shell.exec params: diff --git a/.evergreen/config.yml.in b/.evergreen/config.yml.in index 82654bad400..e43e3eea6f9 100644 --- a/.evergreen/config.yml.in +++ b/.evergreen/config.yml.in @@ -11,7 +11,7 @@ command_type: system # Protect ourself against rogue test case, or curl gone wild, that runs forever # Good rule of thumb: the averageish length a task takes, times 5 # That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 500 # 6 minutes is the longest we'll ever run +exec_timeout_secs: 900 # What to do when evergreen hits the timeout (`post:` tasks are run automatically) 
timeout: From 8a82e3b4ce88be4d4fd4e361922a8b2f538d6976 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 4 Jan 2020 16:48:21 -0500 Subject: [PATCH 105/130] chore: move `bl` to dependencies --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 9562b1c116a..f337fbee536 100644 --- a/package.json +++ b/package.json @@ -24,6 +24,7 @@ "bson-ext": "^2.0.0" }, "dependencies": { + "bl": "^2.2.0", "bson": "^1.1.1", "denque": "^1.4.1", "require_optional": "^1.0.1", @@ -31,7 +32,6 @@ }, "devDependencies": { "bluebird": "3.5.0", - "bl": "^2.2.0", "chai": "^4.1.1", "chai-subset": "^1.6.0", "chalk": "^2.4.2", From 7388dcf2d248dd6be063dd13c9ae8709b12ff50e Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 5 Jan 2020 08:20:34 -0500 Subject: [PATCH 106/130] refactor: no wait queue timeout by default --- lib/cmap/connection_pool.js | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 7e6d4aac341..29d836b27e7 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -90,7 +90,7 @@ function resolveOptions(options, defaults) { * @property {number} [maxPoolSize=100] The maximum number of connections that may be associated with a pool at a given time. This includes in use and available connections. * @property {number} [minPoolSize=0] The minimum number of connections that MUST exist at any moment in a single connection pool. * @property {number} [maxIdleTimeMS] The maximum amount of time a connection should remain idle in the connection pool before being marked idle. - * @property {number} [waitQueueTimeoutMS=10000] The maximum amount of time operation execution should wait for a connection to become available. + * @property {number} [waitQueueTimeoutMS=0] The maximum amount of time operation execution should wait for a connection to become available. The default is 0 which means there is no limit. */ /** @@ -129,7 +129,7 @@ class ConnectionPool extends EventEmitter { minPoolSize: typeof options.minPoolSize === 'number' ? options.minPoolSize : 0, maxIdleTimeMS: typeof options.maxIdleTimeMS === 'number' ? options.maxIdleTimeMS : 0, waitQueueTimeoutMS: - typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 10000, + typeof options.waitQueueTimeoutMS === 'number' ? 
options.waitQueueTimeoutMS : 0, autoEncrypter: options.autoEncrypter }); @@ -168,18 +168,20 @@ class ConnectionPool extends EventEmitter { return; } - const pool = this; - const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; - // add this request to the wait queue const waitQueueMember = { callback }; - waitQueueMember.timer = setTimeout(() => { - waitQueueMember[kCancelled] = true; - waitQueueMember.timer = undefined; - pool.emit('connectionCheckOutFailed', new ConnectionCheckOutFailedEvent(pool, 'timeout')); - waitQueueMember.callback(new WaitQueueTimeoutError(pool)); - }, waitQueueTimeoutMS); + const pool = this; + const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; + if (waitQueueTimeoutMS) { + waitQueueMember.timer = setTimeout(() => { + waitQueueMember[kCancelled] = true; + waitQueueMember.timer = undefined; + + pool.emit('connectionCheckOutFailed', new ConnectionCheckOutFailedEvent(pool, 'timeout')); + waitQueueMember.callback(new WaitQueueTimeoutError(pool)); + }, waitQueueTimeoutMS); + } // place the member at the end of the wait queue this[kWaitQueue].push(waitQueueMember); From f1698486a89e8e019600f083aaea64ae3918c6e6 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 5 Jan 2020 08:54:06 -0500 Subject: [PATCH 107/130] test: schedule workloads to be run in parallel --- test/tools/sdam_viz | 47 ++++++++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/test/tools/sdam_viz b/test/tools/sdam_viz index b942ab372c0..31b69e81453 100755 --- a/test/tools/sdam_viz +++ b/test/tools/sdam_viz @@ -24,7 +24,6 @@ const client = new MongoClient(uri, { useUnifiedTopology: !argv.legacy }); -let workloadInterrupt = false; async function run() { print( `connecting to: ${chalk.bold(uri)} using ${chalk.bold( @@ -36,26 +35,37 @@ async function run() { await client.connect(); if (argv.workload) { - while (!workloadInterrupt) { - await wait(2000); - - try { - print(`${chalk.yellow('workload')} issuing find...`); - const result = await client - .db('test') - .collection('test') - .find({}, { socketTimeout: 2000 }) - .limit(1) - .toArray(); - print(`${chalk.yellow('workload')} find completed: ${JSON.stringify(result)}`); - } catch (e) { - print(`${chalk.yellow('workload')} find failed: ${e.message}`); - } - } + scheduleWorkload(client); } } -const wait = ms => new Promise(resolve => setTimeout(resolve, ms)); +let workloadTimer; +let workloadCounter = 0; +let workloadInterrupt = false; +async function scheduleWorkload(client) { + if (!workloadInterrupt) { + // immediately reschedule work + workloadTimer = setTimeout(() => scheduleWorkload(client), 7000); + } + + const currentWorkload = workloadCounter++; + + try { + print(`${chalk.yellow(`workload#${currentWorkload}`)} issuing find...`); + const result = await client + .db('test') + .collection('test') + .find({}, { socketTimeout: 2000 }) + .limit(1) + .toArray(); + + print( + `${chalk.yellow(`workload#${currentWorkload}`)} find completed: ${JSON.stringify(result)}` + ); + } catch (e) { + print(`${chalk.yellow(`workload#${currentWorkload}`)} find failed: ${e.message}`); + } +} let exitRequestCount = 0; process.on('SIGINT', async function() { @@ -66,6 +76,7 @@ process.on('SIGINT', async function() { } workloadInterrupt = true; + clearTimeout(workloadTimer); await client.close(); }); From cb7154a82e6aedc54df57feb5502ed45399141b4 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 6 Jan 2020 07:22:05 -0500 Subject: [PATCH 108/130] chore: add sinon-chai and mocha-chai for all tests 
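Wiring mocha-sinon and sinon-chai into the shared runner lets any test file use spy assertions in chai syntax, with a per-test sandbox cleaned up automatically. A minimal sketch of the style this enables (not part of this patch; the `logger` object and test names are hypothetical, and `this.sinon` is assumed to be provided by the runner's `require('mocha-sinon')`):

```js
const { expect } = require('chai');

describe('spy assertions via sinon-chai', function() {
  it('verifies a call with chai syntax', function() {
    const logger = { warn: () => {} };

    // mocha-sinon exposes a per-test sandbox as `this.sinon`,
    // so this spy is restored automatically when the test ends
    const spy = this.sinon.spy(logger, 'warn');

    logger.warn('pool is full');

    // sinon-chai adds `called*` assertions to chai's `expect`
    expect(spy).to.have.been.calledOnce;
    expect(spy).to.have.been.calledWith('pool is full');
  });
});
```

Note the use of `function()` rather than arrow functions in `describe`/`it`, so that `this.sinon` is bound correctly.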
--- test/tools/runner/index.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/tools/runner/index.js b/test/tools/runner/index.js index d477b4a0043..bedad422d8a 100644 --- a/test/tools/runner/index.js +++ b/test/tools/runner/index.js @@ -98,3 +98,8 @@ after(() => mock.cleanup()); require('./plugins/deferred'); require('./plugins/session_leak_checker'); require('./plugins/client_leak_checker'); + +// configure mocha and chai +require('mocha-sinon'); +const chai = require('chai'); +chai.use(require('sinon-chai')); From 6e5901cc17be21ab98a02c376e901e4d1b0f3bfa Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 6 Jan 2020 14:38:52 -0500 Subject: [PATCH 109/130] chore: update package lock file --- package-lock.json | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/package-lock.json b/package-lock.json index 21a1dd1a1fe..c2904f6e33f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -290,7 +290,6 @@ "version": "2.2.0", "resolved": "https://registry.npmjs.org/bl/-/bl-2.2.0.tgz", "integrity": "sha512-wbgvOpqopSr7uq6fJrLH8EsvYMJf9gzfo2jCsL2eTy75qXPukA4pCgHamOQkZtY5vmfVtjB+P3LNlMHW5CEZXA==", - "dev": true, "requires": { "readable-stream": "^2.3.5", "safe-buffer": "^5.1.1" @@ -830,8 +829,7 @@ "core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", - "dev": true + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" }, "coveralls": { "version": "2.13.3", @@ -1841,8 +1839,7 @@ "inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "ini": { "version": "1.3.5", @@ -2001,8 +1998,7 @@ "isarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" }, "isexe": { "version": "2.0.0", @@ -2979,8 +2975,7 @@ "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", - "dev": true + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" }, "progress": { "version": "2.0.3", @@ -3142,7 +3137,6 @@ "version": "2.3.6", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", - "dev": true, "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -3156,8 +3150,7 @@ "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true + "integrity": 
"sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" } } }, @@ -3703,7 +3696,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, "requires": { "safe-buffer": "~5.1.0" }, @@ -3711,8 +3703,7 @@ "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" } } }, @@ -3951,8 +3942,7 @@ "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", - "dev": true + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" }, "uuid": { "version": "3.3.3", From aedab636ff004246dbd89479b0041e769658a5c1 Mon Sep 17 00:00:00 2001 From: Keith Rogers <150157+keirog@users.noreply.github.com> Date: Tue, 31 Dec 2019 14:08:10 +0000 Subject: [PATCH 110/130] docs(reference): fix typo in unified-topology ~psuedocode~ pseudocode --- docs/reference/content/reference/unified-topology/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/content/reference/unified-topology/index.md b/docs/reference/content/reference/unified-topology/index.md index 22a729b4e84..f7ceb2f3ddf 100644 --- a/docs/reference/content/reference/unified-topology/index.md +++ b/docs/reference/content/reference/unified-topology/index.md @@ -69,7 +69,7 @@ We think the ambiguity of what it means to be "connected" can lead to far more p ### Server Selection -The psuedocode for operation execution looks something like this: +The pseudocode for operation execution looks something like this: ```js function executeOperation(topology, operation, callback) { From 563ced61cb45a66c69fca3fdc34b63947f8c7b81 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 6 Jan 2020 16:47:52 -0500 Subject: [PATCH 111/130] fix: copy `ssl` option to pool connection options --- lib/cmap/connection_pool.js | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 29d836b27e7..55305c51856 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -46,6 +46,7 @@ const VALID_POOL_OPTIONS = new Set([ 'compression', // node Net options + 'ssl', 'localAddress', 'localPort', 'family', From e9d9fc05a6496d54b3537d7c41b08aaa6e6c1ea7 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 6 Jan 2020 16:49:37 -0500 Subject: [PATCH 112/130] chore: remove argon build for ubuntu 16.04, no csfle support --- .evergreen/config.yml | 7 ------- .evergreen/generate_evergreen_tasks.js | 3 ++- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4a178259c60..969227122a3 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1106,13 +1106,6 @@ buildvariants: NODE_LTS_NAME: boron CLIENT_ENCRYPTION: true tasks: *ref_3 - - name: ubuntu-16.04-argon - display_name: Ubuntu 16.04 Node Argon - run_on: ubuntu1604-test - expansions: - NODE_LTS_NAME: argon - CLIENT_ENCRYPTION: true - tasks: *ref_3 - name: 
ubuntu1604-arm64-small-dubnium display_name: Ubuntu 16.04 (ARM64) Node Dubnium run_on: ubuntu1604-arm64-small diff --git a/.evergreen/generate_evergreen_tasks.js b/.evergreen/generate_evergreen_tasks.js index b3b20012e79..ecff0659dd5 100644 --- a/.evergreen/generate_evergreen_tasks.js +++ b/.evergreen/generate_evergreen_tasks.js @@ -81,7 +81,8 @@ const OPERATING_SYSTEMS = [ display_name: 'Ubuntu 16.04', run_on: 'ubuntu1604-test', mongoVersion: '>=3.2', - clientEncryption: true + clientEncryption: true, + nodeVersions: ['dubnium', 'carbon', 'boron'] }, { name: 'ubuntu1604-arm64-small', From 24c32dd2f7e71ad6ed3f224d958bc4789407e42c Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 7 Jan 2020 09:07:01 -0500 Subject: [PATCH 113/130] refactor: support camel cased `tlsInsecure` --- lib/mongo_client.js | 4 ++-- lib/operations/connect.js | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/mongo_client.js b/lib/mongo_client.js index 5f406eb7e96..090682f163d 100644 --- a/lib/mongo_client.js +++ b/lib/mongo_client.js @@ -87,7 +87,7 @@ const CloseOperation = require('./operations/close'); * @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer *deprecated* use `tls` variants * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. *deprecated* use `tls` variants * @param {boolean} [options.tls=false] Enable TLS connections - * @param {boolean} [options.tlsinsecure=false] Relax TLS constraints, disabling validation + * @param {boolean} [options.tlsInsecure=false] Relax TLS constraints, disabling validation * @param {string} [options.tlsCAFile] A path to file with either a single or bundle of certificate authorities to be considered trusted when making a TLS connection * @param {string} [options.tlsCertificateKeyFile] A path to the client certificate file or the client private key file; in the case that they both are needed, the files should be concatenated * @param {string} [options.tlsCertificateKeyFilePassword] The password to decrypt the client private key to be used for TLS connections @@ -320,7 +320,7 @@ MongoClient.prototype.isConnected = function(options) { * @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer *deprecated* use `tls` variants * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. 
*deprecated* use `tls` variants * @param {boolean} [options.tls=false] Enable TLS connections - * @param {boolean} [options.tlsinsecure=false] Relax TLS constraints, disabling validation + * @param {boolean} [options.tlsInsecure=false] Relax TLS constraints, disabling validation * @param {string} [options.tlsCAFile] A path to file with either a single or bundle of certificate authorities to be considered trusted when making a TLS connection * @param {string} [options.tlsCertificateKeyFile] A path to the client certificate file or the client private key file; in the case that they both are needed, the files should be concatenated * @param {string} [options.tlsCertificateKeyFilePassword] The password to decrypt the client private key to be used for TLS connections diff --git a/lib/operations/connect.js b/lib/operations/connect.js index 8fdc9e6899f..31b1dafbd44 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -143,6 +143,7 @@ const validOptionNames = [ 'autoEncryption', 'driverInfo', 'tls', + 'tlsInsecure', 'tlsinsecure', 'tlsAllowInvalidCertificates', 'tlsAllowInvalidHostnames', From 700d5dcddedffd998a85c6ed54f38cdd27a57d09 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 7 Jan 2020 12:27:55 -0500 Subject: [PATCH 114/130] test: include auth information in generated test connection string --- test/tools/runner/config.js | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/test/tools/runner/config.js b/test/tools/runner/config.js index faca173b6fb..1738ae1e769 100644 --- a/test/tools/runner/config.js +++ b/test/tools/runner/config.js @@ -13,6 +13,7 @@ class NativeConfiguration { this.clientSideEncryption = context.clientSideEncryption; this.options = Object.assign( { + auth: parsedURI.auth, hosts: parsedURI.hosts, host: parsedURI.hosts[0] ? parsedURI.hosts[0].host : 'localhost', port: parsedURI.hosts[0] ? 
parsedURI.hosts[0].port : 27017, @@ -21,6 +22,15 @@ class NativeConfiguration { parsedURI.options ); + const clientOptions = parsedURI.options; + if (clientOptions) { + if (clientOptions.caseTranslate) { + delete clientOptions.caseTranslate; + } + + this.clientOptions = Object.freeze(clientOptions); + } + this.mongo = this.require = require('../../..'); this.writeConcern = function() { return { w: 1 }; @@ -57,6 +67,8 @@ class NativeConfiguration { } newClient(dbOptions, serverOptions) { + serverOptions = Object.assign({}, serverOptions, this.clientOptions); + // support MongoClient contructor form (url, options) for `newClient` if (typeof dbOptions === 'string') { return new MongoClient( @@ -85,15 +97,25 @@ class NativeConfiguration { Object.assign(dbOptions, { replicaSet: this.options.replicaSet, auto_reconnect: false }); } - const connectionString = url.format({ + const urlOptions = { protocol: 'mongodb', slashes: true, hostname: dbHost, port: dbPort, query: dbOptions, pathname: '/' - }); + }; + + if (this.options.auth) { + let authSection = this.options.auth.username; + if (this.options.auth.password) { + authSection = `${authSection}:${this.options.auth.password}`; + } + + urlOptions.auth = authSection; + } + const connectionString = url.format(urlOptions); return new MongoClient(connectionString, serverOptions); } From 95a772ec149edb78369c9f89108166f5ecddd685 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 8 Jan 2020 08:47:46 -0500 Subject: [PATCH 115/130] fix: remove servers with me mismatch in `updateRsFromPrimary` NODE-2417 --- lib/core/sdam/topology_description.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/core/sdam/topology_description.js b/lib/core/sdam/topology_description.js index ba6a2507ee6..34ce8ace9a8 100644 --- a/lib/core/sdam/topology_description.js +++ b/lib/core/sdam/topology_description.js @@ -287,7 +287,10 @@ function updateRsFromPrimary( maxElectionId ) { setName = setName || serverDescription.setName; - if (setName !== serverDescription.setName) { + if ( + setName !== serverDescription.setName || + (serverDescription.me && serverDescription.address !== serverDescription.me) + ) { serverDescriptions.delete(serverDescription.address); return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId]; } From 903208f473e8416183f95f9a54b6416ee6e65706 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 8 Jan 2020 09:05:56 -0500 Subject: [PATCH 116/130] Revert "test: include auth information in generated test connection string" This reverts commit 700d5dcddedffd998a85c6ed54f38cdd27a57d09. --- test/tools/runner/config.js | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/test/tools/runner/config.js b/test/tools/runner/config.js index 1738ae1e769..faca173b6fb 100644 --- a/test/tools/runner/config.js +++ b/test/tools/runner/config.js @@ -13,7 +13,6 @@ class NativeConfiguration { this.clientSideEncryption = context.clientSideEncryption; this.options = Object.assign( { - auth: parsedURI.auth, hosts: parsedURI.hosts, host: parsedURI.hosts[0] ? parsedURI.hosts[0].host : 'localhost', port: parsedURI.hosts[0] ? 
parsedURI.hosts[0].port : 27017, @@ -22,15 +21,6 @@ class NativeConfiguration { parsedURI.options ); - const clientOptions = parsedURI.options; - if (clientOptions) { - if (clientOptions.caseTranslate) { - delete clientOptions.caseTranslate; - } - - this.clientOptions = Object.freeze(clientOptions); - } - this.mongo = this.require = require('../../..'); this.writeConcern = function() { return { w: 1 }; @@ -67,8 +57,6 @@ class NativeConfiguration { } newClient(dbOptions, serverOptions) { - serverOptions = Object.assign({}, serverOptions, this.clientOptions); - // support MongoClient contructor form (url, options) for `newClient` if (typeof dbOptions === 'string') { return new MongoClient( @@ -97,25 +85,15 @@ class NativeConfiguration { Object.assign(dbOptions, { replicaSet: this.options.replicaSet, auto_reconnect: false }); } - const urlOptions = { + const connectionString = url.format({ protocol: 'mongodb', slashes: true, hostname: dbHost, port: dbPort, query: dbOptions, pathname: '/' - }; - - if (this.options.auth) { - let authSection = this.options.auth.username; - if (this.options.auth.password) { - authSection = `${authSection}:${this.options.auth.password}`; - } - - urlOptions.auth = authSection; - } + }); - const connectionString = url.format(urlOptions); return new MongoClient(connectionString, serverOptions); } From c528a6695e2c23e99e8692102717c3e6880924a9 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Wed, 8 Jan 2020 16:43:27 -0500 Subject: [PATCH 117/130] Revert "fix: remove servers with me mismatch in `updateRsFromPrimary`" This reverts commit 95a772ec149edb78369c9f89108166f5ecddd685. --- lib/core/sdam/topology_description.js | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/core/sdam/topology_description.js b/lib/core/sdam/topology_description.js index 34ce8ace9a8..ba6a2507ee6 100644 --- a/lib/core/sdam/topology_description.js +++ b/lib/core/sdam/topology_description.js @@ -287,10 +287,7 @@ function updateRsFromPrimary( maxElectionId ) { setName = setName || serverDescription.setName; - if ( - setName !== serverDescription.setName || - (serverDescription.me && serverDescription.address !== serverDescription.me) - ) { + if (setName !== serverDescription.setName) { serverDescriptions.delete(serverDescription.address); return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId]; } From 35d02747e5daca6bbeb0fbec7b0192b77ccc5ac9 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sat, 11 Jan 2020 08:22:42 -0500 Subject: [PATCH 118/130] fix: report the correct platform in client metadata This relatively large refactoring ultimately corrects the fact that the platform was not always reported correctly in client metadata during initial handshake. 
It also takes the steps to localize the creation of that metadata to one place, and makes the metadata consistent across legacy and unified topologies NODE-2418 --- lib/cmap/connection_pool.js | 3 +- lib/core/connection/connect.js | 4 +- lib/core/sdam/server.js | 3 - lib/core/sdam/topology.js | 19 ++++--- lib/core/topologies/mongos.js | 18 ++---- lib/core/topologies/replset.js | 15 ++--- lib/core/topologies/server.js | 13 +++-- lib/core/topologies/shared.js | 60 -------------------- lib/core/utils.js | 50 +++++++++++++++- lib/topologies/mongos.js | 7 --- lib/topologies/native_topology.js | 5 -- lib/topologies/replset.js | 7 --- lib/topologies/server.js | 7 --- lib/topologies/topology_base.js | 29 ++-------- lib/utils.js | 1 - test/functional/core/client_metadata.test.js | 12 ++-- test/functional/mongo_client.test.js | 4 +- test/unit/client_metadata.test.js | 51 +++++++++++++++++ test/unit/core/connect.test.js | 29 ---------- 19 files changed, 143 insertions(+), 194 deletions(-) create mode 100644 test/unit/client_metadata.test.js diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 55305c51856..829075aca60 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -131,7 +131,8 @@ class ConnectionPool extends EventEmitter { maxIdleTimeMS: typeof options.maxIdleTimeMS === 'number' ? options.maxIdleTimeMS : 0, waitQueueTimeoutMS: typeof options.waitQueueTimeoutMS === 'number' ? options.waitQueueTimeoutMS : 0, - autoEncrypter: options.autoEncrypter + autoEncrypter: options.autoEncrypter, + metadata: options.metadata }); if (options.minSize > options.maxSize) { diff --git a/lib/core/connection/connect.js b/lib/core/connection/connect.js index 15cfad58601..2922c61b45a 100644 --- a/lib/core/connection/connect.js +++ b/lib/core/connection/connect.js @@ -3,11 +3,11 @@ const net = require('net'); const tls = require('tls'); const Connection = require('./connection'); const Query = require('./commands').Query; -const createClientInfo = require('../topologies/shared').createClientInfo; const MongoError = require('../error').MongoError; const MongoNetworkError = require('../error').MongoNetworkError; const defaultAuthProviders = require('../auth/defaultAuthProviders').defaultAuthProviders; const WIRE_CONSTANTS = require('../wireprotocol/constants'); +const makeClientMetadata = require('../utils').makeClientMetadata; const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION; const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION; const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION; @@ -105,7 +105,7 @@ function performInitialHandshake(conn, options, _callback) { const handshakeDoc = Object.assign( { ismaster: true, - client: createClientInfo(options), + client: options.metadata || makeClientMetadata(options), compression: compressors }, getSaslSupportedMechs(options) diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index 1935c84dd2a..7855c2a09da 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -4,7 +4,6 @@ const ConnectionPool = require('../../cmap/connection_pool').ConnectionPool; const MongoError = require('../error').MongoError; const relayEvents = require('../utils').relayEvents; const BSON = require('../connection/utils').retrieveBSON(); -const createClientInfo = require('../topologies/shared').createClientInfo; const Logger = require('../connection/logger'); const ServerDescription = require('./server_description').ServerDescription; const 
ReadPreference = require('../topologies/read_preference'); @@ -99,8 +98,6 @@ class Server extends EventEmitter { BSON.Symbol, BSON.Timestamp ]), - // client metadata for the initial handshake - clientInfo: createClientInfo(options), // the server state state: STATE_CLOSED, credentials: options.credentials, diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index 02a65ac6b17..2fbefee707f 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -16,7 +16,6 @@ const createCompressionInfo = require('../topologies/shared').createCompressionI const isRetryableError = require('../error').isRetryableError; const isSDAMUnrecoverableError = require('../error').isSDAMUnrecoverableError; const ClientSession = require('../sessions').ClientSession; -const createClientInfo = require('../topologies/shared').createClientInfo; const MongoError = require('../error').MongoError; const resolveClusterTime = require('../topologies/shared').resolveClusterTime; const SrvPoller = require('./srv_polling').SrvPoller; @@ -25,6 +24,7 @@ const makeStateMachine = require('../utils').makeStateMachine; const eachAsync = require('../utils').eachAsync; const emitDeprecationWarning = require('../../utils').emitDeprecationWarning; const ServerSessionPool = require('../sessions').ServerSessionPool; +const makeClientMetadata = require('../utils').makeClientMetadata; const common = require('./common'); const drainTimerQueue = common.drainTimerQueue; @@ -119,6 +119,13 @@ class Topology extends EventEmitter { } options = Object.assign({}, common.TOPOLOGY_DEFAULTS, options); + options = Object.freeze( + Object.assign(options, { + metadata: makeClientMetadata(options), + compression: { compressors: createCompressionInfo(options) } + }) + ); + DEPRECATED_OPTIONS.forEach(optionName => { if (options[optionName]) { emitDeprecationWarning( @@ -196,12 +203,6 @@ class Topology extends EventEmitter { connectionTimers: new Set() }; - // amend options for server instance creation - this.s.options.compression = { compressors: createCompressionInfo(options) }; - - // add client info - this.s.clientInfo = createClientInfo(options); - if (options.srvHost) { this.s.srvPoller = options.srvPoller || @@ -705,8 +706,8 @@ class Topology extends EventEmitter { return new CursorClass(topology, ns, cmd, options); } - get clientInfo() { - return this.s.clientInfo; + get clientMetadata() { + return this.s.options.metadata; } isConnected() { diff --git a/lib/core/topologies/mongos.js b/lib/core/topologies/mongos.js index 681b01fd70e..29371931af7 100644 --- a/lib/core/topologies/mongos.js +++ b/lib/core/topologies/mongos.js @@ -8,16 +8,15 @@ const Logger = require('../connection/logger'); const retrieveBSON = require('../connection/utils').retrieveBSON; const MongoError = require('../error').MongoError; const Server = require('./server'); -const clone = require('./shared').clone; const diff = require('./shared').diff; const cloneOptions = require('./shared').cloneOptions; -const createClientInfo = require('./shared').createClientInfo; const SessionMixins = require('./shared').SessionMixins; const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; const relayEvents = require('../utils').relayEvents; const isRetryableError = require('../error').isRetryableError; const BSON = retrieveBSON(); const getMMAPError = require('./shared').getMMAPError; +const makeClientMetadata = require('../utils').makeClientMetadata; /** * @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology 
and is @@ -116,7 +115,7 @@ var Mongos = function(seedlist, options) { // Internal state this.s = { - options: Object.assign({}, options), + options: Object.assign({ metadata: makeClientMetadata(options) }, options), // BSON instance bson: options.bson || @@ -153,14 +152,9 @@ var Mongos = function(seedlist, options) { // Are we running in debug mode debug: typeof options.debug === 'boolean' ? options.debug : false, // localThresholdMS - localThresholdMS: options.localThresholdMS || 15, - // Client info - clientInfo: createClientInfo(options) + localThresholdMS: options.localThresholdMS || 15 }; - // Set the client info - this.s.options.clientInfo = createClientInfo(options); - // Log info warning if the socketTimeout < haInterval as it will cause // a lot of recycled connections to happen. if ( @@ -265,8 +259,7 @@ Mongos.prototype.connect = function(options) { Object.assign({}, self.s.options, x, options, { reconnect: false, monitoring: false, - parent: self, - clientInfo: clone(self.s.clientInfo) + parent: self }) ); @@ -607,8 +600,7 @@ function reconnectProxies(self, proxies, callback) { port: parseInt(_server.name.split(':')[1], 10), reconnect: false, monitoring: false, - parent: self, - clientInfo: clone(self.s.clientInfo) + parent: self }) ); diff --git a/lib/core/topologies/replset.js b/lib/core/topologies/replset.js index 0f03e9940de..b289d59a345 100644 --- a/lib/core/topologies/replset.js +++ b/lib/core/topologies/replset.js @@ -10,10 +10,8 @@ const Logger = require('../connection/logger'); const MongoError = require('../error').MongoError; const Server = require('./server'); const ReplSetState = require('./replset_state'); -const clone = require('./shared').clone; const Timeout = require('./shared').Timeout; const Interval = require('./shared').Interval; -const createClientInfo = require('./shared').createClientInfo; const SessionMixins = require('./shared').SessionMixins; const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; const relayEvents = require('../utils').relayEvents; @@ -21,6 +19,7 @@ const isRetryableError = require('../error').isRetryableError; const BSON = retrieveBSON(); const calculateDurationInMs = require('../utils').calculateDurationInMs; const getMMAPError = require('./shared').getMMAPError; +const makeClientMetadata = require('../utils').makeClientMetadata; // // States @@ -140,7 +139,7 @@ var ReplSet = function(seedlist, options) { // Internal state this.s = { - options: Object.assign({}, options), + options: Object.assign({ metadata: makeClientMetadata(options) }, options), // BSON instance bson: options.bson || @@ -187,9 +186,7 @@ var ReplSet = function(seedlist, options) { // Connect function options passed in connectOptions: {}, // Are we running in debug mode - debug: typeof options.debug === 'boolean' ? options.debug : false, - // Client info - clientInfo: createClientInfo(options) + debug: typeof options.debug === 'boolean' ? 
options.debug : false }; // Add handler for topology change @@ -369,8 +366,7 @@ function connectNewServers(self, servers, callback) { port: parseInt(_server.split(':')[1], 10), reconnect: false, monitoring: false, - parent: self, - clientInfo: clone(self.s.clientInfo) + parent: self }) ); @@ -918,8 +914,7 @@ ReplSet.prototype.connect = function(options) { Object.assign({}, self.s.options, x, options, { reconnect: false, monitoring: false, - parent: self, - clientInfo: clone(self.s.clientInfo) + parent: self }) ); }); diff --git a/lib/core/topologies/server.js b/lib/core/topologies/server.js index c81d5e8e40c..6f6de12eaa7 100644 --- a/lib/core/topologies/server.js +++ b/lib/core/topologies/server.js @@ -13,13 +13,13 @@ var inherits = require('util').inherits, wireProtocol = require('../wireprotocol'), CoreCursor = require('../cursor').CoreCursor, sdam = require('./shared'), - createClientInfo = require('./shared').createClientInfo, createCompressionInfo = require('./shared').createCompressionInfo, resolveClusterTime = require('./shared').resolveClusterTime, SessionMixins = require('./shared').SessionMixins, relayEvents = require('../utils').relayEvents; const collationNotSupported = require('../utils').collationNotSupported; +const makeClientMetadata = require('../utils').makeClientMetadata; // Used for filtering out fields for loggin var debugFields = [ @@ -120,7 +120,7 @@ var Server = function(options) { // Internal state this.s = { // Options - options: options, + options: Object.assign({ metadata: makeClientMetadata(options) }, options), // Logger logger: Logger('Server', options), // Factory overrides @@ -175,8 +175,6 @@ var Server = function(options) { this.initialConnect = true; // Default type this._type = 'server'; - // Set the client info - this.clientInfo = createClientInfo(options); // Max Stalleness values // last time we updated the ismaster state @@ -212,6 +210,13 @@ Object.defineProperty(Server.prototype, 'logicalSessionTimeoutMinutes', { } }); +Object.defineProperty(Server.prototype, 'clientMetadata', { + enumerable: true, + get: function() { + return this.s.options.metadata; + } +}); + // In single server deployments we track the clusterTime directly on the topology, however // in Mongos and ReplSet deployments we instead need to delegate the clusterTime up to the // tracking objects so we can ensure we are gossiping the maximum time received from the diff --git a/lib/core/topologies/shared.js b/lib/core/topologies/shared.js index d69e88bf40d..c0d0f14d69c 100644 --- a/lib/core/topologies/shared.js +++ b/lib/core/topologies/shared.js @@ -1,8 +1,5 @@ 'use strict'; - -const os = require('os'); const ReadPreference = require('./read_preference'); -const Buffer = require('safe-buffer').Buffer; const TopologyType = require('../sdam/common').TopologyType; const MongoError = require('../error').MongoError; @@ -18,62 +15,6 @@ function emitSDAMEvent(self, event, description) { } } -// Get package.json variable -const driverVersion = require('../../../package.json').version; -const nodejsVersion = `'Node.js ${process.version}, ${os.endianness}`; -const type = os.type(); -const name = process.platform; -const architecture = process.arch; -const release = os.release(); - -function createClientInfo(options) { - const clientInfo = options.clientInfo - ? 
clone(options.clientInfo) - : { - driver: { - name: 'nodejs', - version: driverVersion - }, - os: { - type: type, - name: name, - architecture: architecture, - version: release - } - }; - - if (options.useUnifiedTopology) { - clientInfo.platform = `${nodejsVersion} (${options.useUnifiedTopology ? 'unified' : 'legacy'})`; - } - - // Do we have an application specific string - if (options.appname) { - // Cut at 128 bytes - var buffer = Buffer.from(options.appname); - // Return the truncated appname - var appname = buffer.length > 128 ? buffer.slice(0, 128).toString('utf8') : options.appname; - // Add to the clientInfo - clientInfo.application = { name: appname }; - } - - // support optionally provided wrapping driver info - if (options.driverInfo) { - if (options.driverInfo.name) { - clientInfo.driver.name = `${clientInfo.driver.name}|${options.driverInfo.name}`; - } - - if (options.driverInfo.version) { - clientInfo.driver.version = `${clientInfo.driver.version}|${options.driverInfo.version}`; - } - - if (options.driverInfo.platform) { - clientInfo.platform = `${clientInfo.platform}|${options.driverInfo.platform}`; - } - } - - return clientInfo; -} - function createCompressionInfo(options) { if (!options.compression || !options.compression.compressors) { return []; @@ -475,7 +416,6 @@ module.exports.getTopologyType = getTopologyType; module.exports.emitServerDescriptionChanged = emitServerDescriptionChanged; module.exports.emitTopologyDescriptionChanged = emitTopologyDescriptionChanged; module.exports.cloneOptions = cloneOptions; -module.exports.createClientInfo = createClientInfo; module.exports.createCompressionInfo = createCompressionInfo; module.exports.clone = clone; module.exports.diff = diff; diff --git a/lib/core/utils.js b/lib/core/utils.js index 9e71f09f473..d9f487db01e 100644 --- a/lib/core/utils.js +++ b/lib/core/utils.js @@ -1,5 +1,5 @@ 'use strict'; - +const os = require('os'); const crypto = require('crypto'); const requireOptional = require('require_optional'); @@ -210,6 +210,51 @@ function makeStateMachine(stateTable) { }; } +function makeClientMetadata(options) { + options = options || {}; + + const metadata = { + driver: { + name: 'nodejs', + version: require('../../package.json').version + }, + os: { + type: os.type(), + name: process.platform, + architecture: process.arch, + version: os.release() + }, + platform: `'Node.js ${process.version}, ${os.endianness} (${ + options.useUnifiedTopology ? 'unified' : 'legacy' + })` + }; + + // support optionally provided wrapping driver info + if (options.driverInfo) { + if (options.driverInfo.name) { + metadata.driver.name = `${metadata.driver.name}|${options.driverInfo.name}`; + } + + if (options.driverInfo.version) { + metadata.version = `${metadata.driver.version}|${options.driverInfo.version}`; + } + + if (options.driverInfo.platform) { + metadata.platform = `${metadata.platform}|${options.driverInfo.platform}`; + } + } + + if (options.appname) { + // MongoDB requires the appname not exceed a byte length of 128 + const buffer = Buffer.from(options.appname); + metadata.application = { + name: buffer.length > 128 ? 
buffer.slice(0, 128).toString('utf8') : options.appname + }; + } + + return metadata; +} + module.exports = { uuidV4, calculateDurationInMs, @@ -224,5 +269,6 @@ module.exports = { arrayStrictEqual, tagsStrictEqual, errorStrictEqual, - makeStateMachine + makeStateMachine, + makeClientMetadata }; diff --git a/lib/topologies/mongos.js b/lib/topologies/mongos.js index 5250a8a5814..10e66d2151b 100644 --- a/lib/topologies/mongos.js +++ b/lib/topologies/mongos.js @@ -168,13 +168,6 @@ class Mongos extends TopologyBase { // Translate all the options to the core types clonedOptions = translateOptions(clonedOptions, socketOptions); - // Build default client information - clonedOptions.clientInfo = this.clientInfo; - // Do we have an application specific string - if (options.appname) { - clonedOptions.clientInfo.application = { name: options.appname }; - } - // Internal state this.s = { // Create the Mongos diff --git a/lib/topologies/native_topology.js b/lib/topologies/native_topology.js index ce53f7f3e96..778ddc9fab7 100644 --- a/lib/topologies/native_topology.js +++ b/lib/topologies/native_topology.js @@ -35,11 +35,6 @@ class NativeTopology extends Topology { clonedOptions = translateOptions(clonedOptions, socketOptions); super(servers, clonedOptions); - - // Do we have an application specific string - if (options.appname) { - this.s.clientInfo.application = { name: options.appname }; - } } capabilities() { diff --git a/lib/topologies/replset.js b/lib/topologies/replset.js index d78ae13b616..69df26d19e0 100644 --- a/lib/topologies/replset.js +++ b/lib/topologies/replset.js @@ -175,13 +175,6 @@ class ReplSet extends TopologyBase { // Translate all the options to the core types clonedOptions = translateOptions(clonedOptions, socketOptions); - // Build default client information - clonedOptions.clientInfo = this.clientInfo; - // Do we have an application specific string - if (options.appname) { - clonedOptions.clientInfo.application = { name: options.appname }; - } - // Create the ReplSet var coreTopology = new CReplSet(seedlist, clonedOptions); diff --git a/lib/topologies/server.js b/lib/topologies/server.js index 2d6c65359dd..3079cb9953e 100644 --- a/lib/topologies/server.js +++ b/lib/topologies/server.js @@ -168,13 +168,6 @@ class Server extends TopologyBase { // Translate all the options to the core types clonedOptions = translateOptions(clonedOptions, socketOptions); - // Build default client information - clonedOptions.clientInfo = this.clientInfo; - // Do we have an application specific string - if (options.appname) { - clonedOptions.clientInfo.application = { name: options.appname }; - } - // Define the internal properties this.s = { // Create an instance of a server instance from core module diff --git a/lib/topologies/topology_base.js b/lib/topologies/topology_base.js index e74cb9ff601..967b4cd4627 100644 --- a/lib/topologies/topology_base.js +++ b/lib/topologies/topology_base.js @@ -3,7 +3,6 @@ const EventEmitter = require('events'), MongoError = require('../core').MongoError, f = require('util').format, - os = require('os'), translateReadPreference = require('../utils').translateReadPreference, ClientSession = require('../core').Sessions.ClientSession; @@ -254,33 +253,9 @@ var ServerCapabilities = function(ismaster) { setup_get_property(this, 'commandsTakeCollation', commandsTakeCollation); }; -// Get package.json variable -const driverVersion = require('../../package.json').version, - nodejsversion = f('Node.js %s, %s', process.version, os.endianness()), - type = os.type(), - name = 
process.platform, - architecture = process.arch, - release = os.release(); - class TopologyBase extends EventEmitter { constructor() { super(); - - // Build default client information - this.clientInfo = { - driver: { - name: 'nodejs', - version: driverVersion - }, - os: { - type: type, - name: name, - architecture: architecture, - version: release - }, - platform: nodejsversion - }; - this.setMaxListeners(Infinity); } @@ -304,6 +279,10 @@ class TopologyBase extends EventEmitter { return this.s.coreTopology.endSessions(sessions, callback); } + get clientMetadata() { + return this.s.coreTopology.s.options.metadata; + } + // Server capabilities capabilities() { if (this.s.sCapabilities) return this.s.sCapabilities; diff --git a/lib/utils.js b/lib/utils.js index 052bcabdade..dd6cbe8ce88 100644 --- a/lib/utils.js +++ b/lib/utils.js @@ -1,5 +1,4 @@ 'use strict'; - const MongoError = require('./core/error').MongoError; const ReadPreference = require('./core/topologies/read_preference'); const WriteConcern = require('./write_concern'); diff --git a/test/functional/core/client_metadata.test.js b/test/functional/core/client_metadata.test.js index 7bdcf014457..62089f36f50 100644 --- a/test/functional/core/client_metadata.test.js +++ b/test/functional/core/client_metadata.test.js @@ -22,7 +22,7 @@ describe('Client metadata tests', function() { } ); - expect(server.clientInfo.application.name).to.equal('My application name'); + expect(server.clientMetadata.application.name).to.equal('My application name'); done(); } }); @@ -53,9 +53,8 @@ describe('Client metadata tests', function() { server.on('connect', function(_server) { _server.s.replicaSetState.allServers().forEach(function(x) { - // console.dir(x.clientInfo) - expect(x.clientInfo.application.name).to.equal('My application name'); - expect(x.clientInfo.platform.split('mongodb-core').length).to.equal(2); + expect(x.clientMetadata.application.name).to.equal('My application name'); + expect(x.clientMetadata.platform.split('mongodb-core').length).to.equal(2); }); _server.destroy(done); @@ -86,9 +85,8 @@ describe('Client metadata tests', function() { // Add event listeners _server.once('connect', function(server) { server.connectedProxies.forEach(function(x) { - // console.dir(x.clientInfo) - expect(x.clientInfo.application.name).to.equal('My application name'); - expect(x.clientInfo.platform.split('mongodb-core').length).to.equal(2); + expect(x.clientMetadata.application.name).to.equal('My application name'); + expect(x.clientMetadata.platform.split('mongodb-core').length).to.equal(2); }); server.destroy(done); diff --git a/test/functional/mongo_client.test.js b/test/functional/mongo_client.test.js index a3878c1e57b..c46c81aea9d 100644 --- a/test/functional/mongo_client.test.js +++ b/test/functional/mongo_client.test.js @@ -642,7 +642,7 @@ describe('MongoClient', function() { const client = configuration.newClient(url); client.connect(function(err, client) { test.equal(null, err); - test.equal('hello world', client.topology.clientInfo.application.name); + test.equal('hello world', client.topology.clientMetadata.application.name); client.close(done); }); @@ -664,7 +664,7 @@ describe('MongoClient', function() { const client = configuration.newClient(url, { appname: 'hello world' }); client.connect(err => { test.equal(null, err); - test.equal('hello world', client.topology.clientInfo.application.name); + test.equal('hello world', client.topology.clientMetadata.application.name); client.close(done); }); diff --git a/test/unit/client_metadata.test.js 
b/test/unit/client_metadata.test.js new file mode 100644 index 00000000000..21b51274189 --- /dev/null +++ b/test/unit/client_metadata.test.js @@ -0,0 +1,51 @@ +'use strict'; +const mock = require('mongodb-mock-server'); +const expect = require('chai').expect; + +describe('Client Metadata', function() { + let mockServer; + before(() => mock.createServer().then(server => (mockServer = server))); + after(() => mock.cleanup()); + + it('should report the correct platform in client metadata', function(done) { + const ismasters = []; + mockServer.setMessageHandler(request => { + const doc = request.document; + if (doc.ismaster) { + ismasters.push(doc); + request.reply(mock.DEFAULT_ISMASTER); + } else { + request.reply({ ok: 1 }); + } + }); + + const isUnifiedTopology = this.configuration.usingUnifiedTopology(); + const client = this.configuration.newClient(`mongodb://${mockServer.uri()}/`); + client.connect(err => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + client.db().command({ ping: 1 }, err => { + expect(err).to.not.exist; + + if (isUnifiedTopology) { + expect(ismasters).to.have.length.greaterThan(1); + ismasters.forEach(ismaster => + expect(ismaster) + .nested.property('client.platform') + .to.match(/unified/) + ); + } else { + expect(ismasters).to.have.length(1); + ismasters.forEach(ismaster => + expect(ismaster) + .nested.property('client.platform') + .to.match(/legacy/) + ); + } + + done(); + }); + }); + }); +}); diff --git a/test/unit/core/connect.test.js b/test/unit/core/connect.test.js index a3db4940c60..312553771cc 100644 --- a/test/unit/core/connect.test.js +++ b/test/unit/core/connect.test.js @@ -100,35 +100,6 @@ describe('Connect Tests', function() { }); }); - it( - 'should report the correct metadata for unified topology', - { requires: { unifiedTopology: true, topology: ['single'] } }, - function(done) { - let ismaster; - test.server.setMessageHandler(request => { - const doc = request.document; - const $clusterTime = genClusterTime(Date.now()); - if (doc.ismaster) { - ismaster = doc; - request.reply( - Object.assign({}, mock.DEFAULT_ISMASTER, { - $clusterTime, - arbiterOnly: true - }) - ); - } - }); - - const topology = this.configuration.newTopology(test.connectOptions); - topology.connect(test.connectOptions, err => { - expect(err).to.not.exist; - const platform = ismaster.client.platform; - expect(platform).to.match(/unified/); - topology.close(done); - }); - } - ); - it('should allow a cancellaton token', function(done) { const cancellationToken = new EventEmitter(); setTimeout(() => cancellationToken.emit('cancel'), 500); From ec3d87b57db242ed88997ec48c0b41eb4cc1201c Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 12 Jan 2020 11:15:44 -0500 Subject: [PATCH 119/130] test: allow all test files to use custom chai mongodb spec matcher --- test/tools/runner/index.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/tools/runner/index.js b/test/tools/runner/index.js index bedad422d8a..d70967837b3 100644 --- a/test/tools/runner/index.js +++ b/test/tools/runner/index.js @@ -103,3 +103,7 @@ require('./plugins/client_leak_checker'); require('mocha-sinon'); const chai = require('chai'); chai.use(require('sinon-chai')); +chai.use(require('../../functional/spec-runner/matcher').default); +chai.config.includeStack = true; +chai.config.showDiff = true; +chai.config.truncateThreshold = 0; From 95414108f134968f7783ee75e9cf7052486c761a Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Sun, 12 Jan 2020 11:16:11 -0500 Subject: [PATCH 120/130] 
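With command monitoring enabled, the driver's own `ismaster` commands (handshakes and, under the unified topology, heartbeats) also surface as `commandStarted` events, so spec assertions that count or match events need to skip them. A minimal sketch of the filtering pattern this patch applies in the spec runner, assuming a `client` created with `monitorCommands: true`:

```js
// Collect APM events for later assertions, skipping the driver's own
// `ismaster` traffic so it cannot skew the expected event counts.
const events = [];
client.on('commandStarted', event => {
  if (event.commandName !== 'ismaster') {
    events.push(event);
  }
});
```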
test: ignore ismaster events in change streams spec tests --- test/functional/change_stream_spec.test.js | 48 ++++------------------ 1 file changed, 7 insertions(+), 41 deletions(-) diff --git a/test/functional/change_stream_spec.test.js b/test/functional/change_stream_spec.test.js index 0fb707f8eb8..251b84fad49 100644 --- a/test/functional/change_stream_spec.test.js +++ b/test/functional/change_stream_spec.test.js @@ -48,7 +48,9 @@ describe('Change Stream Spec', function() { ctx.database = ctx.client.db(sDB); ctx.collection = ctx.database.collection(sColl); - ctx.client.on('commandStarted', e => _events.push(e)); + ctx.client.on('commandStarted', e => { + if (e.commandName !== 'ismaster') _events.push(e); + }); }); }); @@ -124,7 +126,7 @@ describe('Change Stream Spec', function() { if (result.success) { expect(value).to.have.a.lengthOf(result.success.length); - assertEquality(value, result.success); + expect(value).to.matchMongoSpec(result.success); } }; } @@ -137,7 +139,7 @@ describe('Change Stream Spec', function() { throw err; } - assertEquality(err, result.error); + expect(err).to.matchMongoSpec(result.error); }; } @@ -154,7 +156,8 @@ describe('Change Stream Spec', function() { `Expected there to be an APM event at index ${idx}, but there was none` ); } - assertEquality(events[idx], expected); + + expect(events[idx]).to.matchMongoSpec(expected); }); }; } @@ -254,41 +257,4 @@ describe('Change Stream Spec', function() { } return () => target[command].apply(target, args); } - - function assertEquality(actual, expected) { - try { - _assertEquality(actual, expected); - } catch (e) { - console.dir(actual, { depth: 999 }); - console.dir(expected, { depth: 999 }); - throw e; - } - } - - function _assertEquality(actual, expected) { - try { - if (expected === '42' || expected === 42) { - expect(actual).to.exist; - return; - } - - const expectedType = - expected && expected.code ? 'error' : Array.isArray(expected) ? 'array' : typeof expected; - expect(actual).to.be.a(expectedType); - - if (expected == null) { - expect(actual).to.not.exist; - } else if (Array.isArray(expected)) { - expected.forEach((ex, idx) => _assertEquality(actual[idx], ex)); - } else if (typeof expected === 'object') { - for (let i in expected) { - _assertEquality(actual[i], expected[i]); - } - } else { - expect(actual).to.equal(expected); - } - } catch (e) { - throw e; - } - } }); From 9bd360c63cba8fbe23fa8ec2882d65caadb7ce93 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 13 Jan 2020 10:36:55 -0500 Subject: [PATCH 121/130] feat: include `connectionId` for APM with new CMAP connection pool With the new CMAP pool we have the ability to include a connection id in all of our APM messages, allowing users to correlate APM events with CMAP events for greater traceability. NODE-2419 --- lib/core/connection/apm.js | 29 +++++++++++++++++++++++------ test/functional/apm.test.js | 20 +++++++++++++++++--- 2 files changed, 40 insertions(+), 9 deletions(-) diff --git a/lib/core/connection/apm.js b/lib/core/connection/apm.js index 3e01bb63e8a..82858efda76 100644 --- a/lib/core/connection/apm.js +++ b/lib/core/connection/apm.js @@ -25,6 +25,7 @@ const collectionName = command => command.ns.split('.')[1]; const generateConnectionId = pool => pool.options ? `${pool.options.host}:${pool.options.port}` : pool.address; const maybeRedact = (commandName, result) => (SENSITIVE_COMMANDS.has(commandName) ? 
{} : result); +const isLegacyPool = pool => pool.s && pool.queue; const LEGACY_FIND_QUERY_MAP = { $query: 'filter', @@ -151,6 +152,22 @@ const extractReply = (command, reply) => { return reply && reply.result ? reply.result : reply; }; +const extractConnectionDetails = pool => { + if (isLegacyPool(pool)) { + return { + connectionId: generateConnectionId(pool) + }; + } + + // APM in the modern pool is done at the `Connection` level, so we rename it here for + // readability. + const connection = pool; + return { + address: connection.address, + connectionId: connection.id + }; +}; + /** An event indicating the start of a given command */ class CommandStartedEvent { /** @@ -162,6 +179,7 @@ class CommandStartedEvent { constructor(pool, command) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); // NOTE: remove in major revision, this is not spec behavior if (SENSITIVE_COMMANDS.has(commandName)) { @@ -169,8 +187,7 @@ class CommandStartedEvent { this.commandObj[commandName] = true; } - Object.assign(this, { - connectionId: generateConnectionId(pool), + Object.assign(this, connectionDetails, { requestId: command.requestId, databaseName: databaseName(command), commandName, @@ -192,9 +209,9 @@ class CommandSucceededEvent { constructor(pool, command, reply, started) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); - Object.assign(this, { - connectionId: generateConnectionId(pool), + Object.assign(this, connectionDetails, { requestId: command.requestId, commandName, duration: calculateDurationInMs(started), @@ -216,9 +233,9 @@ class CommandFailedEvent { constructor(pool, command, error, started) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); - Object.assign(this, { - connectionId: generateConnectionId(pool), + Object.assign(this, connectionDetails, { requestId: command.requestId, commandName, duration: calculateDurationInMs(started), diff --git a/test/functional/apm.test.js b/test/functional/apm.test.js index 6a80a056d2e..3f229b751af 100644 --- a/test/functional/apm.test.js +++ b/test/functional/apm.test.js @@ -224,8 +224,15 @@ describe('APM', function() { .then(() => { expect(started).to.have.lengthOf(2); - // Ensure command was not sent to the primary - expect(started[0].connectionId).to.not.equal(started[1].connectionId); + if (self.configuration.usingUnifiedTopology()) { + expect(started[0]) + .property('address') + .to.not.equal(started[1].address); + } else { + // Ensure command was not sent to the primary + expect(started[0].connectionId).to.not.equal(started[1].connectionId); + } + return client.close(); }); }); @@ -274,7 +281,14 @@ describe('APM', function() { expect(started).to.have.lengthOf(2); // Ensure command was not sent to the primary - expect(started[0].connectionId).to.not.equal(started[1].connectionId); + if (self.configuration.usingUnifiedTopology()) { + expect(started[0]) + .property('address') + .to.not.equal(started[1].address); + } else { + expect(started[0].connectionId).to.not.equal(started[1].connectionId); + } + return client.close(); }); }); From 0715a36ae3e6f371d06e2b78ad4f8cf6cf740bcd Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 13 Jan 2020 11:06:15 -0500 Subject: [PATCH 122/130] doc: add documentation for CMAP events and errors --- lib/cmap/errors.js | 12 +++++++++++ 
lib/cmap/events.js | 51 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/lib/cmap/errors.js b/lib/cmap/errors.js index 87998b05ef4..d9330195e74 100644 --- a/lib/cmap/errors.js +++ b/lib/cmap/errors.js @@ -1,6 +1,12 @@ 'use strict'; const MongoError = require('../core/error').MongoError; +/** + * An error indicating a connection pool is closed + * + * @property {string} address The address of the connection pool + * @extends MongoError + */ class PoolClosedError extends MongoError { constructor(pool) { super('Attempted to check out a connection from closed connection pool'); @@ -9,6 +15,12 @@ class PoolClosedError extends MongoError { } } +/** + * An error thrown when a request to check out a connection times out + * + * @property {string} address The address of the connection pool + * @extends MongoError + */ class WaitQueueTimeoutError extends MongoError { constructor(pool) { super('Timed out while checking out a connection from connection pool'); diff --git a/lib/cmap/events.js b/lib/cmap/events.js index 999a31b4366..e1e5e0e47b3 100644 --- a/lib/cmap/events.js +++ b/lib/cmap/events.js @@ -1,5 +1,11 @@ 'use strict'; +/** + * The base class for all monitoring events published from the connection pool + * + * @property {number} time A timestamp when the event was created + * @property {string} address The address (host/port pair) of the pool + */ class ConnectionPoolMonitoringEvent { constructor(type, pool) { this.time = new Date(); @@ -8,6 +14,11 @@ class ConnectionPoolMonitoringEvent { } } +/** + * An event published when a connection pool is created + * + * @property {Object} options The options used to create this connection pool + */ class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { super('ConnectionPoolCreated', pool); @@ -15,12 +26,20 @@ class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent { } } +/** + * An event published when a connection pool is closed + */ class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { super('ConnectionPoolClosed', pool); } } +/** + * An event published when a connection pool creates a new connection + * + * @property {number} connectionId A monotonically increasing, per-pool id for the newly created connection + */ class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { super('ConnectionCreated', pool); @@ -28,6 +47,11 @@ class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent { } } +/** + * An event published when a connection is ready for use + * + * @property {number} connectionId The id of the connection + */ class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { super('ConnectionReady', pool); @@ -35,6 +59,12 @@ class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent { } } +/** + * An event published when a connection is closed + * + * @property {number} connectionId The id of the connection + * @property {string} reason The reason the connection was closed + */ class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection, reason) { super('ConnectionClosed', pool); @@ -43,12 +73,20 @@ class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent { } } +/** + * An event published when a request to check a connection out begins + */ class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { super('ConnectionCheckOutStarted', 
pool); } } +/** + * An event published when a request to check a connection out fails + * + * @property {string} reason The reason the attempt to check out failed + */ class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, reason) { super('ConnectionCheckOutFailed', pool); @@ -56,6 +94,11 @@ class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent { } } +/** + * An event published when a connection is checked out of the connection pool + * + * @property {number} connectionId The id of the connection + */ class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { super('ConnectionCheckedOut', pool); @@ -63,6 +106,11 @@ class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent { } } +/** + * An event published when a connection is checked into the connection pool + * + * @property {number} connectionId The id of the connection + */ class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { super('ConnectionCheckedIn', pool); @@ -70,6 +118,9 @@ class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent { } } +/** + * An event published when a connection pool is cleared + */ class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { super('ConnectionPoolCleared', pool); From 7e64df72f631b981e5172e238a9f5c1cf1d64be7 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 13 Jan 2020 16:04:23 -0500 Subject: [PATCH 123/130] test: reduce flakiness of objectid test which checks by time --- test/functional/object_id.test.js | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/test/functional/object_id.test.js b/test/functional/object_id.test.js index 1b3eb814f56..93b6aaa9ecd 100644 --- a/test/functional/object_id.test.js +++ b/test/functional/object_id.test.js @@ -213,15 +213,21 @@ describe('ObjectID', function() { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { + test.equal(null, err); + var db = client.db(configuration.db); var collection = db.collection('shouldCorrectlyInsertWithObjectId'); collection.insert({}, { w: 1 }, function(err) { test.equal(null, err); + const firstCompareDate = new Date(); + setTimeout(function() { collection.insert({}, { w: 1 }, function(err) { test.equal(null, err); + const secondCompareDate = new Date(); + collection.find().toArray(function(err, items) { - var compareDate = new Date(); + test.equal(null, err); // Date 1 var date1 = new Date(); @@ -231,15 +237,15 @@ describe('ObjectID', function() { date2.setTime(items[1]._id.generationTime * 1000); // Compare - test.equal(compareDate.getFullYear(), date1.getFullYear()); - test.equal(compareDate.getDate(), date1.getDate()); - test.equal(compareDate.getMonth(), date1.getMonth()); - test.equal(compareDate.getHours(), date1.getHours()); - - test.equal(compareDate.getFullYear(), date2.getFullYear()); - test.equal(compareDate.getDate(), date2.getDate()); - test.equal(compareDate.getMonth(), date2.getMonth()); - test.equal(compareDate.getHours(), date2.getHours()); + test.equal(firstCompareDate.getFullYear(), date1.getFullYear()); + test.equal(firstCompareDate.getDate(), date1.getDate()); + test.equal(firstCompareDate.getMonth(), date1.getMonth()); + test.equal(firstCompareDate.getHours(), date1.getHours()); + + test.equal(secondCompareDate.getFullYear(), 
date2.getFullYear()); + test.equal(secondCompareDate.getDate(), date2.getDate()); + test.equal(secondCompareDate.getMonth(), date2.getMonth()); + test.equal(secondCompareDate.getHours(), date2.getHours()); // Let's close the db client.close(done); }); From ed8c9d41e28eb29f6f70b05412874dd160b8f738 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 13 Jan 2020 21:13:34 -0500 Subject: [PATCH 124/130] refactor: warn on use of deprecated SDAM events in unified mode The `joined`, `left`, `ping`, `ha`, `all`, `fullsetup`, and `open` events are deprecated when using the unified topology. We now warn users when event listeners are attached to these events in unified mode, and documentation has been provided informing of the change. --- .../content/reference/unified-topology/index.md | 13 +++++++++++++ lib/operations/connect.js | 14 +++++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/docs/reference/content/reference/unified-topology/index.md b/docs/reference/content/reference/unified-topology/index.md index f7ceb2f3ddf..6ed2b1d73ec 100644 --- a/docs/reference/content/reference/unified-topology/index.md +++ b/docs/reference/content/reference/unified-topology/index.md @@ -35,6 +35,13 @@ The unified topology no longer supports the following events: - `reconnect` - `reconnectFailed` - `attemptReconnect` +- `joined` +- `left` +- `ping` +- `ha` +- `all` +- `fullsetup` +- `open` It also deprecates the following options passed into the `MongoClient`: - `autoReconnect` @@ -95,3 +102,9 @@ The three topology types from the "native" layer (in `lib/topologies`) primarily - There is no collaboration with the server to ensure that queued write operations only happen one time. Imagine running an `updateOne` operation which is interrupted by a network error. The operation was successfully sent to the server, but the server response was lost during the interruption, which means the operation is placed in the callback store to be retried. At the same, another microservice allows a user to update the written data. Once the original client is reconnected to the server, it automatically rexecutes the operation and updates the _newer_ data with an _older_ value. The unified topology completely removes the disconnect handler, in favor of the more robust and consistent [Retryable Reads](https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.rst) and [Retryable Writes](https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst) features. Operations now will attempt execution in a server selection loop for up to `serverSelectionTimeoutMS` (default: 30s), and will retry the operation one time in the event of a [retryable error](https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms). All errors outside of this loop are returned to the user, since they know best what to do in these scenarios. + +### Deprecated monitoring events + +The `joined`, `left`, `all`, and `fullsetup` events are no longer emitted by the unified topology, primarily +because their behavior is duplicated by the pre-existing SDAM monitoring events: `topologyDescriptionChanged` +and `serverDescriptionChanged`.
Please refer to the documentation on [Topology Monitoring]({{}}) diff --git a/lib/operations/connect.js b/lib/operations/connect.js index 31b1dafbd44..57357a86f4a 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -467,7 +467,19 @@ function createServer(mongoClient, options, callback) { }); } -const DEPRECATED_UNIFIED_EVENTS = new Set(['reconnect', 'reconnectFailed', 'attemptReconnect']); +const DEPRECATED_UNIFIED_EVENTS = new Set([ + 'reconnect', + 'reconnectFailed', + 'attemptReconnect', + 'joined', + 'left', + 'ping', + 'ha', + 'all', + 'fullsetup', + 'open' +]); + function registerDeprecatedEventNotifiers(client) { client.on('newListener', eventName => { if (DEPRECATED_UNIFIED_EVENTS.has(eventName)) { From 1aea4debda51b0ca62270a921b4b85935780e4e6 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 14 Jan 2020 08:23:51 -0500 Subject: [PATCH 125/130] feat: relay all CMAP events to MongoClient The final step in introducing the new pool is to forward all of the pool events to the MongoClient, so users can listen to the events there. --- lib/cmap/events.js | 14 ++++++++++++++ lib/core/sdam/server.js | 8 +++++++- lib/core/sdam/topology.js | 3 ++- lib/operations/connect.js | 16 +++++++++++----- 4 files changed, 34 insertions(+), 7 deletions(-) diff --git a/lib/cmap/events.js b/lib/cmap/events.js index e1e5e0e47b3..f96021199bf 100644 --- a/lib/cmap/events.js +++ b/lib/cmap/events.js @@ -127,7 +127,21 @@ class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent { } } +const CMAP_EVENT_NAMES = [ + 'connectionPoolCreated', + 'connectionPoolClosed', + 'connectionCreated', + 'connectionReady', + 'connectionClosed', + 'connectionCheckOutStarted', + 'connectionCheckOutFailed', + 'connectionCheckedOut', + 'connectionCheckedIn', + 'connectionPoolCleared' +]; + module.exports = { + CMAP_EVENT_NAMES, ConnectionPoolCreatedEvent, ConnectionPoolClosedEvent, ConnectionCreatedEvent, diff --git a/lib/core/sdam/server.js b/lib/core/sdam/server.js index 7855c2a09da..0da1031b467 100644 --- a/lib/core/sdam/server.js +++ b/lib/core/sdam/server.js @@ -1,6 +1,7 @@ 'use strict'; const EventEmitter = require('events'); const ConnectionPool = require('../../cmap/connection_pool').ConnectionPool; +const CMAP_EVENT_NAMES = require('../../cmap/events').CMAP_EVENT_NAMES; const MongoError = require('../error').MongoError; const relayEvents = require('../utils').relayEvents; const BSON = require('../connection/utils').retrieveBSON(); @@ -113,7 +114,12 @@ class Server extends EventEmitter { ); this.s.pool = new ConnectionPool(poolOptions); - relayEvents(this.s.pool, this, ['commandStarted', 'commandSucceeded', 'commandFailed']); + relayEvents( + this.s.pool, + this, + ['commandStarted', 'commandSucceeded', 'commandFailed'].concat(CMAP_EVENT_NAMES) + ); + this.s.pool.on('clusterTimeReceived', clusterTime => { this.clusterTime = clusterTime; }); diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index 2fbefee707f..cac4777816b 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -25,6 +25,7 @@ const eachAsync = require('../utils').eachAsync; const emitDeprecationWarning = require('../../utils').emitDeprecationWarning; const ServerSessionPool = require('../sessions').ServerSessionPool; const makeClientMetadata = require('../utils').makeClientMetadata; +const CMAP_EVENT_NAMES = require('../../cmap/events').CMAP_EVENT_NAMES; const common = require('./common'); const drainTimerQueue = common.drainTimerQueue; @@ -49,7 +50,7 @@ const SERVER_RELAY_EVENTS = [ 
// NOTE: Legacy events 'monitoring' -]; +].concat(CMAP_EVENT_NAMES); // all events we listen to from `Server` instances const LOCAL_SERVER_EVENTS = SERVER_RELAY_EVENTS.concat([ diff --git a/lib/operations/connect.js b/lib/operations/connect.js index 57357a86f4a..f649af4a514 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -18,6 +18,7 @@ const ServerSessionPool = require('../core').Sessions.ServerSessionPool; const emitDeprecationWarning = require('../utils').emitDeprecationWarning; const fs = require('fs'); const BSON = require('../core/connection/utils').retrieveBSON(); +const CMAP_EVENT_NAMES = require('../cmap/events').CMAP_EVENT_NAMES; let client; function loadClient() { @@ -700,23 +701,28 @@ function mergeOptions(target, source, flatten) { function relayEvents(mongoClient, topology) { const serverOrCommandEvents = [ + // APM + 'commandStarted', + 'commandSucceeded', + 'commandFailed', + + // SDAM 'serverOpening', + 'serverClosed', 'serverDescriptionChanged', 'serverHeartbeatStarted', 'serverHeartbeatSucceeded', 'serverHeartbeatFailed', - 'serverClosed', 'topologyOpening', 'topologyClosed', 'topologyDescriptionChanged', - 'commandStarted', - 'commandSucceeded', - 'commandFailed', + + // Legacy 'joined', 'left', 'ping', 'ha' - ]; + ].concat(CMAP_EVENT_NAMES); serverOrCommandEvents.forEach(event => { topology.on(event, (object1, object2) => { From c01bf5065bc4f31fbf6c0b93d408e4a51af701af Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Mon, 13 Jan 2020 10:04:29 -0500 Subject: [PATCH 126/130] refactor: don't encode type name into public CMAP event types --- lib/cmap/events.js | 23 +++++++++++------------ test/unit/cmap/connection_pool.test.js | 7 ++++++- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/lib/cmap/events.js b/lib/cmap/events.js index f96021199bf..dcc8b6752b9 100644 --- a/lib/cmap/events.js +++ b/lib/cmap/events.js @@ -7,9 +7,8 @@ * @property {string} address The address (host/port pair) of the pool */ class ConnectionPoolMonitoringEvent { - constructor(type, pool) { + constructor(pool) { this.time = new Date(); - this.type = type; this.address = pool.address; } } @@ -21,7 +20,7 @@ class ConnectionPoolMonitoringEvent { */ class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { - super('ConnectionPoolCreated', pool); + super(pool); this.options = pool.options; } } @@ -31,7 +30,7 @@ class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { - super('ConnectionPoolClosed', pool); + super(pool); } } @@ -42,7 +41,7 @@ class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { - super('ConnectionCreated', pool); + super(pool); this.connectionId = connection.id; } } @@ -54,7 +53,7 @@ class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { - super('ConnectionReady', pool); + super(pool); this.connectionId = connection.id; } } @@ -67,7 +66,7 @@ class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection, reason) { - super('ConnectionClosed', pool); + super(pool); this.connectionId = connection.id; this.reason = reason || 'unknown'; } @@ -78,7 +77,7 @@ class 
ConnectionClosedEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { - super('ConnectionCheckOutStarted', pool); + super(pool); } } @@ -89,7 +88,7 @@ class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent { constructor(pool, reason) { - super('ConnectionCheckOutFailed', pool); + super(pool); this.reason = reason; } } @@ -101,7 +100,7 @@ class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { - super('ConnectionCheckedOut', pool); + super(pool); this.connectionId = connection.id; } } @@ -113,7 +112,7 @@ class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent { constructor(pool, connection) { - super('ConnectionCheckedIn', pool); + super(pool); this.connectionId = connection.id; } } @@ -123,7 +122,7 @@ class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent { */ class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent { constructor(pool) { - super('ConnectionPoolCleared', pool); + super(pool); } } diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index cbf24f12af2..a41919cf707 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -438,7 +438,7 @@ describe('Connection Pool', function() { .then(() => mainThread.finish()) .catch(e => (actualError = e)) .then(() => { - const actualEvents = poolEvents.filter(ev => ignoreEvents.indexOf(ev.type) < 0); + const actualEvents = poolEvents.filter(ev => ignoreEvents.indexOf(eventType(ev)) < 0); if (expectedError) { expect(actualError).to.exist; @@ -451,6 +451,11 @@ describe('Connection Pool', function() { expectedEvents.forEach((expected, index) => { const actual = actualEvents[index]; + if (expected.type) { + expect(actual.constructor.name).to.equal(`${expected.type}Event`); + delete expected.type; + } + expect(actual).to.matchMongoSpec(expected); }); }); From c04a52050ae6959bc4eec8d82168f9870168146b Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 14 Jan 2020 09:07:38 -0500 Subject: [PATCH 127/130] doc: add basic documentation for CMAP event monitoring --- .../reference/management/cmap-monitoring.md | 146 ++++++++++++++++++ 1 file changed, 146 insertions(+) create mode 100644 docs/reference/content/reference/management/cmap-monitoring.md diff --git a/docs/reference/content/reference/management/cmap-monitoring.md b/docs/reference/content/reference/management/cmap-monitoring.md new file mode 100644 index 00000000000..9a29344e9eb --- /dev/null +++ b/docs/reference/content/reference/management/cmap-monitoring.md @@ -0,0 +1,146 @@ ++++ +date = "2020-01-14T09:03:26-04:00" +title = "Connection Pool Monitoring" +[menu.main] + parent = "Management" + identifier = "CMAP" + weight = 100 + pre = "" ++++ + +# Connection Pool Monitoring + +The Node.js driver `3.5.0` or higher features Connection Pool Monitoring events, allowing an application or +tool to monitor the internal workings of the driver's connection pool. 
+ +**NOTE:** Connection pool monitoring is only available when the "Unified Topology" is enabled + +## Overview of CMAP events + +| Event | Description | +| :----------| :------------- | +| connectionPoolCreated | Emitted when a connection pool is created | +| connectionPoolClosed | Emitted when a connection pool is closed, prior to server instance destruction | +| connectionCreated | Emitted when a connection is created, but not necessarily when it is used for an operation | +| connectionReady | Emitted after a connection has successfully completed a handshake, and is ready to be used for operations| +| connectionClosed | Emitted when a connection is closed | +| connectionCheckOutStarted | Emitted when an operation attempts to acquire a connection for execution | +| connectionCheckOutFailed | Emitted when an operation fails to acquire a connection for execution | +| connectionCheckedOut | Emitted when an operation successfully acquires a connection for execution | +| connectionCheckedIn | Emitted when a connection is returned to the pool after operation execution | +| connectionPoolCleared | Emitted when the connection pool's generation count is increased | + +## Simple Code Example + +The following example demonstrates connecting to a replica set and printing out all CMAP related events: + +```js +const MongoClient = require('mongodb').MongoClient; +const url = 'mongodb://localhost:31000,localhost:31001/?replicaSet=rs'; +const client = new MongoClient(url); + +client.on('connectionPoolCreated', event => console.dir(event)); +client.on('connectionPoolClosed', event => console.dir(event)); +client.on('connectionCreated', event => console.dir(event)); +client.on('connectionReady', event => console.dir(event)); +client.on('connectionClosed', event => console.dir(event)); +client.on('connectionCheckOutStarted', event => console.dir(event)); +client.on('connectionCheckOutFailed', event => console.dir(event)); +client.on('connectionCheckedOut', event => console.dir(event)); +client.on('connectionCheckedIn', event => console.dir(event)); +client.on('connectionPoolCleared', event => console.dir(event)); + +client.connect((err, client) => { + if (err) throw err; +}); +``` + +## Example Events + +### connectionPoolCreated +```js +ConnectionPoolCreatedEvent { + time: 2020-01-14T13:46:15.536Z, + address: 'localhost:31003', + options: { ... } +} +``` + +### connectionPoolClosed +```js +ConnectionPoolClosedEvent { + time: 2020-01-14T13:54:53.570Z, + address: '127.0.0.1:34849' +} +``` + +### connectionCreated +```js +ConnectionCreatedEvent { + time: 2020-01-14T13:54:53.579Z, + address: '127.0.0.1:34849', + connectionId: 1 +} +``` + +### connectionReady +```js +ConnectionReadyEvent { + time: 2020-01-14T13:54:53.579Z, + address: '127.0.0.1:34849', + connectionId: 1 +} +``` + +### connectionClosed +```js +ConnectionClosedEvent { + time: 2020-01-14T13:54:53.564Z, + address: '127.0.0.1:34849', + connectionId: 2, + reason: ... +} +``` + +### connectionCheckOutStarted +```js +ConnectionCheckOutStartedEvent { + time: 2020-01-14T13:49:59.271Z, + address: 'localhost:31000' +} +``` + +### connectionCheckOutFailed +```js +ConnectionCheckOutFailedEvent { + time: 2020-01-14T13:49:59.271Z, + address: 'localhost:31000' + reason: ... 
+} +``` + +### connectionCheckedOut +```js +ConnectionCheckedOutEvent { + time: 2020-01-14T13:48:42.541Z, + address: 'localhost:31000', + connectionId: 1 +} +``` + +### connectionCheckedIn +```js +ConnectionCheckedInEvent { + time: 2020-01-14T13:48:42.543Z, + address: 'localhost:31000', + connectionId: 1 +} +``` + +### connectionPoolCleared +```js +ConnectionPoolClearedEvent { + time: 2020-01-14T13:58:11.437Z, + address: '127.0.0.1:45005' +} +``` From a630389bef2cc2a360dacd84d67cf514ed321fdd Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 14 Jan 2020 09:30:15 -0500 Subject: [PATCH 128/130] refactor: wait until server destroyed before stopping event relay Some monitoring events (such as connection pool closed, or a return of a connection to the pool) are desired _after_ a server has been closed/destroyed. --- lib/core/sdam/topology.js | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/core/sdam/topology.js b/lib/core/sdam/topology.js index cac4777816b..f15e2b6352c 100644 --- a/lib/core/sdam/topology.js +++ b/lib/core/sdam/topology.js @@ -53,13 +53,7 @@ const SERVER_RELAY_EVENTS = [ ].concat(CMAP_EVENT_NAMES); // all events we listen to from `Server` instances -const LOCAL_SERVER_EVENTS = SERVER_RELAY_EVENTS.concat([ - 'error', - 'connect', - 'descriptionReceived', - 'close', - 'ended' -]); +const LOCAL_SERVER_EVENTS = ['error', 'connect', 'descriptionReceived', 'close', 'ended']; const STATE_CLOSING = common.STATE_CLOSING; const STATE_CLOSED = common.STATE_CLOSED; @@ -780,6 +774,7 @@ function destroyServer(server, topology, options, callback) { new events.ServerClosedEvent(topology.s.id, server.description.address) ); + SERVER_RELAY_EVENTS.forEach(event => server.removeAllListeners(event)); if (typeof callback === 'function') { callback(); } From ea56625e4ddb4ea30ec80410bfee8703883c9422 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 14 Jan 2020 16:31:10 -0500 Subject: [PATCH 129/130] chore: `waitQueueTimeoutMS` is a valid connection string option --- lib/operations/connect.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/operations/connect.js b/lib/operations/connect.js index f649af4a514..0ba93b5580f 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -153,7 +153,8 @@ const validOptionNames = [ 'tlsCertificateKeyFile', 'tlsCertificateKeyFilePassword', 'minHeartbeatFrequencyMS', - 'heartbeatFrequencyMS' + 'heartbeatFrequencyMS', + 'waitQueueTimeoutMS' ]; const ignoreOptionNames = ['native_parser']; From 899128580b5a1a42019c7fc103489c72b87ec279 Mon Sep 17 00:00:00 2001 From: Matt Broadstone Date: Tue, 14 Jan 2020 16:40:07 -0500 Subject: [PATCH 130/130] chore(release): 3.5.0 --- HISTORY.md | 36 ++++++++++++++++++++++++++++++++++++ package-lock.json | 2 +- package.json | 2 +- 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 62141795c50..127f8de5618 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -2,6 +2,42 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
+ +# [3.5.0](https://github.com/mongodb/node-mongodb-native/compare/v3.4.1...v3.5.0) (2020-01-14) + + +### Bug Fixes + +* copy `ssl` option to pool connection options ([563ced6](https://github.com/mongodb/node-mongodb-native/commit/563ced6)) +* destroy connections marked as closed on checkIn / checkOut ([2bd17a6](https://github.com/mongodb/node-mongodb-native/commit/2bd17a6)) +* ensure sync errors are thrown, and don't callback twice ([cca5b49](https://github.com/mongodb/node-mongodb-native/commit/cca5b49)) +* ignore connection errors during pool destruction ([b8805dc](https://github.com/mongodb/node-mongodb-native/commit/b8805dc)) +* not all message payloads are arrays of Buffer ([e4df5f4](https://github.com/mongodb/node-mongodb-native/commit/e4df5f4)) +* recover on network error during initial connect ([a13dc68](https://github.com/mongodb/node-mongodb-native/commit/a13dc68)) +* remove servers with me mismatch in `updateRsFromPrimary` ([95a772e](https://github.com/mongodb/node-mongodb-native/commit/95a772e)) +* report the correct platform in client metadata ([35d0274](https://github.com/mongodb/node-mongodb-native/commit/35d0274)) +* reschedule monitoring before emitting heartbeat events ([7fcbeb5](https://github.com/mongodb/node-mongodb-native/commit/7fcbeb5)) +* socket timeout for handshake should be `connectTimeoutMS` ([c83af9a](https://github.com/mongodb/node-mongodb-native/commit/c83af9a)) +* timed out streams should be destroyed on `timeout` event ([5319ff9](https://github.com/mongodb/node-mongodb-native/commit/5319ff9)) +* use remote address for stream identifier ([f13c20b](https://github.com/mongodb/node-mongodb-native/commit/f13c20b)) +* used weighted RTT calculation for server selection ([d446be5](https://github.com/mongodb/node-mongodb-native/commit/d446be5)) +* **execute-operation:** don't swallow synchronous errors ([0a2d4e9](https://github.com/mongodb/node-mongodb-native/commit/0a2d4e9)) +* **gridfs:** make a copy of chunk before writing to server ([b4ec5b8](https://github.com/mongodb/node-mongodb-native/commit/b4ec5b8)) + + +### Features + +* add a `withConnection` helper to the connection pool ([d59dced](https://github.com/mongodb/node-mongodb-native/commit/d59dced)) +* include `connectionId` for APM with new CMAP connection pool ([9bd360c](https://github.com/mongodb/node-mongodb-native/commit/9bd360c)) +* integrate CMAP connection pool into unified topology ([9dd3939](https://github.com/mongodb/node-mongodb-native/commit/9dd3939)) +* introduce `MongoServerSelectionError` ([0cf7ec9](https://github.com/mongodb/node-mongodb-native/commit/0cf7ec9)) +* introduce a class for tracking stream specific attributes ([f6bf82c](https://github.com/mongodb/node-mongodb-native/commit/f6bf82c)) +* introduce a new `Monitor` type for server monitoring 
([2bfe2a1](https://github.com/mongodb/node-mongodb-native/commit/2bfe2a1)) +* relay all CMAP events to MongoClient ([1aea4de](https://github.com/mongodb/node-mongodb-native/commit/1aea4de)) +* support socket timeouts on a per-connection level ([93e8ad0](https://github.com/mongodb/node-mongodb-native/commit/93e8ad0)) + + + ## [3.4.1](https://github.com/mongodb/node-mongodb-native/compare/v3.4.0...v3.4.1) (2019-12-19) diff --git a/package-lock.json b/package-lock.json index c2904f6e33f..c8d35bc56a8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "mongodb", - "version": "3.4.1", + "version": "3.5.0", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/package.json b/package.json index f337fbee536..248baf630f3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "mongodb", - "version": "3.4.1", + "version": "3.5.0", "description": "The official MongoDB driver for Node.js", "main": "index.js", "files": [