diff --git a/.evergreen/config.yml b/.evergreen/config.yml
index 1de7efde03a..1eab4515c56 100644
--- a/.evergreen/config.yml
+++ b/.evergreen/config.yml
@@ -66,7 +66,8 @@ functions:
           # If this was a patch build, doing a fresh clone would not actually test the patch
           cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS
         else
-          git clone git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS
+          # git clone git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS
+          git clone git://github.com/bazile-clyde/drivers-evergreen-tools.git $DRIVERS_TOOLS
         fi
         echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" >
@@ -74,9 +75,11 @@ functions:
   bootstrap mongo-orchestration:
     - command: shell.exec
       params:
-        script: |
+        script: >
           ${PREPARE_SHELL}
-          MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh
+
+          MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} ORCHESTRATION_FILE=${ORCHESTRATION_FILE} bash
+          ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh
     - command: expansions.update
       params:
         file: mo-expansion.yml
@@ -154,6 +157,172 @@ functions:
       NODE_LTS_NAME='${NODE_LTS_NAME}' ATLAS_REPL='${atlas_repl}' ATLAS_SHRD='${atlas_shrd}' ATLAS_FREE='${atlas_free}' ATLAS_TLS11='${atlas_tls11}' ATLAS_TLS12='${atlas_tls12}' bash ${PROJECT_DIRECTORY}/.evergreen/run-atlas-tests.sh
+  add aws auth variables to file:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        silent: true
+        script: |
+          cat <<EOF > ${DRIVERS_TOOLS}/.evergreen/auth_aws/aws_e2e_setup.json
+          {
+            "iam_auth_ecs_account" : "${iam_auth_ecs_account}",
+            "iam_auth_ecs_secret_access_key" : "${iam_auth_ecs_secret_access_key}",
+            "iam_auth_ecs_account_arn": "arn:aws:iam::557821124784:user/authtest_fargate_user",
+            "iam_auth_ecs_cluster": "${iam_auth_ecs_cluster}",
+            "iam_auth_ecs_task_definition": "${iam_auth_ecs_task_definition}",
+            "iam_auth_ecs_subnet_a": "${iam_auth_ecs_subnet_a}",
+            "iam_auth_ecs_subnet_b": "${iam_auth_ecs_subnet_b}",
+            "iam_auth_ecs_security_group": "${iam_auth_ecs_security_group}",
+            "iam_auth_assume_aws_account" : "${iam_auth_assume_aws_account}",
+            "iam_auth_assume_aws_secret_access_key" : "${iam_auth_assume_aws_secret_access_key}",
+            "iam_auth_assume_role_name" : "${iam_auth_assume_role_name}",
+            "iam_auth_ec2_instance_account" : "${iam_auth_ec2_instance_account}",
+            "iam_auth_ec2_instance_secret_access_key" : "${iam_auth_ec2_instance_secret_access_key}",
+            "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}"
+          }
+          EOF
+  run aws auth test with regular aws credentials:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/auth_aws
+          ${MONGODB_BINARIES}/mongo aws_e2e_regular_aws.js
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        silent: true
+        script: |
+          cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh"
+          alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"'
+          USER=$(urlencode ${iam_auth_ecs_account})
+          PASS=$(urlencode ${iam_auth_ecs_secret_access_key})
+          export MONGODB_URI="mongodb://$USER:$PASS@localhost:27017/aws?authMechanism=MONGODB-AWS"
+          EOF
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+  run aws auth test with assume role credentials:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/auth_aws
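+          # aws_e2e_assume_role.js obtains temporary assume-role credentials and saves them to creds.json, which the next step reads with jq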
+          ${MONGODB_BINARIES}/mongo aws_e2e_assume_role.js
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        silent: true
+        script: |
+          cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh"
+          alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"'
+          USER=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          USER=$(urlencode $USER)
+          PASS=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          PASS=$(urlencode $PASS)
+          SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          SESSION_TOKEN=$(urlencode $SESSION_TOKEN)
+          export MONGODB_URI="mongodb://$USER:$PASS@localhost:27017/aws?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:$SESSION_TOKEN"
+          EOF
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+  run aws auth test with aws EC2 credentials:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/auth_aws
+          ${MONGODB_BINARIES}/mongo aws_e2e_ec2.js
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+  run aws auth test with aws credentials as environment variables:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        silent: true
+        script: |
+          cat <<EOF > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh"
+          export AWS_ACCESS_KEY_ID=${iam_auth_ecs_account}
+          export AWS_SECRET_ACCESS_KEY=${iam_auth_ecs_secret_access_key}
+          export MONGODB_URI="mongodb://localhost:27017/aws?authMechanism=MONGODB-AWS"
+          EOF
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+  run aws auth test with aws credentials and session token as environment variables:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        silent: true
+        script: |
+          cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh"
+          export AWS_ACCESS_KEY_ID=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          export AWS_SECRET_ACCESS_KEY=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          export AWS_SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          export MONGODB_URI="mongodb://localhost:27017/aws?authMechanism=MONGODB-AWS"
+          EOF
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+  run aws ECS auth test:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          AUTH_AWS_DIR=${DRIVERS_TOOLS}/.evergreen/auth_aws
+          ECS_SRC_DIR=$AUTH_AWS_DIR/src
+
+          # fix issue with `TestData` in SERVER-46340
+          sed -i '1s+^+TestData = {};\n+' $AUTH_AWS_DIR/lib/ecs_hosted_test.js
+
+          # pack up project directory to ssh it to the container
+          mkdir -p $ECS_SRC_DIR/.evergreen
+          cp $PROJECT_DIRECTORY/.evergreen/run-mongodb-aws-ecs-test.sh $ECS_SRC_DIR/.evergreen
+          tar -czf $ECS_SRC_DIR/src.tgz -C $PROJECT_DIRECTORY .
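+
+          # hand the binaries and source paths to aws_e2e_ecs.js through a generated setup.js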
+          cd $AUTH_AWS_DIR
+          cat <<EOF > setup.js
+          const mongo_binaries = "$MONGODB_BINARIES";
+          const project_dir = "$ECS_SRC_DIR";
+          EOF
+
+          cat setup.js
+          mongo --nodb setup.js aws_e2e_ecs.js
 pre:
   - func: fetch source
   - func: prepare resources
@@ -195,45 +364,6 @@ tasks:
           VERSION: latest
           TOPOLOGY: sharded_cluster
       - func: run tests
-  - name: test-latest-server-unified
-    tags:
-      - latest
-      - server-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: latest
-          TOPOLOGY: server
-      - func: run tests
-        vars:
-          UNIFIED: 1
-  - name: test-latest-replica_set-unified
-    tags:
-      - latest
-      - replica_set-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: latest
-          TOPOLOGY: replica_set
-      - func: run tests
-        vars:
-          UNIFIED: 1
-  - name: test-latest-sharded_cluster-unified
-    tags:
-      - latest
-      - sharded_cluster-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: latest
-          TOPOLOGY: sharded_cluster
-      - func: run tests
-        vars:
-          UNIFIED: 1
   - name: test-4.2-server
     tags:
       - '4.2'
@@ -267,45 +397,6 @@ tasks:
           VERSION: '4.2'
           TOPOLOGY: sharded_cluster
       - func: run tests
-  - name: test-4.2-server-unified
-    tags:
-      - '4.2'
-      - server-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: '4.2'
-          TOPOLOGY: server
-      - func: run tests
-        vars:
-          UNIFIED: 1
-  - name: test-4.2-replica_set-unified
-    tags:
-      - '4.2'
-      - replica_set-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: '4.2'
-          TOPOLOGY: replica_set
-      - func: run tests
-        vars:
-          UNIFIED: 1
-  - name: test-4.2-sharded_cluster-unified
-    tags:
-      - '4.2'
-      - sharded_cluster-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: '4.2'
-          TOPOLOGY: sharded_cluster
-      - func: run tests
-        vars:
-          UNIFIED: 1
   - name: test-4.0-server
     tags:
       - '4.0'
@@ -339,45 +430,6 @@ tasks:
           VERSION: '4.0'
           TOPOLOGY: sharded_cluster
       - func: run tests
-  - name: test-4.0-server-unified
-    tags:
-      - '4.0'
-      - server-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: '4.0'
-          TOPOLOGY: server
-      - func: run tests
-        vars:
-          UNIFIED: 1
-  - name: test-4.0-replica_set-unified
-    tags:
-      - '4.0'
-      - replica_set-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: '4.0'
-          TOPOLOGY: replica_set
-      - func: run tests
-        vars:
-          UNIFIED: 1
-  - name: test-4.0-sharded_cluster-unified
-    tags:
-      - '4.0'
-      - sharded_cluster-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: '4.0'
-          TOPOLOGY: sharded_cluster
-      - func: run tests
-        vars:
-          UNIFIED: 1
   - name: test-3.6-server
     tags:
       - '3.6'
@@ -411,45 +463,6 @@ tasks:
           VERSION: '3.6'
           TOPOLOGY: sharded_cluster
       - func: run tests
-  - name: test-3.6-server-unified
-    tags:
-      - '3.6'
-      - server-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: '3.6'
-          TOPOLOGY: server
-      - func: run tests
-        vars:
-          UNIFIED: 1
-  - name: test-3.6-replica_set-unified
-    tags:
-      - '3.6'
-      - replica_set-unified
-    commands:
-      - func: install dependencies
-      - func: bootstrap mongo-orchestration
-        vars:
-          VERSION: '3.6'
-          TOPOLOGY: replica_set
-      - func: run tests
-        vars:
-          UNIFIED: 1
-  - name: test-3.6-sharded_cluster-unified
-    tags:
-      - '3.6'
-      -
sharded_cluster-unified - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.6' - TOPOLOGY: sharded_cluster - - func: run tests - vars: - UNIFIED: 1 - name: test-3.4-server tags: - '3.4' @@ -483,198 +496,81 @@ tasks: VERSION: '3.4' TOPOLOGY: sharded_cluster - func: run tests - - name: test-3.4-server-unified + - name: test-3.2-server tags: - - '3.4' - - server-unified + - '3.2' + - server commands: - func: install dependencies - func: bootstrap mongo-orchestration vars: - VERSION: '3.4' + VERSION: '3.2' TOPOLOGY: server - func: run tests - vars: - UNIFIED: 1 - - name: test-3.4-replica_set-unified + - name: test-3.2-replica_set tags: - - '3.4' - - replica_set-unified + - '3.2' + - replica_set commands: - func: install dependencies - func: bootstrap mongo-orchestration vars: - VERSION: '3.4' + VERSION: '3.2' TOPOLOGY: replica_set - func: run tests - vars: - UNIFIED: 1 - - name: test-3.4-sharded_cluster-unified + - name: test-3.2-sharded_cluster tags: - - '3.4' - - sharded_cluster-unified + - '3.2' + - sharded_cluster commands: - func: install dependencies - func: bootstrap mongo-orchestration vars: - VERSION: '3.4' + VERSION: '3.2' TOPOLOGY: sharded_cluster - func: run tests - vars: - UNIFIED: 1 - - name: test-3.2-server + - name: test-3.0-server tags: - - '3.2' + - '3.0' - server commands: - func: install dependencies - func: bootstrap mongo-orchestration vars: - VERSION: '3.2' + VERSION: '3.0' TOPOLOGY: server - func: run tests - - name: test-3.2-replica_set + - name: test-3.0-replica_set tags: - - '3.2' + - '3.0' - replica_set commands: - func: install dependencies - func: bootstrap mongo-orchestration vars: - VERSION: '3.2' + VERSION: '3.0' TOPOLOGY: replica_set - func: run tests - - name: test-3.2-sharded_cluster + - name: test-3.0-sharded_cluster tags: - - '3.2' + - '3.0' - sharded_cluster commands: - func: install dependencies - func: bootstrap mongo-orchestration vars: - VERSION: '3.2' + VERSION: '3.0' TOPOLOGY: sharded_cluster - func: run tests - - name: test-3.2-server-unified + - name: test-2.6-server tags: - - '3.2' - - server-unified + - '2.6' + - server commands: - func: install dependencies - func: bootstrap mongo-orchestration vars: - VERSION: '3.2' - TOPOLOGY: server - - func: run tests - vars: - UNIFIED: 1 - - name: test-3.2-replica_set-unified - tags: - - '3.2' - - replica_set-unified - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.2' - TOPOLOGY: replica_set - - func: run tests - vars: - UNIFIED: 1 - - name: test-3.2-sharded_cluster-unified - tags: - - '3.2' - - sharded_cluster-unified - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.2' - TOPOLOGY: sharded_cluster - - func: run tests - vars: - UNIFIED: 1 - - name: test-3.0-server - tags: - - '3.0' - - server - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.0' - TOPOLOGY: server - - func: run tests - - name: test-3.0-replica_set - tags: - - '3.0' - - replica_set - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.0' - TOPOLOGY: replica_set - - func: run tests - - name: test-3.0-sharded_cluster - tags: - - '3.0' - - sharded_cluster - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.0' - TOPOLOGY: sharded_cluster - - func: run tests - - name: test-3.0-server-unified - tags: - - '3.0' - - server-unified - 
commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.0' - TOPOLOGY: server - - func: run tests - vars: - UNIFIED: 1 - - name: test-3.0-replica_set-unified - tags: - - '3.0' - - replica_set-unified - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.0' - TOPOLOGY: replica_set - - func: run tests - vars: - UNIFIED: 1 - - name: test-3.0-sharded_cluster-unified - tags: - - '3.0' - - sharded_cluster-unified - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '3.0' - TOPOLOGY: sharded_cluster - - func: run tests - vars: - UNIFIED: 1 - - name: test-2.6-server - tags: - - '2.6' - - server - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '2.6' + VERSION: '2.6' TOPOLOGY: server - func: run tests - name: test-2.6-replica_set @@ -699,45 +595,6 @@ tasks: VERSION: '2.6' TOPOLOGY: sharded_cluster - func: run tests - - name: test-2.6-server-unified - tags: - - '2.6' - - server-unified - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '2.6' - TOPOLOGY: server - - func: run tests - vars: - UNIFIED: 1 - - name: test-2.6-replica_set-unified - tags: - - '2.6' - - replica_set-unified - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '2.6' - TOPOLOGY: replica_set - - func: run tests - vars: - UNIFIED: 1 - - name: test-2.6-sharded_cluster-unified - tags: - - '2.6' - - sharded_cluster-unified - commands: - - func: install dependencies - - func: bootstrap mongo-orchestration - vars: - VERSION: '2.6' - TOPOLOGY: sharded_cluster - - func: run tests - vars: - UNIFIED: 1 - name: test-atlas-connectivity tags: - atlas-connect @@ -746,351 +603,200 @@ tasks: - func: run atlas tests vars: VERSION: latest + - name: aws-auth-test + commands: + - func: install dependencies + - func: bootstrap mongo-orchestration + vars: + AUTH: auth + ORCHESTRATION_FILE: auth-aws.json + TOPOLOGY: server + - func: add aws auth variables to file + - func: run aws auth test with regular aws credentials + - func: run aws auth test with assume role credentials + - func: run aws auth test with aws EC2 credentials + - func: run aws auth test with aws credentials as environment variables + - func: run aws auth test with aws credentials and session token as environment variables + - func: run aws ECS auth test buildvariants: - - name: debian71-test-boron - display_name: Debian 7.1 Node Boron - run_on: debian71-test - expansions: - NODE_LTS_NAME: boron - tasks: &ref_0 - - test-3.6-server - - test-3.6-replica_set - - test-3.6-sharded_cluster - - test-3.6-server-unified - - test-3.6-replica_set-unified - - test-3.6-sharded_cluster-unified - - test-3.4-server - - test-3.4-replica_set - - test-3.4-sharded_cluster - - test-3.4-server-unified - - test-3.4-replica_set-unified - - test-3.4-sharded_cluster-unified - - test-3.2-server - - test-3.2-replica_set - - test-3.2-sharded_cluster - - test-3.2-server-unified - - test-3.2-replica_set-unified - - test-3.2-sharded_cluster-unified - - test-3.0-server - - test-3.0-replica_set - - test-3.0-sharded_cluster - - test-3.0-server-unified - - test-3.0-replica_set-unified - - test-3.0-sharded_cluster-unified - - test-2.6-server - - test-2.6-replica_set - - test-2.6-sharded_cluster - - test-2.6-server-unified - - test-2.6-replica_set-unified - - test-2.6-sharded_cluster-unified - - name: debian71-test-argon - 
display_name: Debian 7.1 Node Argon - run_on: debian71-test - expansions: - NODE_LTS_NAME: argon - tasks: *ref_0 - - name: debian81-test-dubnium - display_name: Debian 8.1 Node Dubnium + - name: debian81-test-carbon + display_name: Debian 8.1 Node Carbon run_on: debian81-test expansions: - NODE_LTS_NAME: dubnium - tasks: &ref_1 + NODE_LTS_NAME: carbon + tasks: &ref_0 - test-4.0-server - test-4.0-replica_set - test-4.0-sharded_cluster - - test-4.0-server-unified - - test-4.0-replica_set-unified - - test-4.0-sharded_cluster-unified - test-3.6-server - test-3.6-replica_set - test-3.6-sharded_cluster - - test-3.6-server-unified - - test-3.6-replica_set-unified - - test-3.6-sharded_cluster-unified - test-3.4-server - test-3.4-replica_set - test-3.4-sharded_cluster - - test-3.4-server-unified - - test-3.4-replica_set-unified - - test-3.4-sharded_cluster-unified - - name: debian81-test-carbon - display_name: Debian 8.1 Node Carbon - run_on: debian81-test - expansions: - NODE_LTS_NAME: carbon - tasks: *ref_1 - - name: debian81-test-boron - display_name: Debian 8.1 Node Boron - run_on: debian81-test - expansions: - NODE_LTS_NAME: boron - tasks: *ref_1 - - name: debian81-test-argon - display_name: Debian 8.1 Node Argon + - name: debian81-test-dubnium + display_name: Debian 8.1 Node Dubnium run_on: debian81-test expansions: - NODE_LTS_NAME: argon - tasks: *ref_1 - - name: linux-64-amzn-test-boron - display_name: Amazon Linux (Enterprise) Node Boron - run_on: linux-64-amzn-test - expansions: - NODE_LTS_NAME: boron + NODE_LTS_NAME: dubnium tasks: *ref_0 - - name: linux-64-amzn-test-argon - display_name: Amazon Linux (Enterprise) Node Argon - run_on: linux-64-amzn-test + - name: debian81-test-erbium + display_name: Debian 8.1 Node Erbium + run_on: debian81-test expansions: - NODE_LTS_NAME: argon + NODE_LTS_NAME: erbium tasks: *ref_0 - - name: macos-1014-dubnium - display_name: macOS 10.14 Node Dubnium + - name: macos-1014-carbon + display_name: macOS 10.14 Node Carbon run_on: macos-1014 expansions: - NODE_LTS_NAME: dubnium - tasks: &ref_2 + NODE_LTS_NAME: carbon + tasks: &ref_1 - test-latest-server - test-latest-replica_set - test-latest-sharded_cluster - - test-latest-server-unified - - test-latest-replica_set-unified - - test-latest-sharded_cluster-unified - test-4.2-server - test-4.2-replica_set - test-4.2-sharded_cluster - - test-4.2-server-unified - - test-4.2-replica_set-unified - - test-4.2-sharded_cluster-unified - test-4.0-server - test-4.0-replica_set - test-4.0-sharded_cluster - - test-4.0-server-unified - - test-4.0-replica_set-unified - - test-4.0-sharded_cluster-unified - test-3.6-server - test-3.6-replica_set - test-3.6-sharded_cluster - - test-3.6-server-unified - - test-3.6-replica_set-unified - - test-3.6-sharded_cluster-unified - test-3.4-server - test-3.4-replica_set - test-3.4-sharded_cluster - - test-3.4-server-unified - - test-3.4-replica_set-unified - - test-3.4-sharded_cluster-unified - test-3.2-server - test-3.2-replica_set - test-3.2-sharded_cluster - - test-3.2-server-unified - - test-3.2-replica_set-unified - - test-3.2-sharded_cluster-unified - test-3.0-server - test-3.0-replica_set - test-3.0-sharded_cluster - - test-3.0-server-unified - - test-3.0-replica_set-unified - - test-3.0-sharded_cluster-unified - test-2.6-server - test-2.6-replica_set - test-2.6-sharded_cluster - - test-2.6-server-unified - - test-2.6-replica_set-unified - - test-2.6-sharded_cluster-unified - test-atlas-connectivity - - name: macos-1014-carbon - display_name: macOS 10.14 Node Carbon - run_on: 
macos-1014 - expansions: - NODE_LTS_NAME: carbon - tasks: *ref_2 - - name: macos-1014-boron - display_name: macOS 10.14 Node Boron + - name: macos-1014-dubnium + display_name: macOS 10.14 Node Dubnium run_on: macos-1014 expansions: - NODE_LTS_NAME: boron - tasks: *ref_2 - - name: macos-1014-argon - display_name: macOS 10.14 Node Argon + NODE_LTS_NAME: dubnium + tasks: *ref_1 + - name: macos-1014-erbium + display_name: macOS 10.14 Node Erbium run_on: macos-1014 expansions: - NODE_LTS_NAME: argon - tasks: *ref_2 - - name: rhel70-dubnium - display_name: RHEL 7.0 Node Dubnium - run_on: rhel70-small - expansions: - NODE_LTS_NAME: dubnium - tasks: *ref_2 + NODE_LTS_NAME: erbium + tasks: *ref_1 - name: rhel70-carbon display_name: RHEL 7.0 Node Carbon run_on: rhel70-small expansions: NODE_LTS_NAME: carbon - tasks: *ref_2 - - name: rhel70-boron - display_name: RHEL 7.0 Node Boron + tasks: *ref_1 + - name: rhel70-dubnium + display_name: RHEL 7.0 Node Dubnium run_on: rhel70-small expansions: - NODE_LTS_NAME: boron - tasks: *ref_2 - - name: rhel70-argon - display_name: RHEL 7.0 Node Argon + NODE_LTS_NAME: dubnium + tasks: *ref_1 + - name: rhel70-erbium + display_name: RHEL 7.0 Node Erbium run_on: rhel70-small expansions: - NODE_LTS_NAME: argon - tasks: *ref_2 - - name: rhel71-power8-test-dubnium - display_name: RHEL 7.1 (POWER8) Node Dubnium + NODE_LTS_NAME: erbium + tasks: *ref_1 + - name: rhel71-power8-test-carbon + display_name: RHEL 7.1 (POWER8) Node Carbon run_on: rhel71-power8-test expansions: - NODE_LTS_NAME: dubnium - tasks: &ref_3 + NODE_LTS_NAME: carbon + tasks: &ref_2 - test-latest-server - test-latest-replica_set - test-latest-sharded_cluster - - test-latest-server-unified - - test-latest-replica_set-unified - - test-latest-sharded_cluster-unified - test-4.2-server - test-4.2-replica_set - test-4.2-sharded_cluster - - test-4.2-server-unified - - test-4.2-replica_set-unified - - test-4.2-sharded_cluster-unified - test-4.0-server - test-4.0-replica_set - test-4.0-sharded_cluster - - test-4.0-server-unified - - test-4.0-replica_set-unified - - test-4.0-sharded_cluster-unified - test-3.6-server - test-3.6-replica_set - test-3.6-sharded_cluster - - test-3.6-server-unified - - test-3.6-replica_set-unified - - test-3.6-sharded_cluster-unified - test-3.4-server - test-3.4-replica_set - test-3.4-sharded_cluster - - test-3.4-server-unified - - test-3.4-replica_set-unified - - test-3.4-sharded_cluster-unified - test-3.2-server - test-3.2-replica_set - test-3.2-sharded_cluster - - test-3.2-server-unified - - test-3.2-replica_set-unified - - test-3.2-sharded_cluster-unified - test-atlas-connectivity - - name: rhel71-power8-test-carbon - display_name: RHEL 7.1 (POWER8) Node Carbon - run_on: rhel71-power8-test - expansions: - NODE_LTS_NAME: carbon - tasks: *ref_3 - - name: rhel71-power8-test-boron - display_name: RHEL 7.1 (POWER8) Node Boron + - name: rhel71-power8-test-dubnium + display_name: RHEL 7.1 (POWER8) Node Dubnium run_on: rhel71-power8-test expansions: - NODE_LTS_NAME: boron - tasks: *ref_3 - - name: rhel71-power8-test-argon - display_name: RHEL 7.1 (POWER8) Node Argon + NODE_LTS_NAME: dubnium + tasks: *ref_2 + - name: rhel71-power8-test-erbium + display_name: RHEL 7.1 (POWER8) Node Erbium run_on: rhel71-power8-test expansions: - NODE_LTS_NAME: argon - tasks: *ref_3 - - name: suse12-x86-64-test-dubnium - display_name: SUSE 12 (x86_64) Node Dubnium - run_on: suse12-test - expansions: - NODE_LTS_NAME: dubnium - tasks: *ref_3 + NODE_LTS_NAME: erbium + tasks: *ref_2 - name: 
suse12-x86-64-test-carbon display_name: SUSE 12 (x86_64) Node Carbon run_on: suse12-test expansions: NODE_LTS_NAME: carbon - tasks: *ref_3 - - name: suse12-x86-64-test-boron - display_name: SUSE 12 (x86_64) Node Boron + tasks: *ref_2 + - name: suse12-x86-64-test-dubnium + display_name: SUSE 12 (x86_64) Node Dubnium run_on: suse12-test expansions: - NODE_LTS_NAME: boron - tasks: *ref_3 - - name: suse12-x86-64-test-argon - display_name: SUSE 12 (x86_64) Node Argon + NODE_LTS_NAME: dubnium + tasks: *ref_2 + - name: suse12-x86-64-test-erbium + display_name: SUSE 12 (x86_64) Node Erbium run_on: suse12-test expansions: - NODE_LTS_NAME: argon - tasks: *ref_3 - - name: ubuntu-14.04-dubnium - display_name: Ubuntu 14.04 Node Dubnium + NODE_LTS_NAME: erbium + tasks: *ref_2 + - name: ubuntu-14.04-carbon + display_name: Ubuntu 14.04 Node Carbon run_on: ubuntu1404-test expansions: - NODE_LTS_NAME: dubnium - tasks: &ref_4 + NODE_LTS_NAME: carbon + tasks: &ref_3 - test-4.0-server - test-4.0-replica_set - test-4.0-sharded_cluster - - test-4.0-server-unified - - test-4.0-replica_set-unified - - test-4.0-sharded_cluster-unified - test-3.6-server - test-3.6-replica_set - test-3.6-sharded_cluster - - test-3.6-server-unified - - test-3.6-replica_set-unified - - test-3.6-sharded_cluster-unified - test-3.4-server - test-3.4-replica_set - test-3.4-sharded_cluster - - test-3.4-server-unified - - test-3.4-replica_set-unified - - test-3.4-sharded_cluster-unified - test-3.2-server - test-3.2-replica_set - test-3.2-sharded_cluster - - test-3.2-server-unified - - test-3.2-replica_set-unified - - test-3.2-sharded_cluster-unified - test-3.0-server - test-3.0-replica_set - test-3.0-sharded_cluster - - test-3.0-server-unified - - test-3.0-replica_set-unified - - test-3.0-sharded_cluster-unified - test-2.6-server - test-2.6-replica_set - test-2.6-sharded_cluster - - test-2.6-server-unified - - test-2.6-replica_set-unified - - test-2.6-sharded_cluster-unified - - name: ubuntu-14.04-carbon - display_name: Ubuntu 14.04 Node Carbon - run_on: ubuntu1404-test - expansions: - NODE_LTS_NAME: carbon - tasks: *ref_4 - - name: ubuntu-14.04-boron - display_name: Ubuntu 14.04 Node Boron + - name: ubuntu-14.04-dubnium + display_name: Ubuntu 14.04 Node Dubnium run_on: ubuntu1404-test expansions: - NODE_LTS_NAME: boron - tasks: *ref_4 - - name: ubuntu-14.04-argon - display_name: Ubuntu 14.04 Node Argon + NODE_LTS_NAME: dubnium + tasks: *ref_3 + - name: ubuntu-14.04-erbium + display_name: Ubuntu 14.04 Node Erbium run_on: ubuntu1404-test expansions: - NODE_LTS_NAME: argon - tasks: *ref_4 - - name: ubuntu-16.04-dubnium - display_name: Ubuntu 16.04 Node Dubnium - run_on: ubuntu1604-test - expansions: - NODE_LTS_NAME: dubnium - CLIENT_ENCRYPTION: true + NODE_LTS_NAME: erbium tasks: *ref_3 - name: ubuntu-16.04-carbon display_name: Ubuntu 16.04 Node Carbon @@ -1098,96 +804,86 @@ buildvariants: expansions: NODE_LTS_NAME: carbon CLIENT_ENCRYPTION: true - tasks: *ref_3 - - name: ubuntu-16.04-boron - display_name: Ubuntu 16.04 Node Boron + tasks: *ref_2 + - name: ubuntu-16.04-dubnium + display_name: Ubuntu 16.04 Node Dubnium run_on: ubuntu1604-test expansions: - NODE_LTS_NAME: boron + NODE_LTS_NAME: dubnium CLIENT_ENCRYPTION: true - tasks: *ref_3 - - name: ubuntu1604-arm64-small-dubnium - display_name: Ubuntu 16.04 (ARM64) Node Dubnium - run_on: ubuntu1604-arm64-small + tasks: *ref_2 + - name: ubuntu-16.04-erbium + display_name: Ubuntu 16.04 Node Erbium + run_on: ubuntu1604-test expansions: - NODE_LTS_NAME: dubnium - tasks: *ref_1 + NODE_LTS_NAME: 
erbium + CLIENT_ENCRYPTION: true + tasks: *ref_2 - name: ubuntu1604-arm64-small-carbon display_name: Ubuntu 16.04 (ARM64) Node Carbon run_on: ubuntu1604-arm64-small expansions: NODE_LTS_NAME: carbon - tasks: *ref_1 - - name: ubuntu1604-arm64-small-boron - display_name: Ubuntu 16.04 (ARM64) Node Boron + tasks: *ref_0 + - name: ubuntu1604-arm64-small-dubnium + display_name: Ubuntu 16.04 (ARM64) Node Dubnium run_on: ubuntu1604-arm64-small expansions: - NODE_LTS_NAME: boron - tasks: *ref_1 - - name: ubuntu1604-arm64-small-argon - display_name: Ubuntu 16.04 (ARM64) Node Argon + NODE_LTS_NAME: dubnium + tasks: *ref_0 + - name: ubuntu1604-arm64-small-erbium + display_name: Ubuntu 16.04 (ARM64) Node Erbium run_on: ubuntu1604-arm64-small expansions: - NODE_LTS_NAME: argon - tasks: *ref_1 - - name: ubuntu1604-power8-test-dubnium - display_name: Ubuntu 16.04 (POWER8) Node Dubnium - run_on: ubuntu1604-power8-test - expansions: - NODE_LTS_NAME: dubnium - tasks: *ref_1 + NODE_LTS_NAME: erbium + tasks: *ref_0 - name: ubuntu1604-power8-test-carbon display_name: Ubuntu 16.04 (POWER8) Node Carbon run_on: ubuntu1604-power8-test expansions: NODE_LTS_NAME: carbon - tasks: *ref_1 - - name: ubuntu1604-power8-test-boron - display_name: Ubuntu 16.04 (POWER8) Node Boron + tasks: *ref_0 + - name: ubuntu1604-power8-test-dubnium + display_name: Ubuntu 16.04 (POWER8) Node Dubnium run_on: ubuntu1604-power8-test expansions: - NODE_LTS_NAME: boron - tasks: *ref_1 - - name: ubuntu1604-power8-test-argon - display_name: Ubuntu 16.04 (POWER8) Node Argon + NODE_LTS_NAME: dubnium + tasks: *ref_0 + - name: ubuntu1604-power8-test-erbium + display_name: Ubuntu 16.04 (POWER8) Node Erbium run_on: ubuntu1604-power8-test expansions: - NODE_LTS_NAME: argon - tasks: *ref_1 - - name: ubuntu1804-arm64-test-dubnium - display_name: Ubuntu 18.04 (ARM64) Node Dubnium + NODE_LTS_NAME: erbium + tasks: *ref_0 + - name: ubuntu1804-arm64-test-carbon + display_name: Ubuntu 18.04 (ARM64) Node Carbon run_on: ubuntu1804-arm64-test expansions: - NODE_LTS_NAME: dubnium - tasks: &ref_5 + NODE_LTS_NAME: carbon + tasks: &ref_4 - test-latest-server - test-latest-replica_set - test-latest-sharded_cluster - - test-latest-server-unified - - test-latest-replica_set-unified - - test-latest-sharded_cluster-unified - test-4.2-server - test-4.2-replica_set - test-4.2-sharded_cluster - - test-4.2-server-unified - - test-4.2-replica_set-unified - - test-4.2-sharded_cluster-unified - test-atlas-connectivity - - name: ubuntu1804-arm64-test-carbon - display_name: Ubuntu 18.04 (ARM64) Node Carbon + - name: ubuntu1804-arm64-test-dubnium + display_name: Ubuntu 18.04 (ARM64) Node Dubnium run_on: ubuntu1804-arm64-test expansions: - NODE_LTS_NAME: carbon - tasks: *ref_5 - - name: ubuntu1804-arm64-test-boron - display_name: Ubuntu 18.04 (ARM64) Node Boron + NODE_LTS_NAME: dubnium + tasks: *ref_4 + - name: ubuntu1804-arm64-test-erbium + display_name: Ubuntu 18.04 (ARM64) Node Erbium run_on: ubuntu1804-arm64-test expansions: - NODE_LTS_NAME: boron - tasks: *ref_5 - - name: ubuntu1804-arm64-test-argon - display_name: Ubuntu 18.04 (ARM64) Node Argon - run_on: ubuntu1804-arm64-test + NODE_LTS_NAME: erbium + tasks: *ref_4 + - name: ubuntu1804-test-mongodb-aws + display_name: MONGODB-AWS Auth test + run_on: ubuntu1804-test expansions: - NODE_LTS_NAME: argon - tasks: *ref_5 + NODE_LTS_NAME: carbon + tasks: + - aws-auth-test diff --git a/.evergreen/config.yml.in b/.evergreen/config.yml.in index e43e3eea6f9..b9a9cc32a21 100644 --- a/.evergreen/config.yml.in +++ 
b/.evergreen/config.yml.in
@@ -85,7 +85,8 @@ functions:
         # If this was a patch build, doing a fresh clone would not actually test the patch
         cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS
       else
-        git clone git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS
+        # git clone git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS
+        git clone git://github.com/bazile-clyde/drivers-evergreen-tools.git $DRIVERS_TOOLS
       fi
       echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config
@@ -94,7 +95,7 @@ functions:
       params:
         script: |
           ${PREPARE_SHELL}
-          MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh
+          MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} ORCHESTRATION_FILE=${ORCHESTRATION_FILE} bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh
     # run-orchestration generates expansion file with the MONGODB_URI for the cluster
     - command: expansions.update
       params:
@@ -186,6 +187,179 @@ functions:
         # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does)
         NODE_LTS_NAME='${NODE_LTS_NAME}' ATLAS_REPL='${atlas_repl}' ATLAS_SHRD='${atlas_shrd}' ATLAS_FREE='${atlas_free}' ATLAS_TLS11='${atlas_tls11}' ATLAS_TLS12='${atlas_tls12}' bash ${PROJECT_DIRECTORY}/.evergreen/run-atlas-tests.sh
+  "add aws auth variables to file":
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        silent: true
+        script: |
+          cat <<EOF > ${DRIVERS_TOOLS}/.evergreen/auth_aws/aws_e2e_setup.json
+          {
+            "iam_auth_ecs_account" : "${iam_auth_ecs_account}",
+            "iam_auth_ecs_secret_access_key" : "${iam_auth_ecs_secret_access_key}",
+            "iam_auth_ecs_account_arn": "arn:aws:iam::557821124784:user/authtest_fargate_user",
+            "iam_auth_ecs_cluster": "${iam_auth_ecs_cluster}",
+            "iam_auth_ecs_task_definition": "${iam_auth_ecs_task_definition}",
+            "iam_auth_ecs_subnet_a": "${iam_auth_ecs_subnet_a}",
+            "iam_auth_ecs_subnet_b": "${iam_auth_ecs_subnet_b}",
+            "iam_auth_ecs_security_group": "${iam_auth_ecs_security_group}",
+            "iam_auth_assume_aws_account" : "${iam_auth_assume_aws_account}",
+            "iam_auth_assume_aws_secret_access_key" : "${iam_auth_assume_aws_secret_access_key}",
+            "iam_auth_assume_role_name" : "${iam_auth_assume_role_name}",
+            "iam_auth_ec2_instance_account" : "${iam_auth_ec2_instance_account}",
+            "iam_auth_ec2_instance_secret_access_key" : "${iam_auth_ec2_instance_secret_access_key}",
+            "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}"
+          }
+          EOF
+
+  run aws auth test with regular aws credentials:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/auth_aws
+          ${MONGODB_BINARIES}/mongo aws_e2e_regular_aws.js
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        silent: true
+        script: |
+          cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh"
+          alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"'
+          USER=$(urlencode ${iam_auth_ecs_account})
+          PASS=$(urlencode ${iam_auth_ecs_secret_access_key})
+          export MONGODB_URI="mongodb://$USER:$PASS@localhost:27017/aws?authMechanism=MONGODB-AWS"
+          EOF
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+
+  run aws auth test with assume role credentials:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/auth_aws
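+          # aws_e2e_assume_role.js obtains temporary assume-role credentials and saves them to creds.json, which the next step reads with jq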
+          ${MONGODB_BINARIES}/mongo aws_e2e_assume_role.js
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        silent: true
+        script: |
+          cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh"
+          alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"'
+          USER=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          USER=$(urlencode $USER)
+          PASS=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          PASS=$(urlencode $PASS)
+          SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          SESSION_TOKEN=$(urlencode $SESSION_TOKEN)
+          export MONGODB_URI="mongodb://$USER:$PASS@localhost:27017/aws?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:$SESSION_TOKEN"
+          EOF
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+
+  run aws auth test with aws EC2 credentials:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/auth_aws
+          ${MONGODB_BINARIES}/mongo aws_e2e_ec2.js
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+
+  run aws auth test with aws credentials as environment variables:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        silent: true
+        script: |
+          cat <<EOF > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh"
+          export AWS_ACCESS_KEY_ID=${iam_auth_ecs_account}
+          export AWS_SECRET_ACCESS_KEY=${iam_auth_ecs_secret_access_key}
+          export MONGODB_URI="mongodb://localhost:27017/aws?authMechanism=MONGODB-AWS"
+          EOF
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+
+  run aws auth test with aws credentials and session token as environment variables:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        silent: true
+        script: |
+          cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh"
+          export AWS_ACCESS_KEY_ID=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          export AWS_SECRET_ACCESS_KEY=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          export AWS_SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json)
+          export MONGODB_URI="mongodb://localhost:27017/aws?authMechanism=MONGODB-AWS"
+          EOF
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-aws-test.sh
+
+  run aws ECS auth test:
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: src
+        script: |
+          ${PREPARE_SHELL}
+          AUTH_AWS_DIR=${DRIVERS_TOOLS}/.evergreen/auth_aws
+          ECS_SRC_DIR=$AUTH_AWS_DIR/src
+
+          # fix issue with `TestData` in SERVER-46340
+          sed -i '1s+^+TestData = {};\n+' $AUTH_AWS_DIR/lib/ecs_hosted_test.js
+
+          # pack up project directory to ssh it to the container
+          mkdir -p $ECS_SRC_DIR/.evergreen
+          cp $PROJECT_DIRECTORY/.evergreen/run-mongodb-aws-ecs-test.sh $ECS_SRC_DIR/.evergreen
+          tar -czf $ECS_SRC_DIR/src.tgz -C $PROJECT_DIRECTORY .
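+
+          # hand the binaries and source paths to aws_e2e_ecs.js through a generated setup.js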
+          cd $AUTH_AWS_DIR
+          cat <<EOF > setup.js
+          const mongo_binaries = "$MONGODB_BINARIES";
+          const project_dir = "$ECS_SRC_DIR";
+          EOF
+
+          cat setup.js
+          mongo --nodb setup.js aws_e2e_ecs.js
+
 pre:
   - func: "fetch source"
   - func: "prepare resources"
diff --git a/.evergreen/generate_evergreen_tasks.js b/.evergreen/generate_evergreen_tasks.js
index e0190419b5b..7a18f700b35 100644
--- a/.evergreen/generate_evergreen_tasks.js
+++ b/.evergreen/generate_evergreen_tasks.js
@@ -6,43 +6,16 @@ const yaml = require('js-yaml');
 const LATEST_EFFECTIVE_VERSION = '5.0';
 const MONGODB_VERSIONS = ['latest', '4.2', '4.0', '3.6', '3.4', '3.2', '3.0', '2.6'];
-const NODE_VERSIONS = ['dubnium', 'carbon', 'boron', 'argon'];
-const TOPOLOGIES = ['server', 'replica_set', 'sharded_cluster'].concat([
-  'server-unified',
-  'replica_set-unified',
-  'sharded_cluster-unified'
-]);
-
+const NODE_VERSIONS = ['carbon', 'dubnium', 'erbium'];
+const TOPOLOGIES = ['server', 'replica_set', 'sharded_cluster'];
 const OPERATING_SYSTEMS = [
   // Debian
-  {
-    name: 'debian71-test',
-    display_name: 'Debian 7.1',
-    run_on: 'debian71-test',
-    mongoVersion: '<4.0',
-    nodeVersions: ['argon', 'boron']
-  },
   {
     name: 'debian81-test',
     display_name: 'Debian 8.1',
     run_on: 'debian81-test',
     mongoVersion: '>=3.4 <4.2'
   },
-  // TODO: once we know how to test debian 9.x
-  // {
-  //   name: 'debian91-test',
-  //   display_name: 'Debian 9.1',
-  //   run_on: 'debian91-test',
-  //   mongoVersion: '>=4.0'
-  // },
-  // Amazon Linux
-  {
-    name: 'linux-64-amzn-test',
-    display_name: 'Amazon Linux (Enterprise)',
-    run_on: 'linux-64-amzn-test',
-    mongoVersion: '<4.0',
-    nodeVersions: ['argon', 'boron']
-  },
   // macos
   {
     name: 'macos-1014',
@@ -81,8 +54,7 @@ const OPERATING_SYSTEMS = [
     display_name: 'Ubuntu 16.04',
     run_on: 'ubuntu1604-test',
     mongoVersion: '>=3.2',
-    clientEncryption: true,
-    nodeVersions: ['dubnium', 'carbon', 'boron']
+    clientEncryption: true
   },
   {
     name: 'ubuntu1604-arm64-small',
@@ -102,15 +74,6 @@ const OPERATING_SYSTEMS = [
     run_on: 'ubuntu1804-arm64-test',
     mongoVersion: '>=4.2'
   }
-
-  // reenable when these are actually running 7.2, or we release a 7.4 rpm
-  // {
-  //   name: 'rhel72-zseries-test',
-  //   display_name: 'RHEL 7.2 (zSeries)',
-  //   run_on: 'rhel72-zseries-test',
-  //   mongoVersion: '>=3.4'
-  // },
-  // Windows.
reenable this when nvm supports windows, or we settle on an alternative tool // { // name: 'windows-64-vs2010-test', @@ -131,7 +94,7 @@ const OPERATING_SYSTEMS = [ Object.assign( { mongoVersion: '>=2.6', - nodeVersion: 'argon', + nodeVersion: 'carbon', auth: false }, osConfig @@ -143,10 +106,6 @@ const TASKS = []; function makeTask({ mongoVersion, topology }) { let topologyForTest = topology; let runTestsCommand = { func: 'run tests' }; - if (topology.indexOf('-unified') !== -1) { - topologyForTest = topology.split('-unified')[0]; - runTestsCommand = { func: 'run tests', vars: { UNIFIED: 1 } }; - } return { name: `test-${mongoVersion}-${topology}`, @@ -189,6 +148,29 @@ TASKS.push({ ] }); +TASKS.push({ + name: 'aws-auth-test', + commands: [ + { func: 'install dependencies' }, + { + func: 'bootstrap mongo-orchestration', + vars: { + AUTH: 'auth', + ORCHESTRATION_FILE: 'auth-aws.json', + TOPOLOGY: 'server' + }, + }, + { func: 'add aws auth variables to file' }, + { func: 'run aws auth test with regular aws credentials' }, + { func: 'run aws auth test with assume role credentials' }, + { func: 'run aws auth test with aws EC2 credentials' }, + { func: 'run aws auth test with aws credentials as environment variables' }, + { func: 'run aws auth test with aws credentials and session token as environment variables' }, + { func: 'run aws ECS auth test' } + ] +}); + + const BUILD_VARIANTS = []; const getTaskList = (() => { @@ -242,6 +224,17 @@ OPERATING_SYSTEMS.forEach( } ); +// special case for MONGODB-AWS authentication +BUILD_VARIANTS.push({ + name: 'ubuntu1804-test-mongodb-aws', + display_name: 'MONGODB-AWS Auth test', + run_on: 'ubuntu1804-test', + expansions: { + NODE_LTS_NAME: 'carbon' + }, + tasks: ['aws-auth-test'] +}) + const fileData = yaml.safeLoad(fs.readFileSync(`${__dirname}/config.yml.in`, 'utf8')); fileData.tasks = (fileData.tasks || []).concat(TASKS); diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh new file mode 100755 index 00000000000..fe47265ec0d --- /dev/null +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +MONGODB_URI="$1" +PROJECT_DIRECTORY="$(pwd)/src" + +# untar packed archive +cd $PROJECT_DIRECTORY +tar -xzf src.tgz . + +# load node.js +set +x +export NVM_DIR="${PROJECT_DIRECTORY}/node-artifacts/nvm" +[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" +set -x + +# run the tests +MONGODB_URI=$MONGODB_URI MONGODB_UNIFIED_TOPOLOGY=1 npx mocha test/functional/mongodb_aws.test.js diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh new file mode 100755 index 00000000000..db4f751f390 --- /dev/null +++ b/.evergreen/run-mongodb-aws-test.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +MONGODB_URI=${MONGODB_URI:-} + +# ensure no secrets are printed in log files +set +x + +# load node.js environment +export NVM_DIR="${PROJECT_DIRECTORY}/node-artifacts/nvm" +[ -s "$NVM_DIR/nvm.sh" ] && \. 
"$NVM_DIR/nvm.sh" + +# the default connection string, may be overridden by the environment script +export MONGODB_URI="mongodb://localhost:27017/aws?authMechanism=MONGODB-AWS" + +# load the script +shopt -s expand_aliases # needed for `urlencode` alias +[ -s "$PROJECT_DIRECTORY/prepare_mongodb_aws.sh" ] && source "$PROJECT_DIRECTORY/prepare_mongodb_aws.sh" + +# revert to show test output +set -x + +MONGODB_UNIFIED_TOPOLOGY=1 npx mocha test/functional/mongodb_aws.test.js diff --git a/docs/reference/content/reference/ecmascriptnext/connecting.md b/docs/reference/content/reference/ecmascriptnext/connecting.md index ada4de5d9aa..f46aa912400 100644 --- a/docs/reference/content/reference/ecmascriptnext/connecting.md +++ b/docs/reference/content/reference/ecmascriptnext/connecting.md @@ -14,7 +14,6 @@ The MongoClient connection method returns a Promise if no callback is passed to ```js const MongoClient = require('mongodb').MongoClient; -const assert = require('assert'); (async function() { // Connection URL diff --git a/index.js b/index.js index 4e9e6359e86..daf1329fc60 100644 --- a/index.js +++ b/index.js @@ -1,19 +1,19 @@ 'use strict'; - -// Core module -const core = require('./lib/core'); +const error = require('./lib/error'); const Instrumentation = require('./lib/apm'); +const BSON = require('./lib/utils').retrieveBSON(); +const { Cursor, AggregationCursor, CommandCursor } = require('./lib/cursor'); // Set up the connect function const connect = require('./lib/mongo_client').connect; // Expose error class -connect.MongoError = core.MongoError; -connect.MongoNetworkError = core.MongoNetworkError; -connect.MongoTimeoutError = core.MongoTimeoutError; -connect.MongoServerSelectionError = core.MongoServerSelectionError; -connect.MongoParseError = core.MongoParseError; -connect.MongoWriteConcernError = core.MongoWriteConcernError; +connect.MongoError = error.MongoError; +connect.MongoNetworkError = error.MongoNetworkError; +connect.MongoTimeoutError = error.MongoTimeoutError; +connect.MongoServerSelectionError = error.MongoServerSelectionError; +connect.MongoParseError = error.MongoParseError; +connect.MongoWriteConcernError = error.MongoWriteConcernError; connect.MongoBulkWriteError = require('./lib/bulk/common').BulkWriteError; connect.BulkWriteError = connect.MongoBulkWriteError; @@ -22,37 +22,31 @@ connect.Admin = require('./lib/admin'); connect.MongoClient = require('./lib/mongo_client'); connect.Db = require('./lib/db'); connect.Collection = require('./lib/collection'); -connect.Server = require('./lib/topologies/server'); -connect.ReplSet = require('./lib/topologies/replset'); -connect.Mongos = require('./lib/topologies/mongos'); -connect.ReadPreference = core.ReadPreference; +connect.ReadPreference = require('./lib/read_preference'); connect.GridStore = require('./lib/gridfs/grid_store'); connect.Chunk = require('./lib/gridfs/chunk'); -connect.Logger = core.Logger; -connect.AggregationCursor = require('./lib/aggregation_cursor'); -connect.CommandCursor = require('./lib/command_cursor'); -connect.Cursor = require('./lib/cursor'); +connect.Logger = require('./lib/logger'); +connect.AggregationCursor = AggregationCursor; +connect.CommandCursor = CommandCursor; +connect.Cursor = Cursor; connect.GridFSBucket = require('./lib/gridfs-stream'); -// Exported to be used in tests not to be used anywhere else -connect.CoreServer = core.Server; -connect.CoreConnection = core.Connection; // BSON types exported -connect.Binary = core.BSON.Binary; -connect.Code = core.BSON.Code; -connect.Map = 
core.BSON.Map; -connect.DBRef = core.BSON.DBRef; -connect.Double = core.BSON.Double; -connect.Int32 = core.BSON.Int32; -connect.Long = core.BSON.Long; -connect.MinKey = core.BSON.MinKey; -connect.MaxKey = core.BSON.MaxKey; -connect.ObjectID = core.BSON.ObjectID; -connect.ObjectId = core.BSON.ObjectID; -connect.Symbol = core.BSON.Symbol; -connect.Timestamp = core.BSON.Timestamp; -connect.BSONRegExp = core.BSON.BSONRegExp; -connect.Decimal128 = core.BSON.Decimal128; +connect.Binary = BSON.Binary; +connect.Code = BSON.Code; +connect.Map = BSON.Map; +connect.DBRef = BSON.DBRef; +connect.Double = BSON.Double; +connect.Int32 = BSON.Int32; +connect.Long = BSON.Long; +connect.MinKey = BSON.MinKey; +connect.MaxKey = BSON.MaxKey; +connect.ObjectID = BSON.ObjectID; +connect.ObjectId = BSON.ObjectID; +connect.Symbol = BSON.Symbol; +connect.Timestamp = BSON.Timestamp; +connect.BSONRegExp = BSON.BSONRegExp; +connect.Decimal128 = BSON.Decimal128; // Add connect method connect.connect = connect; diff --git a/lib/admin.js b/lib/admin.js index 716844bb02d..d2f7d990f28 100644 --- a/lib/admin.js +++ b/lib/admin.js @@ -232,6 +232,7 @@ Admin.prototype.removeUser = function(username, options, callback) { * * @param {string} collectionName The name of the collection to validate. * @param {object} [options] Optional settings. + * @param {boolean} [options.background] Validates a collection in the background, without interrupting read or write traffic (only in MongoDB 4.4+) * @param {ClientSession} [options.session] optional session to use for this operation * @param {Admin~resultCallback} [callback] The command result callback. * @return {Promise} returns Promise if no callback passed diff --git a/lib/bulk/common.js b/lib/bulk/common.js index fea6c18e34d..abc57a48576 100644 --- a/lib/bulk/common.js +++ b/lib/bulk/common.js @@ -1,16 +1,16 @@ 'use strict'; -const Long = require('../core').BSON.Long; -const MongoError = require('../core').MongoError; -const ObjectID = require('../core').BSON.ObjectID; -const BSON = require('../core').BSON; -const MongoWriteConcernError = require('../core').MongoWriteConcernError; -const toError = require('../utils').toError; -const handleCallback = require('../utils').handleCallback; -const applyRetryableWrites = require('../utils').applyRetryableWrites; -const applyWriteConcern = require('../utils').applyWriteConcern; -const executeLegacyOperation = require('../utils').executeLegacyOperation; -const isPromiseLike = require('../utils').isPromiseLike; +const BSON = require('../utils').retrieveBSON(); +const { Long, ObjectID } = BSON; +const { MongoError, MongoWriteConcernError } = require('../error'); +const { + toError, + handleCallback, + applyWriteConcern, + applyRetryableWrites, + executeLegacyOperation, + isPromiseLike +} = require('../utils'); // Error codes const WRITE_CONCERN_ERROR = 64; diff --git a/lib/bulk/ordered.js b/lib/bulk/ordered.js index a976bed2106..ec63c1e4d40 100644 --- a/lib/bulk/ordered.js +++ b/lib/bulk/ordered.js @@ -1,11 +1,7 @@ 'use strict'; -const common = require('./common'); -const BulkOperationBase = common.BulkOperationBase; -const Batch = common.Batch; -const bson = common.bson; -const utils = require('../utils'); -const toError = utils.toError; +const { BulkOperationBase, Batch, bson, INSERT } = require('./common'); +const { toError } = require('../utils'); /** * Add to internal list of Operations @@ -59,7 +55,7 @@ function addToOperationsList(bulkOperation, docType, document) { bulkOperation.s.currentBatchSizeBytes = 0; } - if (docType === 
common.INSERT) { + if (docType === INSERT) { bulkOperation.s.bulkResult.insertedIds.push({ index: bulkOperation.s.currentIndex, _id: document._id diff --git a/lib/bulk/unordered.js b/lib/bulk/unordered.js index 37a278370e0..9ef33b5898d 100644 --- a/lib/bulk/unordered.js +++ b/lib/bulk/unordered.js @@ -1,11 +1,7 @@ 'use strict'; -const common = require('./common'); -const BulkOperationBase = common.BulkOperationBase; -const Batch = common.Batch; -const bson = common.bson; -const utils = require('../utils'); -const toError = utils.toError; +const { BulkOperationBase, Batch, bson, INSERT, UPDATE, REMOVE } = require('./common'); +const { toError } = require('../utils'); /** * Add to internal list of Operations @@ -31,11 +27,11 @@ function addToOperationsList(bulkOperation, docType, document) { // Holds the current batch bulkOperation.s.currentBatch = null; // Get the right type of batch - if (docType === common.INSERT) { + if (docType === INSERT) { bulkOperation.s.currentBatch = bulkOperation.s.currentInsertBatch; - } else if (docType === common.UPDATE) { + } else if (docType === UPDATE) { bulkOperation.s.currentBatch = bulkOperation.s.currentUpdateBatch; - } else if (docType === common.REMOVE) { + } else if (docType === REMOVE) { bulkOperation.s.currentBatch = bulkOperation.s.currentRemoveBatch; } @@ -74,15 +70,15 @@ function addToOperationsList(bulkOperation, docType, document) { bulkOperation.s.currentIndex = bulkOperation.s.currentIndex + 1; // Save back the current Batch to the right type - if (docType === common.INSERT) { + if (docType === INSERT) { bulkOperation.s.currentInsertBatch = bulkOperation.s.currentBatch; bulkOperation.s.bulkResult.insertedIds.push({ index: bulkOperation.s.bulkResult.insertedIds.length, _id: document._id }); - } else if (docType === common.UPDATE) { + } else if (docType === UPDATE) { bulkOperation.s.currentUpdateBatch = bulkOperation.s.currentBatch; - } else if (docType === common.REMOVE) { + } else if (docType === REMOVE) { bulkOperation.s.currentRemoveBatch = bulkOperation.s.currentBatch; } diff --git a/lib/change_stream.js b/lib/change_stream.js index 4cc779debfa..9021d787639 100644 --- a/lib/change_stream.js +++ b/lib/change_stream.js @@ -1,11 +1,9 @@ 'use strict'; const EventEmitter = require('events'); -const isResumableError = require('./error').isResumableError; -const MongoError = require('./core').MongoError; -const Cursor = require('./cursor'); -const relayEvents = require('./core/utils').relayEvents; -const maxWireVersion = require('./core/utils').maxWireVersion; +const { MongoError, isResumableError } = require('./error'); +const { Cursor } = require('./cursor'); +const { relayEvents, maxWireVersion } = require('./utils'); const AggregateOperation = require('./operations/aggregate'); const CHANGE_STREAM_OPTIONS = ['resumeAfter', 'startAfter', 'startAtOperationTime', 'fullDocument']; diff --git a/lib/core/auth/auth_provider.js b/lib/cmap/auth/auth_provider.js similarity index 98% rename from lib/core/auth/auth_provider.js rename to lib/cmap/auth/auth_provider.js index 7597b72804d..1b4ca7c0f56 100644 --- a/lib/core/auth/auth_provider.js +++ b/lib/cmap/auth/auth_provider.js @@ -1,6 +1,6 @@ 'use strict'; -const MongoError = require('../error').MongoError; +const { MongoError } = require('../../error'); /** * Creates a new AuthProvider, which dictates how to authenticate for a given diff --git a/lib/core/auth/defaultAuthProviders.js b/lib/cmap/auth/defaultAuthProviders.js similarity index 89% rename from lib/core/auth/defaultAuthProviders.js rename to 
lib/cmap/auth/defaultAuthProviders.js index fc5f4c28364..44878afa073 100644 --- a/lib/core/auth/defaultAuthProviders.js +++ b/lib/cmap/auth/defaultAuthProviders.js @@ -7,6 +7,7 @@ const GSSAPI = require('./gssapi'); const SSPI = require('./sspi'); const ScramSHA1 = require('./scram').ScramSHA1; const ScramSHA256 = require('./scram').ScramSHA256; +const MongoDBAWS = require('./mongodb_aws'); /** * Returns the default authentication providers. @@ -16,6 +17,7 @@ const ScramSHA256 = require('./scram').ScramSHA256; */ function defaultAuthProviders(bson) { return { + 'mongodb-aws': new MongoDBAWS(bson), mongocr: new MongoCR(bson), x509: new X509(bson), plain: new Plain(bson), diff --git a/lib/core/auth/gssapi.js b/lib/cmap/auth/gssapi.js similarity index 98% rename from lib/core/auth/gssapi.js rename to lib/cmap/auth/gssapi.js index 936fb65aa72..6a8070a5cc9 100644 --- a/lib/core/auth/gssapi.js +++ b/lib/cmap/auth/gssapi.js @@ -1,7 +1,7 @@ 'use strict'; const AuthProvider = require('./auth_provider').AuthProvider; -const retrieveKerberos = require('../utils').retrieveKerberos; +const retrieveKerberos = require('../../utils').retrieveKerberos; let kerberos; /** diff --git a/lib/core/auth/mongo_credentials.js b/lib/cmap/auth/mongo_credentials.js similarity index 83% rename from lib/core/auth/mongo_credentials.js rename to lib/cmap/auth/mongo_credentials.js index 13c00b14f4d..83625a957fc 100644 --- a/lib/core/auth/mongo_credentials.js +++ b/lib/cmap/auth/mongo_credentials.js @@ -47,7 +47,21 @@ class MongoCredentials { this.password = options.password; this.source = options.source || options.db; this.mechanism = options.mechanism || 'default'; - this.mechanismProperties = options.mechanismProperties; + this.mechanismProperties = options.mechanismProperties || {}; + + if (this.mechanism.match(/MONGODB-AWS/i)) { + if (this.username == null && process.env.AWS_ACCESS_KEY_ID) { + this.username = process.env.AWS_ACCESS_KEY_ID; + } + + if (this.password == null && process.env.AWS_SECRET_ACCESS_KEY) { + this.password = process.env.AWS_SECRET_ACCESS_KEY; + } + + if (this.mechanismProperties.AWS_SESSION_TOKEN == null && process.env.AWS_SESSION_TOKEN) { + this.mechanismProperties.AWS_SESSION_TOKEN = process.env.AWS_SESSION_TOKEN; + } + } } /** diff --git a/lib/core/auth/mongocr.js b/lib/cmap/auth/mongocr.js similarity index 100% rename from lib/core/auth/mongocr.js rename to lib/cmap/auth/mongocr.js diff --git a/lib/cmap/auth/mongodb_aws.js b/lib/cmap/auth/mongodb_aws.js new file mode 100644 index 00000000000..ed706e82fa4 --- /dev/null +++ b/lib/cmap/auth/mongodb_aws.js @@ -0,0 +1,255 @@ +'use strict'; + +const http = require('http'); +const crypto = require('crypto'); +const url = require('url'); +const { AuthProvider } = require('./auth_provider'); +const { MongoCredentials } = require('./mongo_credentials'); +const { MongoError } = require('../../error'); +const { maxWireVersion } = require('../../utils'); + +let aws4; +try { + aws4 = require('aws4'); +} catch (e) { + // don't do anything; +} + +const ASCII_N = 110; +const AWS_RELATIVE_URI = 'http://169.254.170.2'; +const AWS_EC2_URI = 'http://169.254.169.254'; +const AWS_EC2_PATH = '/latest/meta-data/iam/security-credentials'; + +class MongoDBAWS extends AuthProvider { + _authenticateSingleConnection(sendAuthCommand, connection, credentials, callback) { + const username = credentials.username; + const password = credentials.password; + const db = credentials.source; + const token = credentials.mechanismProperties.AWS_SESSION_TOKEN; + const bson = this.bson; 
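+
+    // MONGODB-AWS is a two-step SASL conversation: saslStart sends a fresh
+    // 32-byte client nonce, and saslContinue answers the server's challenge
+    // with a signed STS GetCallerIdentity request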
+ + crypto.randomBytes(32, (err, nonce) => { + if (err) { + callback(err); + return; + } + + const saslStart = { + saslStart: 1, + mechanism: 'MONGODB-AWS', + payload: bson.serialize({ r: nonce, p: ASCII_N }) + }; + + sendAuthCommand(connection, `${db}.$cmd`, saslStart, (err, res) => { + if (err) return callback(err); + const serverResponse = bson.deserialize(res.payload.buffer); + const host = serverResponse.h; + const serverNonce = serverResponse.s.buffer; + if (serverNonce.length !== 64) { + callback( + new MongoError(`Invalid server nonce length ${serverNonce.length}, expected 64`) + ); + return; + } + + if (serverNonce.compare(nonce, 0, nonce.length, 0, nonce.length) !== 0) { + callback(new MongoError('Server nonce does not begin with client nonce')); + return; + } + + if (host.length < 1 || host.length > 255 || host.indexOf('..') !== -1) { + callback(new MongoError(`Server returned an invalid host: "${host}"`)); + return; + } + + const body = 'Action=GetCallerIdentity&Version=2011-06-15'; + const options = aws4.sign( + { + method: 'POST', + host, + region: deriveRegion(serverResponse.h), + service: 'sts', + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + 'Content-Length': body.length, + 'X-MongoDB-Server-Nonce': serverNonce.toString('base64'), + 'X-MongoDB-GS2-CB-Flag': 'n' + }, + path: '/', + body + }, + { + accessKeyId: username, + secretAccessKey: password, + token + } + ); + + const authorization = options.headers.Authorization; + const date = options.headers['X-Amz-Date']; + const payload = { a: authorization, d: date }; + if (token) { + payload.t = token; + } + + const saslContinue = { + saslContinue: 1, + conversationId: 1, + payload: bson.serialize(payload) + }; + + sendAuthCommand(connection, `${db}.$cmd`, saslContinue, err => { + if (err) return callback(err); + callback(); + }); + }); + }); + } + + auth(sendAuthCommand, connections, credentials, callback) { + const connection = connections[0]; + if (maxWireVersion(connection) < 9) { + callback(new MongoError('MONGODB-AWS authentication requires MongoDB version 4.4 or later')); + return; + } + + if (aws4 == null) { + callback( + new MongoError( + 'MONGODB-AWS authentication requires the `aws4` module, please install it as a dependency of your project' + ) + ); + + return; + } + + if (credentials.username == null) { + makeTempCredentials(credentials, (err, tempCredentials) => { + if (err) return callback(err); + super.auth(sendAuthCommand, connections, tempCredentials, callback); + }); + + return; + } + + super.auth(sendAuthCommand, connections, credentials, callback); + } +} + +function makeTempCredentials(credentials, callback) { + function done(creds) { + if (creds.AccessKeyId == null || creds.SecretAccessKey == null || creds.Token == null) { + callback(new MongoError('Could not obtain temporary MONGODB-AWS credentials')); + return; + } + + callback( + undefined, + new MongoCredentials({ + username: creds.AccessKeyId, + password: creds.SecretAccessKey, + source: credentials.source, + mechanism: 'MONGODB-AWS', + mechanismProperties: { + AWS_SESSION_TOKEN: creds.Token + } + }) + ); + } + + // If the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI + // is set then drivers MUST assume that it was set by an AWS ECS agent + if (process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI) { + request( + `${AWS_RELATIVE_URI}${process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI}`, + (err, res) => { + if (err) return callback(err); + done(res); + } + ); + + return; + } + + // Otherwise assume we are on an EC2 
instance + + // get a token + + request( + `${AWS_EC2_URI}/latest/api/token`, + { method: 'PUT', json: false, headers: { 'X-aws-ec2-metadata-token-ttl-seconds': 30 } }, + (err, token) => { + if (err) return callback(err); + + // get role name + request( + `${AWS_EC2_URI}/${AWS_EC2_PATH}`, + { json: false, headers: { 'X-aws-ec2-metadata-token': token } }, + (err, roleName) => { + if (err) return callback(err); + + // get temp credentials + request( + `${AWS_EC2_URI}/${AWS_EC2_PATH}/${roleName}`, + { headers: { 'X-aws-ec2-metadata-token': token } }, + (err, creds) => { + if (err) return callback(err); + done(creds); + } + ); + } + ); + } + ); +} + +function deriveRegion(host) { + const parts = host.split('.'); + if (parts.length === 1 || parts[1] === 'amazonaws') { + return 'us-east-1'; + } + + return parts[1]; +} + +function request(uri, options, callback) { + if (typeof options === 'function') { + callback = options; + options = {}; + } + + options = Object.assign( + { + method: 'GET', + timeout: 10000, + json: true + }, + url.parse(uri), + options + ); + + const req = http.request(options, res => { + res.setEncoding('utf8'); + + let data = ''; + res.on('data', d => (data += d)); + res.on('end', () => { + if (options.json === false) { + callback(undefined, data); + return; + } + + try { + const parsed = JSON.parse(data); + callback(undefined, parsed); + } catch (err) { + callback(new MongoError(`Invalid JSON response: "${data}"`)); + } + }); + }); + + req.on('error', err => callback(err)); + req.end(); +} + +module.exports = MongoDBAWS; diff --git a/lib/core/auth/plain.js b/lib/cmap/auth/plain.js similarity index 92% rename from lib/core/auth/plain.js rename to lib/cmap/auth/plain.js index 240de758ecd..ebc12d10f7c 100644 --- a/lib/core/auth/plain.js +++ b/lib/cmap/auth/plain.js @@ -1,6 +1,6 @@ 'use strict'; -const retrieveBSON = require('../connection/utils').retrieveBSON; +const retrieveBSON = require('../../utils').retrieveBSON; const AuthProvider = require('./auth_provider').AuthProvider; // TODO: can we get the Binary type from this.bson instead? 
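Taken together, the pieces above complete the MONGODB-AWS support: `MongoCredentials` now falls back to `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY`/`AWS_SESSION_TOKEN` from the environment, and `MongoDBAWS.auth` falls back further to the ECS relative URI and then the EC2 metadata service when no username is available. A minimal usage sketch (illustrative only; the host, database name, and client options are hypothetical and not part of this patch):

```js
// Connect using MONGODB-AWS; credentials are resolved by the driver at
// authentication time via the fallback chain described above.
const { MongoClient } = require('mongodb');

const client = new MongoClient('mongodb://localhost:27017/test?authMechanism=MONGODB-AWS', {
  useNewUrlParser: true,
  useUnifiedTopology: true
});

client.connect(err => {
  if (err) throw err;
  client.db('test').command({ ping: 1 }, (err, res) => {
    if (err) throw err;
    console.log('authenticated via MONGODB-AWS:', res.ok === 1);
    client.close();
  });
});
```

Because the URI carries no username or password, `MongoDBAWS.auth` takes the `makeTempCredentials` path shown above unless the AWS environment variables are set.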
diff --git a/lib/core/auth/scram.js b/lib/cmap/auth/scram.js similarity index 96% rename from lib/core/auth/scram.js rename to lib/cmap/auth/scram.js index 32fb4cd57b7..83ac25ff98e 100644 --- a/lib/core/auth/scram.js +++ b/lib/cmap/auth/scram.js @@ -1,10 +1,10 @@ 'use strict'; const crypto = require('crypto'); -const Buffer = require('safe-buffer').Buffer; -const retrieveBSON = require('../connection/utils').retrieveBSON; -const MongoError = require('../error').MongoError; -const AuthProvider = require('./auth_provider').AuthProvider; +const { Buffer } = require('safe-buffer'); +const { retrieveBSON } = require('../../utils'); +const { MongoError } = require('../../error'); +const { AuthProvider } = require('./auth_provider'); const BSON = retrieveBSON(); const Binary = BSON.Binary; @@ -183,7 +183,8 @@ class ScramSHA extends AuthProvider { saslStart: 1, mechanism, payload: new Binary(Buffer.concat([Buffer.from('n,,', 'utf8'), firstBare])), - autoAuthorize: 1 + autoAuthorize: 1, + options: { skipEmptyExchange: true } }; // Write the command on the connection diff --git a/lib/core/auth/sspi.js b/lib/cmap/auth/sspi.js similarity index 97% rename from lib/core/auth/sspi.js rename to lib/cmap/auth/sspi.js index 8a3f54485b0..3b38e5326f7 100644 --- a/lib/core/auth/sspi.js +++ b/lib/cmap/auth/sspi.js @@ -1,7 +1,7 @@ 'use strict'; const AuthProvider = require('./auth_provider').AuthProvider; -const retrieveKerberos = require('../utils').retrieveKerberos; +const retrieveKerberos = require('../../utils').retrieveKerberos; let kerberos; /** diff --git a/lib/core/auth/x509.js b/lib/cmap/auth/x509.js similarity index 100% rename from lib/core/auth/x509.js rename to lib/cmap/auth/x509.js diff --git a/lib/cmap/commands.js b/lib/cmap/commands.js new file mode 100644 index 00000000000..9f91420677f --- /dev/null +++ b/lib/cmap/commands.js @@ -0,0 +1,764 @@ +'use strict'; + +const ReadPreference = require('../read_preference'); +const { Long } = require('../utils').retrieveBSON(); +const { Buffer } = require('safe-buffer'); +const { databaseNamespace } = require('../utils'); +const { OP_QUERY, OP_GETMORE, OP_KILL_CURSORS, OP_MSG } = require('./wire_protocol/constants'); + +// Incrementing request id +let _requestId = 0; + +// Query flags +const OPTS_TAILABLE_CURSOR = 2; +const OPTS_SLAVE = 4; +const OPTS_OPLOG_REPLAY = 8; +const OPTS_NO_CURSOR_TIMEOUT = 16; +const OPTS_AWAIT_DATA = 32; +const OPTS_EXHAUST = 64; +const OPTS_PARTIAL = 128; + +// Response flags +const CURSOR_NOT_FOUND = 1; +const QUERY_FAILURE = 2; +const SHARD_CONFIG_STALE = 4; +const AWAIT_CAPABLE = 8; + +/************************************************************** + * QUERY + **************************************************************/ +class Query { + constructor(bson, ns, query, options) { + var self = this; + // Basic options needed to be passed in + if (ns == null) throw new Error('ns must be specified for query'); + if (query == null) throw new Error('query must be specified for query'); + + // Validate that we are not passing 0x00 in the collection name + if (ns.indexOf('\x00') !== -1) { + throw new Error('namespace cannot contain a null character'); + } + + // Basic options + this.bson = bson; + this.ns = ns; + this.query = query; + + // Additional options + this.numberToSkip = options.numberToSkip || 0; + this.numberToReturn = options.numberToReturn || 0; + this.returnFieldSelector = options.returnFieldSelector || null; + this.requestId = Query.getRequestId(); + + // special case for pre-3.2 find commands, delete ASAP +
this.pre32Limit = options.pre32Limit; + + // Serialization option + this.serializeFunctions = + typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false; + this.ignoreUndefined = + typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false; + this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16; + this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : true; + this.batchSize = self.numberToReturn; + + // Flags + this.tailable = false; + this.slaveOk = typeof options.slaveOk === 'boolean' ? options.slaveOk : false; + this.oplogReplay = false; + this.noCursorTimeout = false; + this.awaitData = false; + this.exhaust = false; + this.partial = false; + } + + // + // Assign a new request Id + incRequestId() { + this.requestId = _requestId++; + } + + // + // Assign a new request Id + nextRequestId() { + return _requestId + 1; + } + + static getRequestId() { + return ++_requestId; + } + + // + // Uses a single allocated buffer for the process, avoiding multiple memory allocations + toBin() { + var self = this; + var buffers = []; + var projection = null; + + // Set up the flags + var flags = 0; + if (this.tailable) { + flags |= OPTS_TAILABLE_CURSOR; + } + + if (this.slaveOk) { + flags |= OPTS_SLAVE; + } + + if (this.oplogReplay) { + flags |= OPTS_OPLOG_REPLAY; + } + + if (this.noCursorTimeout) { + flags |= OPTS_NO_CURSOR_TIMEOUT; + } + + if (this.awaitData) { + flags |= OPTS_AWAIT_DATA; + } + + if (this.exhaust) { + flags |= OPTS_EXHAUST; + } + + if (this.partial) { + flags |= OPTS_PARTIAL; + } + + // If batchSize is different to self.numberToReturn + if (self.batchSize !== self.numberToReturn) self.numberToReturn = self.batchSize; + + // Allocate write protocol header buffer + var header = Buffer.alloc( + 4 * 4 + // Header + 4 + // Flags + Buffer.byteLength(self.ns) + + 1 + // namespace + 4 + // numberToSkip + 4 // numberToReturn + ); + + // Add header to buffers + buffers.push(header); + + // Serialize the query + var query = self.bson.serialize(this.query, { + checkKeys: this.checkKeys, + serializeFunctions: this.serializeFunctions, + ignoreUndefined: this.ignoreUndefined + }); + + // Add query document + buffers.push(query); + + if (self.returnFieldSelector && Object.keys(self.returnFieldSelector).length > 0) { + // Serialize the projection document + projection = self.bson.serialize(this.returnFieldSelector, { + checkKeys: this.checkKeys, + serializeFunctions: this.serializeFunctions, + ignoreUndefined: this.ignoreUndefined + }); + // Add projection document + buffers.push(projection); + } + + // Total message size + var totalLength = header.length + query.length + (projection ? 
projection.length : 0); + + // Set up the index + var index = 4; + + // Write total document length + header[3] = (totalLength >> 24) & 0xff; + header[2] = (totalLength >> 16) & 0xff; + header[1] = (totalLength >> 8) & 0xff; + header[0] = totalLength & 0xff; + + // Write header information requestId + header[index + 3] = (this.requestId >> 24) & 0xff; + header[index + 2] = (this.requestId >> 16) & 0xff; + header[index + 1] = (this.requestId >> 8) & 0xff; + header[index] = this.requestId & 0xff; + index = index + 4; + + // Write header information responseTo + header[index + 3] = (0 >> 24) & 0xff; + header[index + 2] = (0 >> 16) & 0xff; + header[index + 1] = (0 >> 8) & 0xff; + header[index] = 0 & 0xff; + index = index + 4; + + // Write header information OP_QUERY + header[index + 3] = (OP_QUERY >> 24) & 0xff; + header[index + 2] = (OP_QUERY >> 16) & 0xff; + header[index + 1] = (OP_QUERY >> 8) & 0xff; + header[index] = OP_QUERY & 0xff; + index = index + 4; + + // Write header information flags + header[index + 3] = (flags >> 24) & 0xff; + header[index + 2] = (flags >> 16) & 0xff; + header[index + 1] = (flags >> 8) & 0xff; + header[index] = flags & 0xff; + index = index + 4; + + // Write collection name + index = index + header.write(this.ns, index, 'utf8') + 1; + header[index - 1] = 0; + + // Write header information flags numberToSkip + header[index + 3] = (this.numberToSkip >> 24) & 0xff; + header[index + 2] = (this.numberToSkip >> 16) & 0xff; + header[index + 1] = (this.numberToSkip >> 8) & 0xff; + header[index] = this.numberToSkip & 0xff; + index = index + 4; + + // Write header information flags numberToReturn + header[index + 3] = (this.numberToReturn >> 24) & 0xff; + header[index + 2] = (this.numberToReturn >> 16) & 0xff; + header[index + 1] = (this.numberToReturn >> 8) & 0xff; + header[index] = this.numberToReturn & 0xff; + index = index + 4; + + // Return the buffers + return buffers; + } +} + +/************************************************************** + * GETMORE + **************************************************************/ +class GetMore { + constructor(bson, ns, cursorId, opts) { + opts = opts || {}; + this.numberToReturn = opts.numberToReturn || 0; + this.requestId = _requestId++; + this.bson = bson; + this.ns = ns; + this.cursorId = cursorId; + } + + // + // Uses a single allocated buffer for the process, avoiding multiple memory allocations + toBin() { + var length = 4 + Buffer.byteLength(this.ns) + 1 + 4 + 8 + 4 * 4; + // Create command buffer + var index = 0; + // Allocate buffer + var _buffer = Buffer.alloc(length); + + // Write header information + // index = write32bit(index, _buffer, length); + _buffer[index + 3] = (length >> 24) & 0xff; + _buffer[index + 2] = (length >> 16) & 0xff; + _buffer[index + 1] = (length >> 8) & 0xff; + _buffer[index] = length & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, requestId); + _buffer[index + 3] = (this.requestId >> 24) & 0xff; + _buffer[index + 2] = (this.requestId >> 16) & 0xff; + _buffer[index + 1] = (this.requestId >> 8) & 0xff; + _buffer[index] = this.requestId & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, 0); + _buffer[index + 3] = (0 >> 24) & 0xff; + _buffer[index + 2] = (0 >> 16) & 0xff; + _buffer[index + 1] = (0 >> 8) & 0xff; + _buffer[index] = 0 & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, OP_GETMORE); + _buffer[index + 3] = (OP_GETMORE >> 24) & 0xff; + _buffer[index + 2] = (OP_GETMORE >> 16) & 0xff; + _buffer[index + 1] = (OP_GETMORE >> 8) & 
0xff; + _buffer[index] = OP_GETMORE & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, 0); + _buffer[index + 3] = (0 >> 24) & 0xff; + _buffer[index + 2] = (0 >> 16) & 0xff; + _buffer[index + 1] = (0 >> 8) & 0xff; + _buffer[index] = 0 & 0xff; + index = index + 4; + + // Write collection name + index = index + _buffer.write(this.ns, index, 'utf8') + 1; + _buffer[index - 1] = 0; + + // Write batch size + // index = write32bit(index, _buffer, numberToReturn); + _buffer[index + 3] = (this.numberToReturn >> 24) & 0xff; + _buffer[index + 2] = (this.numberToReturn >> 16) & 0xff; + _buffer[index + 1] = (this.numberToReturn >> 8) & 0xff; + _buffer[index] = this.numberToReturn & 0xff; + index = index + 4; + + // Write cursor id + // index = write32bit(index, _buffer, cursorId.getLowBits()); + _buffer[index + 3] = (this.cursorId.getLowBits() >> 24) & 0xff; + _buffer[index + 2] = (this.cursorId.getLowBits() >> 16) & 0xff; + _buffer[index + 1] = (this.cursorId.getLowBits() >> 8) & 0xff; + _buffer[index] = this.cursorId.getLowBits() & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, cursorId.getHighBits()); + _buffer[index + 3] = (this.cursorId.getHighBits() >> 24) & 0xff; + _buffer[index + 2] = (this.cursorId.getHighBits() >> 16) & 0xff; + _buffer[index + 1] = (this.cursorId.getHighBits() >> 8) & 0xff; + _buffer[index] = this.cursorId.getHighBits() & 0xff; + index = index + 4; + + // Return buffer + return _buffer; + } +} + +/************************************************************** + * KILLCURSOR + **************************************************************/ +class KillCursor { + constructor(bson, ns, cursorIds) { + this.ns = ns; + this.requestId = _requestId++; + this.cursorIds = cursorIds; + } + + // + // Uses a single allocated buffer for the process, avoiding multiple memory allocations + toBin() { + var length = 4 + 4 + 4 * 4 + this.cursorIds.length * 8; + + // Create command buffer + var index = 0; + var _buffer = Buffer.alloc(length); + + // Write header information + // index = write32bit(index, _buffer, length); + _buffer[index + 3] = (length >> 24) & 0xff; + _buffer[index + 2] = (length >> 16) & 0xff; + _buffer[index + 1] = (length >> 8) & 0xff; + _buffer[index] = length & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, requestId); + _buffer[index + 3] = (this.requestId >> 24) & 0xff; + _buffer[index + 2] = (this.requestId >> 16) & 0xff; + _buffer[index + 1] = (this.requestId >> 8) & 0xff; + _buffer[index] = this.requestId & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, 0); + _buffer[index + 3] = (0 >> 24) & 0xff; + _buffer[index + 2] = (0 >> 16) & 0xff; + _buffer[index + 1] = (0 >> 8) & 0xff; + _buffer[index] = 0 & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, OP_KILL_CURSORS); + _buffer[index + 3] = (OP_KILL_CURSORS >> 24) & 0xff; + _buffer[index + 2] = (OP_KILL_CURSORS >> 16) & 0xff; + _buffer[index + 1] = (OP_KILL_CURSORS >> 8) & 0xff; + _buffer[index] = OP_KILL_CURSORS & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, 0); + _buffer[index + 3] = (0 >> 24) & 0xff; + _buffer[index + 2] = (0 >> 16) & 0xff; + _buffer[index + 1] = (0 >> 8) & 0xff; + _buffer[index] = 0 & 0xff; + index = index + 4; + + // Write batch size + // index = write32bit(index, _buffer, this.cursorIds.length); + _buffer[index + 3] = (this.cursorIds.length >> 24) & 0xff; + _buffer[index + 2] = (this.cursorIds.length >> 16) & 0xff; + _buffer[index + 1] = (this.cursorIds.length >> 8) & 
0xff; + _buffer[index] = this.cursorIds.length & 0xff; + index = index + 4; + + // Write all the cursor ids into the array + for (var i = 0; i < this.cursorIds.length; i++) { + // Write cursor id + // index = write32bit(index, _buffer, cursorIds[i].getLowBits()); + _buffer[index + 3] = (this.cursorIds[i].getLowBits() >> 24) & 0xff; + _buffer[index + 2] = (this.cursorIds[i].getLowBits() >> 16) & 0xff; + _buffer[index + 1] = (this.cursorIds[i].getLowBits() >> 8) & 0xff; + _buffer[index] = this.cursorIds[i].getLowBits() & 0xff; + index = index + 4; + + // index = write32bit(index, _buffer, cursorIds[i].getHighBits()); + _buffer[index + 3] = (this.cursorIds[i].getHighBits() >> 24) & 0xff; + _buffer[index + 2] = (this.cursorIds[i].getHighBits() >> 16) & 0xff; + _buffer[index + 1] = (this.cursorIds[i].getHighBits() >> 8) & 0xff; + _buffer[index] = this.cursorIds[i].getHighBits() & 0xff; + index = index + 4; + } + + // Return buffer + return _buffer; + } +} + +class Response { + constructor(bson, message, msgHeader, msgBody, opts) { + opts = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false }; + this.parsed = false; + this.raw = message; + this.data = msgBody; + this.bson = bson; + this.opts = opts; + + // Read the message header + this.length = msgHeader.length; + this.requestId = msgHeader.requestId; + this.responseTo = msgHeader.responseTo; + this.opCode = msgHeader.opCode; + this.fromCompressed = msgHeader.fromCompressed; + + // Read the message body + this.responseFlags = msgBody.readInt32LE(0); + this.cursorId = new Long(msgBody.readInt32LE(4), msgBody.readInt32LE(8)); + this.startingFrom = msgBody.readInt32LE(12); + this.numberReturned = msgBody.readInt32LE(16); + + // Preallocate document array + this.documents = new Array(this.numberReturned); + + // Flag values + this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0; + this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0; + this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0; + this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0; + this.promoteLongs = typeof opts.promoteLongs === 'boolean' ? opts.promoteLongs : true; + this.promoteValues = typeof opts.promoteValues === 'boolean' ? opts.promoteValues : true; + this.promoteBuffers = typeof opts.promoteBuffers === 'boolean' ? opts.promoteBuffers : false; + } + + isParsed() { + return this.parsed; + } + + parse(options) { + // Don't parse again if not needed + if (this.parsed) return; + options = options || {}; + + // Allow the return of raw documents instead of parsing + var raw = options.raw || false; + var documentsReturnedIn = options.documentsReturnedIn || null; + var promoteLongs = + typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs; + var promoteValues = + typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues; + var promoteBuffers = + typeof options.promoteBuffers === 'boolean' + ? 
options.promoteBuffers + : this.opts.promoteBuffers; + var bsonSize, _options; + + // Set up the options + _options = { + promoteLongs: promoteLongs, + promoteValues: promoteValues, + promoteBuffers: promoteBuffers + }; + + // Position within OP_REPLY at which documents start + // (See https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-reply) + this.index = 20; + + // + // Parse Body + // + for (var i = 0; i < this.numberReturned; i++) { + bsonSize = + this.data[this.index] | + (this.data[this.index + 1] << 8) | + (this.data[this.index + 2] << 16) | + (this.data[this.index + 3] << 24); + + // If we have raw results specified slice the return document + if (raw) { + this.documents[i] = this.data.slice(this.index, this.index + bsonSize); + } else { + this.documents[i] = this.bson.deserialize( + this.data.slice(this.index, this.index + bsonSize), + _options + ); + } + + // Adjust the index + this.index = this.index + bsonSize; + } + + if (this.documents.length === 1 && documentsReturnedIn != null && raw) { + const fieldsAsRaw = {}; + fieldsAsRaw[documentsReturnedIn] = true; + _options.fieldsAsRaw = fieldsAsRaw; + + const doc = this.bson.deserialize(this.documents[0], _options); + this.documents = [doc]; + } + + // Set parsed + this.parsed = true; + } +} + +// Implementation of OP_MSG spec: +// https://github.com/mongodb/specifications/blob/master/source/message/OP_MSG.rst +// +// struct Section { +// uint8 payloadType; +// union payload { +// document document; // payloadType == 0 +// struct sequence { // payloadType == 1 +// int32 size; +// cstring identifier; +// document* documents; +// }; +// }; +// }; + +// struct OP_MSG { +// struct MsgHeader { +// int32 messageLength; +// int32 requestID; +// int32 responseTo; +// int32 opCode = 2013; +// }; +// uint32 flagBits; +// Section+ sections; +// [uint32 checksum;] +// }; + +// Msg Flags +const OPTS_CHECKSUM_PRESENT = 1; +const OPTS_MORE_TO_COME = 2; +const OPTS_EXHAUST_ALLOWED = 1 << 16; + +class Msg { + constructor(bson, ns, command, options) { + // Basic options needed to be passed in + if (command == null) throw new Error('query must be specified for query'); + + // Basic options + this.bson = bson; + this.ns = ns; + this.command = command; + this.command.$db = databaseNamespace(ns); + + if (options.readPreference && options.readPreference.mode !== ReadPreference.PRIMARY) { + this.command.$readPreference = options.readPreference.toJSON(); + } + + // Ensure empty options + this.options = options || {}; + + // Additional options + this.requestId = options.requestId ? options.requestId : Msg.getRequestId(); + + // Serialization option + this.serializeFunctions = + typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false; + this.ignoreUndefined = + typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false; + this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false; + this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16; + + // flags + this.checksumPresent = false; + this.moreToCome = options.moreToCome || false; + this.exhaustAllowed = + typeof options.exhaustAllowed === 'boolean' ? 
options.exhaustAllowed : false; + } + + toBin() { + const buffers = []; + let flags = 0; + + if (this.checksumPresent) { + flags |= OPTS_CHECKSUM_PRESENT; + } + + if (this.moreToCome) { + flags |= OPTS_MORE_TO_COME; + } + + if (this.exhaustAllowed) { + flags |= OPTS_EXHAUST_ALLOWED; + } + + const header = Buffer.alloc( + 4 * 4 + // Header + 4 // Flags + ); + + buffers.push(header); + + let totalLength = header.length; + const command = this.command; + totalLength += this.makeDocumentSegment(buffers, command); + + header.writeInt32LE(totalLength, 0); // messageLength + header.writeInt32LE(this.requestId, 4); // requestID + header.writeInt32LE(0, 8); // responseTo + header.writeInt32LE(OP_MSG, 12); // opCode + header.writeUInt32LE(flags, 16); // flags + return buffers; + } + + makeDocumentSegment(buffers, document) { + const payloadTypeBuffer = Buffer.alloc(1); + payloadTypeBuffer[0] = 0; + + const documentBuffer = this.serializeBson(document); + buffers.push(payloadTypeBuffer); + buffers.push(documentBuffer); + + return payloadTypeBuffer.length + documentBuffer.length; + } + + serializeBson(document) { + return this.bson.serialize(document, { + checkKeys: this.checkKeys, + serializeFunctions: this.serializeFunctions, + ignoreUndefined: this.ignoreUndefined + }); + } +} + +Msg.getRequestId = function() { + _requestId = (_requestId + 1) & 0x7fffffff; + return _requestId; +}; + +class BinMsg { + constructor(bson, message, msgHeader, msgBody, opts) { + opts = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false }; + this.parsed = false; + this.raw = message; + this.data = msgBody; + this.bson = bson; + this.opts = opts; + + // Read the message header + this.length = msgHeader.length; + this.requestId = msgHeader.requestId; + this.responseTo = msgHeader.responseTo; + this.opCode = msgHeader.opCode; + this.fromCompressed = msgHeader.fromCompressed; + + // Read response flags + this.responseFlags = msgBody.readInt32LE(0); + this.checksumPresent = (this.responseFlags & OPTS_CHECKSUM_PRESENT) !== 0; + this.moreToCome = (this.responseFlags & OPTS_MORE_TO_COME) !== 0; + this.exhaustAllowed = (this.responseFlags & OPTS_EXHAUST_ALLOWED) !== 0; + this.promoteLongs = typeof opts.promoteLongs === 'boolean' ? opts.promoteLongs : true; + this.promoteValues = typeof opts.promoteValues === 'boolean' ? opts.promoteValues : true; + this.promoteBuffers = typeof opts.promoteBuffers === 'boolean' ? opts.promoteBuffers : false; + + this.documents = []; + } + + isParsed() { + return this.parsed; + } + + parse(options) { + // Don't parse again if not needed + if (this.parsed) return; + options = options || {}; + + this.index = 4; + // Allow the return of raw documents instead of parsing + const raw = options.raw || false; + const documentsReturnedIn = options.documentsReturnedIn || null; + const promoteLongs = + typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs; + const promoteValues = + typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues; + const promoteBuffers = + typeof options.promoteBuffers === 'boolean' + ? 
options.promoteBuffers + : this.opts.promoteBuffers; + + // Set up the options + const _options = { + promoteLongs: promoteLongs, + promoteValues: promoteValues, + promoteBuffers: promoteBuffers + }; + + while (this.index < this.data.length) { + const payloadType = this.data.readUInt8(this.index++); + if (payloadType === 1) { + console.error('TYPE 1'); + } else if (payloadType === 0) { + const bsonSize = this.data.readUInt32LE(this.index); + const bin = this.data.slice(this.index, this.index + bsonSize); + this.documents.push(raw ? bin : this.bson.deserialize(bin, _options)); + + this.index += bsonSize; + } + } + + if (this.documents.length === 1 && documentsReturnedIn != null && raw) { + const fieldsAsRaw = {}; + fieldsAsRaw[documentsReturnedIn] = true; + _options.fieldsAsRaw = fieldsAsRaw; + + const doc = this.bson.deserialize(this.documents[0], _options); + this.documents = [doc]; + } + + this.parsed = true; + } +} + +/** + * Creates a new CommandResult instance + * @class + * @param {object} result CommandResult object + * @param {Connection} connection A connection instance associated with this result + * @return {CommandResult} A cursor instance + */ +class CommandResult { + constructor(result, connection, message) { + this.result = result; + this.connection = connection; + this.message = message; + } + + /** + * Convert CommandResult to JSON + * @method + * @return {object} + */ + toJSON() { + let result = Object.assign({}, this, this.result); + delete result.message; + return result; + } + + /** + * Convert CommandResult to String representation + * @method + * @return {string} + */ + toString() { + return JSON.stringify(this.toJSON()); + } +} + +module.exports = { + Query, + GetMore, + Response, + KillCursor, + Msg, + BinMsg, + CommandResult +}; diff --git a/lib/core/connection/connect.js b/lib/cmap/connect.js similarity index 80% rename from lib/core/connection/connect.js rename to lib/cmap/connect.js index b096e21fbcf..c0a304f1983 100644 --- a/lib/core/connection/connect.js +++ b/lib/cmap/connect.js @@ -1,17 +1,16 @@ 'use strict'; const net = require('net'); const tls = require('tls'); -const Connection = require('./connection'); -const Query = require('./commands').Query; -const MongoError = require('../error').MongoError; -const MongoNetworkError = require('../error').MongoNetworkError; -const defaultAuthProviders = require('../auth/defaultAuthProviders').defaultAuthProviders; -const WIRE_CONSTANTS = require('../wireprotocol/constants'); -const makeClientMetadata = require('../utils').makeClientMetadata; -const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION; -const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION; -const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION; -const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION; +const { Connection } = require('./connection'); +const { defaultAuthProviders } = require('./auth/defaultAuthProviders'); +const { makeClientMetadata } = require('../utils'); +const { MongoError, MongoNetworkError } = require('../error'); +const { + MAX_SUPPORTED_WIRE_VERSION, + MAX_SUPPORTED_SERVER_VERSION, + MIN_SUPPORTED_WIRE_VERSION, + MIN_SUPPORTED_SERVER_VERSION +} = require('./wire_protocol/constants'); let AUTH_PROVIDERS; function connect(options, cancellationToken, callback) { @@ -318,70 +317,18 @@ function makeConnection(family, options, cancellationToken, _callback) { socket.once(connectEvent, connectHandler); } -const CONNECTION_ERROR_EVENTS = 
['error', 'close', 'timeout', 'parseError']; function runCommand(conn, ns, command, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); - // are we using the new connection type? if so, no need to simulate a rpc `command` method - if (isModernConnectionType(conn)) { - conn.command(ns, command, options, (err, result) => { - if (err) { - callback(err); - return; - } - - // NODE-2382: raw wire protocol messages, or command results should not be used anymore - callback(undefined, result.result); - }); - - return; - } - - const socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; - const bson = conn.options.bson; - const query = new Query(bson, ns, command, { - numberToSkip: 0, - numberToReturn: 1 - }); - - const noop = () => {}; - function _callback(err, result) { - callback(err, result); - callback = noop; - } - - function errorHandler(err) { - conn.resetSocketTimeout(); - CONNECTION_ERROR_EVENTS.forEach(eventName => conn.removeListener(eventName, errorHandler)); - conn.removeListener('message', messageHandler); - - if (err == null) { - err = new MongoError(`runCommand failed for connection to '${conn.address}'`); - } - - // ignore all future errors - conn.on('error', noop); - - _callback(err); - } - - function messageHandler(msg) { - if (msg.responseTo !== query.requestId) { + conn.command(ns, command, options, (err, result) => { + if (err) { + callback(err); return; } - conn.resetSocketTimeout(); - CONNECTION_ERROR_EVENTS.forEach(eventName => conn.removeListener(eventName, errorHandler)); - conn.removeListener('message', messageHandler); - - msg.parse({ promoteValues: true }); - _callback(undefined, msg.documents[0]); - } - - conn.setSocketTimeout(socketTimeout); - CONNECTION_ERROR_EVENTS.forEach(eventName => conn.once(eventName, errorHandler)); - conn.on('message', messageHandler); - conn.write(query.toBin()); + // NODE-2382: raw wire protocol messages, or command results should not be used anymore + callback(undefined, result.result); + }); } function authenticate(conn, credentials, callback) { diff --git a/lib/cmap/connection.js b/lib/cmap/connection.js index bec3acae6f1..e8b646ec907 100644 --- a/lib/cmap/connection.js +++ b/lib/cmap/connection.js @@ -2,15 +2,13 @@ const EventEmitter = require('events'); const MessageStream = require('./message_stream'); -const MongoError = require('../core/error').MongoError; -const MongoNetworkError = require('../core/error').MongoNetworkError; -const MongoWriteConcernError = require('../core/error').MongoWriteConcernError; -const CommandResult = require('../core/connection/command_result'); -const StreamDescription = require('./stream_description').StreamDescription; -const wp = require('../core/wireprotocol'); -const apm = require('../core/connection/apm'); -const updateSessionFromResponse = require('../core/sessions').updateSessionFromResponse; -const uuidV4 = require('../core/utils').uuidV4; +const { CommandResult } = require('./commands'); +const { StreamDescription } = require('./stream_description'); +const wp = require('./wire_protocol'); +const { CommandStartedEvent, CommandFailedEvent, CommandSucceededEvent } = require('./events'); +const { updateSessionFromResponse } = require('../sessions'); +const { uuidV4 } = require('../utils'); +const { MongoError, MongoNetworkError, MongoWriteConcernError } = require('../error'); const kStream = Symbol('stream'); const kQueue = Symbol('queue'); @@ -288,6 +286,7 @@ function streamIdentifier(stream) { // Not meant to be 
called directly, the wire protocol methods call this assuming it is a `Pool` instance function write(command, options, callback) { + const connection = this; if (typeof options === 'function') { callback = options; } @@ -309,40 +308,40 @@ function write(command, options, callback) { raw: typeof options.raw === 'boolean' ? options.raw : false }; - if (this[kDescription] && this[kDescription].compressor) { - operationDescription.agreedCompressor = this[kDescription].compressor; + if (connection[kDescription] && connection[kDescription].compressor) { + operationDescription.agreedCompressor = connection[kDescription].compressor; - if (this[kDescription].zlibCompressionLevel) { - operationDescription.zlibCompressionLevel = this[kDescription].zlibCompressionLevel; + if (connection[kDescription].zlibCompressionLevel) { + operationDescription.zlibCompressionLevel = connection[kDescription].zlibCompressionLevel; } } if (typeof options.socketTimeout === 'number') { operationDescription.socketTimeoutOverride = true; - this[kStream].setTimeout(options.socketTimeout); + connection[kStream].setTimeout(options.socketTimeout); } // if command monitoring is enabled we need to modify the callback here if (this.monitorCommands) { - this.emit('commandStarted', new apm.CommandStartedEvent(this, command)); + connection.emit('commandStarted', new CommandStartedEvent(this, command)); operationDescription.started = process.hrtime(); operationDescription.cb = (err, reply) => { if (err) { - this.emit( + connection.emit( 'commandFailed', - new apm.CommandFailedEvent(this, command, err, operationDescription.started) + new CommandFailedEvent(connection, command, err, operationDescription.started) ); } else { if (reply && reply.result && (reply.result.ok === 0 || reply.result.$err)) { - this.emit( + connection.emit( 'commandFailed', - new apm.CommandFailedEvent(this, command, reply.result, operationDescription.started) + new CommandFailedEvent(connection, command, reply.result, operationDescription.started) ); } else { - this.emit( + connection.emit( 'commandSucceeded', - new apm.CommandSucceededEvent(this, command, reply, operationDescription.started) + new CommandSucceededEvent(connection, command, reply, operationDescription.started) ); } } @@ -354,14 +353,14 @@ function write(command, options, callback) { } if (!operationDescription.noResponse) { - this[kQueue].set(operationDescription.requestId, operationDescription); + connection[kQueue].set(operationDescription.requestId, operationDescription); } try { - this[kMessageStream].writeCommand(command, operationDescription); + connection[kMessageStream].writeCommand(command, operationDescription); } catch (e) { if (!operationDescription.noResponse) { - this[kQueue].delete(operationDescription.requestId); + connection[kQueue].delete(operationDescription.requestId); operationDescription.cb(e); return; } diff --git a/lib/cmap/connection_pool.js b/lib/cmap/connection_pool.js index 51ab907ada9..7d8f996fc79 100644 --- a/lib/cmap/connection_pool.js +++ b/lib/cmap/connection_pool.js @@ -1,30 +1,25 @@ 'use strict'; const Denque = require('denque'); -const EventEmitter = require('events').EventEmitter; -const Logger = require('../core/connection/logger'); -const makeCounter = require('../utils').makeCounter; -const MongoError = require('../core/error').MongoError; -const Connection = require('./connection').Connection; -const eachAsync = require('../core/utils').eachAsync; -const connect = require('../core/connection/connect'); -const relayEvents = 
require('../core/utils').relayEvents; - -const errors = require('./errors'); -const PoolClosedError = errors.PoolClosedError; -const WaitQueueTimeoutError = errors.WaitQueueTimeoutError; - -const events = require('./events'); -const ConnectionPoolCreatedEvent = events.ConnectionPoolCreatedEvent; -const ConnectionPoolClosedEvent = events.ConnectionPoolClosedEvent; -const ConnectionCreatedEvent = events.ConnectionCreatedEvent; -const ConnectionReadyEvent = events.ConnectionReadyEvent; -const ConnectionClosedEvent = events.ConnectionClosedEvent; -const ConnectionCheckOutStartedEvent = events.ConnectionCheckOutStartedEvent; -const ConnectionCheckOutFailedEvent = events.ConnectionCheckOutFailedEvent; -const ConnectionCheckedOutEvent = events.ConnectionCheckedOutEvent; -const ConnectionCheckedInEvent = events.ConnectionCheckedInEvent; -const ConnectionPoolClearedEvent = events.ConnectionPoolClearedEvent; +const EventEmitter = require('events'); +const Logger = require('../logger'); +const { Connection } = require('./connection'); +const connect = require('./connect'); +const { eachAsync, relayEvents, makeCounter } = require('../utils'); +const { MongoError } = require('../error'); +const { PoolClosedError, WaitQueueTimeoutError } = require('./errors'); +const { + ConnectionPoolCreatedEvent, + ConnectionPoolClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + ConnectionClosedEvent, + ConnectionCheckOutStartedEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckedOutEvent, + ConnectionCheckedInEvent, + ConnectionPoolClearedEvent +} = require('./events'); const kLogger = Symbol('logger'); const kConnections = Symbol('connections'); diff --git a/lib/cmap/errors.js b/lib/cmap/errors.js index d9330195e74..0c31ca8fb19 100644 --- a/lib/cmap/errors.js +++ b/lib/cmap/errors.js @@ -1,5 +1,5 @@ 'use strict'; -const MongoError = require('../core/error').MongoError; +const { MongoError } = require('../error'); /** * An error indicating a connection pool is closed diff --git a/lib/cmap/events.js b/lib/cmap/events.js index dcc8b6752b9..acd23e3ab36 100644 --- a/lib/cmap/events.js +++ b/lib/cmap/events.js @@ -1,4 +1,6 @@ 'use strict'; +const { GetMore, KillCursor, Msg } = require('./commands'); +const { calculateDurationInMs } = require('../utils'); /** * The base class for all monitoring events published from the connection pool @@ -139,6 +141,234 @@ const CMAP_EVENT_NAMES = [ 'connectionPoolCleared' ]; +/** An event indicating the start of a given command */ +class CommandStartedEvent { + /** + * Create a started event + * + * @param {Pool} pool the pool that originated the command + * @param {Object} command the command + */ + constructor(pool, command) { + const cmd = extractCommand(command); + const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); + + // NOTE: remove in major revision, this is not spec behavior + if (SENSITIVE_COMMANDS.has(commandName)) { + this.commandObj = {}; + this.commandObj[commandName] = true; + } + + Object.assign(this, connectionDetails, { + requestId: command.requestId, + databaseName: databaseName(command), + commandName, + command: cmd + }); + } +} + +/** An event indicating the success of a given command */ +class CommandSucceededEvent { + /** + * Create a succeeded event + * + * @param {Pool} pool the pool that originated the command + * @param {Object} command the command + * @param {Object} reply the reply for this command from the server + * @param {Array} started a high resolution tuple timestamp of when the 
command was first sent, to calculate duration + */ + constructor(pool, command, reply, started) { + const cmd = extractCommand(command); + const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); + + Object.assign(this, connectionDetails, { + requestId: command.requestId, + commandName, + duration: calculateDurationInMs(started), + reply: maybeRedact(commandName, extractReply(command, reply)) + }); + } +} + +/** An event indicating the failure of a given command */ +class CommandFailedEvent { + /** + * Create a failure event + * + * @param {Pool} pool the pool that originated the command + * @param {Object} command the command + * @param {MongoError|Object} error the generated error or a server error response + * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration + */ + constructor(pool, command, error, started) { + const cmd = extractCommand(command); + const commandName = extractCommandName(cmd); + const connectionDetails = extractConnectionDetails(pool); + + Object.assign(this, connectionDetails, { + requestId: command.requestId, + commandName, + duration: calculateDurationInMs(started), + failure: maybeRedact(commandName, error) + }); + } +} + +/** Commands that we want to redact because of the sensitive nature of their contents */ +const SENSITIVE_COMMANDS = new Set([ + 'authenticate', + 'saslStart', + 'saslContinue', + 'getnonce', + 'createUser', + 'updateUser', + 'copydbgetnonce', + 'copydbsaslstart', + 'copydb' +]); + +// helper methods +const extractCommandName = commandDoc => Object.keys(commandDoc)[0]; +const namespace = command => command.ns; +const databaseName = command => command.ns.split('.')[0]; +const collectionName = command => command.ns.split('.')[1]; +const maybeRedact = (commandName, result) => (SENSITIVE_COMMANDS.has(commandName) ? 
{} : result); + +const LEGACY_FIND_QUERY_MAP = { + $query: 'filter', + $orderby: 'sort', + $hint: 'hint', + $comment: 'comment', + $maxScan: 'maxScan', + $max: 'max', + $min: 'min', + $returnKey: 'returnKey', + $showDiskLoc: 'showRecordId', + $maxTimeMS: 'maxTimeMS', + $snapshot: 'snapshot' +}; + +const LEGACY_FIND_OPTIONS_MAP = { + numberToSkip: 'skip', + numberToReturn: 'batchSize', + returnFieldsSelector: 'projection' +}; + +const OP_QUERY_KEYS = [ + 'tailable', + 'oplogReplay', + 'noCursorTimeout', + 'awaitData', + 'partial', + 'exhaust' +]; + +/** + * Extract the actual command from the query, possibly upconverting if it's a legacy + * format + * + * @param {Object} command the command + */ +const extractCommand = command => { + if (command instanceof GetMore) { + return { + getMore: command.cursorId, + collection: collectionName(command), + batchSize: command.numberToReturn + }; + } + + if (command instanceof KillCursor) { + return { + killCursors: collectionName(command), + cursors: command.cursorIds + }; + } + + if (command instanceof Msg) { + return command.command; + } + + if (command.query && command.query.$query) { + let result; + if (command.ns === 'admin.$cmd') { + // upconvert legacy command + result = Object.assign({}, command.query.$query); + } else { + // upconvert legacy find command + result = { find: collectionName(command) }; + Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => { + if (typeof command.query[key] !== 'undefined') + result[LEGACY_FIND_QUERY_MAP[key]] = command.query[key]; + }); + } + + Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => { + if (typeof command[key] !== 'undefined') result[LEGACY_FIND_OPTIONS_MAP[key]] = command[key]; + }); + + OP_QUERY_KEYS.forEach(key => { + if (command[key]) result[key] = command[key]; + }); + + if (typeof command.pre32Limit !== 'undefined') { + result.limit = command.pre32Limit; + } + + if (command.query.$explain) { + return { explain: result }; + } + + return result; + } + + return command.query ? command.query : command; +}; + +const extractReply = (command, reply) => { + if (command instanceof GetMore) { + return { + ok: 1, + cursor: { + id: reply.message.cursorId, + ns: namespace(command), + nextBatch: reply.message.documents + } + }; + } + + if (command instanceof KillCursor) { + return { + ok: 1, + cursorsUnknown: command.cursorIds + }; + } + + // is this a legacy find command? + if (command.query && typeof command.query.$query !== 'undefined') { + return { + ok: 1, + cursor: { + id: reply.message.cursorId, + ns: namespace(command), + firstBatch: reply.message.documents + } + }; + } + + return reply && reply.result ? 
reply.result : reply; +}; + +const extractConnectionDetails = connection => { + return { + address: connection.address, + connectionId: connection.id + }; +}; + module.exports = { CMAP_EVENT_NAMES, ConnectionPoolCreatedEvent, @@ -150,5 +380,8 @@ module.exports = { ConnectionCheckOutFailedEvent, ConnectionCheckedOutEvent, ConnectionCheckedInEvent, - ConnectionPoolClearedEvent + ConnectionPoolClearedEvent, + CommandStartedEvent, + CommandSucceededEvent, + CommandFailedEvent }; diff --git a/lib/cmap/message_stream.js b/lib/cmap/message_stream.js index c8f458e53a0..4c3bc7ceeae 100644 --- a/lib/cmap/message_stream.js +++ b/lib/cmap/message_stream.js @@ -1,21 +1,19 @@ 'use strict'; -const Duplex = require('stream').Duplex; const BufferList = require('bl'); -const MongoParseError = require('../core/error').MongoParseError; -const decompress = require('../core/wireprotocol/compression').decompress; -const Response = require('../core/connection/commands').Response; -const BinMsg = require('../core/connection/msg').BinMsg; -const MongoError = require('../core/error').MongoError; -const OP_COMPRESSED = require('../core/wireprotocol/shared').opcodes.OP_COMPRESSED; -const OP_MSG = require('../core/wireprotocol/shared').opcodes.OP_MSG; -const MESSAGE_HEADER_SIZE = require('../core/wireprotocol/shared').MESSAGE_HEADER_SIZE; -const COMPRESSION_DETAILS_SIZE = require('../core/wireprotocol/shared').COMPRESSION_DETAILS_SIZE; -const opcodes = require('../core/wireprotocol/shared').opcodes; -const compress = require('../core/wireprotocol/compression').compress; -const compressorIDs = require('../core/wireprotocol/compression').compressorIDs; -const uncompressibleCommands = require('../core/wireprotocol/compression').uncompressibleCommands; -const Msg = require('../core/connection/msg').Msg; +const { Duplex } = require('stream'); +const { Response, Msg, BinMsg } = require('./commands'); +const { MongoError, MongoParseError } = require('../error'); +const { OP_COMPRESSED, OP_MSG } = require('./wire_protocol/constants'); +const { + compress, + decompress, + compressorIDs, + uncompressibleCommands +} = require('./wire_protocol/compression'); + +const MESSAGE_HEADER_SIZE = 16; +const COMPRESSION_DETAILS_SIZE = 9; // originalOpcode + uncompressedSize, compressorID const kDefaultMaxBsonMessageSize = 1024 * 1024 * 16 * 4; const kBuffer = Symbol('buffer'); @@ -79,7 +77,7 @@ class MessageStream extends Duplex { ); // messageLength msgHeader.writeInt32LE(command.requestId, 4); // requestID msgHeader.writeInt32LE(0, 8); // responseTo (zero) - msgHeader.writeInt32LE(opcodes.OP_COMPRESSED, 12); // opCode + msgHeader.writeInt32LE(OP_COMPRESSED, 12); // opCode // Create the compression details of OP_COMPRESSED const compressionDetails = Buffer.alloc(COMPRESSION_DETAILS_SIZE); diff --git a/lib/cmap/stream_description.js b/lib/cmap/stream_description.js index e806a5f6522..1d918446ef0 100644 --- a/lib/cmap/stream_description.js +++ b/lib/cmap/stream_description.js @@ -1,5 +1,5 @@ 'use strict'; -const parseServerType = require('../core/sdam/server_description').parseServerType; +const { parseServerType } = require('../sdam/server_description'); const RESPONSE_FIELDS = [ 'minWireVersion', diff --git a/lib/core/wireprotocol/command.js b/lib/cmap/wire_protocol/command.js similarity index 85% rename from lib/core/wireprotocol/command.js rename to lib/cmap/wire_protocol/command.js index 214385bbc6f..037f02e6652 100644 --- a/lib/core/wireprotocol/command.js +++ b/lib/cmap/wire_protocol/command.js @@ -1,15 +1,11 @@ 'use strict'; -const 
Query = require('../connection/commands').Query; -const Msg = require('../connection/msg').Msg; -const MongoError = require('../error').MongoError; -const getReadPreference = require('./shared').getReadPreference; -const isSharded = require('./shared').isSharded; -const databaseNamespace = require('./shared').databaseNamespace; -const isTransactionCommand = require('../transactions').isTransactionCommand; -const applySession = require('../sessions').applySession; -const MongoNetworkError = require('../error').MongoNetworkError; -const maxWireVersion = require('../utils').maxWireVersion; +const { Query, Msg } = require('../commands'); +const { getReadPreference, isSharded } = require('./shared'); +const { isTransactionCommand } = require('../../transactions'); +const { applySession } = require('../../sessions'); +const { maxWireVersion, databaseNamespace } = require('../../utils'); +const { MongoError, MongoNetworkError } = require('../../error'); function isClientEncryptionEnabled(server) { const wireVersion = maxWireVersion(server); @@ -105,10 +101,7 @@ function _command(server, ns, cmd, options, callback) { err instanceof MongoNetworkError && !err.hasErrorLabel('TransientTransactionError') ) { - if (err.errorLabels == null) { - err.errorLabels = []; - } - err.errorLabels.push('TransientTransactionError'); + err.addErrorLabel('TransientTransactionError'); } if ( diff --git a/lib/core/wireprotocol/compression.js b/lib/cmap/wire_protocol/compression.js similarity index 96% rename from lib/core/wireprotocol/compression.js rename to lib/cmap/wire_protocol/compression.js index b1e15835b8b..57b25709ce3 100644 --- a/lib/core/wireprotocol/compression.js +++ b/lib/cmap/wire_protocol/compression.js @@ -1,6 +1,6 @@ 'use strict'; -const Snappy = require('../connection/utils').retrieveSnappy(); +const Snappy = require('../../utils').retrieveSnappy(); const zlib = require('zlib'); const compressorIDs = { diff --git a/lib/cmap/wire_protocol/constants.js b/lib/cmap/wire_protocol/constants.js new file mode 100644 index 00000000000..8b694267398 --- /dev/null +++ b/lib/cmap/wire_protocol/constants.js @@ -0,0 +1,19 @@ +'use strict'; + +module.exports = { + MIN_SUPPORTED_SERVER_VERSION: '2.6', + MAX_SUPPORTED_SERVER_VERSION: '4.4', + MIN_SUPPORTED_WIRE_VERSION: 2, + MAX_SUPPORTED_WIRE_VERSION: 9, + + // opcodes + OP_REPLY: 1, + OP_UPDATE: 2001, + OP_INSERT: 2002, + OP_QUERY: 2004, + OP_GETMORE: 2005, + OP_DELETE: 2006, + OP_KILL_CURSORS: 2007, + OP_COMPRESSED: 2012, + OP_MSG: 2013 +}; diff --git a/lib/core/wireprotocol/get_more.js b/lib/cmap/wire_protocol/get_more.js similarity index 83% rename from lib/core/wireprotocol/get_more.js rename to lib/cmap/wire_protocol/get_more.js index b2db320265c..8b3990e7d3a 100644 --- a/lib/core/wireprotocol/get_more.js +++ b/lib/cmap/wire_protocol/get_more.js @@ -1,14 +1,11 @@ 'use strict'; -const GetMore = require('../connection/commands').GetMore; -const retrieveBSON = require('../connection/utils').retrieveBSON; -const MongoError = require('../error').MongoError; -const MongoNetworkError = require('../error').MongoNetworkError; -const BSON = retrieveBSON(); +const { GetMore } = require('../commands'); +const BSON = require('../../utils').retrieveBSON(); const Long = BSON.Long; -const collectionNamespace = require('./shared').collectionNamespace; -const maxWireVersion = require('../utils').maxWireVersion; -const applyCommonQueryOptions = require('./shared').applyCommonQueryOptions; +const { MongoError, MongoNetworkError } = require('../../error'); +const { 
applyCommonQueryOptions } = require('./shared'); +const { maxWireVersion, collectionNamespace } = require('../../utils'); const command = require('./command'); function getMore(server, ns, cursorState, batchSize, options, callback) { diff --git a/lib/core/wireprotocol/index.js b/lib/cmap/wire_protocol/index.js similarity index 100% rename from lib/core/wireprotocol/index.js rename to lib/cmap/wire_protocol/index.js diff --git a/lib/core/wireprotocol/kill_cursors.js b/lib/cmap/wire_protocol/kill_cursors.js similarity index 84% rename from lib/core/wireprotocol/kill_cursors.js rename to lib/cmap/wire_protocol/kill_cursors.js index bb1347737f2..ce92d298fe3 100644 --- a/lib/core/wireprotocol/kill_cursors.js +++ b/lib/cmap/wire_protocol/kill_cursors.js @@ -1,11 +1,9 @@ 'use strict'; -const KillCursor = require('../connection/commands').KillCursor; -const MongoError = require('../error').MongoError; -const MongoNetworkError = require('../error').MongoNetworkError; -const collectionNamespace = require('./shared').collectionNamespace; -const maxWireVersion = require('../utils').maxWireVersion; +const { KillCursor } = require('../commands'); +const { maxWireVersion, collectionNamespace } = require('../../utils'); const command = require('./command'); +const { MongoError, MongoNetworkError } = require('../../error'); function killCursors(server, ns, cursorState, callback) { callback = typeof callback === 'function' ? callback : () => {}; diff --git a/lib/core/wireprotocol/query.js b/lib/cmap/wire_protocol/query.js similarity index 94% rename from lib/core/wireprotocol/query.js rename to lib/cmap/wire_protocol/query.js index c501b506441..bb50b6098ac 100644 --- a/lib/core/wireprotocol/query.js +++ b/lib/cmap/wire_protocol/query.js @@ -1,13 +1,10 @@ 'use strict'; -const Query = require('../connection/commands').Query; -const MongoError = require('../error').MongoError; -const getReadPreference = require('./shared').getReadPreference; -const collectionNamespace = require('./shared').collectionNamespace; -const isSharded = require('./shared').isSharded; -const maxWireVersion = require('../utils').maxWireVersion; -const applyCommonQueryOptions = require('./shared').applyCommonQueryOptions; const command = require('./command'); +const { Query } = require('../commands'); +const { MongoError } = require('../../error'); +const { maxWireVersion, collectionNamespace } = require('../../utils'); +const { getReadPreference, isSharded, applyCommonQueryOptions } = require('./shared'); function query(server, ns, cmd, cursorState, options, callback) { options = options || {}; @@ -100,6 +97,10 @@ function prepareFindCommand(server, ns, cmd, cursorState) { sortValue = sortObject; } + if (typeof cmd.allowDiskUse === 'boolean') { + findCmd.allowDiskUse = cmd.allowDiskUse; + } + if (cmd.sort) findCmd.sort = sortValue; if (cmd.fields) findCmd.projection = cmd.fields; if (cmd.hint) findCmd.hint = cmd.hint; diff --git a/lib/core/wireprotocol/shared.js b/lib/cmap/wire_protocol/shared.js similarity index 64% rename from lib/core/wireprotocol/shared.js rename to lib/cmap/wire_protocol/shared.js index c586f057546..5a8c6cbfdca 100644 --- a/lib/core/wireprotocol/shared.js +++ b/lib/cmap/wire_protocol/shared.js @@ -1,26 +1,9 @@ 'use strict'; -const ReadPreference = require('../topologies/read_preference'); -const MongoError = require('../error').MongoError; -const ServerType = require('../sdam/common').ServerType; -const TopologyDescription = require('../sdam/topology_description').TopologyDescription; - -const MESSAGE_HEADER_SIZE = 
16; -const COMPRESSION_DETAILS_SIZE = 9; // originalOpcode + uncompressedSize, compressorID - -// OPCODE Numbers -// Defined at https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#request-opcodes -var opcodes = { - OP_REPLY: 1, - OP_UPDATE: 2001, - OP_INSERT: 2002, - OP_QUERY: 2004, - OP_GETMORE: 2005, - OP_DELETE: 2006, - OP_KILL_CURSORS: 2007, - OP_COMPRESSED: 2012, - OP_MSG: 2013 -}; +const { ServerType } = require('../../sdam/common'); +const { TopologyDescription } = require('../../sdam/topology_description'); +const ReadPreference = require('../../read_preference'); +const { MongoError } = require('../../error'); var getReadPreference = function(cmd, options) { // Default to command version of the readPreference @@ -41,16 +24,6 @@ var getReadPreference = function(cmd, options) { return readPreference; }; -// Parses the header of a wire protocol message -var parseHeader = function(message) { - return { - length: message.readInt32LE(0), - requestId: message.readInt32LE(4), - responseTo: message.readInt32LE(8), - opCode: message.readInt32LE(12) - }; -}; - function applyCommonQueryOptions(queryOptions, options) { Object.assign(queryOptions, { raw: typeof options.raw === 'boolean' ? options.raw : false, @@ -92,24 +65,8 @@ function isSharded(topologyOrServer) { return false; } -function databaseNamespace(ns) { - return ns.split('.')[0]; -} -function collectionNamespace(ns) { - return ns - .split('.') - .slice(1) - .join('.'); -} - module.exports = { getReadPreference, - MESSAGE_HEADER_SIZE, - COMPRESSION_DETAILS_SIZE, - opcodes, - parseHeader, applyCommonQueryOptions, - isSharded, - databaseNamespace, - collectionNamespace + isSharded }; diff --git a/lib/core/wireprotocol/write_command.js b/lib/cmap/wire_protocol/write_command.js similarity index 91% rename from lib/core/wireprotocol/write_command.js rename to lib/cmap/wire_protocol/write_command.js index e334d5182c7..c7b41b7364a 100644 --- a/lib/core/wireprotocol/write_command.js +++ b/lib/cmap/wire_protocol/write_command.js @@ -1,7 +1,7 @@ 'use strict'; -const MongoError = require('../error').MongoError; -const collectionNamespace = require('./shared').collectionNamespace; +const { MongoError } = require('../../error'); +const { collectionNamespace } = require('../../utils'); const command = require('./command'); function writeCommand(server, type, opsField, ns, ops, options, callback) { diff --git a/lib/collection.js b/lib/collection.js index 1de8b4f91a1..46b8f195336 100644 --- a/lib/collection.js +++ b/lib/collection.js @@ -1,36 +1,38 @@ 'use strict'; -const deprecate = require('util').deprecate; -const deprecateOptions = require('./utils').deprecateOptions; -const checkCollectionName = require('./utils').checkCollectionName; -const ObjectID = require('./core').BSON.ObjectID; -const MongoError = require('./core').MongoError; -const toError = require('./utils').toError; -const normalizeHintField = require('./utils').normalizeHintField; -const decorateCommand = require('./utils').decorateCommand; -const decorateWithCollation = require('./utils').decorateWithCollation; -const decorateWithReadConcern = require('./utils').decorateWithReadConcern; -const formattedOrderClause = require('./utils').formattedOrderClause; -const ReadPreference = require('./core').ReadPreference; +const ReadPreference = require('./read_preference'); +const { deprecate } = require('util'); +const { + toError, + normalizeHintField, + decorateCommand, + decorateWithCollation, + decorateWithReadConcern, + formattedOrderClause, + checkCollectionName, + 
deprecateOptions, + executeLegacyOperation, + resolveReadPreference, + MongoDBNamespace +} = require('./utils'); +const { ObjectID } = require('./utils').retrieveBSON(); +const { MongoError } = require('./error'); const unordered = require('./bulk/unordered'); const ordered = require('./bulk/ordered'); const ChangeStream = require('./change_stream'); -const executeLegacyOperation = require('./utils').executeLegacyOperation; -const resolveReadPreference = require('./utils').resolveReadPreference; const WriteConcern = require('./write_concern'); const ReadConcern = require('./read_concern'); -const MongoDBNamespace = require('./utils').MongoDBNamespace; -const AggregationCursor = require('./aggregation_cursor'); -const CommandCursor = require('./command_cursor'); +const { AggregationCursor, CommandCursor } = require('./cursor'); // Operations -const checkForAtomicOperators = require('./operations/collection_ops').checkForAtomicOperators; -const ensureIndex = require('./operations/collection_ops').ensureIndex; -const group = require('./operations/collection_ops').group; -const parallelCollectionScan = require('./operations/collection_ops').parallelCollectionScan; -const removeDocuments = require('./operations/common_functions').removeDocuments; -const save = require('./operations/collection_ops').save; -const updateDocuments = require('./operations/common_functions').updateDocuments; +const { + ensureIndex, + group, + parallelCollectionScan, + save, + checkForAtomicOperators +} = require('./operations/collection_ops'); +const { removeDocuments, updateDocuments } = require('./operations/common_functions'); const AggregateOperation = require('./operations/aggregate'); const BulkWriteOperation = require('./operations/bulk_write'); @@ -40,7 +42,7 @@ const CreateIndexesOperation = require('./operations/create_indexes'); const DeleteManyOperation = require('./operations/delete_many'); const DeleteOneOperation = require('./operations/delete_one'); const DistinctOperation = require('./operations/distinct'); -const DropCollectionOperation = require('./operations/drop').DropCollectionOperation; +const { DropCollectionOperation } = require('./operations/drop'); const DropIndexOperation = require('./operations/drop_index'); const DropIndexesOperation = require('./operations/drop_indexes'); const EstimatedDocumentCountOperation = require('./operations/estimated_document_count'); @@ -278,7 +280,7 @@ Object.defineProperty(Collection.prototype, 'hint', { } }); -const DEPRECATED_FIND_OPTIONS = ['maxScan', 'fields', 'snapshot']; +const DEPRECATED_FIND_OPTIONS = ['maxScan', 'fields', 'snapshot', 'oplogReplay']; /** * Creates a cursor for a query that can be used to iterate over results from MongoDB @@ -313,6 +315,7 @@ const DEPRECATED_FIND_OPTIONS = ['maxScan', 'fields', 'snapshot']; * @param {number} [options.maxAwaitTimeMS] The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. Requires `tailable` and `awaitData` to be true * @param {boolean} [options.noCursorTimeout] The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use. Set this option to prevent that. * @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields). + * @param {boolean} [options.allowDiskUse] Enables writing to temporary files on the server.
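+ * @example
+ * // Hedged usage sketch (illustrative only, not part of this patch): when the
+ * // caller sets the boolean, the driver forwards it verbatim to the server's
+ * // find command via the `findCommand.allowDiskUse` wiring added below.
+ * // collection.find({ status: 'active' }, { allowDiskUse: true }).toArray(callback);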
* @param {ClientSession} [options.session] optional session to use for this operation * @throws {MongoError} * @return {Cursor} @@ -422,6 +425,10 @@ Collection.prototype.find = deprecateOptions( query: selector }; + if (typeof options.allowDiskUse === 'boolean') { + findCommand.allowDiskUse = options.allowDiskUse; + } + // Ensure we use the right await data option if (typeof newOptions.awaitdata === 'boolean') { newOptions.awaitData = newOptions.awaitdata; @@ -1683,6 +1690,7 @@ Collection.prototype.findOneAndDelete = function(filter, options, callback) { * @param {object} [options] Optional settings. * @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher. * @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields). + * @param {string|object} [options.hint] An optional index to use for this operation * @param {number} [options.maxTimeMS] The maximum amount of time to allow the query to run. * @param {object} [options.projection] Limits the fields to return for all matching documents. * @param {object} [options.sort] Determines which document the operation modifies if the query selects multiple documents. @@ -1732,6 +1740,7 @@ Collection.prototype.findOneAndReplace = function(filter, replacement, options, * @param {Array} [options.arrayFilters] optional list of array filters referenced in filtered positional operators * @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher. * @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields). + * @param {string|object} [options.hint] An optional index to use for this operation * @param {number} [options.maxTimeMS] The maximum amount of time to allow the query to run. * @param {object} [options.projection] Limits the fields to return for all matching documents. * @param {object} [options.sort] Determines which document the operation modifies if the query selects multiple documents. @@ -1740,7 +1749,7 @@ Collection.prototype.findOneAndReplace = function(filter, replacement, options, * @param {boolean} [options.checkKeys=false] If true, will throw if bson documents start with `$` or include a `.` in any key value * @param {boolean} [options.serializeFunctions=false] Serialize functions on any object. * @param {boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
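+ * @example
+ * // Hedged sketch (assumed call shape, not taken from this patch): the new hint
+ * // option pins the index the server uses to match the filter.
+ * // collection.findOneAndUpdate({ sku: 'abc123' }, { $inc: { qty: -1 } }, { hint: { sku: 1 } }, callback);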
- * @param {ClientSession} [options.session] optional session to use for this operation + * @param {ClientSession} [options.session] An optional session to use for this operation * @param {Collection~findAndModifyCallback} [callback] The collection result callback * @return {Promise} returns Promise if no callback passed */ diff --git a/lib/core/uri_parser.js b/lib/connection_string.js similarity index 97% rename from lib/core/uri_parser.js rename to lib/connection_string.js index 0f51e0d72ec..b468a3b0149 100644 --- a/lib/core/uri_parser.js +++ b/lib/connection_string.js @@ -2,8 +2,8 @@ const URL = require('url'); const qs = require('querystring'); const dns = require('dns'); -const MongoParseError = require('./error').MongoParseError; -const ReadPreference = require('./topologies/read_preference'); +const ReadPreference = require('./read_preference'); +const { MongoParseError } = require('./error'); /** * The following regular expression validates a connection string and breaks the @@ -171,6 +171,7 @@ const STRING_OPTIONS = new Set(['authsource', 'replicaset']); // NOTE: this list exists in native already, if it is merged here we should deduplicate const AUTH_MECHANISMS = new Set([ 'GSSAPI', + 'MONGODB-AWS', 'MONGODB-X509', 'MONGODB-CR', 'DEFAULT', @@ -257,7 +258,9 @@ function applyConnectionStringOption(obj, key, value, options) { if (key === 'authmechanism' && !AUTH_MECHANISMS.has(value)) { throw new MongoParseError( - 'Value for `authMechanism` must be one of: `DEFAULT`, `GSSAPI`, `PLAIN`, `MONGODB-X509`, `SCRAM-SHA-1`, `SCRAM-SHA-256`' + `Value for authMechanism must be one of: ${Array.from(AUTH_MECHANISMS).join( + ', ' + )}, found: ${value}` ); } @@ -357,6 +360,16 @@ function applyAuthExpectations(parsed) { parsed.auth = Object.assign({}, parsed.auth, { db: '$external' }); } + if (authMechanism === 'MONGODB-AWS') { + if (authSource != null && authSource !== '$external') { + throw new MongoParseError( + `Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.` + ); + } + + parsed.auth = Object.assign({}, parsed.auth, { db: '$external' }); + } + if (authMechanism === 'MONGODB-X509') { if (parsed.auth && parsed.auth.password != null) { throw new MongoParseError(`Password not allowed for mechanism \`${authMechanism}\``); @@ -685,4 +698,6 @@ function parseConnectionString(uri, options, callback) { callback(null, result); } -module.exports = parseConnectionString; +module.exports = { + parseConnectionString +}; diff --git a/lib/core/connection/apm.js b/lib/core/connection/apm.js deleted file mode 100644 index 82858efda76..00000000000 --- a/lib/core/connection/apm.js +++ /dev/null @@ -1,251 +0,0 @@ -'use strict'; -const Msg = require('../connection/msg').Msg; -const KillCursor = require('../connection/commands').KillCursor; -const GetMore = require('../connection/commands').GetMore; -const calculateDurationInMs = require('../utils').calculateDurationInMs; - -/** Commands that we want to redact because of the sensitive nature of their contents */ -const SENSITIVE_COMMANDS = new Set([ - 'authenticate', - 'saslStart', - 'saslContinue', - 'getnonce', - 'createUser', - 'updateUser', - 'copydbgetnonce', - 'copydbsaslstart', - 'copydb' -]); - -// helper methods -const extractCommandName = commandDoc => Object.keys(commandDoc)[0]; -const namespace = command => command.ns; -const databaseName = command => command.ns.split('.')[0]; -const collectionName = command => command.ns.split('.')[1]; -const generateConnectionId = pool =>
`${pool.options.host}:${pool.options.port}` : pool.address; -const maybeRedact = (commandName, result) => (SENSITIVE_COMMANDS.has(commandName) ? {} : result); -const isLegacyPool = pool => pool.s && pool.queue; - -const LEGACY_FIND_QUERY_MAP = { - $query: 'filter', - $orderby: 'sort', - $hint: 'hint', - $comment: 'comment', - $maxScan: 'maxScan', - $max: 'max', - $min: 'min', - $returnKey: 'returnKey', - $showDiskLoc: 'showRecordId', - $maxTimeMS: 'maxTimeMS', - $snapshot: 'snapshot' -}; - -const LEGACY_FIND_OPTIONS_MAP = { - numberToSkip: 'skip', - numberToReturn: 'batchSize', - returnFieldsSelector: 'projection' -}; - -const OP_QUERY_KEYS = [ - 'tailable', - 'oplogReplay', - 'noCursorTimeout', - 'awaitData', - 'partial', - 'exhaust' -]; - -/** - * Extract the actual command from the query, possibly upconverting if it's a legacy - * format - * - * @param {Object} command the command - */ -const extractCommand = command => { - if (command instanceof GetMore) { - return { - getMore: command.cursorId, - collection: collectionName(command), - batchSize: command.numberToReturn - }; - } - - if (command instanceof KillCursor) { - return { - killCursors: collectionName(command), - cursors: command.cursorIds - }; - } - - if (command instanceof Msg) { - return command.command; - } - - if (command.query && command.query.$query) { - let result; - if (command.ns === 'admin.$cmd') { - // upconvert legacy command - result = Object.assign({}, command.query.$query); - } else { - // upconvert legacy find command - result = { find: collectionName(command) }; - Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => { - if (typeof command.query[key] !== 'undefined') - result[LEGACY_FIND_QUERY_MAP[key]] = command.query[key]; - }); - } - - Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => { - if (typeof command[key] !== 'undefined') result[LEGACY_FIND_OPTIONS_MAP[key]] = command[key]; - }); - - OP_QUERY_KEYS.forEach(key => { - if (command[key]) result[key] = command[key]; - }); - - if (typeof command.pre32Limit !== 'undefined') { - result.limit = command.pre32Limit; - } - - if (command.query.$explain) { - return { explain: result }; - } - - return result; - } - - return command.query ? command.query : command; -}; - -const extractReply = (command, reply) => { - if (command instanceof GetMore) { - return { - ok: 1, - cursor: { - id: reply.message.cursorId, - ns: namespace(command), - nextBatch: reply.message.documents - } - }; - } - - if (command instanceof KillCursor) { - return { - ok: 1, - cursorsUnknown: command.cursorIds - }; - } - - // is this a legacy find command? - if (command.query && typeof command.query.$query !== 'undefined') { - return { - ok: 1, - cursor: { - id: reply.message.cursorId, - ns: namespace(command), - firstBatch: reply.message.documents - } - }; - } - - return reply && reply.result ? reply.result : reply; -}; - -const extractConnectionDetails = pool => { - if (isLegacyPool(pool)) { - return { - connectionId: generateConnectionId(pool) - }; - } - - // APM in the modern pool is done at the `Connection` level, so we rename it here for - // readability. 
- const connection = pool; - return { - address: connection.address, - connectionId: connection.id - }; -}; - -/** An event indicating the start of a given command */ -class CommandStartedEvent { - /** - * Create a started event - * - * @param {Pool} pool the pool that originated the command - * @param {Object} command the command - */ - constructor(pool, command) { - const cmd = extractCommand(command); - const commandName = extractCommandName(cmd); - const connectionDetails = extractConnectionDetails(pool); - - // NOTE: remove in major revision, this is not spec behavior - if (SENSITIVE_COMMANDS.has(commandName)) { - this.commandObj = {}; - this.commandObj[commandName] = true; - } - - Object.assign(this, connectionDetails, { - requestId: command.requestId, - databaseName: databaseName(command), - commandName, - command: cmd - }); - } -} - -/** An event indicating the success of a given command */ -class CommandSucceededEvent { - /** - * Create a succeeded event - * - * @param {Pool} pool the pool that originated the command - * @param {Object} command the command - * @param {Object} reply the reply for this command from the server - * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration - */ - constructor(pool, command, reply, started) { - const cmd = extractCommand(command); - const commandName = extractCommandName(cmd); - const connectionDetails = extractConnectionDetails(pool); - - Object.assign(this, connectionDetails, { - requestId: command.requestId, - commandName, - duration: calculateDurationInMs(started), - reply: maybeRedact(commandName, extractReply(command, reply)) - }); - } -} - -/** An event indicating the failure of a given command */ -class CommandFailedEvent { - /** - * Create a failure event - * - * @param {Pool} pool the pool that originated the command - * @param {Object} command the command - * @param {MongoError|Object} error the generated error or a server error response - * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration - */ - constructor(pool, command, error, started) { - const cmd = extractCommand(command); - const commandName = extractCommandName(cmd); - const connectionDetails = extractConnectionDetails(pool); - - Object.assign(this, connectionDetails, { - requestId: command.requestId, - commandName, - duration: calculateDurationInMs(started), - failure: maybeRedact(commandName, error) - }); - } -} - -module.exports = { - CommandStartedEvent, - CommandSucceededEvent, - CommandFailedEvent -}; diff --git a/lib/core/connection/command_result.js b/lib/core/connection/command_result.js deleted file mode 100644 index 762aa3f17b4..00000000000 --- a/lib/core/connection/command_result.js +++ /dev/null @@ -1,36 +0,0 @@ -'use strict'; - -/** - * Creates a new CommandResult instance - * @class - * @param {object} result CommandResult object - * @param {Connection} connection A connection instance associated with this result - * @return {CommandResult} A cursor instance - */ -var CommandResult = function(result, connection, message) { - this.result = result; - this.connection = connection; - this.message = message; -}; - -/** - * Convert CommandResult to JSON - * @method - * @return {object} - */ -CommandResult.prototype.toJSON = function() { - let result = Object.assign({}, this, this.result); - delete result.message; - return result; -}; - -/** - * Convert CommandResult to String representation - * @method - * @return {string} - */ 
-CommandResult.prototype.toString = function() { - return JSON.stringify(this.toJSON()); -}; - -module.exports = CommandResult; diff --git a/lib/core/connection/commands.js b/lib/core/connection/commands.js deleted file mode 100644 index b24ff8481c4..00000000000 --- a/lib/core/connection/commands.js +++ /dev/null @@ -1,507 +0,0 @@ -'use strict'; - -var retrieveBSON = require('./utils').retrieveBSON; -var BSON = retrieveBSON(); -var Long = BSON.Long; -const Buffer = require('safe-buffer').Buffer; - -// Incrementing request id -var _requestId = 0; - -// Wire command operation ids -var opcodes = require('../wireprotocol/shared').opcodes; - -// Query flags -var OPTS_TAILABLE_CURSOR = 2; -var OPTS_SLAVE = 4; -var OPTS_OPLOG_REPLAY = 8; -var OPTS_NO_CURSOR_TIMEOUT = 16; -var OPTS_AWAIT_DATA = 32; -var OPTS_EXHAUST = 64; -var OPTS_PARTIAL = 128; - -// Response flags -var CURSOR_NOT_FOUND = 1; -var QUERY_FAILURE = 2; -var SHARD_CONFIG_STALE = 4; -var AWAIT_CAPABLE = 8; - -/************************************************************** - * QUERY - **************************************************************/ -var Query = function(bson, ns, query, options) { - var self = this; - // Basic options needed to be passed in - if (ns == null) throw new Error('ns must be specified for query'); - if (query == null) throw new Error('query must be specified for query'); - - // Validate that we are not passing 0x00 in the collection name - if (ns.indexOf('\x00') !== -1) { - throw new Error('namespace cannot contain a null character'); - } - - // Basic options - this.bson = bson; - this.ns = ns; - this.query = query; - - // Additional options - this.numberToSkip = options.numberToSkip || 0; - this.numberToReturn = options.numberToReturn || 0; - this.returnFieldSelector = options.returnFieldSelector || null; - this.requestId = Query.getRequestId(); - - // special case for pre-3.2 find commands, delete ASAP - this.pre32Limit = options.pre32Limit; - - // Serialization option - this.serializeFunctions = - typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false; - this.ignoreUndefined = - typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false; - this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16; - this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : true; - this.batchSize = self.numberToReturn; - - // Flags - this.tailable = false; - this.slaveOk = typeof options.slaveOk === 'boolean' ? 
options.slaveOk : false; - this.oplogReplay = false; - this.noCursorTimeout = false; - this.awaitData = false; - this.exhaust = false; - this.partial = false; -}; - -// -// Assign a new request Id -Query.prototype.incRequestId = function() { - this.requestId = _requestId++; -}; - -// -// Assign a new request Id -Query.nextRequestId = function() { - return _requestId + 1; -}; - -// -// Uses a single allocated buffer for the process, avoiding multiple memory allocations -Query.prototype.toBin = function() { - var self = this; - var buffers = []; - var projection = null; - - // Set up the flags - var flags = 0; - if (this.tailable) { - flags |= OPTS_TAILABLE_CURSOR; - } - - if (this.slaveOk) { - flags |= OPTS_SLAVE; - } - - if (this.oplogReplay) { - flags |= OPTS_OPLOG_REPLAY; - } - - if (this.noCursorTimeout) { - flags |= OPTS_NO_CURSOR_TIMEOUT; - } - - if (this.awaitData) { - flags |= OPTS_AWAIT_DATA; - } - - if (this.exhaust) { - flags |= OPTS_EXHAUST; - } - - if (this.partial) { - flags |= OPTS_PARTIAL; - } - - // If batchSize is different to self.numberToReturn - if (self.batchSize !== self.numberToReturn) self.numberToReturn = self.batchSize; - - // Allocate write protocol header buffer - var header = Buffer.alloc( - 4 * 4 + // Header - 4 + // Flags - Buffer.byteLength(self.ns) + - 1 + // namespace - 4 + // numberToSkip - 4 // numberToReturn - ); - - // Add header to buffers - buffers.push(header); - - // Serialize the query - var query = self.bson.serialize(this.query, { - checkKeys: this.checkKeys, - serializeFunctions: this.serializeFunctions, - ignoreUndefined: this.ignoreUndefined - }); - - // Add query document - buffers.push(query); - - if (self.returnFieldSelector && Object.keys(self.returnFieldSelector).length > 0) { - // Serialize the projection document - projection = self.bson.serialize(this.returnFieldSelector, { - checkKeys: this.checkKeys, - serializeFunctions: this.serializeFunctions, - ignoreUndefined: this.ignoreUndefined - }); - // Add projection document - buffers.push(projection); - } - - // Total message size - var totalLength = header.length + query.length + (projection ? 
projection.length : 0); - - // Set up the index - var index = 4; - - // Write total document length - header[3] = (totalLength >> 24) & 0xff; - header[2] = (totalLength >> 16) & 0xff; - header[1] = (totalLength >> 8) & 0xff; - header[0] = totalLength & 0xff; - - // Write header information requestId - header[index + 3] = (this.requestId >> 24) & 0xff; - header[index + 2] = (this.requestId >> 16) & 0xff; - header[index + 1] = (this.requestId >> 8) & 0xff; - header[index] = this.requestId & 0xff; - index = index + 4; - - // Write header information responseTo - header[index + 3] = (0 >> 24) & 0xff; - header[index + 2] = (0 >> 16) & 0xff; - header[index + 1] = (0 >> 8) & 0xff; - header[index] = 0 & 0xff; - index = index + 4; - - // Write header information OP_QUERY - header[index + 3] = (opcodes.OP_QUERY >> 24) & 0xff; - header[index + 2] = (opcodes.OP_QUERY >> 16) & 0xff; - header[index + 1] = (opcodes.OP_QUERY >> 8) & 0xff; - header[index] = opcodes.OP_QUERY & 0xff; - index = index + 4; - - // Write header information flags - header[index + 3] = (flags >> 24) & 0xff; - header[index + 2] = (flags >> 16) & 0xff; - header[index + 1] = (flags >> 8) & 0xff; - header[index] = flags & 0xff; - index = index + 4; - - // Write collection name - index = index + header.write(this.ns, index, 'utf8') + 1; - header[index - 1] = 0; - - // Write header information flags numberToSkip - header[index + 3] = (this.numberToSkip >> 24) & 0xff; - header[index + 2] = (this.numberToSkip >> 16) & 0xff; - header[index + 1] = (this.numberToSkip >> 8) & 0xff; - header[index] = this.numberToSkip & 0xff; - index = index + 4; - - // Write header information flags numberToReturn - header[index + 3] = (this.numberToReturn >> 24) & 0xff; - header[index + 2] = (this.numberToReturn >> 16) & 0xff; - header[index + 1] = (this.numberToReturn >> 8) & 0xff; - header[index] = this.numberToReturn & 0xff; - index = index + 4; - - // Return the buffers - return buffers; -}; - -Query.getRequestId = function() { - return ++_requestId; -}; - -/************************************************************** - * GETMORE - **************************************************************/ -var GetMore = function(bson, ns, cursorId, opts) { - opts = opts || {}; - this.numberToReturn = opts.numberToReturn || 0; - this.requestId = _requestId++; - this.bson = bson; - this.ns = ns; - this.cursorId = cursorId; -}; - -// -// Uses a single allocated buffer for the process, avoiding multiple memory allocations -GetMore.prototype.toBin = function() { - var length = 4 + Buffer.byteLength(this.ns) + 1 + 4 + 8 + 4 * 4; - // Create command buffer - var index = 0; - // Allocate buffer - var _buffer = Buffer.alloc(length); - - // Write header information - // index = write32bit(index, _buffer, length); - _buffer[index + 3] = (length >> 24) & 0xff; - _buffer[index + 2] = (length >> 16) & 0xff; - _buffer[index + 1] = (length >> 8) & 0xff; - _buffer[index] = length & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, requestId); - _buffer[index + 3] = (this.requestId >> 24) & 0xff; - _buffer[index + 2] = (this.requestId >> 16) & 0xff; - _buffer[index + 1] = (this.requestId >> 8) & 0xff; - _buffer[index] = this.requestId & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, 0); - _buffer[index + 3] = (0 >> 24) & 0xff; - _buffer[index + 2] = (0 >> 16) & 0xff; - _buffer[index + 1] = (0 >> 8) & 0xff; - _buffer[index] = 0 & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, OP_GETMORE); - _buffer[index + 3] = 
(opcodes.OP_GETMORE >> 24) & 0xff; - _buffer[index + 2] = (opcodes.OP_GETMORE >> 16) & 0xff; - _buffer[index + 1] = (opcodes.OP_GETMORE >> 8) & 0xff; - _buffer[index] = opcodes.OP_GETMORE & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, 0); - _buffer[index + 3] = (0 >> 24) & 0xff; - _buffer[index + 2] = (0 >> 16) & 0xff; - _buffer[index + 1] = (0 >> 8) & 0xff; - _buffer[index] = 0 & 0xff; - index = index + 4; - - // Write collection name - index = index + _buffer.write(this.ns, index, 'utf8') + 1; - _buffer[index - 1] = 0; - - // Write batch size - // index = write32bit(index, _buffer, numberToReturn); - _buffer[index + 3] = (this.numberToReturn >> 24) & 0xff; - _buffer[index + 2] = (this.numberToReturn >> 16) & 0xff; - _buffer[index + 1] = (this.numberToReturn >> 8) & 0xff; - _buffer[index] = this.numberToReturn & 0xff; - index = index + 4; - - // Write cursor id - // index = write32bit(index, _buffer, cursorId.getLowBits()); - _buffer[index + 3] = (this.cursorId.getLowBits() >> 24) & 0xff; - _buffer[index + 2] = (this.cursorId.getLowBits() >> 16) & 0xff; - _buffer[index + 1] = (this.cursorId.getLowBits() >> 8) & 0xff; - _buffer[index] = this.cursorId.getLowBits() & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, cursorId.getHighBits()); - _buffer[index + 3] = (this.cursorId.getHighBits() >> 24) & 0xff; - _buffer[index + 2] = (this.cursorId.getHighBits() >> 16) & 0xff; - _buffer[index + 1] = (this.cursorId.getHighBits() >> 8) & 0xff; - _buffer[index] = this.cursorId.getHighBits() & 0xff; - index = index + 4; - - // Return buffer - return _buffer; -}; - -/************************************************************** - * KILLCURSOR - **************************************************************/ -var KillCursor = function(bson, ns, cursorIds) { - this.ns = ns; - this.requestId = _requestId++; - this.cursorIds = cursorIds; -}; - -// -// Uses a single allocated buffer for the process, avoiding multiple memory allocations -KillCursor.prototype.toBin = function() { - var length = 4 + 4 + 4 * 4 + this.cursorIds.length * 8; - - // Create command buffer - var index = 0; - var _buffer = Buffer.alloc(length); - - // Write header information - // index = write32bit(index, _buffer, length); - _buffer[index + 3] = (length >> 24) & 0xff; - _buffer[index + 2] = (length >> 16) & 0xff; - _buffer[index + 1] = (length >> 8) & 0xff; - _buffer[index] = length & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, requestId); - _buffer[index + 3] = (this.requestId >> 24) & 0xff; - _buffer[index + 2] = (this.requestId >> 16) & 0xff; - _buffer[index + 1] = (this.requestId >> 8) & 0xff; - _buffer[index] = this.requestId & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, 0); - _buffer[index + 3] = (0 >> 24) & 0xff; - _buffer[index + 2] = (0 >> 16) & 0xff; - _buffer[index + 1] = (0 >> 8) & 0xff; - _buffer[index] = 0 & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, OP_KILL_CURSORS); - _buffer[index + 3] = (opcodes.OP_KILL_CURSORS >> 24) & 0xff; - _buffer[index + 2] = (opcodes.OP_KILL_CURSORS >> 16) & 0xff; - _buffer[index + 1] = (opcodes.OP_KILL_CURSORS >> 8) & 0xff; - _buffer[index] = opcodes.OP_KILL_CURSORS & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, 0); - _buffer[index + 3] = (0 >> 24) & 0xff; - _buffer[index + 2] = (0 >> 16) & 0xff; - _buffer[index + 1] = (0 >> 8) & 0xff; - _buffer[index] = 0 & 0xff; - index = index + 4; - - // Write batch size - // index = write32bit(index, _buffer, 
this.cursorIds.length); - _buffer[index + 3] = (this.cursorIds.length >> 24) & 0xff; - _buffer[index + 2] = (this.cursorIds.length >> 16) & 0xff; - _buffer[index + 1] = (this.cursorIds.length >> 8) & 0xff; - _buffer[index] = this.cursorIds.length & 0xff; - index = index + 4; - - // Write all the cursor ids into the array - for (var i = 0; i < this.cursorIds.length; i++) { - // Write cursor id - // index = write32bit(index, _buffer, cursorIds[i].getLowBits()); - _buffer[index + 3] = (this.cursorIds[i].getLowBits() >> 24) & 0xff; - _buffer[index + 2] = (this.cursorIds[i].getLowBits() >> 16) & 0xff; - _buffer[index + 1] = (this.cursorIds[i].getLowBits() >> 8) & 0xff; - _buffer[index] = this.cursorIds[i].getLowBits() & 0xff; - index = index + 4; - - // index = write32bit(index, _buffer, cursorIds[i].getHighBits()); - _buffer[index + 3] = (this.cursorIds[i].getHighBits() >> 24) & 0xff; - _buffer[index + 2] = (this.cursorIds[i].getHighBits() >> 16) & 0xff; - _buffer[index + 1] = (this.cursorIds[i].getHighBits() >> 8) & 0xff; - _buffer[index] = this.cursorIds[i].getHighBits() & 0xff; - index = index + 4; - } - - // Return buffer - return _buffer; -}; - -var Response = function(bson, message, msgHeader, msgBody, opts) { - opts = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false }; - this.parsed = false; - this.raw = message; - this.data = msgBody; - this.bson = bson; - this.opts = opts; - - // Read the message header - this.length = msgHeader.length; - this.requestId = msgHeader.requestId; - this.responseTo = msgHeader.responseTo; - this.opCode = msgHeader.opCode; - this.fromCompressed = msgHeader.fromCompressed; - - // Read the message body - this.responseFlags = msgBody.readInt32LE(0); - this.cursorId = new Long(msgBody.readInt32LE(4), msgBody.readInt32LE(8)); - this.startingFrom = msgBody.readInt32LE(12); - this.numberReturned = msgBody.readInt32LE(16); - - // Preallocate document array - this.documents = new Array(this.numberReturned); - - // Flag values - this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0; - this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0; - this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0; - this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0; - this.promoteLongs = typeof opts.promoteLongs === 'boolean' ? opts.promoteLongs : true; - this.promoteValues = typeof opts.promoteValues === 'boolean' ? opts.promoteValues : true; - this.promoteBuffers = typeof opts.promoteBuffers === 'boolean' ? opts.promoteBuffers : false; -}; - -Response.prototype.isParsed = function() { - return this.parsed; -}; - -Response.prototype.parse = function(options) { - // Don't parse again if not needed - if (this.parsed) return; - options = options || {}; - - // Allow the return of raw documents instead of parsing - var raw = options.raw || false; - var documentsReturnedIn = options.documentsReturnedIn || null; - var promoteLongs = - typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs; - var promoteValues = - typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues; - var promoteBuffers = - typeof options.promoteBuffers === 'boolean' ? 
options.promoteBuffers : this.opts.promoteBuffers; - var bsonSize, _options; - - // Set up the options - _options = { - promoteLongs: promoteLongs, - promoteValues: promoteValues, - promoteBuffers: promoteBuffers - }; - - // Position within OP_REPLY at which documents start - // (See https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-reply) - this.index = 20; - - // - // Parse Body - // - for (var i = 0; i < this.numberReturned; i++) { - bsonSize = - this.data[this.index] | - (this.data[this.index + 1] << 8) | - (this.data[this.index + 2] << 16) | - (this.data[this.index + 3] << 24); - - // If we have raw results specified slice the return document - if (raw) { - this.documents[i] = this.data.slice(this.index, this.index + bsonSize); - } else { - this.documents[i] = this.bson.deserialize( - this.data.slice(this.index, this.index + bsonSize), - _options - ); - } - - // Adjust the index - this.index = this.index + bsonSize; - } - - if (this.documents.length === 1 && documentsReturnedIn != null && raw) { - const fieldsAsRaw = {}; - fieldsAsRaw[documentsReturnedIn] = true; - _options.fieldsAsRaw = fieldsAsRaw; - - const doc = this.bson.deserialize(this.documents[0], _options); - this.documents = [doc]; - } - - // Set parsed - this.parsed = true; -}; - -module.exports = { - Query: Query, - GetMore: GetMore, - Response: Response, - KillCursor: KillCursor -}; diff --git a/lib/core/connection/connection.js b/lib/core/connection/connection.js deleted file mode 100644 index c1c5a3b399f..00000000000 --- a/lib/core/connection/connection.js +++ /dev/null @@ -1,644 +0,0 @@ -'use strict'; - -const EventEmitter = require('events').EventEmitter; -const crypto = require('crypto'); -const debugOptions = require('./utils').debugOptions; -const parseHeader = require('../wireprotocol/shared').parseHeader; -const decompress = require('../wireprotocol/compression').decompress; -const Response = require('./commands').Response; -const BinMsg = require('./msg').BinMsg; -const MongoNetworkError = require('../error').MongoNetworkError; -const MongoError = require('../error').MongoError; -const Logger = require('./logger'); -const OP_COMPRESSED = require('../wireprotocol/shared').opcodes.OP_COMPRESSED; -const OP_MSG = require('../wireprotocol/shared').opcodes.OP_MSG; -const MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE; -const Buffer = require('safe-buffer').Buffer; - -let _id = 0; - -const DEFAULT_MAX_BSON_MESSAGE_SIZE = 1024 * 1024 * 16 * 4; -const DEBUG_FIELDS = [ - 'host', - 'port', - 'size', - 'keepAlive', - 'keepAliveInitialDelay', - 'noDelay', - 'connectionTimeout', - 'socketTimeout', - 'ssl', - 'ca', - 'crl', - 'cert', - 'rejectUnauthorized', - 'promoteLongs', - 'promoteValues', - 'promoteBuffers', - 'checkServerIdentity' -]; - -let connectionAccountingSpy = undefined; -let connectionAccounting = false; -let connections = {}; - -/** - * A class representing a single connection to a MongoDB server - * - * @fires Connection#connect - * @fires Connection#close - * @fires Connection#error - * @fires Connection#timeout - * @fires Connection#parseError - * @fires Connection#message - */ -class Connection extends EventEmitter { - /** - * Creates a new Connection instance - * - * **NOTE**: Internal class, do not instantiate directly - * - * @param {Socket} socket The socket this connection wraps - * @param {Object} options Various settings - * @param {object} options.bson An implementation of bson serialize and deserialize - * @param {string} 
[options.host='localhost'] The host the socket is connected to - * @param {number} [options.port=27017] The port used for the socket connection - * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled - * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting - * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting - * @param {boolean} [options.promoteLongs] Convert Long values from the db into Numbers if they fit into 53 bits - * @param {boolean} [options.promoteValues] Promotes BSON values to native types where possible, set to false to only receive wrapper types. - * @param {boolean} [options.promoteBuffers] Promotes Binary BSON values to native Node Buffers. - * @param {number} [options.maxBsonMessageSize=0x4000000] Largest possible size of a BSON message (for legacy purposes) - */ - constructor(socket, options) { - super(); - - options = options || {}; - if (!options.bson) { - throw new TypeError('must pass in valid bson parser'); - } - - this.id = _id++; - this.options = options; - this.logger = Logger('Connection', options); - this.bson = options.bson; - this.tag = options.tag; - this.maxBsonMessageSize = options.maxBsonMessageSize || DEFAULT_MAX_BSON_MESSAGE_SIZE; - - this.port = options.port || 27017; - this.host = options.host || 'localhost'; - this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; - - // These values are inspected directly in tests, but maybe not necessary to keep around - this.keepAlive = typeof options.keepAlive === 'boolean' ? options.keepAlive : true; - this.keepAliveInitialDelay = - typeof options.keepAliveInitialDelay === 'number' ? options.keepAliveInitialDelay : 300000; - this.connectionTimeout = - typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 30000; - if (this.keepAliveInitialDelay > this.socketTimeout) { - this.keepAliveInitialDelay = Math.round(this.socketTimeout / 2); - } - - // Debug information - if (this.logger.isDebug()) { - this.logger.debug( - `creating connection ${this.id} with options [${JSON.stringify( - debugOptions(DEBUG_FIELDS, options) - )}]` - ); - } - - // Response options - this.responseOptions = { - promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true, - promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true, - promoteBuffers: typeof options.promoteBuffers === 'boolean' ? 
options.promoteBuffers : false - }; - - // Flushing - this.flushing = false; - this.queue = []; - - // Internal state - this.writeStream = null; - this.destroyed = false; - this.timedOut = false; - - // Create hash method - const hash = crypto.createHash('sha1'); - hash.update(this.address); - this.hashedName = hash.digest('hex'); - - // All operations in flight on the connection - this.workItems = []; - - // setup socket - this.socket = socket; - this.socket.once('error', errorHandler(this)); - this.socket.once('timeout', timeoutHandler(this)); - this.socket.once('close', closeHandler(this)); - this.socket.on('data', dataHandler(this)); - - if (connectionAccounting) { - addConnection(this.id, this); - } - } - - setSocketTimeout(value) { - if (this.socket) { - this.socket.setTimeout(value); - } - } - - resetSocketTimeout() { - if (this.socket) { - this.socket.setTimeout(this.socketTimeout); - } - } - - static enableConnectionAccounting(spy) { - if (spy) { - connectionAccountingSpy = spy; - } - - connectionAccounting = true; - connections = {}; - } - - static disableConnectionAccounting() { - connectionAccounting = false; - connectionAccountingSpy = undefined; - } - - static connections() { - return connections; - } - - get address() { - return `${this.host}:${this.port}`; - } - - /** - * Unref this connection - * @method - * @return {boolean} - */ - unref() { - if (this.socket == null) { - this.once('connect', () => this.socket.unref()); - return; - } - - this.socket.unref(); - } - - /** - * Flush all work Items on this connection - * - * @param {*} err The error to propagate to the flushed work items - */ - flush(err) { - while (this.workItems.length > 0) { - const workItem = this.workItems.shift(); - if (workItem.cb) { - workItem.cb(err); - } - } - } - - /** - * Destroy connection - * @method - */ - destroy(options, callback) { - if (typeof options === 'function') { - callback = options; - options = {}; - } - - options = Object.assign({ force: false }, options); - - if (connectionAccounting) { - deleteConnection(this.id); - } - - if (this.socket == null) { - this.destroyed = true; - return; - } - - if (options.force || this.timedOut) { - this.socket.destroy(); - this.destroyed = true; - if (typeof callback === 'function') callback(null, null); - return; - } - - this.socket.end(err => { - this.destroyed = true; - if (typeof callback === 'function') callback(err, null); - }); - } - - /** - * Write to connection - * @method - * @param {Command} command Command to write out need to implement toBin and toBinUnified - */ - write(buffer) { - // Debug Log - if (this.logger.isDebug()) { - if (!Array.isArray(buffer)) { - this.logger.debug(`writing buffer [${buffer.toString('hex')}] to ${this.address}`); - } else { - for (let i = 0; i < buffer.length; i++) - this.logger.debug(`writing buffer [${buffer[i].toString('hex')}] to ${this.address}`); - } - } - - // Double check that the connection is not destroyed - if (this.socket.destroyed === false) { - // Write out the command - if (!Array.isArray(buffer)) { - this.socket.write(buffer, 'binary'); - return true; - } - - // Iterate over all buffers and write them in order to the socket - for (let i = 0; i < buffer.length; i++) { - this.socket.write(buffer[i], 'binary'); - } - - return true; - } - - // Connection is destroyed return write failed - return false; - } - - /** - * Return id of connection as a string - * @method - * @return {string} - */ - toString() { - return '' + this.id; - } - - /** - * Return json object of connection - * @method - * 
@return {object} - */ - toJSON() { - return { id: this.id, host: this.host, port: this.port }; - } - - /** - * Is the connection connected - * @method - * @return {boolean} - */ - isConnected() { - if (this.destroyed) return false; - return !this.socket.destroyed && this.socket.writable; - } -} - -function deleteConnection(id) { - // console.log("=== deleted connection " + id + " :: " + (connections[id] ? connections[id].port : '')) - delete connections[id]; - - if (connectionAccountingSpy) { - connectionAccountingSpy.deleteConnection(id); - } -} - -function addConnection(id, connection) { - // console.log("=== added connection " + id + " :: " + connection.port) - connections[id] = connection; - - if (connectionAccountingSpy) { - connectionAccountingSpy.addConnection(id, connection); - } -} - -// -// Connection handlers -function errorHandler(conn) { - return function(err) { - if (connectionAccounting) deleteConnection(conn.id); - // Debug information - if (conn.logger.isDebug()) { - conn.logger.debug( - `connection ${conn.id} for [${conn.address}] errored out with [${JSON.stringify(err)}]` - ); - } - - conn.emit('error', new MongoNetworkError(err), conn); - }; -} - -function timeoutHandler(conn) { - return function() { - if (connectionAccounting) deleteConnection(conn.id); - - if (conn.logger.isDebug()) { - conn.logger.debug(`connection ${conn.id} for [${conn.address}] timed out`); - } - - conn.timedOut = true; - conn.emit( - 'timeout', - new MongoNetworkError(`connection ${conn.id} to ${conn.address} timed out`), - conn - ); - }; -} - -function closeHandler(conn) { - return function(hadError) { - if (connectionAccounting) deleteConnection(conn.id); - - if (conn.logger.isDebug()) { - conn.logger.debug(`connection ${conn.id} with for [${conn.address}] closed`); - } - - if (!hadError) { - conn.emit( - 'close', - new MongoNetworkError(`connection ${conn.id} to ${conn.address} closed`), - conn - ); - } - }; -} - -// Handle a message once it is received -function processMessage(conn, message) { - const msgHeader = parseHeader(message); - if (msgHeader.opCode !== OP_COMPRESSED) { - const ResponseConstructor = msgHeader.opCode === OP_MSG ? BinMsg : Response; - conn.emit( - 'message', - new ResponseConstructor( - conn.bson, - message, - msgHeader, - message.slice(MESSAGE_HEADER_SIZE), - conn.responseOptions - ), - conn - ); - - return; - } - - msgHeader.fromCompressed = true; - let index = MESSAGE_HEADER_SIZE; - msgHeader.opCode = message.readInt32LE(index); - index += 4; - msgHeader.length = message.readInt32LE(index); - index += 4; - const compressorID = message[index]; - index++; - - decompress(compressorID, message.slice(index), (err, decompressedMsgBody) => { - if (err) { - conn.emit('error', err); - return; - } - - if (decompressedMsgBody.length !== msgHeader.length) { - conn.emit( - 'error', - new MongoError( - 'Decompressing a compressed message from the server failed. The message is corrupt.' - ) - ); - - return; - } - - const ResponseConstructor = msgHeader.opCode === OP_MSG ? 
BinMsg : Response; - conn.emit( - 'message', - new ResponseConstructor( - conn.bson, - message, - msgHeader, - decompressedMsgBody, - conn.responseOptions - ), - conn - ); - }); -} - -function dataHandler(conn) { - return function(data) { - // Parse until we are done with the data - while (data.length > 0) { - // If we still have bytes to read on the current message - if (conn.bytesRead > 0 && conn.sizeOfMessage > 0) { - // Calculate the amount of remaining bytes - const remainingBytesToRead = conn.sizeOfMessage - conn.bytesRead; - // Check if the current chunk contains the rest of the message - if (remainingBytesToRead > data.length) { - // Copy the new data into the exiting buffer (should have been allocated when we know the message size) - data.copy(conn.buffer, conn.bytesRead); - // Adjust the number of bytes read so it point to the correct index in the buffer - conn.bytesRead = conn.bytesRead + data.length; - - // Reset state of buffer - data = Buffer.alloc(0); - } else { - // Copy the missing part of the data into our current buffer - data.copy(conn.buffer, conn.bytesRead, 0, remainingBytesToRead); - // Slice the overflow into a new buffer that we will then re-parse - data = data.slice(remainingBytesToRead); - - // Emit current complete message - const emitBuffer = conn.buffer; - // Reset state of buffer - conn.buffer = null; - conn.sizeOfMessage = 0; - conn.bytesRead = 0; - conn.stubBuffer = null; - - processMessage(conn, emitBuffer); - } - } else { - // Stub buffer is kept in case we don't get enough bytes to determine the - // size of the message (< 4 bytes) - if (conn.stubBuffer != null && conn.stubBuffer.length > 0) { - // If we have enough bytes to determine the message size let's do it - if (conn.stubBuffer.length + data.length > 4) { - // Prepad the data - const newData = Buffer.alloc(conn.stubBuffer.length + data.length); - conn.stubBuffer.copy(newData, 0); - data.copy(newData, conn.stubBuffer.length); - // Reassign for parsing - data = newData; - - // Reset state of buffer - conn.buffer = null; - conn.sizeOfMessage = 0; - conn.bytesRead = 0; - conn.stubBuffer = null; - } else { - // Add the the bytes to the stub buffer - const newStubBuffer = Buffer.alloc(conn.stubBuffer.length + data.length); - // Copy existing stub buffer - conn.stubBuffer.copy(newStubBuffer, 0); - // Copy missing part of the data - data.copy(newStubBuffer, conn.stubBuffer.length); - // Exit parsing loop - data = Buffer.alloc(0); - } - } else { - if (data.length > 4) { - // Retrieve the message size - const sizeOfMessage = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); - // If we have a negative sizeOfMessage emit error and return - if (sizeOfMessage < 0 || sizeOfMessage > conn.maxBsonMessageSize) { - const errorObject = { - err: 'socketHandler', - trace: '', - bin: conn.buffer, - parseState: { - sizeOfMessage: sizeOfMessage, - bytesRead: conn.bytesRead, - stubBuffer: conn.stubBuffer - } - }; - // We got a parse Error fire it off then keep going - conn.emit('parseError', errorObject, conn); - return; - } - - // Ensure that the size of message is larger than 0 and less than the max allowed - if ( - sizeOfMessage > 4 && - sizeOfMessage < conn.maxBsonMessageSize && - sizeOfMessage > data.length - ) { - conn.buffer = Buffer.alloc(sizeOfMessage); - // Copy all the data into the buffer - data.copy(conn.buffer, 0); - // Update bytes read - conn.bytesRead = data.length; - // Update sizeOfMessage - conn.sizeOfMessage = sizeOfMessage; - // Ensure stub buffer is null - conn.stubBuffer = null; - // 
Exit parsing loop - data = Buffer.alloc(0); - } else if ( - sizeOfMessage > 4 && - sizeOfMessage < conn.maxBsonMessageSize && - sizeOfMessage === data.length - ) { - const emitBuffer = data; - // Reset state of buffer - conn.buffer = null; - conn.sizeOfMessage = 0; - conn.bytesRead = 0; - conn.stubBuffer = null; - // Exit parsing loop - data = Buffer.alloc(0); - // Emit the message - processMessage(conn, emitBuffer); - } else if (sizeOfMessage <= 4 || sizeOfMessage > conn.maxBsonMessageSize) { - const errorObject = { - err: 'socketHandler', - trace: null, - bin: data, - parseState: { - sizeOfMessage: sizeOfMessage, - bytesRead: 0, - buffer: null, - stubBuffer: null - } - }; - // We got a parse Error fire it off then keep going - conn.emit('parseError', errorObject, conn); - - // Clear out the state of the parser - conn.buffer = null; - conn.sizeOfMessage = 0; - conn.bytesRead = 0; - conn.stubBuffer = null; - // Exit parsing loop - data = Buffer.alloc(0); - } else { - const emitBuffer = data.slice(0, sizeOfMessage); - // Reset state of buffer - conn.buffer = null; - conn.sizeOfMessage = 0; - conn.bytesRead = 0; - conn.stubBuffer = null; - // Copy rest of message - data = data.slice(sizeOfMessage); - // Emit the message - processMessage(conn, emitBuffer); - } - } else { - // Create a buffer that contains the space for the non-complete message - conn.stubBuffer = Buffer.alloc(data.length); - // Copy the data to the stub buffer - data.copy(conn.stubBuffer, 0); - // Exit parsing loop - data = Buffer.alloc(0); - } - } - } - } - }; -} - -/** - * A server connect event, used to verify that the connection is up and running - * - * @event Connection#connect - * @type {Connection} - */ - -/** - * The server connection closed, all pool connections closed - * - * @event Connection#close - * @type {Connection} - */ - -/** - * The server connection caused an error, all pool connections closed - * - * @event Connection#error - * @type {Connection} - */ - -/** - * The server connection timed out, all pool connections closed - * - * @event Connection#timeout - * @type {Connection} - */ - -/** - * The driver experienced an invalid message, all pool connections closed - * - * @event Connection#parseError - * @type {Connection} - */ - -/** - * An event emitted each time the connection receives a parsed message from the wire - * - * @event Connection#message - * @type {Connection} - */ - -module.exports = Connection; diff --git a/lib/core/connection/msg.js b/lib/core/connection/msg.js deleted file mode 100644 index 9f15a81114c..00000000000 --- a/lib/core/connection/msg.js +++ /dev/null @@ -1,222 +0,0 @@ -'use strict'; - -// Implementation of OP_MSG spec: -// https://github.com/mongodb/specifications/blob/master/source/message/OP_MSG.rst -// -// struct Section { -// uint8 payloadType; -// union payload { -// document document; // payloadType == 0 -// struct sequence { // payloadType == 1 -// int32 size; -// cstring identifier; -// document* documents; -// }; -// }; -// }; - -// struct OP_MSG { -// struct MsgHeader { -// int32 messageLength; -// int32 requestID; -// int32 responseTo; -// int32 opCode = 2013; -// }; -// uint32 flagBits; -// Section+ sections; -// [uint32 checksum;] -// }; - -const Buffer = require('safe-buffer').Buffer; -const opcodes = require('../wireprotocol/shared').opcodes; -const databaseNamespace = require('../wireprotocol/shared').databaseNamespace; -const ReadPreference = require('../topologies/read_preference'); - -// Incrementing request id -let _requestId = 0; - -// Msg Flags -const 
OPTS_CHECKSUM_PRESENT = 1; -const OPTS_MORE_TO_COME = 2; -const OPTS_EXHAUST_ALLOWED = 1 << 16; - -class Msg { - constructor(bson, ns, command, options) { - // Basic options needed to be passed in - if (command == null) throw new Error('query must be specified for query'); - - // Basic options - this.bson = bson; - this.ns = ns; - this.command = command; - this.command.$db = databaseNamespace(ns); - - if (options.readPreference && options.readPreference.mode !== ReadPreference.PRIMARY) { - this.command.$readPreference = options.readPreference.toJSON(); - } - - // Ensure empty options - this.options = options || {}; - - // Additional options - this.requestId = options.requestId ? options.requestId : Msg.getRequestId(); - - // Serialization option - this.serializeFunctions = - typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false; - this.ignoreUndefined = - typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false; - this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false; - this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16; - - // flags - this.checksumPresent = false; - this.moreToCome = options.moreToCome || false; - this.exhaustAllowed = - typeof options.exhaustAllowed === 'boolean' ? options.exhaustAllowed : false; - } - - toBin() { - const buffers = []; - let flags = 0; - - if (this.checksumPresent) { - flags |= OPTS_CHECKSUM_PRESENT; - } - - if (this.moreToCome) { - flags |= OPTS_MORE_TO_COME; - } - - if (this.exhaustAllowed) { - flags |= OPTS_EXHAUST_ALLOWED; - } - - const header = Buffer.alloc( - 4 * 4 + // Header - 4 // Flags - ); - - buffers.push(header); - - let totalLength = header.length; - const command = this.command; - totalLength += this.makeDocumentSegment(buffers, command); - - header.writeInt32LE(totalLength, 0); // messageLength - header.writeInt32LE(this.requestId, 4); // requestID - header.writeInt32LE(0, 8); // responseTo - header.writeInt32LE(opcodes.OP_MSG, 12); // opCode - header.writeUInt32LE(flags, 16); // flags - return buffers; - } - - makeDocumentSegment(buffers, document) { - const payloadTypeBuffer = Buffer.alloc(1); - payloadTypeBuffer[0] = 0; - - const documentBuffer = this.serializeBson(document); - buffers.push(payloadTypeBuffer); - buffers.push(documentBuffer); - - return payloadTypeBuffer.length + documentBuffer.length; - } - - serializeBson(document) { - return this.bson.serialize(document, { - checkKeys: this.checkKeys, - serializeFunctions: this.serializeFunctions, - ignoreUndefined: this.ignoreUndefined - }); - } -} - -Msg.getRequestId = function() { - _requestId = (_requestId + 1) & 0x7fffffff; - return _requestId; -}; - -class BinMsg { - constructor(bson, message, msgHeader, msgBody, opts) { - opts = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false }; - this.parsed = false; - this.raw = message; - this.data = msgBody; - this.bson = bson; - this.opts = opts; - - // Read the message header - this.length = msgHeader.length; - this.requestId = msgHeader.requestId; - this.responseTo = msgHeader.responseTo; - this.opCode = msgHeader.opCode; - this.fromCompressed = msgHeader.fromCompressed; - - // Read response flags - this.responseFlags = msgBody.readInt32LE(0); - this.checksumPresent = (this.responseFlags & OPTS_CHECKSUM_PRESENT) !== 0; - this.moreToCome = (this.responseFlags & OPTS_MORE_TO_COME) !== 0; - this.exhaustAllowed = (this.responseFlags & OPTS_EXHAUST_ALLOWED) !== 0; - this.promoteLongs = typeof opts.promoteLongs === 'boolean' ? 
opts.promoteLongs : true; - this.promoteValues = typeof opts.promoteValues === 'boolean' ? opts.promoteValues : true; - this.promoteBuffers = typeof opts.promoteBuffers === 'boolean' ? opts.promoteBuffers : false; - - this.documents = []; - } - - isParsed() { - return this.parsed; - } - - parse(options) { - // Don't parse again if not needed - if (this.parsed) return; - options = options || {}; - - this.index = 4; - // Allow the return of raw documents instead of parsing - const raw = options.raw || false; - const documentsReturnedIn = options.documentsReturnedIn || null; - const promoteLongs = - typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs; - const promoteValues = - typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues; - const promoteBuffers = - typeof options.promoteBuffers === 'boolean' - ? options.promoteBuffers - : this.opts.promoteBuffers; - - // Set up the options - const _options = { - promoteLongs: promoteLongs, - promoteValues: promoteValues, - promoteBuffers: promoteBuffers - }; - - while (this.index < this.data.length) { - const payloadType = this.data.readUInt8(this.index++); - if (payloadType === 1) { - console.error('TYPE 1'); - } else if (payloadType === 0) { - const bsonSize = this.data.readUInt32LE(this.index); - const bin = this.data.slice(this.index, this.index + bsonSize); - this.documents.push(raw ? bin : this.bson.deserialize(bin, _options)); - - this.index += bsonSize; - } - } - - if (this.documents.length === 1 && documentsReturnedIn != null && raw) { - const fieldsAsRaw = {}; - fieldsAsRaw[documentsReturnedIn] = true; - _options.fieldsAsRaw = fieldsAsRaw; - - const doc = this.bson.deserialize(this.documents[0], _options); - this.documents = [doc]; - } - - this.parsed = true; - } -} - -module.exports = { Msg, BinMsg }; diff --git a/lib/core/connection/pool.js b/lib/core/connection/pool.js deleted file mode 100644 index 56d427e99c0..00000000000 --- a/lib/core/connection/pool.js +++ /dev/null @@ -1,1280 +0,0 @@ -'use strict'; - -const inherits = require('util').inherits; -const EventEmitter = require('events').EventEmitter; -const MongoError = require('../error').MongoError; -const MongoTimeoutError = require('../error').MongoTimeoutError; -const MongoWriteConcernError = require('../error').MongoWriteConcernError; -const Logger = require('./logger'); -const f = require('util').format; -const Msg = require('./msg').Msg; -const CommandResult = require('./command_result'); -const MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE; -const COMPRESSION_DETAILS_SIZE = require('../wireprotocol/shared').COMPRESSION_DETAILS_SIZE; -const opcodes = require('../wireprotocol/shared').opcodes; -const compress = require('../wireprotocol/compression').compress; -const compressorIDs = require('../wireprotocol/compression').compressorIDs; -const uncompressibleCommands = require('../wireprotocol/compression').uncompressibleCommands; -const apm = require('./apm'); -const Buffer = require('safe-buffer').Buffer; -const connect = require('./connect'); -const updateSessionFromResponse = require('../sessions').updateSessionFromResponse; -const eachAsync = require('../utils').eachAsync; -const makeStateMachine = require('../utils').makeStateMachine; - -const DISCONNECTED = 'disconnected'; -const CONNECTING = 'connecting'; -const CONNECTED = 'connected'; -const DRAINING = 'draining'; -const DESTROYING = 'destroying'; -const DESTROYED = 'destroyed'; -const stateTransition = 
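The section walk in BinMsg#parse above can be summarized in isolation. This sketch covers only payloadType 0 (the deleted code merely logs type 1) and takes the BSON deserializer as a stand-in parameter:

```js
// After the 4-byte flag field, an OP_MSG body is a run of sections:
// a payloadType byte, then (for type 0) a length-prefixed BSON document.
function readSections(body, deserialize) {
  const documents = [];
  let index = 4; // skip flagBits
  while (index < body.length) {
    const payloadType = body.readUInt8(index++);
    if (payloadType === 0) {
      const bsonSize = body.readUInt32LE(index); // BSON embeds its own size
      documents.push(deserialize(body.slice(index, index + bsonSize)));
      index += bsonSize;
    }
  }
  return documents;
}

// Demo with a stub deserializer: flags + one type-0 section holding the
// minimal 5-byte BSON document (int32 size + terminating 0x00).
const body = Buffer.alloc(4 + 1 + 5);
body.writeUInt8(0, 4);    // payloadType 0
body.writeUInt32LE(5, 5); // document size
console.log(readSections(body, bin => ({ bytes: bin.length }))); // [ { bytes: 5 } ]
```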
makeStateMachine({ - [DISCONNECTED]: [CONNECTING, DRAINING, DISCONNECTED], - [CONNECTING]: [CONNECTING, CONNECTED, DRAINING, DISCONNECTED], - [CONNECTED]: [CONNECTED, DISCONNECTED, DRAINING], - [DRAINING]: [DRAINING, DESTROYING, DESTROYED], - [DESTROYING]: [DESTROYING, DESTROYED], - [DESTROYED]: [DESTROYED] -}); - -const CONNECTION_EVENTS = new Set([ - 'error', - 'close', - 'timeout', - 'parseError', - 'connect', - 'message' -]); - -var _id = 0; - -/** - * Creates a new Pool instance - * @class - * @param {string} options.host The server host - * @param {number} options.port The server port - * @param {number} [options.size=5] Max server connection pool size - * @param {number} [options.minSize=0] Minimum server connection pool size - * @param {boolean} [options.reconnect=true] Server will attempt to reconnect on loss of connection - * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times - * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries - * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled - * @param {boolean} [options.noDelay=true] TCP Connection no delay - * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting - * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting - * @param {number} [options.monitoringSocketTimeout=30000] TCP Socket timeout setting for replicaset monitoring socket - * @param {boolean} [options.ssl=false] Use SSL for connection - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {Buffer} [options.ca] SSL Certificate store binary buffer - * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer - * @param {Buffer} [options.cert] SSL Certificate binary buffer - * @param {Buffer} [options.key] SSL Key file binary buffer - * @param {string} [options.passphrase] SSL Certificate pass phrase - * @param {boolean} [options.rejectUnauthorized=false] Reject unauthorized server certificates - * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits - * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. - * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. 
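The transition table above is fed to makeStateMachine from ../utils, which is not shown in this diff. A plausible reconstruction, under the assumption that the helper validates against the table and assigns target.s.state (the real implementation may also emit events or handle errors differently):

```js
function makeStateMachine(stateTable) {
  return function stateTransition(target, newState) {
    const legalStates = stateTable[target.s.state];
    if (!legalStates || legalStates.indexOf(newState) === -1) {
      throw new TypeError(
        `illegal state transition from [${target.s.state}] => [${newState}], allowed: [${legalStates}]`
      );
    }
    target.s.state = newState;
  };
}

// Usage against a subset of the table above:
const transition = makeStateMachine({
  disconnected: ['connecting'],
  connecting: ['connected']
});
const fakePool = { s: { state: 'disconnected' } };
transition(fakePool, 'connecting');    // ok
// transition(fakePool, 'disconnected'); // would throw: not in the table
```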
- * @fires Pool#connect - * @fires Pool#close - * @fires Pool#error - * @fires Pool#timeout - * @fires Pool#parseError - * @return {Pool} A cursor instance - */ -var Pool = function(topology, options) { - // Add event listener - EventEmitter.call(this); - - // Store topology for later use - this.topology = topology; - - this.s = { - state: DISCONNECTED, - cancellationToken: new EventEmitter() - }; - - // we don't care how many connections are listening for cancellation - this.s.cancellationToken.setMaxListeners(Infinity); - - // Add the options - this.options = Object.assign( - { - // Host and port settings - host: 'localhost', - port: 27017, - // Pool default max size - size: 5, - // Pool default min size - minSize: 0, - // socket settings - connectionTimeout: 30000, - socketTimeout: 360000, - keepAlive: true, - keepAliveInitialDelay: 300000, - noDelay: true, - // SSL Settings - ssl: false, - checkServerIdentity: true, - ca: null, - crl: null, - cert: null, - key: null, - passphrase: null, - rejectUnauthorized: false, - promoteLongs: true, - promoteValues: true, - promoteBuffers: false, - // Reconnection options - reconnect: true, - reconnectInterval: 1000, - reconnectTries: 30, - // Enable domains - domainsEnabled: false, - // feature flag for determining if we are running with the unified topology or not - legacyCompatMode: true - }, - options - ); - - // Identification information - this.id = _id++; - // Current reconnect retries - this.retriesLeft = this.options.reconnectTries; - this.reconnectId = null; - this.reconnectError = null; - // No bson parser passed in - if ( - !options.bson || - (options.bson && - (typeof options.bson.serialize !== 'function' || - typeof options.bson.deserialize !== 'function')) - ) { - throw new Error('must pass in valid bson parser'); - } - - // Logger instance - this.logger = Logger('Pool', options); - // Connections - this.availableConnections = []; - this.inUseConnections = []; - this.connectingConnections = 0; - // Currently executing - this.executing = false; - // Operation work queue - this.queue = []; - - // Number of consecutive timeouts caught - this.numberOfConsecutiveTimeouts = 0; - // Current pool Index - this.connectionIndex = 0; - - // event handlers - const pool = this; - this._messageHandler = messageHandler(this); - this._connectionCloseHandler = function(err) { - const connection = this; - connectionFailureHandler(pool, 'close', err, connection); - }; - - this._connectionErrorHandler = function(err) { - const connection = this; - connectionFailureHandler(pool, 'error', err, connection); - }; - - this._connectionTimeoutHandler = function(err) { - const connection = this; - connectionFailureHandler(pool, 'timeout', err, connection); - }; - - this._connectionParseErrorHandler = function(err) { - const connection = this; - connectionFailureHandler(pool, 'parseError', err, connection); - }; -}; - -inherits(Pool, EventEmitter); - -Object.defineProperty(Pool.prototype, 'size', { - enumerable: true, - get: function() { - return this.options.size; - } -}); - -Object.defineProperty(Pool.prototype, 'minSize', { - enumerable: true, - get: function() { - return this.options.minSize; - } -}); - -Object.defineProperty(Pool.prototype, 'connectionTimeout', { - enumerable: true, - get: function() { - return this.options.connectionTimeout; - } -}); - -Object.defineProperty(Pool.prototype, 'socketTimeout', { - enumerable: true, - get: function() { - return this.options.socketTimeout; - } -}); - -Object.defineProperty(Pool.prototype, 'state', { - 
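For context, a hypothetical usage sketch of this Pool; it was an internal class, so the shape below (null topology argument, js-bson 1.x `new BSON()` instance, relative require path) is illustrative rather than a supported API:

```js
const BSON = require('bson');   // assumption: js-bson 1.x, which exposes a constructor
const Pool = require('./pool'); // the file deleted above

const pool = new Pool(null /* topology */, {
  host: 'localhost',
  port: 27017,
  size: 5,     // max connections
  minSize: 0,
  reconnect: true,
  reconnectTries: 30,
  reconnectInterval: 1000,
  bson: new BSON() // the constructor throws without serialize/deserialize
});

pool.on('connect', () => console.log('pool connected'));
pool.connect();
```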
enumerable: true, - get: function() { - return this.s.state; - } -}); - -// clears all pool state -function resetPoolState(pool) { - pool.inUseConnections = []; - pool.availableConnections = []; - pool.connectingConnections = 0; - pool.executing = false; - pool.numberOfConsecutiveTimeouts = 0; - pool.connectionIndex = 0; - pool.retriesLeft = pool.options.reconnectTries; - pool.reconnectId = null; -} - -function connectionFailureHandler(pool, event, err, conn) { - if (conn) { - if (conn._connectionFailHandled) { - return; - } - - conn._connectionFailHandled = true; - conn.destroy(); - - // Remove the connection - removeConnection(pool, conn); - - // flush remaining work items - conn.flush(err); - } - - // Did we catch a timeout, increment the numberOfConsecutiveTimeouts - if (event === 'timeout') { - pool.numberOfConsecutiveTimeouts = pool.numberOfConsecutiveTimeouts + 1; - - // Have we timed out more than reconnectTries in a row ? - // Force close the pool as we are trying to connect to tcp sink hole - if (pool.numberOfConsecutiveTimeouts > pool.options.reconnectTries) { - pool.numberOfConsecutiveTimeouts = 0; - // Destroy all connections and pool - pool.destroy(true); - // Emit close event - return pool.emit('close', pool); - } - } - - // No more socket available propegate the event - if (pool.socketCount() === 0) { - if (pool.state !== DESTROYED && pool.state !== DESTROYING && pool.state !== DRAINING) { - if (pool.options.reconnect) { - stateTransition(pool, DISCONNECTED); - } - } - - // Do not emit error events, they are always close events - // do not trigger the low level error handler in node - event = event === 'error' ? 'close' : event; - pool.emit(event, err); - } - - // Start reconnection attempts - if (!pool.reconnectId && pool.options.reconnect) { - pool.reconnectError = err; - pool.reconnectId = setTimeout(attemptReconnect(pool), pool.options.reconnectInterval); - } - - // Do we need to do anything to maintain the minimum pool size - const totalConnections = totalConnectionCount(pool); - if (totalConnections < pool.minSize) { - createConnection(pool); - } -} - -function attemptReconnect(pool, callback) { - return function() { - pool.emit('attemptReconnect', pool); - - if (pool.state === DESTROYED || pool.state === DESTROYING) { - if (typeof callback === 'function') { - callback(new MongoError('Cannot create connection when pool is destroyed')); - } - - return; - } - - pool.retriesLeft = pool.retriesLeft - 1; - if (pool.retriesLeft <= 0) { - pool.destroy(); - - const error = new MongoTimeoutError( - `failed to reconnect after ${pool.options.reconnectTries} attempts with interval ${pool.options.reconnectInterval} ms`, - pool.reconnectError - ); - - pool.emit('reconnectFailed', error); - if (typeof callback === 'function') { - callback(error); - } - - return; - } - - // clear the reconnect id on retry - pool.reconnectId = null; - - // now retry creating a connection - createConnection(pool, (err, conn) => { - if (err == null) { - pool.reconnectId = null; - pool.retriesLeft = pool.options.reconnectTries; - pool.emit('reconnect', pool); - } - - if (typeof callback === 'function') { - callback(err, conn); - } - }); - }; -} - -function moveConnectionBetween(connection, from, to) { - var index = from.indexOf(connection); - // Move the connection from connecting to available - if (index !== -1) { - from.splice(index, 1); - to.push(connection); - } -} - -function messageHandler(self) { - return function(message, connection) { - // workItem to execute - var workItem = null; - - // Locate 
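The loop that opens messageHandler above performs request/response correlation; in isolation it looks like this sketch:

```js
// A reply's responseTo header field is matched against the requestId
// recorded on each in-flight work item; the match is removed from the list.
function takeWorkItem(workItems, message) {
  for (let i = 0; i < workItems.length; i++) {
    if (workItems[i].requestId === message.responseTo) {
      return workItems.splice(i, 1)[0];
    }
  }
  return null;
}

const inFlight = [{ requestId: 7 }, { requestId: 8 }];
console.log(takeWorkItem(inFlight, { responseTo: 8 })); // { requestId: 8 }
console.log(inFlight.length);                           // 1
```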
the workItem - for (var i = 0; i < connection.workItems.length; i++) { - if (connection.workItems[i].requestId === message.responseTo) { - // Get the callback - workItem = connection.workItems[i]; - // Remove from list of workItems - connection.workItems.splice(i, 1); - } - } - - if (workItem && workItem.monitoring) { - moveConnectionBetween(connection, self.inUseConnections, self.availableConnections); - } - - // Reset timeout counter - self.numberOfConsecutiveTimeouts = 0; - - // Reset the connection timeout if we modified it for - // this operation - if (workItem && workItem.socketTimeout) { - connection.resetSocketTimeout(); - } - - // Log if debug enabled - if (self.logger.isDebug()) { - self.logger.debug( - f( - 'message [%s] received from %s:%s', - message.raw.toString('hex'), - self.options.host, - self.options.port - ) - ); - } - - function handleOperationCallback(self, cb, err, result) { - // No domain enabled - if (!self.options.domainsEnabled) { - return process.nextTick(function() { - return cb(err, result); - }); - } - - // Domain enabled just call the callback - cb(err, result); - } - - // Keep executing, ensure current message handler does not stop execution - if (!self.executing) { - process.nextTick(function() { - _execute(self)(); - }); - } - - // Time to dispatch the message if we have a callback - if (workItem && !workItem.immediateRelease) { - try { - // Parse the message according to the provided options - message.parse(workItem); - } catch (err) { - return handleOperationCallback(self, workItem.cb, new MongoError(err)); - } - - if (message.documents[0]) { - const document = message.documents[0]; - const session = workItem.session; - if (session) { - updateSessionFromResponse(session, document); - } - - if (self.topology && document.$clusterTime) { - self.topology.clusterTime = document.$clusterTime; - } - } - - // Establish if we have an error - if (workItem.command && message.documents[0]) { - const responseDoc = message.documents[0]; - - if (responseDoc.writeConcernError) { - const err = new MongoWriteConcernError(responseDoc.writeConcernError, responseDoc); - return handleOperationCallback(self, workItem.cb, err); - } - - if (responseDoc.ok === 0 || responseDoc.$err || responseDoc.errmsg || responseDoc.code) { - return handleOperationCallback(self, workItem.cb, new MongoError(responseDoc)); - } - } - - // Add the connection details - message.hashedName = connection.hashedName; - - // Return the documents - handleOperationCallback( - self, - workItem.cb, - null, - new CommandResult(workItem.fullResult ? message : message.documents[0], connection, message) - ); - } - }; -} - -/** - * Return the total socket count in the pool. - * @method - * @return {Number} The number of socket available. 
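The error triage at the end of messageHandler above reduces to a small decision function; a sketch on plain documents:

```js
// A writeConcernError is reported first; otherwise ok: 0, $err, errmsg,
// or code all mark a command error.
function classifyResponse(responseDoc) {
  if (responseDoc.writeConcernError) return 'write concern error';
  if (responseDoc.ok === 0 || responseDoc.$err || responseDoc.errmsg || responseDoc.code) {
    return 'command error';
  }
  return 'success';
}

console.log(classifyResponse({ ok: 1 }));                        // success
console.log(classifyResponse({ ok: 0, errmsg: 'not master' }));  // command error
console.log(classifyResponse({ ok: 1, writeConcernError: {} })); // write concern error
```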
- */ -Pool.prototype.socketCount = function() { - return this.availableConnections.length + this.inUseConnections.length; - // + this.connectingConnections.length; -}; - -function totalConnectionCount(pool) { - return ( - pool.availableConnections.length + pool.inUseConnections.length + pool.connectingConnections - ); -} - -/** - * Return all pool connections - * @method - * @return {Connection[]} The pool connections - */ -Pool.prototype.allConnections = function() { - return this.availableConnections.concat(this.inUseConnections); -}; - -/** - * Get a pool connection (round-robin) - * @method - * @return {Connection} - */ -Pool.prototype.get = function() { - return this.allConnections()[0]; -}; - -/** - * Is the pool connected - * @method - * @return {boolean} - */ -Pool.prototype.isConnected = function() { - // We are in a destroyed state - if (this.state === DESTROYED || this.state === DESTROYING) { - return false; - } - - // Get connections - var connections = this.availableConnections.concat(this.inUseConnections); - - // Check if we have any connected connections - for (var i = 0; i < connections.length; i++) { - if (connections[i].isConnected()) return true; - } - - // Not connected - return false; -}; - -/** - * Was the pool destroyed - * @method - * @return {boolean} - */ -Pool.prototype.isDestroyed = function() { - return this.state === DESTROYED || this.state === DESTROYING; -}; - -/** - * Is the pool in a disconnected state - * @method - * @return {boolean} - */ -Pool.prototype.isDisconnected = function() { - return this.state === DISCONNECTED; -}; - -/** - * Connect pool - */ -Pool.prototype.connect = function(callback) { - if (this.state !== DISCONNECTED) { - throw new MongoError('connection in unlawful state ' + this.state); - } - - stateTransition(this, CONNECTING); - createConnection(this, (err, conn) => { - if (err) { - if (typeof callback === 'function') { - this.destroy(); - callback(err); - return; - } - - if (this.state === CONNECTING) { - this.emit('error', err); - } - - this.destroy(); - return; - } - - stateTransition(this, CONNECTED); - - // create min connections - if (this.minSize) { - for (let i = 0; i < this.minSize; i++) { - createConnection(this); - } - } - - if (typeof callback === 'function') { - callback(null, conn); - } else { - this.emit('connect', this, conn); - } - }); -}; - -/** - * Authenticate using a specified mechanism - * @param {authResultCallback} callback A callback function - */ -Pool.prototype.auth = function(credentials, callback) { - if (typeof callback === 'function') callback(null, null); -}; - -/** - * Logout all users against a database - * @param {authResultCallback} callback A callback function - */ -Pool.prototype.logout = function(dbName, callback) { - if (typeof callback === 'function') callback(null, null); -}; - -/** - * Unref the pool - * @method - */ -Pool.prototype.unref = function() { - // Get all the known connections - var connections = this.availableConnections.concat(this.inUseConnections); - - connections.forEach(function(c) { - c.unref(); - }); -}; - -// Destroy the connections -function destroy(self, connections, options, callback) { - stateTransition(self, DESTROYING); - - // indicate that in-flight connections should cancel - self.s.cancellationToken.emit('cancel'); - - eachAsync( - connections, - (conn, cb) => { - for (const eventName of CONNECTION_EVENTS) { - conn.removeAllListeners(eventName); - } - - // ignore any errors during destruction - conn.on('error', () => {}); - - conn.destroy(options, cb); - }, - err 
=> { - if (err) { - if (typeof callback === 'function') callback(err, null); - return; - } - - resetPoolState(self); - self.queue = []; - - stateTransition(self, DESTROYED); - if (typeof callback === 'function') callback(null, null); - } - ); -} - -/** - * Destroy pool - * @method - */ -Pool.prototype.destroy = function(force, callback) { - var self = this; - if (typeof force === 'function') { - callback = force; - force = false; - } - - // Do not try again if the pool is already dead - if (this.state === DESTROYED || self.state === DESTROYING) { - if (typeof callback === 'function') callback(null, null); - return; - } - - // Set state to draining - stateTransition(this, DRAINING); - - // Are we force closing - if (force) { - // Get all the known connections - var connections = self.availableConnections.concat(self.inUseConnections); - - // Flush any remaining work items with - // an error - while (self.queue.length > 0) { - var workItem = self.queue.shift(); - if (typeof workItem.cb === 'function') { - workItem.cb(new MongoError('Pool was force destroyed')); - } - } - - // Destroy the topology - return destroy(self, connections, { force: true }, callback); - } - - // Clear out the reconnect if set - if (this.reconnectId) { - clearTimeout(this.reconnectId); - } - - // Wait for the operations to drain before we close the pool - function checkStatus() { - if (self.state === DESTROYED || self.state === DESTROYING) { - if (typeof callback === 'function') { - callback(); - } - - return; - } - - flushMonitoringOperations(self.queue); - - if (self.queue.length === 0) { - // Get all the known connections - var connections = self.availableConnections.concat(self.inUseConnections); - - // Check if we have any in flight operations - for (var i = 0; i < connections.length; i++) { - // There is an operation still in flight, reschedule a - // check waiting for it to drain - if (connections[i].workItems.length > 0) { - return setTimeout(checkStatus, 1); - } - } - - destroy(self, connections, { force: false }, callback); - } else { - // Ensure we empty the queue - _execute(self)(); - // Set timeout - setTimeout(checkStatus, 1); - } - } - - // Initiate drain of operations - checkStatus(); -}; - -/** - * Reset all connections of this pool - * - * @param {function} [callback] - */ -Pool.prototype.reset = function(callback) { - if (this.s.state !== CONNECTED) { - if (typeof callback === 'function') { - callback(new MongoError('pool is not connected, reset aborted')); - } - - return; - } - - // signal in-flight connections should be cancelled - this.s.cancellationToken.emit('cancel'); - - // destroy existing connections - const connections = this.availableConnections.concat(this.inUseConnections); - eachAsync( - connections, - (conn, cb) => { - for (const eventName of CONNECTION_EVENTS) { - conn.removeAllListeners(eventName); - } - - conn.destroy({ force: true }, cb); - }, - err => { - if (err) { - if (typeof callback === 'function') { - callback(err, null); - return; - } - } - - resetPoolState(this); - - // create a new connection, this will ultimately trigger execution - createConnection(this, () => { - if (typeof callback === 'function') { - callback(null, null); - } - }); - } - ); -}; - -// Prepare the buffer that Pool.prototype.write() uses to send to the server -function serializeCommand(self, command, callback) { - const originalCommandBuffer = command.toBin(); - - // Check whether we and the server have agreed to use a compressor - const shouldCompress = !!self.options.agreedCompressor; - if 
(!shouldCompress || !canCompress(command)) { - return callback(null, originalCommandBuffer); - } - - // Transform originalCommandBuffer into OP_COMPRESSED - const concatenatedOriginalCommandBuffer = Buffer.concat(originalCommandBuffer); - const messageToBeCompressed = concatenatedOriginalCommandBuffer.slice(MESSAGE_HEADER_SIZE); - - // Extract information needed for OP_COMPRESSED from the uncompressed message - const originalCommandOpCode = concatenatedOriginalCommandBuffer.readInt32LE(12); - - // Compress the message body - compress(self, messageToBeCompressed, function(err, compressedMessage) { - if (err) return callback(err, null); - - // Create the msgHeader of OP_COMPRESSED - const msgHeader = Buffer.alloc(MESSAGE_HEADER_SIZE); - msgHeader.writeInt32LE( - MESSAGE_HEADER_SIZE + COMPRESSION_DETAILS_SIZE + compressedMessage.length, - 0 - ); // messageLength - msgHeader.writeInt32LE(command.requestId, 4); // requestID - msgHeader.writeInt32LE(0, 8); // responseTo (zero) - msgHeader.writeInt32LE(opcodes.OP_COMPRESSED, 12); // opCode - - // Create the compression details of OP_COMPRESSED - const compressionDetails = Buffer.alloc(COMPRESSION_DETAILS_SIZE); - compressionDetails.writeInt32LE(originalCommandOpCode, 0); // originalOpcode - compressionDetails.writeInt32LE(messageToBeCompressed.length, 4); // Size of the uncompressed compressedMessage, excluding the MsgHeader - compressionDetails.writeUInt8(compressorIDs[self.options.agreedCompressor], 8); // compressorID - - return callback(null, [msgHeader, compressionDetails, compressedMessage]); - }); -} - -/** - * Write a message to MongoDB - * @method - * @return {Connection} - */ -Pool.prototype.write = function(command, options, cb) { - var self = this; - // Ensure we have a callback - if (typeof options === 'function') { - cb = options; - } - - // Always have options - options = options || {}; - - // We need to have a callback function unless the message returns no response - if (!(typeof cb === 'function') && !options.noResponse) { - throw new MongoError('write method must provide a callback'); - } - - // Pool was destroyed error out - if (this.state === DESTROYED || this.state === DESTROYING) { - cb(new MongoError('pool destroyed')); - return; - } - - if (this.state === DRAINING) { - cb(new MongoError('pool is draining, new operations prohibited')); - return; - } - - if (this.options.domainsEnabled && process.domain && typeof cb === 'function') { - // if we have a domain bind to it - var oldCb = cb; - cb = process.domain.bind(function() { - // v8 - argumentsToArray one-liner - var args = new Array(arguments.length); - for (var i = 0; i < arguments.length; i++) { - args[i] = arguments[i]; - } - // bounce off event loop so domain switch takes place - process.nextTick(function() { - oldCb.apply(null, args); - }); - }); - } - - // Do we have an operation - var operation = { - cb: cb, - raw: false, - promoteLongs: true, - promoteValues: true, - promoteBuffers: false, - fullResult: false - }; - - // Set the options for the parsing - operation.promoteLongs = typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true; - operation.promoteValues = - typeof options.promoteValues === 'boolean' ? options.promoteValues : true; - operation.promoteBuffers = - typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false; - operation.raw = typeof options.raw === 'boolean' ? options.raw : false; - operation.immediateRelease = - typeof options.immediateRelease === 'boolean' ? 
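The OP_COMPRESSED envelope assembled above (16-byte header, 9-byte compression details, compressed body) can be shown standalone; `compressedMessage` stands in for the snappy/zlib output:

```js
const MESSAGE_HEADER_SIZE = 16;
const COMPRESSION_DETAILS_SIZE = 9;
const OP_COMPRESSED = 2012;

function wrapCompressed(requestId, originalOpCode, uncompressedSize, compressorId, compressedMessage) {
  const msgHeader = Buffer.alloc(MESSAGE_HEADER_SIZE);
  msgHeader.writeInt32LE(
    MESSAGE_HEADER_SIZE + COMPRESSION_DETAILS_SIZE + compressedMessage.length,
    0
  ); // messageLength
  msgHeader.writeInt32LE(requestId, 4);
  msgHeader.writeInt32LE(0, 8); // responseTo
  msgHeader.writeInt32LE(OP_COMPRESSED, 12);

  const details = Buffer.alloc(COMPRESSION_DETAILS_SIZE);
  details.writeInt32LE(originalOpCode, 0);   // opcode of the wrapped message
  details.writeInt32LE(uncompressedSize, 4); // body size before compression
  details.writeUInt8(compressorId, 8);       // per spec: 1 = snappy, 2 = zlib

  return Buffer.concat([msgHeader, details, compressedMessage]);
}

// e.g. wrapCompressed(1, 2013, rawBody.length, 2, zlibDeflatedBody)
```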
options.immediateRelease : false; - operation.documentsReturnedIn = options.documentsReturnedIn; - operation.command = typeof options.command === 'boolean' ? options.command : false; - operation.fullResult = typeof options.fullResult === 'boolean' ? options.fullResult : false; - operation.noResponse = typeof options.noResponse === 'boolean' ? options.noResponse : false; - operation.session = options.session || null; - - // Optional per operation socketTimeout - operation.socketTimeout = options.socketTimeout; - operation.monitoring = options.monitoring; - - // Get the requestId - operation.requestId = command.requestId; - - // If command monitoring is enabled we need to modify the callback here - if (self.options.monitorCommands) { - this.emit('commandStarted', new apm.CommandStartedEvent(this, command)); - - operation.started = process.hrtime(); - operation.cb = (err, reply) => { - if (err) { - self.emit( - 'commandFailed', - new apm.CommandFailedEvent(this, command, err, operation.started) - ); - } else { - if (reply && reply.result && (reply.result.ok === 0 || reply.result.$err)) { - self.emit( - 'commandFailed', - new apm.CommandFailedEvent(this, command, reply.result, operation.started) - ); - } else { - self.emit( - 'commandSucceeded', - new apm.CommandSucceededEvent(this, command, reply, operation.started) - ); - } - } - - if (typeof cb === 'function') cb(err, reply); - }; - } - - // Prepare the operation buffer - serializeCommand(self, command, (err, serializedBuffers) => { - if (err) throw err; - - // Set the operation's buffer to the serialization of the commands - operation.buffer = serializedBuffers; - - // If we have a monitoring operation schedule as the very first operation - // Otherwise add to back of queue - if (options.monitoring) { - self.queue.unshift(operation); - } else { - self.queue.push(operation); - } - - // Attempt to execute the operation - if (!self.executing) { - process.nextTick(function() { - _execute(self)(); - }); - } - }); -}; - -// Return whether a command contains an uncompressible command term -// Will return true if command contains no uncompressible command terms -function canCompress(command) { - const commandDoc = command instanceof Msg ? 
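A usage sketch for the command monitoring wired up above; it assumes a pool created with `monitorCommands: true`, and the event fields shown (commandName, requestId, duration, failure) follow the apm module's event classes, so treat the exact field names as an assumption:

```js
// pool: a Pool instance constructed with { monitorCommands: true, ... }
pool.on('commandStarted', event => {
  console.log('started:', event.commandName, 'requestId', event.requestId);
});
pool.on('commandSucceeded', event => {
  console.log('succeeded:', event.commandName, 'in', event.duration, 'ms');
});
pool.on('commandFailed', event => {
  console.log('failed:', event.commandName, event.failure);
});
```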
command.command : command.query; - const commandName = Object.keys(commandDoc)[0]; - return !uncompressibleCommands.has(commandName); -} - -// Remove connection method -function remove(connection, connections) { - for (var i = 0; i < connections.length; i++) { - if (connections[i] === connection) { - connections.splice(i, 1); - return true; - } - } -} - -function removeConnection(self, connection) { - if (remove(connection, self.availableConnections)) return; - if (remove(connection, self.inUseConnections)) return; -} - -function createConnection(pool, callback) { - if (pool.state === DESTROYED || pool.state === DESTROYING) { - if (typeof callback === 'function') { - callback(new MongoError('Cannot create connection when pool is destroyed')); - } - - return; - } - - pool.connectingConnections++; - connect(pool.options, pool.s.cancellationToken, (err, connection) => { - pool.connectingConnections--; - - if (err) { - if (pool.logger.isDebug()) { - pool.logger.debug(`connection attempt failed with error [${JSON.stringify(err)}]`); - } - - // check if reconnect is enabled, and attempt retry if so - if (!pool.reconnectId && pool.options.reconnect) { - if (pool.state === CONNECTING && pool.options.legacyCompatMode) { - callback(err); - return; - } - - pool.reconnectError = err; - pool.reconnectId = setTimeout( - attemptReconnect(pool, callback), - pool.options.reconnectInterval - ); - - return; - } - - if (typeof callback === 'function') { - callback(err); - } - - return; - } - - // the pool might have been closed since we started creating the connection - if (pool.state === DESTROYED || pool.state === DESTROYING) { - if (typeof callback === 'function') { - callback(new MongoError('Pool was destroyed after connection creation')); - } - - connection.destroy(); - return; - } - - // otherwise, connect relevant event handlers and add it to our available connections - connection.on('error', pool._connectionErrorHandler); - connection.on('close', pool._connectionCloseHandler); - connection.on('timeout', pool._connectionTimeoutHandler); - connection.on('parseError', pool._connectionParseErrorHandler); - connection.on('message', pool._messageHandler); - - pool.availableConnections.push(connection); - - // if a callback was provided, return the connection - if (typeof callback === 'function') { - callback(null, connection); - } - - // immediately execute any waiting work - _execute(pool)(); - }); -} - -function flushMonitoringOperations(queue) { - for (var i = 0; i < queue.length; i++) { - if (queue[i].monitoring) { - var workItem = queue[i]; - queue.splice(i, 1); - workItem.cb( - new MongoError({ message: 'no connection available for monitoring', driver: true }) - ); - } - } -} - -function _execute(self) { - return function() { - if (self.state === DESTROYED) return; - // Already executing, skip - if (self.executing) return; - // Set pool as executing - self.executing = true; - - // New pool connections are in progress, wait them to finish - // before executing any more operation to ensure distribution of - // operations - if (self.connectingConnections > 0) { - self.executing = false; - return; - } - - // As long as we have available connections - // eslint-disable-next-line - while (true) { - // Total availble connections - const totalConnections = totalConnectionCount(self); - - // No available connections available, flush any monitoring ops - if (self.availableConnections.length === 0) { - // Flush any monitoring operations - flushMonitoringOperations(self.queue); - - // Try to create a new 
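canCompress above relies on the convention that a command's name is the first key of its document; a sketch with a few entries mirrored from the real uncompressible set:

```js
const uncompressibleCommands = new Set([
  'ismaster', 'saslStart', 'saslContinue', 'getnonce', 'authenticate'
]);

function canCompressDoc(commandDoc) {
  const commandName = Object.keys(commandDoc)[0]; // first key names the command
  return !uncompressibleCommands.has(commandName);
}

console.log(canCompressDoc({ ismaster: 1 }));   // false: handshakes stay uncompressed
console.log(canCompressDoc({ find: 'users' })); // true
```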
connection to execute stuck operation - if (totalConnections < self.options.size && self.queue.length > 0) { - createConnection(self); - } - - break; - } - - // No queue break - if (self.queue.length === 0) { - break; - } - - var connection = null; - const connections = self.availableConnections.filter(conn => conn.workItems.length === 0); - - // No connection found that has no work on it, just pick one for pipelining - if (connections.length === 0) { - connection = - self.availableConnections[self.connectionIndex++ % self.availableConnections.length]; - } else { - connection = connections[self.connectionIndex++ % connections.length]; - } - - // Is the connection connected - if (!connection.isConnected()) { - // Remove the disconnected connection - removeConnection(self, connection); - // Flush any monitoring operations in the queue, failing fast - flushMonitoringOperations(self.queue); - break; - } - - // Get the next work item - var workItem = self.queue.shift(); - - // If we are monitoring we need to use a connection that is not - // running another operation to avoid socket timeout changes - // affecting an existing operation - if (workItem.monitoring) { - var foundValidConnection = false; - - for (let i = 0; i < self.availableConnections.length; i++) { - // If the connection is connected - // And there are no pending workItems on it - // Then we can safely use it for monitoring. - if ( - self.availableConnections[i].isConnected() && - self.availableConnections[i].workItems.length === 0 - ) { - foundValidConnection = true; - connection = self.availableConnections[i]; - break; - } - } - - // No safe connection found, attempt to grow the connections - // if possible and break from the loop - if (!foundValidConnection) { - // Put workItem back on the queue - self.queue.unshift(workItem); - - // Attempt to grow the pool if it's not yet maxsize - if (totalConnections < self.options.size && self.queue.length > 0) { - // Create a new connection - createConnection(self); - } - - // Re-execute the operation - setTimeout(() => _execute(self)(), 10); - break; - } - } - - // Don't execute operation until we have a full pool - if (totalConnections < self.options.size) { - // Connection has work items, then put it back on the queue - // and create a new connection - if (connection.workItems.length > 0) { - // Lets put the workItem back on the list - self.queue.unshift(workItem); - // Create a new connection - createConnection(self); - // Break from the loop - break; - } - } - - // Get actual binary commands - var buffer = workItem.buffer; - - // If we are monitoring take the connection of the availableConnections - if (workItem.monitoring) { - moveConnectionBetween(connection, self.availableConnections, self.inUseConnections); - } - - // Track the executing commands on the mongo server - // as long as there is an expected response - if (!workItem.noResponse) { - connection.workItems.push(workItem); - } - - // We have a custom socketTimeout - if (!workItem.immediateRelease && typeof workItem.socketTimeout === 'number') { - connection.setSocketTimeout(workItem.socketTimeout); - } - - // Capture if write was successful - var writeSuccessful = true; - - // Put operation on the wire - if (Array.isArray(buffer)) { - for (let i = 0; i < buffer.length; i++) { - writeSuccessful = connection.write(buffer[i]); - } - } else { - writeSuccessful = connection.write(buffer); - } - - // if the command is designated noResponse, call the callback immeditely - if (workItem.noResponse && typeof workItem.cb === 
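The connection pick in _execute above prefers idle sockets and only pipelines onto busy ones as a fallback; distilled:

```js
// Round-robin over idle connections; fall back to pipelining on a busy
// one when none are idle. connectionIndex only ever increments.
function pickConnection(pool) {
  const idle = pool.availableConnections.filter(conn => conn.workItems.length === 0);
  const candidates = idle.length === 0 ? pool.availableConnections : idle;
  return candidates[pool.connectionIndex++ % candidates.length];
}

const fakePool = {
  connectionIndex: 0,
  availableConnections: [
    { id: 'a', workItems: ['busy'] },
    { id: 'b', workItems: [] }
  ]
};
console.log(pickConnection(fakePool).id); // 'b' (the idle connection wins)
```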
'function') { - workItem.cb(null, null); - } - - if (writeSuccessful === false) { - // If write not successful put back on queue - self.queue.unshift(workItem); - // Remove the disconnected connection - removeConnection(self, connection); - // Flush any monitoring operations in the queue, failing fast - flushMonitoringOperations(self.queue); - break; - } - } - - self.executing = false; - }; -} - -// Make execution loop available for testing -Pool._execute = _execute; - -/** - * A server connect event, used to verify that the connection is up and running - * - * @event Pool#connect - * @type {Pool} - */ - -/** - * A server reconnect event, used to verify that pool reconnected. - * - * @event Pool#reconnect - * @type {Pool} - */ - -/** - * The server connection closed, all pool connections closed - * - * @event Pool#close - * @type {Pool} - */ - -/** - * The server connection caused an error, all pool connections closed - * - * @event Pool#error - * @type {Pool} - */ - -/** - * The server connection timed out, all pool connections closed - * - * @event Pool#timeout - * @type {Pool} - */ - -/** - * The driver experienced an invalid message, all pool connections closed - * - * @event Pool#parseError - * @type {Pool} - */ - -/** - * The driver attempted to reconnect - * - * @event Pool#attemptReconnect - * @type {Pool} - */ - -/** - * The driver exhausted all reconnect attempts - * - * @event Pool#reconnectFailed - * @type {Pool} - */ - -module.exports = Pool; diff --git a/lib/core/connection/utils.js b/lib/core/connection/utils.js deleted file mode 100644 index 2f3d889f431..00000000000 --- a/lib/core/connection/utils.js +++ /dev/null @@ -1,57 +0,0 @@ -'use strict'; - -const require_optional = require('require_optional'); - -function debugOptions(debugFields, options) { - var finaloptions = {}; - debugFields.forEach(function(n) { - finaloptions[n] = options[n]; - }); - - return finaloptions; -} - -function retrieveBSON() { - var BSON = require('bson'); - BSON.native = false; - - try { - var optionalBSON = require_optional('bson-ext'); - if (optionalBSON) { - optionalBSON.native = true; - return optionalBSON; - } - } catch (err) {} // eslint-disable-line - - return BSON; -} - -// Throw an error if an attempt to use Snappy is made when Snappy is not installed -function noSnappyWarning() { - throw new Error( - 'Attempted to use Snappy compression, but Snappy is not installed. Install or disable Snappy compression and try again.' 
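retrieveBSON above is an instance of the optional-native-dependency pattern; a simplified sketch (plain require in place of require_optional, which resolves against peerOptionalDependencies):

```js
// Try the native add-on, fall back to pure JS, and record which one won.
function retrieveParser() {
  const js = require('bson');
  js.native = false;
  try {
    const native = require('bson-ext'); // throws when not installed
    native.native = true;
    return native;
  } catch (err) {
    return js;
  }
}

console.log(retrieveParser().native); // false unless bson-ext is installed
```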
- ); -} - -// Facilitate loading Snappy optionally -function retrieveSnappy() { - var snappy = null; - try { - snappy = require_optional('snappy'); - } catch (error) {} // eslint-disable-line - if (!snappy) { - snappy = { - compress: noSnappyWarning, - uncompress: noSnappyWarning, - compressSync: noSnappyWarning, - uncompressSync: noSnappyWarning - }; - } - return snappy; -} - -module.exports = { - debugOptions, - retrieveBSON, - retrieveSnappy -}; diff --git a/lib/core/error.js b/lib/core/error.js deleted file mode 100644 index 38fbb379a3a..00000000000 --- a/lib/core/error.js +++ /dev/null @@ -1,270 +0,0 @@ -'use strict'; - -const mongoErrorContextSymbol = Symbol('mongoErrorContextSymbol'); - -/** - * Creates a new MongoError - * - * @augments Error - * @param {Error|string|object} message The error message - * @property {string} message The error message - * @property {string} stack The error call stack - */ -class MongoError extends Error { - constructor(message) { - if (message instanceof Error) { - super(message.message); - this.stack = message.stack; - } else { - if (typeof message === 'string') { - super(message); - } else { - super(message.message || message.errmsg || message.$err || 'n/a'); - for (var name in message) { - this[name] = message[name]; - } - } - - Error.captureStackTrace(this, this.constructor); - } - - this.name = 'MongoError'; - this[mongoErrorContextSymbol] = this[mongoErrorContextSymbol] || {}; - } - - /** - * Creates a new MongoError object - * - * @param {Error|string|object} options The options used to create the error. - * @return {MongoError} A MongoError instance - * @deprecated Use `new MongoError()` instead. - */ - static create(options) { - return new MongoError(options); - } - - /** - * Checks the error to see if it has an error label - * @param {string} label The error label to check for - * @returns {boolean} returns true if the error has the provided error label - */ - hasErrorLabel(label) { - return this.errorLabels && this.errorLabels.indexOf(label) !== -1; - } -} - -/** - * An error indicating an issue with the network, including TCP - * errors and timeouts. 
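A usage sketch for the MongoError class defined above (the relative require path is illustrative): it accepts an Error, a string, or a server-style document, and hasErrorLabel consults the optional errorLabels array:

```js
const { MongoError } = require('./error'); // the file deleted above

const fromString = new MongoError('something went wrong');
console.log(fromString.name); // 'MongoError'

const fromDoc = new MongoError({
  errmsg: 'not master',
  code: 10107,
  errorLabels: ['RetryableWriteError']
});
console.log(fromDoc.message);                              // 'not master'
console.log(fromDoc.code);                                 // 10107 (fields are copied over)
console.log(fromDoc.hasErrorLabel('RetryableWriteError')); // true
```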
- * - * @param {Error|string|object} message The error message - * @property {string} message The error message - * @property {string} stack The error call stack - * @extends MongoError - */ -class MongoNetworkError extends MongoError { - constructor(message) { - super(message); - this.name = 'MongoNetworkError'; - } -} - -/** - * An error used when attempting to parse a value (like a connection string) - * - * @param {Error|string|object} message The error message - * @property {string} message The error message - * @extends MongoError - */ -class MongoParseError extends MongoError { - constructor(message) { - super(message); - this.name = 'MongoParseError'; - } -} - -/** - * An error signifying a client-side timeout event - * - * @param {Error|string|object} message The error message - * @param {string|object} [reason] The reason the timeout occured - * @property {string} message The error message - * @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers - * @extends MongoError - */ -class MongoTimeoutError extends MongoError { - constructor(message, reason) { - if (reason && reason.error) { - super(reason.error.message || reason.error); - } else { - super(message); - } - - this.name = 'MongoTimeoutError'; - if (reason) { - this.reason = reason; - } - } -} - -/** - * An error signifying a client-side server selection error - * - * @param {Error|string|object} message The error message - * @param {string|object} [reason] The reason the timeout occured - * @property {string} message The error message - * @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers - * @extends MongoError - */ -class MongoServerSelectionError extends MongoTimeoutError { - constructor(message, reason) { - super(message, reason); - this.name = 'MongoServerSelectionError'; - } -} - -function makeWriteConcernResultObject(input) { - const output = Object.assign({}, input); - - if (output.ok === 0) { - output.ok = 1; - delete output.errmsg; - delete output.code; - delete output.codeName; - } - - return output; -} - -/** - * An error thrown when the server reports a writeConcernError - * - * @param {Error|string|object} message The error message - * @param {object} result The result document (provided if ok: 1) - * @property {string} message The error message - * @property {object} [result] The result document (provided if ok: 1) - * @extends MongoError - */ -class MongoWriteConcernError extends MongoError { - constructor(message, result) { - super(message); - this.name = 'MongoWriteConcernError'; - - if (result != null) { - this.result = makeWriteConcernResultObject(result); - } - } -} - -// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms -const RETRYABLE_ERROR_CODES = new Set([ - 6, // HostUnreachable - 7, // HostNotFound - 89, // NetworkTimeout - 91, // ShutdownInProgress - 189, // PrimarySteppedDown - 9001, // SocketException - 10107, // NotMaster - 11600, // InterruptedAtShutdown - 11602, // InterruptedDueToReplStateChange - 13435, // NotMasterNoSlaveOk - 13436 // NotMasterOrSecondary -]); - -/** - * Determines whether an error is something the driver should attempt to retry - * - * @ignore - * @param {MongoError|Error} error - */ -function isRetryableError(error) { - return ( - RETRYABLE_ERROR_CODES.has(error.code) || - error instanceof MongoNetworkError || - 
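The retryability rule above combines the code set with message sniffing; exercised on plain objects (the real check also accepts any MongoNetworkError instance):

```js
const RETRYABLE = new Set([6, 7, 89, 91, 189, 9001, 10107, 11600, 11602, 13435, 13436]);

function looksRetryable(err) {
  return RETRYABLE.has(err.code) || /not master|node is recovering/.test(err.message || '');
}

console.log(looksRetryable({ code: 10107, message: '' }));      // true (NotMaster)
console.log(looksRetryable({ message: 'node is recovering' })); // true
console.log(looksRetryable({ code: 8000, message: 'boom' }));   // false
```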
error.message.match(/not master/) || - error.message.match(/node is recovering/) - ); -} - -const SDAM_RECOVERING_CODES = new Set([ - 91, // ShutdownInProgress - 189, // PrimarySteppedDown - 11600, // InterruptedAtShutdown - 11602, // InterruptedDueToReplStateChange - 13436 // NotMasterOrSecondary -]); - -const SDAM_NOTMASTER_CODES = new Set([ - 10107, // NotMaster - 13435 // NotMasterNoSlaveOk -]); - -const SDAM_NODE_SHUTTING_DOWN_ERROR_CODES = new Set([ - 11600, // InterruptedAtShutdown - 91 // ShutdownInProgress -]); - -function isRecoveringError(err) { - if (err.code && SDAM_RECOVERING_CODES.has(err.code)) { - return true; - } - - return err.message.match(/not master or secondary/) || err.message.match(/node is recovering/); -} - -function isNotMasterError(err) { - if (err.code && SDAM_NOTMASTER_CODES.has(err.code)) { - return true; - } - - if (isRecoveringError(err)) { - return false; - } - - return err.message.match(/not master/); -} - -function isNodeShuttingDownError(err) { - return err.code && SDAM_NODE_SHUTTING_DOWN_ERROR_CODES.has(err.code); -} - -/** - * Determines whether SDAM can recover from a given error. If it cannot - * then the pool will be cleared, and server state will completely reset - * locally. - * - * @ignore - * @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-master-and-node-is-recovering - * @param {MongoError|Error} error - */ -function isSDAMUnrecoverableError(error) { - // NOTE: null check is here for a strictly pre-CMAP world, a timeout or - // close event are considered unrecoverable - if (error instanceof MongoParseError || error == null) { - return true; - } - - if (isRecoveringError(error) || isNotMasterError(error)) { - return true; - } - - return false; -} - -function isNetworkTimeoutError(err) { - return err instanceof MongoNetworkError && err.message.match(/timed out/); -} - -module.exports = { - MongoError, - MongoNetworkError, - MongoParseError, - MongoTimeoutError, - MongoServerSelectionError, - MongoWriteConcernError, - mongoErrorContextSymbol, - isRetryableError, - isSDAMUnrecoverableError, - isNodeShuttingDownError, - isNetworkTimeoutError -}; diff --git a/lib/core/index.js b/lib/core/index.js deleted file mode 100644 index 2da5573a47c..00000000000 --- a/lib/core/index.js +++ /dev/null @@ -1,51 +0,0 @@ -'use strict'; - -let BSON = require('bson'); -const require_optional = require('require_optional'); -const EJSON = require('./utils').retrieveEJSON(); - -try { - // Attempt to grab the native BSON parser - const BSONNative = require_optional('bson-ext'); - // If we got the native parser, use it instead of the - // Javascript one - if (BSONNative) { - BSON = BSONNative; - } -} catch (err) {} // eslint-disable-line - -module.exports = { - // Errors - MongoError: require('./error').MongoError, - MongoNetworkError: require('./error').MongoNetworkError, - MongoParseError: require('./error').MongoParseError, - MongoTimeoutError: require('./error').MongoTimeoutError, - MongoServerSelectionError: require('./error').MongoServerSelectionError, - MongoWriteConcernError: require('./error').MongoWriteConcernError, - mongoErrorContextSymbol: require('./error').mongoErrorContextSymbol, - // Core - Connection: require('./connection/connection'), - Server: require('./topologies/server'), - ReplSet: require('./topologies/replset'), - Mongos: require('./topologies/mongos'), - Logger: require('./connection/logger'), - Cursor: require('./cursor').CoreCursor, - ReadPreference: 
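Note the ordering in the SDAM helpers above: isNotMasterError explicitly yields to isRecoveringError, so the two classes stay disjoint. A compact sketch of that triage:

```js
const RECOVERING = new Set([91, 189, 11600, 11602, 13436]);
const NOT_MASTER = new Set([10107, 13435]);

function classify(err) {
  const msg = err.message || '';
  if (
    (err.code && RECOVERING.has(err.code)) ||
    /not master or secondary|node is recovering/.test(msg)
  ) {
    return 'recovering'; // unrecoverable for SDAM
  }
  if ((err.code && NOT_MASTER.has(err.code)) || /not master/.test(msg)) {
    return 'not master'; // also unrecoverable for SDAM
  }
  return 'other';
}

console.log(classify({ code: 11600 }));                        // recovering
console.log(classify({ message: 'not master or secondary' })); // recovering
console.log(classify({ code: 10107 }));                        // not master
```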
require('./topologies/read_preference'), - Sessions: require('./sessions'), - BSON: BSON, - EJSON: EJSON, - Topology: require('./sdam/topology').Topology, - // Raw operations - Query: require('./connection/commands').Query, - // Auth mechanisms - MongoCredentials: require('./auth/mongo_credentials').MongoCredentials, - defaultAuthProviders: require('./auth/defaultAuthProviders').defaultAuthProviders, - MongoCR: require('./auth/mongocr'), - X509: require('./auth/x509'), - Plain: require('./auth/plain'), - GSSAPI: require('./auth/gssapi'), - ScramSHA1: require('./auth/scram').ScramSHA1, - ScramSHA256: require('./auth/scram').ScramSHA256, - // Utilities - parseConnectionString: require('./uri_parser') -}; diff --git a/lib/core/tools/smoke_plugin.js b/lib/core/tools/smoke_plugin.js deleted file mode 100644 index 22d0298627f..00000000000 --- a/lib/core/tools/smoke_plugin.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict'; - -var fs = require('fs'); - -/* Note: because this plugin uses process.on('uncaughtException'), only one - * of these can exist at any given time. This plugin and anything else that - * uses process.on('uncaughtException') will conflict. */ -exports.attachToRunner = function(runner, outputFile) { - var smokeOutput = { results: [] }; - var runningTests = {}; - - var integraPlugin = { - beforeTest: function(test, callback) { - test.startTime = Date.now(); - runningTests[test.name] = test; - callback(); - }, - afterTest: function(test, callback) { - smokeOutput.results.push({ - status: test.status, - start: test.startTime, - end: Date.now(), - test_file: test.name, - exit_code: 0, - url: '' - }); - delete runningTests[test.name]; - callback(); - }, - beforeExit: function(obj, callback) { - fs.writeFile(outputFile, JSON.stringify(smokeOutput), function() { - callback(); - }); - } - }; - - // In case of exception, make sure we write file - process.on('uncaughtException', function(err) { - // Mark all currently running tests as failed - for (var testName in runningTests) { - smokeOutput.results.push({ - status: 'fail', - start: runningTests[testName].startTime, - end: Date.now(), - test_file: testName, - exit_code: 0, - url: '' - }); - } - - // write file - fs.writeFileSync(outputFile, JSON.stringify(smokeOutput)); - - // Standard NodeJS uncaught exception handler - console.error(err.stack); - process.exit(1); - }); - - runner.plugin(integraPlugin); - return integraPlugin; -}; diff --git a/lib/core/topologies/mongos.js b/lib/core/topologies/mongos.js deleted file mode 100644 index 29371931af7..00000000000 --- a/lib/core/topologies/mongos.js +++ /dev/null @@ -1,1384 +0,0 @@ -'use strict'; - -const inherits = require('util').inherits; -const f = require('util').format; -const EventEmitter = require('events').EventEmitter; -const CoreCursor = require('../cursor').CoreCursor; -const Logger = require('../connection/logger'); -const retrieveBSON = require('../connection/utils').retrieveBSON; -const MongoError = require('../error').MongoError; -const Server = require('./server'); -const diff = require('./shared').diff; -const cloneOptions = require('./shared').cloneOptions; -const SessionMixins = require('./shared').SessionMixins; -const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; -const relayEvents = require('../utils').relayEvents; -const isRetryableError = require('../error').isRetryableError; -const BSON = retrieveBSON(); -const getMMAPError = require('./shared').getMMAPError; -const makeClientMetadata = require('../utils').makeClientMetadata; - -/** - * 
@fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is - * used to construct connections. - */ - -// -// States -var DISCONNECTED = 'disconnected'; -var CONNECTING = 'connecting'; -var CONNECTED = 'connected'; -var UNREFERENCED = 'unreferenced'; -var DESTROYING = 'destroying'; -var DESTROYED = 'destroyed'; - -function stateTransition(self, newState) { - var legalTransitions = { - disconnected: [CONNECTING, DESTROYING, DESTROYED, DISCONNECTED], - connecting: [CONNECTING, DESTROYING, DESTROYED, CONNECTED, DISCONNECTED], - connected: [CONNECTED, DISCONNECTED, DESTROYING, DESTROYED, UNREFERENCED], - unreferenced: [UNREFERENCED, DESTROYING, DESTROYED], - destroyed: [DESTROYED] - }; - - // Get current state - var legalStates = legalTransitions[self.state]; - if (legalStates && legalStates.indexOf(newState) !== -1) { - self.state = newState; - } else { - self.s.logger.error( - f( - 'Mongos with id [%s] failed attempted illegal state transition from [%s] to [%s] only following state allowed [%s]', - self.id, - self.state, - newState, - legalStates - ) - ); - } -} - -// -// ReplSet instance id -var id = 1; -var handlers = ['connect', 'close', 'error', 'timeout', 'parseError']; - -/** - * Creates a new Mongos instance - * @class - * @param {array} seedlist A list of seeds for the replicaset - * @param {number} [options.haInterval=5000] The High availability period for replicaset inquiry - * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors - * @param {number} [options.size=5] Server connection pool size - * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled - * @param {number} [options.localThresholdMS=15] Cutoff latency point in MS for MongoS proxy selection - * @param {boolean} [options.noDelay=true] TCP Connection no delay - * @param {number} [options.connectionTimeout=1000] TCP Connection timeout setting - * @param {number} [options.socketTimeout=0] TCP Socket timeout setting - * @param {boolean} [options.ssl=false] Use SSL for connection - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {Buffer} [options.ca] SSL Certificate store binary buffer - * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer - * @param {Buffer} [options.cert] SSL Certificate binary buffer - * @param {Buffer} [options.key] SSL Key file binary buffer - * @param {string} [options.passphrase] SSL Certificate pass phrase - * @param {string} [options.servername=null] String containing the server name requested via TLS SNI. - * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates - * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits - * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. - * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. 
- * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @return {Mongos} A cursor instance - * @fires Mongos#connect - * @fires Mongos#reconnect - * @fires Mongos#joined - * @fires Mongos#left - * @fires Mongos#failed - * @fires Mongos#fullsetup - * @fires Mongos#all - * @fires Mongos#serverHeartbeatStarted - * @fires Mongos#serverHeartbeatSucceeded - * @fires Mongos#serverHeartbeatFailed - * @fires Mongos#topologyOpening - * @fires Mongos#topologyClosed - * @fires Mongos#topologyDescriptionChanged - * @property {string} type the topology type. - * @property {string} parserType the parser type used (c++ or js). - */ -var Mongos = function(seedlist, options) { - options = options || {}; - - // Get replSet Id - this.id = id++; - - // Internal state - this.s = { - options: Object.assign({ metadata: makeClientMetadata(options) }, options), - // BSON instance - bson: - options.bson || - new BSON([ - BSON.Binary, - BSON.Code, - BSON.DBRef, - BSON.Decimal128, - BSON.Double, - BSON.Int32, - BSON.Long, - BSON.Map, - BSON.MaxKey, - BSON.MinKey, - BSON.ObjectId, - BSON.BSONRegExp, - BSON.Symbol, - BSON.Timestamp - ]), - // Factory overrides - Cursor: options.cursorFactory || CoreCursor, - // Logger instance - logger: Logger('Mongos', options), - // Seedlist - seedlist: seedlist, - // Ha interval - haInterval: options.haInterval ? options.haInterval : 10000, - // Disconnect handler - disconnectHandler: options.disconnectHandler, - // Server selection index - index: 0, - // Connect function options passed in - connectOptions: {}, - // Are we running in debug mode - debug: typeof options.debug === 'boolean' ? options.debug : false, - // localThresholdMS - localThresholdMS: options.localThresholdMS || 15 - }; - - // Log info warning if the socketTimeout < haInterval as it will cause - // a lot of recycled connections to happen. - if ( - this.s.logger.isWarn() && - this.s.options.socketTimeout !== 0 && - this.s.options.socketTimeout < this.s.haInterval - ) { - this.s.logger.warn( - f( - 'warning socketTimeout %s is less than haInterval %s. This might cause unnecessary server reconnections due to socket timeouts', - this.s.options.socketTimeout, - this.s.haInterval - ) - ); - } - - // Disconnected state - this.state = DISCONNECTED; - - // Current proxies we are connecting to - this.connectingProxies = []; - // Currently connected proxies - this.connectedProxies = []; - // Disconnected proxies - this.disconnectedProxies = []; - // Index of proxy to run operations against - this.index = 0; - // High availability timeout id - this.haTimeoutId = null; - // Last ismaster - this.ismaster = null; - - // Description of the Replicaset - this.topologyDescription = { - topologyType: 'Unknown', - servers: [] - }; - - // Highest clusterTime seen in responses from the current deployment - this.clusterTime = null; - - // Add event listener - EventEmitter.call(this); -}; - -inherits(Mongos, EventEmitter); -Object.assign(Mongos.prototype, SessionMixins); - -Object.defineProperty(Mongos.prototype, 'type', { - enumerable: true, - get: function() { - return 'mongos'; - } -}); - -Object.defineProperty(Mongos.prototype, 'parserType', { - enumerable: true, - get: function() { - return BSON.native ? 
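A hypothetical usage sketch of the removed Mongos topology; seedlist entries are host/port objects and the options mirror the JSDoc above (paths and hostnames are illustrative):

```js
const Mongos = require('./mongos'); // the file deleted above

const mongos = new Mongos(
  [
    { host: 'router1.example.com', port: 27017 },
    { host: 'router2.example.com', port: 27017 }
  ],
  { haInterval: 10000, localThresholdMS: 15, size: 5 }
);

mongos.on('connect', topology => console.log('connected, type:', topology.type)); // 'mongos'
mongos.on('joined', (type, server) => console.log(type, 'joined:', server.name));
mongos.connect();
```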
'c++' : 'js'; - } -}); - -Object.defineProperty(Mongos.prototype, 'logicalSessionTimeoutMinutes', { - enumerable: true, - get: function() { - if (!this.ismaster) return null; - return this.ismaster.logicalSessionTimeoutMinutes || null; - } -}); - -/** - * Emit an event if any listeners exist - * @method - */ -function emitSDAMEvent(self, event, description) { - if (self.listeners(event).length > 0) { - self.emit(event, description); - } -} - -const SERVER_EVENTS = ['serverDescriptionChanged', 'error', 'close', 'timeout', 'parseError']; -function destroyServer(server, options, callback) { - options = options || {}; - SERVER_EVENTS.forEach(event => server.removeAllListeners(event)); - server.destroy(options, callback); -} - -/** - * Initiate server connect - */ -Mongos.prototype.connect = function(options) { - var self = this; - // Add any connect level options to the internal state - this.s.connectOptions = options || {}; - - // Set connecting state - stateTransition(this, CONNECTING); - - // Create server instances - var servers = this.s.seedlist.map(function(x) { - const server = new Server( - Object.assign({}, self.s.options, x, options, { - reconnect: false, - monitoring: false, - parent: self - }) - ); - - relayEvents(server, self, ['serverDescriptionChanged']); - return server; - }); - - // Emit the topology opening event - emitSDAMEvent(this, 'topologyOpening', { topologyId: this.id }); - - // Start all server connections - connectProxies(self, servers); -}; - -/** - * Authenticate the topology. - * @method - * @param {MongoCredentials} credentials The credentials for authentication we are using - * @param {authResultCallback} callback A callback function - */ -Mongos.prototype.auth = function(credentials, callback) { - if (typeof callback === 'function') callback(null, null); -}; - -function handleEvent(self) { - return function() { - if (self.state === DESTROYED || self.state === DESTROYING) { - return; - } - - // Move to list of disconnectedProxies - moveServerFrom(self.connectedProxies, self.disconnectedProxies, this); - // Emit the initial topology - emitTopologyDescriptionChanged(self); - // Emit the left signal - self.emit('left', 'mongos', this); - // Emit the sdam event - self.emit('serverClosed', { - topologyId: self.id, - address: this.name - }); - }; -} - -function handleInitialConnectEvent(self, event) { - return function() { - var _this = this; - - // Destroy the instance - if (self.state === DESTROYED) { - // Emit the initial topology - emitTopologyDescriptionChanged(self); - // Move from connectingProxies - moveServerFrom(self.connectingProxies, self.disconnectedProxies, this); - return this.destroy(); - } - - // Check the type of server - if (event === 'connect') { - // Get last known ismaster - self.ismaster = _this.lastIsMaster(); - - // If this is a mongos proxy, track it; otherwise remove it - if (self.ismaster.msg === 'isdbgrid') { - // Check whether the proxy is already in the connected list - for (let i = 0; i < self.connectedProxies.length; i++) { - if (self.connectedProxies[i].name === _this.name) { - // Move from connectingProxies - moveServerFrom(self.connectingProxies, self.disconnectedProxies, _this); - // Emit the initial topology - emitTopologyDescriptionChanged(self); - _this.destroy(); - return self.emit('failed', _this); - } - } - - // Remove the handlers - for (let i = 0; i < handlers.length; i++) { - _this.removeAllListeners(handlers[i]); - } - - // Add stable state handlers - _this.on('error', handleEvent(self, 'error')); - _this.on('close', handleEvent(self, 'close')); - _this.on('timeout', handleEvent(self,
'timeout')); - _this.on('parseError', handleEvent(self, 'parseError')); - - // Move from connecting proxies to connected - moveServerFrom(self.connectingProxies, self.connectedProxies, _this); - // Emit the joined event - self.emit('joined', 'mongos', _this); - } else { - // Print warning if we did not find a mongos proxy - if (self.s.logger.isWarn()) { - var message = 'expected mongos proxy, but found replicaset member mongod for server %s'; - // We have a standalone server - if (!self.ismaster.hosts) { - message = 'expected mongos proxy, but found standalone mongod for server %s'; - } - - self.s.logger.warn(f(message, _this.name)); - } - - // This is not a mongos proxy, destroy and remove it completely - _this.destroy(true); - removeProxyFrom(self.connectingProxies, _this); - // Emit the left event - self.emit('left', 'server', _this); - // Emit failed event - self.emit('failed', _this); - } - } else { - moveServerFrom(self.connectingProxies, self.disconnectedProxies, this); - // Emit the left event - self.emit('left', 'mongos', this); - // Emit failed event - self.emit('failed', this); - } - - // Emit the initial topology - emitTopologyDescriptionChanged(self); - - // Trigger topologyMonitor - if (self.connectingProxies.length === 0) { - // Emit connected if we are connected - if (self.connectedProxies.length > 0 && self.state === CONNECTING) { - // Set the state to connected - stateTransition(self, CONNECTED); - // Emit the connect event - self.emit('connect', self); - self.emit('fullsetup', self); - self.emit('all', self); - } else if (self.disconnectedProxies.length === 0) { - // Print warning if we did not find a mongos proxy - if (self.s.logger.isWarn()) { - self.s.logger.warn( - f('no mongos proxies found in seed list, did you mean to connect to a replicaset?') - ); - } - - // Emit the error that no proxies were found - return self.emit('error', new MongoError('no mongos proxies found in seed list')); - } - - // Topology monitor - topologyMonitor(self, { firstConnect: true }); - } - }; -} - -function connectProxies(self, servers) { - // Update connectingProxies - self.connectingProxies = self.connectingProxies.concat(servers); - - // Index used to interleave the server connects, avoiding - // runtime issues on I/O constrained VMs - var timeoutInterval = 0; - - function connect(server, timeoutInterval) { - setTimeout(function() { - // Emit opening server event - self.emit('serverOpening', { - topologyId: self.id, - address: server.name - }); - - // Emit the initial topology - emitTopologyDescriptionChanged(self); - - // Add event handlers - server.once('close', handleInitialConnectEvent(self, 'close')); - server.once('timeout', handleInitialConnectEvent(self, 'timeout')); - server.once('parseError', handleInitialConnectEvent(self, 'parseError')); - server.once('error', handleInitialConnectEvent(self, 'error')); - server.once('connect', handleInitialConnectEvent(self, 'connect')); - - // Command Monitoring events - relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - // Start connection - server.connect(self.s.connectOptions); - }, timeoutInterval); - } - - // Start all the servers - servers.forEach(server => connect(server, timeoutInterval++)); - } - -function pickProxy(self, session) { - // TODO: Destructure :) - const transaction = session && session.transaction; - - if (transaction && transaction.server) { - if (transaction.server.isConnected()) { - return transaction.server; - } else { - transaction.unpinServer(); - } - } - - // Get the currently
connected proxies - var connectedProxies = self.connectedProxies.slice(0); - - // Set lower bound - var lowerBoundLatency = Number.MAX_VALUE; - - // Determine the lower bound for the Proxies - for (var i = 0; i < connectedProxies.length; i++) { - if (connectedProxies[i].lastIsMasterMS < lowerBoundLatency) { - lowerBoundLatency = connectedProxies[i].lastIsMasterMS; - } - } - - // Filter out the possible servers - connectedProxies = connectedProxies.filter(function(server) { - if ( - server.lastIsMasterMS <= lowerBoundLatency + self.s.localThresholdMS && - server.isConnected() - ) { - return true; - } - }); - - let proxy; - - // No proxies within the latency window; pick the first connected one - if (connectedProxies.length === 0) { - proxy = self.connectedProxies[0]; - } else { - // Get proxy - proxy = connectedProxies[self.index % connectedProxies.length]; - // Update the index - self.index = (self.index + 1) % connectedProxies.length; - } - - if (transaction && transaction.isActive && proxy && proxy.isConnected()) { - transaction.pinServer(proxy); - } - - // Return the proxy - return proxy; -} - -function moveServerFrom(from, to, proxy) { - for (var i = 0; i < from.length; i++) { - if (from[i].name === proxy.name) { - from.splice(i, 1); - } - } - - for (i = 0; i < to.length; i++) { - if (to[i].name === proxy.name) { - to.splice(i, 1); - } - } - - to.push(proxy); -} - -function removeProxyFrom(from, proxy) { - for (var i = 0; i < from.length; i++) { - if (from[i].name === proxy.name) { - from.splice(i, 1); - } - } -} - -function reconnectProxies(self, proxies, callback) { - // Count of servers left to handle - var count = proxies.length; - - // Handle events - var _handleEvent = function(self, event) { - return function() { - var _self = this; - count = count - 1; - - // Destroyed - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); - return this.destroy(); - } - - if (event === 'connect') { - // Destroyed - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); - return _self.destroy(); - } - - // Remove the handlers - for (var i = 0; i < handlers.length; i++) { - _self.removeAllListeners(handlers[i]); - } - - // Add stable state handlers - _self.on('error', handleEvent(self, 'error')); - _self.on('close', handleEvent(self, 'close')); - _self.on('timeout', handleEvent(self, 'timeout')); - _self.on('parseError', handleEvent(self, 'parseError')); - - // Move to the connected servers - moveServerFrom(self.connectingProxies, self.connectedProxies, _self); - // Emit topology change - emitTopologyDescriptionChanged(self); - // Emit joined event - self.emit('joined', 'mongos', _self); - } else { - // Move from connectingProxies - moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); - this.destroy(); - } - - // If we are done, finish up with the callback - if (count === 0) { - callback(); - } - }; - }; - - // No new servers - if (count === 0) { - return callback(); - } - - // Execute method - function execute(_server, i) { - setTimeout(function() { - // Destroyed - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - return; - } - - // Create a new server instance - var server = new Server( - Object.assign({}, self.s.options, { - host: _server.name.split(':')[0], - port: parseInt(_server.name.split(':')[1], 10), - reconnect: false, - monitoring:
false, - parent: self - }) - ); - - destroyServer(_server, { force: true }); - removeProxyFrom(self.disconnectedProxies, _server); - - // Relay the server description change - relayEvents(server, self, ['serverDescriptionChanged']); - - // Emit opening server event - self.emit('serverOpening', { - topologyId: server.s.topologyId !== -1 ? server.s.topologyId : self.id, - address: server.name - }); - - // Add temp handlers - server.once('connect', _handleEvent(self, 'connect')); - server.once('close', _handleEvent(self, 'close')); - server.once('timeout', _handleEvent(self, 'timeout')); - server.once('error', _handleEvent(self, 'error')); - server.once('parseError', _handleEvent(self, 'parseError')); - - // Command Monitoring events - relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - // Connect to proxy - self.connectingProxies.push(server); - server.connect(self.s.connectOptions); - }, i); - } - - // Create new instances - for (var i = 0; i < proxies.length; i++) { - execute(proxies[i], i); - } -} - -function topologyMonitor(self, options) { - options = options || {}; - - // no need to set up the monitor if we're already closed - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - return; - } - - // Set monitoring timeout - self.haTimeoutId = setTimeout(function() { - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - return; - } - - // If we have a primary and a disconnect handler, execute - // buffered operations - if (self.isConnected() && self.s.disconnectHandler) { - self.s.disconnectHandler.execute(); - } - - // Get the connectingServers - var proxies = self.connectedProxies.slice(0); - // Get the count - var count = proxies.length; - - // If the count is zero schedule a new fast - function pingServer(_self, _server, cb) { - // Measure running time - var start = new Date().getTime(); - - // Emit the server heartbeat start - emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: _server.name }); - - // Execute ismaster - _server.command( - 'admin.$cmd', - { - ismaster: true - }, - { - monitoring: true, - socketTimeout: self.s.options.connectionTimeout || 2000 - }, - function(err, r) { - if ( - self.state === DESTROYED || - self.state === DESTROYING || - self.state === UNREFERENCED - ) { - // Move from connectedProxies - moveServerFrom(self.connectedProxies, self.disconnectedProxies, _server); - _server.destroy(); - return cb(err, r); - } - - // Calculate latency - var latencyMS = new Date().getTime() - start; - - // We had an error, remove it from the state - if (err) { - // Emit the server heartbeat failure - emitSDAMEvent(self, 'serverHeartbeatFailed', { - durationMS: latencyMS, - failure: err, - connectionId: _server.name - }); - // Move from connected proxies to disconnected proxies - moveServerFrom(self.connectedProxies, self.disconnectedProxies, _server); - } else { - // Update the server ismaster - _server.ismaster = r.result; - _server.lastIsMasterMS = latencyMS; - - // Server heart beat event - emitSDAMEvent(self, 'serverHeartbeatSucceeded', { - durationMS: latencyMS, - reply: r.result, - connectionId: _server.name - }); - } - - cb(err, r); - } - ); - } - - // No proxies; initiate monitor again - if (proxies.length === 0) { - // Emit close event if any listeners registered - if (self.listeners('close').length > 0 && self.state === CONNECTING) { - self.emit('error', new MongoError('no mongos proxy available')); - } else { - self.emit('close', self);
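pickProxy, defined earlier in this file, is the selection rule worth internalizing: it keeps only the proxies whose last ismaster round-trip sits within localThresholdMS of the fastest proxy, then round-robins among the survivors. A self-contained sketch of that rule (plain objects stand in for Server instances; connection and transaction checks are omitted):

```js
// Latency-window selection: find the fastest proxy, keep everything within
// `localThresholdMS` of it, and rotate an index through the survivors.
function selectWithinWindow(proxies, state, localThresholdMS) {
  const lowest = Math.min.apply(null, proxies.map(p => p.lastIsMasterMS));
  const eligible = proxies.filter(p => p.lastIsMasterMS <= lowest + localThresholdMS);
  if (eligible.length === 0) return proxies[0];
  const picked = eligible[state.index % eligible.length];
  state.index = (state.index + 1) % eligible.length;
  return picked;
}

const state = { index: 0 };
const proxies = [
  { name: 'a:27017', lastIsMasterMS: 5 },
  { name: 'b:27017', lastIsMasterMS: 12 },
  { name: 'c:27017', lastIsMasterMS: 40 }
];

// With the default 15ms window only a and b are eligible:
console.log(selectWithinWindow(proxies, state, 15).name); // 'a:27017'
console.log(selectWithinWindow(proxies, state, 15).name); // 'b:27017'
```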
- } - - // Attempt to connect to any unknown servers - return reconnectProxies(self, self.disconnectedProxies, function() { - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - return; - } - - // Are we connected? Emit connect event - if (self.state === CONNECTING && options.firstConnect) { - self.emit('connect', self); - self.emit('fullsetup', self); - self.emit('all', self); - } else if (self.isConnected()) { - self.emit('reconnect', self); - } else if (!self.isConnected() && self.listeners('close').length > 0) { - self.emit('close', self); - } - - // Perform topology monitor - topologyMonitor(self); - }); - } - - // Ping all servers - for (var i = 0; i < proxies.length; i++) { - pingServer(self, proxies[i], function() { - count = count - 1; - - if (count === 0) { - if ( - self.state === DESTROYED || - self.state === DESTROYING || - self.state === UNREFERENCED - ) { - return; - } - - // Attempt to connect to any unknown servers - reconnectProxies(self, self.disconnectedProxies, function() { - if ( - self.state === DESTROYED || - self.state === DESTROYING || - self.state === UNREFERENCED - ) { - return; - } - - // Perform topology monitor - topologyMonitor(self); - }); - } - }); - } - }, self.s.haInterval); -} - -/** - * Returns the last known ismaster document for this server - * @method - * @return {object} - */ -Mongos.prototype.lastIsMaster = function() { - return this.ismaster; -}; - -/** - * Unref all connections belonging to this server - * @method - */ -Mongos.prototype.unref = function() { - // Transition state - stateTransition(this, UNREFERENCED); - // Get all proxies - var proxies = this.connectedProxies.concat(this.connectingProxies); - proxies.forEach(function(x) { - x.unref(); - }); - - clearTimeout(this.haTimeoutId); -}; - -/** - * Destroy the server connection - * @param {boolean} [options.force=false] Force destroy the pool - * @method - */ -Mongos.prototype.destroy = function(options, callback) { - if (typeof options === 'function') { - callback = options; - options = {}; - } - - options = options || {}; - - stateTransition(this, DESTROYING); - if (this.haTimeoutId) { - clearTimeout(this.haTimeoutId); - } - - const proxies = this.connectedProxies.concat(this.connectingProxies); - let serverCount = proxies.length; - const serverDestroyed = () => { - serverCount--; - if (serverCount > 0) { - return; - } - - emitTopologyDescriptionChanged(this); - emitSDAMEvent(this, 'topologyClosed', { topologyId: this.id }); - stateTransition(this, DESTROYED); - if (typeof callback === 'function') { - callback(null, null); - } - }; - - if (serverCount === 0) { - serverDestroyed(); - return; - } - - // Destroy all connected and connecting servers - proxies.forEach(server => { - // Emit the sdam event - this.emit('serverClosed', { - topologyId: this.id, - address: server.name - }); - - destroyServer(server, options, serverDestroyed); - moveServerFrom(this.connectedProxies, this.disconnectedProxies, server); - }); -}; - -/** - * Figure out if the server is connected - * @method - * @return {boolean} - */ -Mongos.prototype.isConnected = function() { - return this.connectedProxies.length > 0; -}; - -/** - * Figure out if the server instance was destroyed by calling destroy - * @method - * @return {boolean} - */ -Mongos.prototype.isDestroyed = function() { - return this.state === DESTROYED; -}; - -// -// Operations -// - -function executeWriteOperation(args, options, callback) { - if (typeof options === 'function') (callback = options), (options = {}); - options =
options || {}; - - // TODO: once we drop Node 4, use destructuring either here or in arguments. - const self = args.self; - const op = args.op; - const ns = args.ns; - const ops = args.ops; - - // Pick a server - let server = pickProxy(self, options.session); - // No server found; error out - if (!server) return callback(new MongoError('no mongos proxy available')); - - const willRetryWrite = - !args.retrying && - !!options.retryWrites && - options.session && - isRetryableWritesSupported(self) && - !options.session.inTransaction(); - - const handler = (err, result) => { - if (!err) return callback(null, result); - if (!isRetryableError(err) || !willRetryWrite) { - err = getMMAPError(err); - return callback(err); - } - - // Pick another server - server = pickProxy(self, options.session); - - // No server found; error out with the original error - if (!server) { - return callback(err); - } - - const newArgs = Object.assign({}, args, { retrying: true }); - return executeWriteOperation(newArgs, options, callback); - }; - - if (callback.operationId) { - handler.operationId = callback.operationId; - } - - // increment and assign txnNumber - if (willRetryWrite) { - options.session.incrementTransactionNumber(); - options.willRetryWrite = willRetryWrite; - } - - // Run the operation (re-entered once on retry) - server[op](ns, ops, options, handler); -} - -/** - * Insert one or more documents - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of documents to insert - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -Mongos.prototype.insert = function(ns, ops, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - // Not connected but we have a disconnecthandler - if (!this.isConnected() && this.s.disconnectHandler != null) { - return this.s.disconnectHandler.add('insert', ns, ops, options, callback); - } - - // No mongos proxy available - if (!this.isConnected()) { - return callback(new MongoError('no mongos proxy available')); - } - - // Execute write operation - executeWriteOperation({ self: this, op: 'insert', ns, ops }, options, callback); -}; - -/** - * Perform one or more update operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of updates - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
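executeWriteOperation above encodes the retryable-writes contract for this topology: on a retryable error the write is re-dispatched exactly once, against a freshly selected proxy, after the session's transaction number is bumped. A minimal sketch of that retry-once shape, assuming a `pickServer` helper and an `err.retryable` flag as illustrative stand-ins for pickProxy/isRetryableError (not the driver's API):

```js
// Retry-once write dispatch: a retryable failure triggers exactly one
// re-selection and re-dispatch; any second failure is surfaced as-is.
function executeWrite(topology, op, callback, retrying) {
  const server = topology.pickServer();
  if (!server) return callback(new Error('no mongos proxy available'));

  server.execute(op, (err, result) => {
    if (!err) return callback(null, result);
    if (retrying || !err.retryable) return callback(err);
    executeWrite(topology, op, callback, true); // the single retry
  });
}

// A fake topology whose proxy fails once with a retryable error:
let calls = 0;
const topology = {
  pickServer: () => ({
    execute: (op, cb) => {
      calls += 1;
      if (calls === 1) return cb(Object.assign(new Error('transient'), { retryable: true }));
      cb(null, { ok: 1 });
    }
  })
};

executeWrite(topology, { insert: 'users' }, (err, res) => console.log(err, res)); // null { ok: 1 }
```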
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -Mongos.prototype.update = function(ns, ops, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - // Not connected but we have a disconnecthandler - if (!this.isConnected() && this.s.disconnectHandler != null) { - return this.s.disconnectHandler.add('update', ns, ops, options, callback); - } - - // No mongos proxy available - if (!this.isConnected()) { - return callback(new MongoError('no mongos proxy available')); - } - - // Execute write operation - executeWriteOperation({ self: this, op: 'update', ns, ops }, options, callback); -}; - -/** - * Perform one or more remove operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of removes - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -Mongos.prototype.remove = function(ns, ops, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - // Not connected but we have a disconnecthandler - if (!this.isConnected() && this.s.disconnectHandler != null) { - return this.s.disconnectHandler.add('remove', ns, ops, options, callback); - } - - // No mongos proxy available - if (!this.isConnected()) { - return callback(new MongoError('no mongos proxy available')); - } - - // Execute write operation - executeWriteOperation({ self: this, op: 'remove', ns, ops }, options, callback); -}; - -const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete']; - -function isWriteCommand(command) { - return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]); -} - -/** - * Execute a command - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object} cmd The command hash - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Connection} [options.connection] Specify connection object to execute command against - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {opResultCallback} callback A callback function - */ -Mongos.prototype.command = function(ns, cmd, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - var self = this; - - // Pick a proxy - var server = pickProxy(self, options.session); - - // Topology is not connected; save the call in the provided store to be - // executed at some point once the handler deems the topology reconnected - if ((server == null || !server.isConnected()) && this.s.disconnectHandler != null) { - return this.s.disconnectHandler.add('command', ns, cmd, options, callback); - } - - // No server returned; we had an error - if (server == null) { - return callback(new MongoError('no mongos proxy available')); - } - - // Cloned options - var clonedOptions = cloneOptions(options); - clonedOptions.topology = self; - - const willRetryWrite = - !options.retrying && - options.retryWrites && - options.session && - isRetryableWritesSupported(self) && - !options.session.inTransaction() && - isWriteCommand(cmd); - - const cb = (err, result) => { - if (!err) return callback(null, result); - if (!isRetryableError(err)) { - return callback(err); - } - - if (willRetryWrite) { - const newOptions = Object.assign({}, clonedOptions, { retrying: true }); - return this.command(ns, cmd, newOptions, callback); - } - - return callback(err); - }; - - // increment and assign txnNumber - if (willRetryWrite) { - options.session.incrementTransactionNumber(); - options.willRetryWrite = willRetryWrite; - } - - // Execute the command - server.command(ns, cmd, clonedOptions, cb); -}; - -/** - * Get a new cursor - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId - * @param {object} [options] Options for the cursor - * @param {object} [options.batchSize=0] Batchsize for the operation - * @param {array} [options.documents=[]] Initial documents list for cursor - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
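The retry gate in command() above hinges on isWriteCommand, defined just before it: a command document counts as a retryable write when one of its first-level keys names a write operation. A quick illustration, using the definition exactly as it appears in the deleted code (the command shapes below are ordinary examples, not fixtures from the source):

```js
const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete'];

// A command is a write command when its document carries one of the
// retryable write keys at the top level.
function isWriteCommand(command) {
  return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]);
}

console.log(isWriteCommand({ insert: 'users', documents: [{ a: 1 }] })); // true
console.log(isWriteCommand({ find: 'users', filter: {} }));              // false
```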
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {object} [options.topology] The internal topology of the created cursor - * @returns {Cursor} - */ -Mongos.prototype.cursor = function(ns, cmd, options) { - options = options || {}; - const topology = options.topology || this; - - // Set up final cursor type - var FinalCursor = options.cursorFactory || this.s.Cursor; - - // Return the cursor - return new FinalCursor(topology, ns, cmd, options); -}; - -/** - * Selects a server - * - * @method - * @param {function} selector Unused - * @param {ReadPreference} [options.readPreference] Unused - * @param {ClientSession} [options.session] Specify a session if it is being used - * @param {function} callback - */ -Mongos.prototype.selectServer = function(selector, options, callback) { - if (typeof selector === 'function' && typeof callback === 'undefined') - (callback = selector), (selector = undefined), (options = {}); - if (typeof options === 'function') - (callback = options), (options = selector), (selector = undefined); - options = options || {}; - - const server = pickProxy(this, options.session); - if (server == null) { - callback(new MongoError('server selection failed')); - return; - } - - if (this.s.debug) this.emit('pickedServer', null, server); - callback(null, server); -}; - -/** - * All raw connections - * @method - * @return {Connection[]} - */ -Mongos.prototype.connections = function() { - var connections = []; - - for (var i = 0; i < this.connectedProxies.length; i++) { - connections = connections.concat(this.connectedProxies[i].connections()); - } - - return connections; -}; - -function emitTopologyDescriptionChanged(self) { - if (self.listeners('topologyDescriptionChanged').length > 0) { - var topology = 'Unknown'; - if (self.connectedProxies.length > 0) { - topology = 'Sharded'; - } - - // Generate description - var description = { - topologyType: topology, - servers: [] - }; - - // All not-yet-connected proxies - var proxies = self.disconnectedProxies.concat(self.connectingProxies); - - // Add the disconnected and connecting proxies as Unknown - description.servers = description.servers.concat( - proxies.map(function(x) { - var serverDescription = x.getDescription(); - serverDescription.type = 'Unknown'; - return serverDescription; - }) - ); - - // Add all the connected proxies - description.servers = description.servers.concat( - self.connectedProxies.map(function(x) { - var serverDescription = x.getDescription(); - serverDescription.type = 'Mongos'; - return serverDescription; - }) - ); - - // Get the diff - var diffResult = diff(self.topologyDescription, description); - - // Create the result - var result = { - topologyId: self.id, - previousDescription: self.topologyDescription, - newDescription: description, - diff: diffResult - }; - - // Emit the topologyDescription change - if (diffResult.servers.length > 0) { - self.emit('topologyDescriptionChanged', result); - } - - // Set the new description - self.topologyDescription = description; - } -} - -/** - * A mongos connect event, used to verify that the connection is up and running - * - * @event Mongos#connect - * @type {Mongos} - */ - -/** - * A mongos reconnect event, used to verify that the mongos topology has reconnected - * - * @event Mongos#reconnect - * @type {Mongos} - */ - -/** - * A mongos fullsetup event, used to signal that all topology members have been contacted. - * - * @event Mongos#fullsetup - * @type {Mongos} - */ - -/** - * A mongos all event, used to signal that all topology members have been contacted.
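emitTopologyDescriptionChanged above builds a fresh description on every change, diffs it against the previous one, and only emits when the server list actually differs. The construction step reduces to the sketch below (plain objects stand in for Server instances and their getDescription() results):

```js
// Description construction: connected proxies are typed 'Mongos',
// everything else 'Unknown', and the topology is 'Sharded' once at
// least one proxy is connected.
function buildDescription(topology) {
  const unknown = topology.disconnectedProxies
    .concat(topology.connectingProxies)
    .map(s => ({ address: s.name, type: 'Unknown' }));
  const mongos = topology.connectedProxies.map(s => ({ address: s.name, type: 'Mongos' }));
  return {
    topologyType: topology.connectedProxies.length > 0 ? 'Sharded' : 'Unknown',
    servers: unknown.concat(mongos)
  };
}

console.log(
  buildDescription({
    connectedProxies: [{ name: 'a:27017' }],
    connectingProxies: [],
    disconnectedProxies: [{ name: 'b:27017' }]
  })
);
// { topologyType: 'Sharded',
//   servers: [ { address: 'b:27017', type: 'Unknown' },
//              { address: 'a:27017', type: 'Mongos' } ] }
```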
- * - * @event Mongos#all - * @type {Mongos} - */ - -/** - * A server member left the mongos list - * - * @event Mongos#left - * @type {Mongos} - * @param {string} type The type of member that left (mongos) - * @param {Server} server The server object that left - */ - -/** - * A server member joined the mongos list - * - * @event Mongos#joined - * @type {Mongos} - * @param {string} type The type of member that joined (mongos) - * @param {Server} server The server object that joined - */ - -/** - * A server opening SDAM monitoring event - * - * @event Mongos#serverOpening - * @type {object} - */ - -/** - * A server closed SDAM monitoring event - * - * @event Mongos#serverClosed - * @type {object} - */ - -/** - * A server description SDAM change monitoring event - * - * @event Mongos#serverDescriptionChanged - * @type {object} - */ - -/** - * A topology open SDAM event - * - * @event Mongos#topologyOpening - * @type {object} - */ - -/** - * A topology closed SDAM event - * - * @event Mongos#topologyClosed - * @type {object} - */ - -/** - * A topology structure SDAM change event - * - * @event Mongos#topologyDescriptionChanged - * @type {object} - */ - -/** - * A topology serverHeartbeatStarted SDAM event - * - * @event Mongos#serverHeartbeatStarted - * @type {object} - */ - -/** - * A topology serverHeartbeatFailed SDAM event - * - * @event Mongos#serverHeartbeatFailed - * @type {object} - */ - -/** - * A topology serverHeartbeatSucceeded SDAM change event - * - * @event Mongos#serverHeartbeatSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event Mongos#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event Mongos#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event Mongos#commandFailed - * @type {object} - */ - -module.exports = Mongos; diff --git a/lib/core/topologies/replset.js b/lib/core/topologies/replset.js deleted file mode 100644 index b289d59a345..00000000000 --- a/lib/core/topologies/replset.js +++ /dev/null @@ -1,1559 +0,0 @@ -'use strict'; - -const inherits = require('util').inherits; -const f = require('util').format; -const EventEmitter = require('events').EventEmitter; -const ReadPreference = require('./read_preference'); -const CoreCursor = require('../cursor').CoreCursor; -const retrieveBSON = require('../connection/utils').retrieveBSON; -const Logger = require('../connection/logger'); -const MongoError = require('../error').MongoError; -const Server = require('./server'); -const ReplSetState = require('./replset_state'); -const Timeout = require('./shared').Timeout; -const Interval = require('./shared').Interval; -const SessionMixins = require('./shared').SessionMixins; -const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; -const relayEvents = require('../utils').relayEvents; -const isRetryableError = require('../error').isRetryableError; -const BSON = retrieveBSON(); -const calculateDurationInMs = require('../utils').calculateDurationInMs; -const getMMAPError = require('./shared').getMMAPError; -const makeClientMetadata = require('../utils').makeClientMetadata; - -// -// States -var DISCONNECTED = 'disconnected'; -var CONNECTING = 'connecting'; -var CONNECTED = 'connected'; -var UNREFERENCED = 'unreferenced'; -var DESTROYED = 'destroyed'; - -function stateTransition(self,
newState) { - var legalTransitions = { - disconnected: [CONNECTING, DESTROYED, DISCONNECTED], - connecting: [CONNECTING, DESTROYED, CONNECTED, DISCONNECTED], - connected: [CONNECTED, DISCONNECTED, DESTROYED, UNREFERENCED], - unreferenced: [UNREFERENCED, DESTROYED], - destroyed: [DESTROYED] - }; - - // Get current state - var legalStates = legalTransitions[self.state]; - if (legalStates && legalStates.indexOf(newState) !== -1) { - self.state = newState; - } else { - self.s.logger.error( - f( - 'ReplSet with id [%s] attempted an illegal state transition from [%s] to [%s]; only the following states are allowed: [%s]', - self.id, - self.state, - newState, - legalStates - ) - ); - } -} - -// -// ReplSet instance id -var id = 1; -var handlers = ['connect', 'close', 'error', 'timeout', 'parseError']; - -/** - * Creates a new Replset instance - * @class - * @param {array} seedlist A list of seeds for the replicaset - * @param {string} options.setName The Replicaset set name - * @param {boolean} [options.secondaryOnlyConnectionAllowed=false] Allow connection to a secondary only replicaset - * @param {number} [options.haInterval=10000] The high availability period for replicaset inquiry - * @param {boolean} [options.emitError=false] Server will emit errors events - * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors - * @param {number} [options.size=5] Server connection pool size - * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled - * @param {boolean} [options.noDelay=true] TCP Connection no delay - * @param {number} [options.connectionTimeout=10000] TCP Connection timeout setting - * @param {number} [options.socketTimeout=0] TCP Socket timeout setting - * @param {boolean} [options.ssl=false] Use SSL for connection - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identity during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {Buffer} [options.ca] SSL Certificate store binary buffer - * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer - * @param {Buffer} [options.cert] SSL Certificate binary buffer - * @param {Buffer} [options.key] SSL Key file binary buffer - * @param {string} [options.passphrase] SSL Certificate pass phrase - * @param {string} [options.servername=null] String containing the server name requested via TLS SNI. - * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates - * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits - * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. - * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. - * @param {number} [options.pingInterval=5000] Ping interval to check the response time to the different servers - * @param {number} [options.localThresholdMS=15] Cutoff latency point in MS for Replicaset member selection - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit.
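A hypothetical usage sketch for this deleted ReplSet topology, using the seedlist shape the constructor validates and option names from the JSDoc above; the require path, host names, and values are illustrative only:

```js
// Construct and connect a legacy ReplSet topology (illustrative).
const ReplSet = require('./lib/core/topologies/replset'); // path as in the deleted module layout

const replset = new ReplSet(
  [
    { host: 'rs0.example.com', port: 27017 }, // each entry must have a string host
    { host: 'rs1.example.com', port: 27017 } // and a numeric port, or the constructor throws
  ],
  { setName: 'rs', haInterval: 10000, localThresholdMS: 15 }
);

replset.on('connect', () => console.log('replica set connected'));
replset.on('error', err => console.error(err));
replset.connect();
```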
- * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @return {ReplSet} A ReplSet instance - * @fires ReplSet#connect - * @fires ReplSet#ha - * @fires ReplSet#joined - * @fires ReplSet#left - * @fires ReplSet#failed - * @fires ReplSet#fullsetup - * @fires ReplSet#all - * @fires ReplSet#error - * @fires ReplSet#serverHeartbeatStarted - * @fires ReplSet#serverHeartbeatSucceeded - * @fires ReplSet#serverHeartbeatFailed - * @fires ReplSet#topologyOpening - * @fires ReplSet#topologyClosed - * @fires ReplSet#topologyDescriptionChanged - * @property {string} type the topology type. - * @property {string} parserType the parser type used (c++ or js). - */ -var ReplSet = function(seedlist, options) { - var self = this; - options = options || {}; - - // Validate seedlist - if (!Array.isArray(seedlist)) throw new MongoError('seedlist must be an array'); - // Validate list - if (seedlist.length === 0) throw new MongoError('seedlist must contain at least one entry'); - // Validate entries - seedlist.forEach(function(e) { - if (typeof e.host !== 'string' || typeof e.port !== 'number') - throw new MongoError('seedlist entry must contain a host and port'); - }); - - // Initialize the EventEmitter base class - EventEmitter.call(this); - - // Get replSet Id - this.id = id++; - - // Get the localThresholdMS - var localThresholdMS = options.localThresholdMS || 15; - // Backward compatibility - if (options.acceptableLatency) localThresholdMS = options.acceptableLatency; - - // Create a logger - var logger = Logger('ReplSet', options); - - // Internal state - this.s = { - options: Object.assign({ metadata: makeClientMetadata(options) }, options), - // BSON instance - bson: - options.bson || - new BSON([ - BSON.Binary, - BSON.Code, - BSON.DBRef, - BSON.Decimal128, - BSON.Double, - BSON.Int32, - BSON.Long, - BSON.Map, - BSON.MaxKey, - BSON.MinKey, - BSON.ObjectId, - BSON.BSONRegExp, - BSON.Symbol, - BSON.Timestamp - ]), - // Factory overrides - Cursor: options.cursorFactory || CoreCursor, - // Logger instance - logger: logger, - // Seedlist - seedlist: seedlist, - // Replicaset state - replicaSetState: new ReplSetState({ - id: this.id, - setName: options.setName, - acceptableLatency: localThresholdMS, - heartbeatFrequencyMS: options.haInterval ? options.haInterval : 10000, - logger: logger - }), - // Current servers we are connecting to - connectingServers: [], - // HA interval - haInterval: options.haInterval ? options.haInterval : 10000, - // Minimum heartbeat frequency used if we detect a server close - minHeartbeatFrequencyMS: 500, - // Disconnect handler - disconnectHandler: options.disconnectHandler, - // Server selection index - index: 0, - // Connect function options passed in - connectOptions: {}, - // Are we running in debug mode - debug: typeof options.debug === 'boolean' ? options.debug : false - }; - - // Add handler for topology change - this.s.replicaSetState.on('topologyDescriptionChanged', function(r) { - self.emit('topologyDescriptionChanged', r); - }); - - // Log a warning if socketTimeout < haInterval, as it will cause - // a lot of recycled connections to happen. - if ( - this.s.logger.isWarn() && - this.s.options.socketTimeout !== 0 && - this.s.options.socketTimeout < this.s.haInterval - ) { - this.s.logger.warn( - f( - 'warning socketTimeout %s is less than haInterval %s.
This might cause unnecessary server reconnections due to socket timeouts', - this.s.options.socketTimeout, - this.s.haInterval - ) - ); - } - - // Add forwarding of events from state handler - var types = ['joined', 'left']; - types.forEach(function(x) { - self.s.replicaSetState.on(x, function(t, s) { - self.emit(x, t, s); - }); - }); - - // Connect state - this.initialConnectState = { - connect: false, - fullsetup: false, - all: false - }; - - // Disconnected state - this.state = DISCONNECTED; - this.haTimeoutId = null; - // Last ismaster - this.ismaster = null; - // Contains the intervalId - this.intervalIds = []; - - // Highest clusterTime seen in responses from the current deployment - this.clusterTime = null; -}; - -inherits(ReplSet, EventEmitter); -Object.assign(ReplSet.prototype, SessionMixins); - -Object.defineProperty(ReplSet.prototype, 'type', { - enumerable: true, - get: function() { - return 'replset'; - } -}); - -Object.defineProperty(ReplSet.prototype, 'parserType', { - enumerable: true, - get: function() { - return BSON.native ? 'c++' : 'js'; - } -}); - -Object.defineProperty(ReplSet.prototype, 'logicalSessionTimeoutMinutes', { - enumerable: true, - get: function() { - return this.s.replicaSetState.logicalSessionTimeoutMinutes || null; - } -}); - -function rexecuteOperations(self) { - // If we have a primary and a disconnect handler, execute - // buffered operations - if (self.s.replicaSetState.hasPrimaryAndSecondary() && self.s.disconnectHandler) { - self.s.disconnectHandler.execute(); - } else if (self.s.replicaSetState.hasPrimary() && self.s.disconnectHandler) { - self.s.disconnectHandler.execute({ executePrimary: true }); - } else if (self.s.replicaSetState.hasSecondary() && self.s.disconnectHandler) { - self.s.disconnectHandler.execute({ executeSecondary: true }); - } -} - -function connectNewServers(self, servers, callback) { - // No new servers - if (servers.length === 0) { - return callback(); - } - - // Count of servers left to connect - var count = servers.length; - var error = null; - - function done() { - count = count - 1; - if (count === 0) { - callback(error); - } - } - - // Handle events - var _handleEvent = function(self, event) { - return function(err) { - var _self = this; - - // Destroyed - if (self.state === DESTROYED || self.state === UNREFERENCED) { - this.destroy({ force: true }); - return done(); - } - - if (event === 'connect') { - // Update the state - var result = self.s.replicaSetState.update(_self); - // Update the state with the new server - if (result) { - // If this is a primary, store its lastIsMaster - if (_self.lastIsMaster() && _self.lastIsMaster().ismaster) { - self.ismaster = _self.lastIsMaster(); - } - - // Remove the handlers - for (let i = 0; i < handlers.length; i++) { - _self.removeAllListeners(handlers[i]); - } - - // Add stable state handlers - _self.on('error', handleEvent(self, 'error')); - _self.on('close', handleEvent(self, 'close')); - _self.on('timeout', handleEvent(self, 'timeout')); - _self.on('parseError', handleEvent(self, 'parseError')); - - // Enable the monitoring of the new server - monitorServer(_self.lastIsMaster().me, self, {}); - - // Re-execute any stalled operations - rexecuteOperations(self); - } else { - _self.destroy({ force: true }); - } - } else if (event === 'error') { - error = err; - } - - // Re-execute any stalled operations - rexecuteOperations(self); - done(); - }; - }; - - // Execute method - function execute(_server, i) { - setTimeout(function() { - // Destroyed - if (self.state === DESTROYED || self.state === UNREFERENCED) { - return; - }
- - // Remove the existing connecting server if it failed to connect; otherwise - // wait for that server to connect - const existingServerIdx = self.s.connectingServers.findIndex(s => s.name === _server); - if (existingServerIdx >= 0) { - const connectingServer = self.s.connectingServers[existingServerIdx]; - connectingServer.destroy({ force: true }); - - self.s.connectingServers.splice(existingServerIdx, 1); - return done(); - } - - // Create a new server instance - var server = new Server( - Object.assign({}, self.s.options, { - host: _server.split(':')[0], - port: parseInt(_server.split(':')[1], 10), - reconnect: false, - monitoring: false, - parent: self - }) - ); - - // Add temp handlers - server.once('connect', _handleEvent(self, 'connect')); - server.once('close', _handleEvent(self, 'close')); - server.once('timeout', _handleEvent(self, 'timeout')); - server.once('error', _handleEvent(self, 'error')); - server.once('parseError', _handleEvent(self, 'parseError')); - - // SDAM Monitoring events - server.on('serverOpening', e => self.emit('serverOpening', e)); - server.on('serverDescriptionChanged', e => self.emit('serverDescriptionChanged', e)); - server.on('serverClosed', e => self.emit('serverClosed', e)); - - // Command Monitoring events - relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - self.s.connectingServers.push(server); - server.connect(self.s.connectOptions); - }, i); - } - - // Create new instances - for (var i = 0; i < servers.length; i++) { - execute(servers[i], i); - } -} - -// Ping the server -var pingServer = function(self, server, cb) { - // Measure running time - var start = new Date().getTime(); - - // Emit the server heartbeat start - emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: server.name }); - - // Execute ismaster - // Set the socketTimeout for a monitoring message to a low number, - // ensuring ismaster calls are timed out quickly - server.command( - 'admin.$cmd', - { - ismaster: true - }, - { - monitoring: true, - socketTimeout: self.s.options.connectionTimeout || 2000 - }, - function(err, r) { - if (self.state === DESTROYED || self.state === UNREFERENCED) { - server.destroy({ force: true }); - return cb(err, r); - } - - // Calculate latency - var latencyMS = new Date().getTime() - start; - - // Set the last updatedTime - var hrtime = process.hrtime(); - server.lastUpdateTime = (hrtime[0] * 1e9 + hrtime[1]) / 1e6; - - // We had an error, remove it from the state - if (err) { - // Emit the server heartbeat failure - emitSDAMEvent(self, 'serverHeartbeatFailed', { - durationMS: latencyMS, - failure: err, - connectionId: server.name - }); - - // Remove server from the state - self.s.replicaSetState.remove(server); - } else { - // Update the server ismaster - server.ismaster = r.result; - - // Check if we have a lastWriteDate; convert it to MS - // and store on the server instance for later use - if (server.ismaster.lastWrite && server.ismaster.lastWrite.lastWriteDate) { - server.lastWriteDate = server.ismaster.lastWrite.lastWriteDate.getTime(); - } - - // Do we have a brand new server - if (server.lastIsMasterMS === -1) { - server.lastIsMasterMS = latencyMS; - } else if (server.lastIsMasterMS) { - // After the first measurement, average RTT MUST be computed using an - // exponentially-weighted moving average formula, with a weighting factor (alpha) of 0.2.
- // If the prior average is denoted old_rtt, then the new average (new_rtt) is - // computed from a new RTT measurement (x) using the following formula: - // alpha = 0.2 - // new_rtt = alpha * x + (1 - alpha) * old_rtt - server.lastIsMasterMS = 0.2 * latencyMS + (1 - 0.2) * server.lastIsMasterMS; - } - - if (self.s.replicaSetState.update(server)) { - // If this is a primary, store its lastIsMaster - if (server.lastIsMaster() && server.lastIsMaster().ismaster) { - self.ismaster = server.lastIsMaster(); - } - } - - // Server heart beat event - emitSDAMEvent(self, 'serverHeartbeatSucceeded', { - durationMS: latencyMS, - reply: r.result, - connectionId: server.name - }); - } - - // Calculate the staleness for this server - self.s.replicaSetState.updateServerMaxStaleness(server, self.s.haInterval); - - // Callback - cb(err, r); - } - ); -}; - -// Each server is monitored in parallel in its own timeout loop -var monitorServer = function(host, self, options) { - // If this is not the initial scan - // and this server is already being monitored, skip it - if (!options.haInterval) { - for (var i = 0; i < self.intervalIds.length; i++) { - if (self.intervalIds[i].__host === host) { - return; - } - } - } - - // Get the haInterval - var _process = options.haInterval ? Timeout : Interval; - var _haInterval = options.haInterval ? options.haInterval : self.s.haInterval; - - // Create the interval - var intervalId = new _process(function() { - if (self.state === DESTROYED || self.state === UNREFERENCED) { - // clearInterval(intervalId); - intervalId.stop(); - return; - } - - // Do we already have a server connection available for this host - var _server = self.s.replicaSetState.get(host); - - // Check if we have a known server connection and reuse - if (_server) { - // Ping the server - return pingServer(self, _server, function(err) { - if (err) { - // NOTE: should something happen here?
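The moving-average rule quoted in pingServer above, written out as a standalone helper so the arithmetic is easy to check; the function name is illustrative, but the formula and the first-measurement rule (`lastIsMasterMS === -1`) come straight from the deleted code:

```js
// Exponentially-weighted moving average for RTT:
// new_rtt = alpha * x + (1 - alpha) * old_rtt, with alpha = 0.2.
// A first measurement (oldRtt === -1) is taken as-is.
function updateRtt(oldRtt, sampleMs) {
  const alpha = 0.2;
  return oldRtt === -1 ? sampleMs : alpha * sampleMs + (1 - alpha) * oldRtt;
}

console.log(updateRtt(-1, 10)); // 10 (first measurement)
console.log(updateRtt(10, 30)); // 14 (0.2 * 30 + 0.8 * 10)
```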
- return; - } - - if (self.state === DESTROYED || self.state === UNREFERENCED) { - intervalId.stop(); - return; - } - - // Filter out all completed intervalIds - self.intervalIds = self.intervalIds.filter(function(intervalId) { - return intervalId.isRunning(); - }); - - // Initial sweep - if (_process === Timeout) { - if ( - self.state === CONNECTING && - ((self.s.replicaSetState.hasSecondary() && - self.s.options.secondaryOnlyConnectionAllowed) || - self.s.replicaSetState.hasPrimary()) - ) { - self.state = CONNECTED; - - // Emit connect signal - process.nextTick(function() { - self.emit('connect', self); - }); - - // Start topology interval check - topologyMonitor(self, {}); - } - } else { - if ( - self.state === DISCONNECTED && - ((self.s.replicaSetState.hasSecondary() && - self.s.options.secondaryOnlyConnectionAllowed) || - self.s.replicaSetState.hasPrimary()) - ) { - self.state = CONNECTED; - - // Re-execute any stalled operations - rexecuteOperations(self); - - // Emit reconnect signal - process.nextTick(function() { - self.emit('reconnect', self); - }); - } - } - - if ( - self.initialConnectState.connect && - !self.initialConnectState.fullsetup && - self.s.replicaSetState.hasPrimaryAndSecondary() - ) { - // Set initial connect state - self.initialConnectState.fullsetup = true; - self.initialConnectState.all = true; - - process.nextTick(function() { - self.emit('fullsetup', self); - self.emit('all', self); - }); - } - }); - } - }, _haInterval); - - // Start the interval - intervalId.start(); - // Add the intervalId host name - intervalId.__host = host; - // Add the intervalId to our list of intervalIds - self.intervalIds.push(intervalId); -}; - -function topologyMonitor(self, options) { - if (self.state === DESTROYED || self.state === UNREFERENCED) return; - options = options || {}; - - // Get the servers - var servers = Object.keys(self.s.replicaSetState.set); - - // Get the haInterval - var _process = options.haInterval ? Timeout : Interval; - var _haInterval = options.haInterval ? options.haInterval : self.s.haInterval; - - if (_process === Timeout) { - return connectNewServers(self, self.s.replicaSetState.unknownServers, function(err) { - // Don't emit errors if the topology was already destroyed - if (self.state === DESTROYED || self.state === UNREFERENCED) { - return; - } - - if (!self.s.replicaSetState.hasPrimary() && !self.s.options.secondaryOnlyConnectionAllowed) { - if (err) { - return self.emit('error', err); - } - - self.emit( - 'error', - new MongoError('no primary found in replicaset or invalid replica set name') - ); - return self.destroy({ force: true }); - } else if ( - !self.s.replicaSetState.hasSecondary() && - self.s.options.secondaryOnlyConnectionAllowed - ) { - if (err) { - return self.emit('error', err); - } - - self.emit( - 'error', - new MongoError('no secondary found in replicaset or invalid replica set name') - ); - return self.destroy({ force: true }); - } - - for (var i = 0; i < servers.length; i++) { - monitorServer(servers[i], self, options); - } - }); - } else { - for (var i = 0; i < servers.length; i++) { - monitorServer(servers[i], self, options); - } - } - - // Run the reconnect process - function executeReconnect(self) { - return function() { - if (self.state === DESTROYED || self.state === UNREFERENCED) { - return; - } - - connectNewServers(self, self.s.replicaSetState.unknownServers, function() { - var monitoringFrequency = self.s.replicaSetState.hasPrimary() - ?
_haInterval - : self.s.minHeartbeatFrequencyMS; - - // Create a timeout - self.intervalIds.push(new Timeout(executeReconnect(self), monitoringFrequency).start()); - }); - }; - } - - // Decide what kind of interval to use - var intervalTime = !self.s.replicaSetState.hasPrimary() - ? self.s.minHeartbeatFrequencyMS - : _haInterval; - - self.intervalIds.push(new Timeout(executeReconnect(self), intervalTime).start()); -} - -function addServerToList(list, server) { - for (var i = 0; i < list.length; i++) { - if (list[i].name.toLowerCase() === server.name.toLowerCase()) return true; - } - - list.push(server); -} - -function handleEvent(self, event) { - return function() { - if (self.state === DESTROYED || self.state === UNREFERENCED) return; - // Debug log - if (self.s.logger.isDebug()) { - self.s.logger.debug( - f('handleEvent %s from server %s in replset with id %s', event, this.name, self.id) - ); - } - - // Remove from the replicaset state - self.s.replicaSetState.remove(this); - - // If we are in a destroyed state, return - if (self.state === DESTROYED || self.state === UNREFERENCED) return; - - // If no primary or secondary is available - if ( - !self.s.replicaSetState.hasPrimary() && - !self.s.replicaSetState.hasSecondary() && - self.s.options.secondaryOnlyConnectionAllowed - ) { - stateTransition(self, DISCONNECTED); - } else if (!self.s.replicaSetState.hasPrimary()) { - stateTransition(self, DISCONNECTED); - } - - addServerToList(self.s.connectingServers, this); - }; -} - -function shouldTriggerConnect(self) { - const isConnecting = self.state === CONNECTING; - const hasPrimary = self.s.replicaSetState.hasPrimary(); - const hasSecondary = self.s.replicaSetState.hasSecondary(); - const secondaryOnlyConnectionAllowed = self.s.options.secondaryOnlyConnectionAllowed; - const readPreferenceSecondary = - self.s.connectOptions.readPreference && - self.s.connectOptions.readPreference.equals(ReadPreference.secondary); - - return ( - (isConnecting && - ((readPreferenceSecondary && hasSecondary) || (!readPreferenceSecondary && hasPrimary))) || - (hasSecondary && secondaryOnlyConnectionAllowed) - ); -} - -function handleInitialConnectEvent(self, event) { - return function() { - var _this = this; - // Debug log - if (self.s.logger.isDebug()) { - self.s.logger.debug( - f( - 'handleInitialConnectEvent %s from server %s in replset with id %s', - event, - this.name, - self.id - ) - ); - } - - // Destroy the instance - if (self.state === DESTROYED || self.state === UNREFERENCED) { - return this.destroy({ force: true }); - } - - // Check the type of server - if (event === 'connect') { - // Update the state - var result = self.s.replicaSetState.update(_this); - if (result === true) { - // If this is a primary, store its lastIsMaster - if (_this.lastIsMaster() && _this.lastIsMaster().ismaster) { - self.ismaster = _this.lastIsMaster(); - } - - // Debug log - if (self.s.logger.isDebug()) { - self.s.logger.debug( - f( - 'handleInitialConnectEvent %s from server %s in replset with id %s has state [%s]', - event, - _this.name, - self.id, - JSON.stringify(self.s.replicaSetState.set) - ) - ); - } - - // Remove the handlers - for (let i = 0; i < handlers.length; i++) { - _this.removeAllListeners(handlers[i]); - } - - // Add stable state handlers - _this.on('error', handleEvent(self, 'error')); - _this.on('close', handleEvent(self, 'close')); - _this.on('timeout', handleEvent(self, 'timeout')); - _this.on('parseError', handleEvent(self, 'parseError')); - - // Do we have a primary or primaryAndSecondary - if (shouldTriggerConnect(self))
{ - // We are connected - self.state = CONNECTED; - - // Set initial connect state - self.initialConnectState.connect = true; - // Emit connect event - process.nextTick(function() { - self.emit('connect', self); - }); - - topologyMonitor(self, {}); - } - } else if (result instanceof MongoError) { - _this.destroy({ force: true }); - self.destroy({ force: true }); - return self.emit('error', result); - } else { - _this.destroy({ force: true }); - } - } else { - // Emit failure to connect - self.emit('failed', this); - - addServerToList(self.s.connectingServers, this); - // Remove from the state - self.s.replicaSetState.remove(this); - } - - if ( - self.initialConnectState.connect && - !self.initialConnectState.fullsetup && - self.s.replicaSetState.hasPrimaryAndSecondary() - ) { - // Set initial connect state - self.initialConnectState.fullsetup = true; - self.initialConnectState.all = true; - - process.nextTick(function() { - self.emit('fullsetup', self); - self.emit('all', self); - }); - } - - // Remove from the list of connectingServers - for (var i = 0; i < self.s.connectingServers.length; i++) { - if (self.s.connectingServers[i].equals(this)) { - self.s.connectingServers.splice(i, 1); - } - } - - // Trigger topologyMonitor - if (self.s.connectingServers.length === 0 && self.state === CONNECTING) { - topologyMonitor(self, { haInterval: 1 }); - } - }; -} - -function connectServers(self, servers) { - // Update connectingServers - self.s.connectingServers = self.s.connectingServers.concat(servers); - - // Index used to interleave the server connects, avoiding - // runtime issues on I/O constrained VMs - var timeoutInterval = 0; - - function connect(server, timeoutInterval) { - setTimeout(function() { - // Add the server to the state - if (self.s.replicaSetState.update(server)) { - // If this is a primary, store its lastIsMaster - if (server.lastIsMaster() && server.lastIsMaster().ismaster) { - self.ismaster = server.lastIsMaster(); - } - } - - // Add event handlers - server.once('close', handleInitialConnectEvent(self, 'close')); - server.once('timeout', handleInitialConnectEvent(self, 'timeout')); - server.once('parseError', handleInitialConnectEvent(self, 'parseError')); - server.once('error', handleInitialConnectEvent(self, 'error')); - server.once('connect', handleInitialConnectEvent(self, 'connect')); - - // SDAM Monitoring events - server.on('serverOpening', e => self.emit('serverOpening', e)); - server.on('serverDescriptionChanged', e => self.emit('serverDescriptionChanged', e)); - server.on('serverClosed', e => self.emit('serverClosed', e)); - - // Command Monitoring events - relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - // Start connection - server.connect(self.s.connectOptions); - }, timeoutInterval); - } - - // Start all the servers - while (servers.length > 0) { - connect(servers.shift(), timeoutInterval++); - } -} - -/** - * Emit an event if any listeners exist - * @method - */ -function emitSDAMEvent(self, event, description) { - if (self.listeners(event).length > 0) { - self.emit(event, description); - } -} - -/** - * Initiate server connect - */ -ReplSet.prototype.connect = function(options) { - var self = this; - // Add any connect level options to the internal state - this.s.connectOptions = options || {}; - - // Set connecting state - stateTransition(this, CONNECTING); - - // Create server instances - var servers = this.s.seedlist.map(function(x) { - return new Server( - Object.assign({}, self.s.options, x, options, { - reconnect: false, - monitoring: false,
-          parent: self
-        })
-      );
-    });
-
-  // Error out, as the high availability interval must be less than socketTimeout
-  if (
-    this.s.options.socketTimeout > 0 &&
-    this.s.options.socketTimeout <= this.s.options.haInterval
-  ) {
-    return self.emit(
-      'error',
-      new MongoError(
-        f(
-          'haInterval [%s] MS must be set to less than socketTimeout [%s] MS',
-          this.s.options.haInterval,
-          this.s.options.socketTimeout
-        )
-      )
-    );
-  }
-
-  // Emit the topology opening event
-  emitSDAMEvent(this, 'topologyOpening', { topologyId: this.id });
-  // Start all server connections
-  connectServers(self, servers);
-};
-
-/**
- * Authenticate the topology.
- * @method
- * @param {MongoCredentials} credentials The credentials for authentication we are using
- * @param {authResultCallback} callback A callback function
- */
-ReplSet.prototype.auth = function(credentials, callback) {
-  if (typeof callback === 'function') callback(null, null);
-};
-
-/**
- * Destroy the server connection
- * @param {boolean} [options.force=false] Force destroy the pool
- * @method
- */
-ReplSet.prototype.destroy = function(options, callback) {
-  if (typeof options === 'function') {
-    callback = options;
-    options = {};
-  }
-
-  options = options || {};
-
-  let destroyCount = this.s.connectingServers.length + 1; // +1 for the callback from `replicaSetState.destroy`
-  const serverDestroyed = () => {
-    destroyCount--;
-    if (destroyCount > 0) {
-      return;
-    }
-
-    // Emit topology closing event
-    emitSDAMEvent(this, 'topologyClosed', { topologyId: this.id });
-
-    // Transition state
-    stateTransition(this, DESTROYED);
-
-    if (typeof callback === 'function') {
-      callback(null, null);
-    }
-  };
-
-  // Clear out any monitoring process
-  if (this.haTimeoutId) clearTimeout(this.haTimeoutId);
-
-  // Clear out all monitoring
-  for (var i = 0; i < this.intervalIds.length; i++) {
-    this.intervalIds[i].stop();
-  }
-
-  // Reset list of intervalIds
-  this.intervalIds = [];
-
-  if (destroyCount === 0) {
-    serverDestroyed();
-    return;
-  }
-
-  // Destroy the replicaset
-  this.s.replicaSetState.destroy(options, serverDestroyed);
-
-  // Destroy all connecting servers
-  this.s.connectingServers.forEach(function(x) {
-    x.destroy(options, serverDestroyed);
-  });
-};
-
-/**
- * Unref all connections belonging to this server
- * @method
- */
-ReplSet.prototype.unref = function() {
-  // Transition state
-  stateTransition(this, UNREFERENCED);
-
-  this.s.replicaSetState.allServers().forEach(function(x) {
-    x.unref();
-  });
-
-  clearTimeout(this.haTimeoutId);
-};
-
-/**
- * Returns the last known ismaster document for this server
- * @method
- * @return {object}
- */
-ReplSet.prototype.lastIsMaster = function() {
-  // If secondaryOnlyConnectionAllowed is set and there is no primary but a
-  // secondary exists, return the secondary's ismaster result.
-  if (
-    this.s.options.secondaryOnlyConnectionAllowed &&
-    !this.s.replicaSetState.hasPrimary() &&
-    this.s.replicaSetState.hasSecondary()
-  ) {
-    return this.s.replicaSetState.secondaries[0].lastIsMaster();
-  }
-
-  return this.s.replicaSetState.primary
-    ?
this.s.replicaSetState.primary.lastIsMaster() - : this.ismaster; -}; - -/** - * All raw connections - * @method - * @return {Connection[]} - */ -ReplSet.prototype.connections = function() { - var servers = this.s.replicaSetState.allServers(); - var connections = []; - for (var i = 0; i < servers.length; i++) { - connections = connections.concat(servers[i].connections()); - } - - return connections; -}; - -/** - * Figure out if the server is connected - * @method - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @return {boolean} - */ -ReplSet.prototype.isConnected = function(options) { - options = options || {}; - - // If we specified a read preference check if we are connected to something - // than can satisfy this - if (options.readPreference && options.readPreference.equals(ReadPreference.secondary)) { - return this.s.replicaSetState.hasSecondary(); - } - - if (options.readPreference && options.readPreference.equals(ReadPreference.primary)) { - return this.s.replicaSetState.hasPrimary(); - } - - if (options.readPreference && options.readPreference.equals(ReadPreference.primaryPreferred)) { - return this.s.replicaSetState.hasSecondary() || this.s.replicaSetState.hasPrimary(); - } - - if (options.readPreference && options.readPreference.equals(ReadPreference.secondaryPreferred)) { - return this.s.replicaSetState.hasSecondary() || this.s.replicaSetState.hasPrimary(); - } - - if (this.s.options.secondaryOnlyConnectionAllowed && this.s.replicaSetState.hasSecondary()) { - return true; - } - - return this.s.replicaSetState.hasPrimary(); -}; - -/** - * Figure out if the replicaset instance was destroyed by calling destroy - * @method - * @return {boolean} - */ -ReplSet.prototype.isDestroyed = function() { - return this.state === DESTROYED; -}; - -const SERVER_SELECTION_TIMEOUT_MS = 10000; // hardcoded `serverSelectionTimeoutMS` for legacy topology -const SERVER_SELECTION_INTERVAL_MS = 1000; // time to wait between selection attempts -/** - * Selects a server - * - * @method - * @param {function} selector Unused - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {ClientSession} [options.session] Unused - * @param {function} callback - */ -ReplSet.prototype.selectServer = function(selector, options, callback) { - if (typeof selector === 'function' && typeof callback === 'undefined') - (callback = selector), (selector = undefined), (options = {}); - if (typeof options === 'function') (callback = options), (options = selector); - options = options || {}; - - let readPreference; - if (selector instanceof ReadPreference) { - readPreference = selector; - } else { - readPreference = options.readPreference || ReadPreference.primary; - } - - let lastError; - const start = process.hrtime(); - const _selectServer = () => { - if (calculateDurationInMs(start) >= SERVER_SELECTION_TIMEOUT_MS) { - if (lastError != null) { - callback(lastError, null); - } else { - callback(new MongoError('Server selection timed out')); - } - - return; - } - - const server = this.s.replicaSetState.pickServer(readPreference); - if (server == null) { - setTimeout(_selectServer, SERVER_SELECTION_INTERVAL_MS); - return; - } - - if (!(server instanceof Server)) { - lastError = server; - setTimeout(_selectServer, SERVER_SELECTION_INTERVAL_MS); - return; - } - - if (this.s.debug) this.emit('pickedServer', options.readPreference, server); - callback(null, server); - }; - - _selectServer(); -}; - -/** - * Get all connected 
servers - * @method - * @return {Server[]} - */ -ReplSet.prototype.getServers = function() { - return this.s.replicaSetState.allServers(); -}; - -// -// Execute write operation -function executeWriteOperation(args, options, callback) { - if (typeof options === 'function') (callback = options), (options = {}); - options = options || {}; - - // TODO: once we drop Node 4, use destructuring either here or in arguments. - const self = args.self; - const op = args.op; - const ns = args.ns; - const ops = args.ops; - - if (self.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - const willRetryWrite = - !args.retrying && - !!options.retryWrites && - options.session && - isRetryableWritesSupported(self) && - !options.session.inTransaction(); - - if (!self.s.replicaSetState.hasPrimary()) { - if (self.s.disconnectHandler) { - // Not connected but we have a disconnecthandler - return self.s.disconnectHandler.add(op, ns, ops, options, callback); - } else if (!willRetryWrite) { - // No server returned we had an error - return callback(new MongoError('no primary server found')); - } - } - - const handler = (err, result) => { - if (!err) return callback(null, result); - if (!isRetryableError(err)) { - err = getMMAPError(err); - return callback(err); - } - - if (willRetryWrite) { - const newArgs = Object.assign({}, args, { retrying: true }); - return executeWriteOperation(newArgs, options, callback); - } - - // Per SDAM, remove primary from replicaset - if (self.s.replicaSetState.primary) { - self.s.replicaSetState.primary.destroy(); - self.s.replicaSetState.remove(self.s.replicaSetState.primary, { force: true }); - } - - return callback(err); - }; - - if (callback.operationId) { - handler.operationId = callback.operationId; - } - - // increment and assign txnNumber - if (willRetryWrite) { - options.session.incrementTransactionNumber(); - options.willRetryWrite = willRetryWrite; - } - - self.s.replicaSetState.primary[op](ns, ops, options, handler); -} - -/** - * Insert one or more documents - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of documents to insert - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -ReplSet.prototype.insert = function(ns, ops, options, callback) { - // Execute write operation - executeWriteOperation({ self: this, op: 'insert', ns, ops }, options, callback); -}; - -/** - * Perform one or more update operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of updates - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. 
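Stepping out of the JSDoc for a moment: the legacy `selectServer` shown earlier in this hunk is a deadline-bounded poll, retrying `pickServer` every SERVER_SELECTION_INTERVAL_MS until SERVER_SELECTION_TIMEOUT_MS has elapsed. The same shape in isolation (the `pick` callback stands in for `replicaSetState.pickServer`; this is a sketch, not the driver's code):

// Poll `pick` until it yields a server or the deadline passes.
function selectWithDeadline(pick, callback, timeoutMs, intervalMs) {
  var start = process.hrtime();

  function elapsedMs() {
    var delta = process.hrtime(start);
    return delta[0] * 1000 + delta[1] / 1e6;
  }

  (function attempt() {
    if (elapsedMs() >= timeoutMs) {
      return callback(new Error('Server selection timed out'));
    }

    var server = pick();
    if (server == null) {
      // Nothing suitable yet, try again after the selection interval
      return setTimeout(attempt, intervalMs);
    }

    callback(null, server);
  })();
}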
- * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -ReplSet.prototype.update = function(ns, ops, options, callback) { - // Execute write operation - executeWriteOperation({ self: this, op: 'update', ns, ops }, options, callback); -}; - -/** - * Perform one or more remove operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of removes - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -ReplSet.prototype.remove = function(ns, ops, options, callback) { - // Execute write operation - executeWriteOperation({ self: this, op: 'remove', ns, ops }, options, callback); -}; - -const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete']; - -function isWriteCommand(command) { - return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]); -} - -/** - * Execute a command - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object} cmd The command hash - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Connection} [options.connection] Specify connection object to execute command against - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {opResultCallback} callback A callback function - */ -ReplSet.prototype.command = function(ns, cmd, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) return callback(new MongoError(f('topology was destroyed'))); - var self = this; - - // Establish readPreference - var readPreference = options.readPreference ? 
options.readPreference : ReadPreference.primary; - - // If the readPreference is primary and we have no primary, store it - if ( - readPreference.preference === 'primary' && - !this.s.replicaSetState.hasPrimary() && - this.s.disconnectHandler != null - ) { - return this.s.disconnectHandler.add('command', ns, cmd, options, callback); - } else if ( - readPreference.preference === 'secondary' && - !this.s.replicaSetState.hasSecondary() && - this.s.disconnectHandler != null - ) { - return this.s.disconnectHandler.add('command', ns, cmd, options, callback); - } else if ( - readPreference.preference !== 'primary' && - !this.s.replicaSetState.hasSecondary() && - !this.s.replicaSetState.hasPrimary() && - this.s.disconnectHandler != null - ) { - return this.s.disconnectHandler.add('command', ns, cmd, options, callback); - } - - // Pick a server - var server = this.s.replicaSetState.pickServer(readPreference); - // We received an error, return it - if (!(server instanceof Server)) return callback(server); - // Emit debug event - if (self.s.debug) self.emit('pickedServer', ReadPreference.primary, server); - - // No server returned we had an error - if (server == null) { - return callback( - new MongoError( - f('no server found that matches the provided readPreference %s', readPreference) - ) - ); - } - - const willRetryWrite = - !options.retrying && - !!options.retryWrites && - options.session && - isRetryableWritesSupported(self) && - !options.session.inTransaction() && - isWriteCommand(cmd); - - const cb = (err, result) => { - if (!err) return callback(null, result); - if (!isRetryableError(err)) { - return callback(err); - } - - if (willRetryWrite) { - const newOptions = Object.assign({}, options, { retrying: true }); - return this.command(ns, cmd, newOptions, callback); - } - - // Per SDAM, remove primary from replicaset - if (this.s.replicaSetState.primary) { - this.s.replicaSetState.primary.destroy(); - this.s.replicaSetState.remove(this.s.replicaSetState.primary, { force: true }); - } - - return callback(err); - }; - - // increment and assign txnNumber - if (willRetryWrite) { - options.session.incrementTransactionNumber(); - options.willRetryWrite = willRetryWrite; - } - - // Execute the command - server.command(ns, cmd, options, cb); -}; - -/** - * Get a new cursor - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId - * @param {object} [options] Options for the cursor - * @param {object} [options.batchSize=0] Batchsize for the operation - * @param {array} [options.documents=[]] Initial documents list for cursor - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
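One detail of the deleted `command` path worth spelling out: a command is only considered for a retryable write when its top-level key is one of the four verbs in RETRYABLE_WRITE_OPERATIONS, exactly as `isWriteCommand` above implements. For example:

const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete'];
const isWriteCommand = command => RETRYABLE_WRITE_OPERATIONS.some(op => command[op]);

isWriteCommand({ insert: 'users', documents: [{ name: 'a' }] }); // true
isWriteCommand({ find: 'users', filter: {} });                   // false
isWriteCommand({ count: 'users' });                              // false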
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {object} [options.topology] The internal topology of the created cursor - * @returns {Cursor} - */ -ReplSet.prototype.cursor = function(ns, cmd, options) { - options = options || {}; - const topology = options.topology || this; - - // Set up final cursor type - var FinalCursor = options.cursorFactory || this.s.Cursor; - - // Return the cursor - return new FinalCursor(topology, ns, cmd, options); -}; - -/** - * A replset connect event, used to verify that the connection is up and running - * - * @event ReplSet#connect - * @type {ReplSet} - */ - -/** - * A replset reconnect event, used to verify that the topology reconnected - * - * @event ReplSet#reconnect - * @type {ReplSet} - */ - -/** - * A replset fullsetup event, used to signal that all topology members have been contacted. - * - * @event ReplSet#fullsetup - * @type {ReplSet} - */ - -/** - * A replset all event, used to signal that all topology members have been contacted. - * - * @event ReplSet#all - * @type {ReplSet} - */ - -/** - * A replset failed event, used to signal that initial replset connection failed. - * - * @event ReplSet#failed - * @type {ReplSet} - */ - -/** - * A server member left the replicaset - * - * @event ReplSet#left - * @type {function} - * @param {string} type The type of member that left (primary|secondary|arbiter) - * @param {Server} server The server object that left - */ - -/** - * A server member joined the replicaset - * - * @event ReplSet#joined - * @type {function} - * @param {string} type The type of member that joined (primary|secondary|arbiter) - * @param {Server} server The server object that joined - */ - -/** - * A server opening SDAM monitoring event - * - * @event ReplSet#serverOpening - * @type {object} - */ - -/** - * A server closed SDAM monitoring event - * - * @event ReplSet#serverClosed - * @type {object} - */ - -/** - * A server description SDAM change monitoring event - * - * @event ReplSet#serverDescriptionChanged - * @type {object} - */ - -/** - * A topology open SDAM event - * - * @event ReplSet#topologyOpening - * @type {object} - */ - -/** - * A topology closed SDAM event - * - * @event ReplSet#topologyClosed - * @type {object} - */ - -/** - * A topology structure SDAM change event - * - * @event ReplSet#topologyDescriptionChanged - * @type {object} - */ - -/** - * A topology serverHeartbeatStarted SDAM event - * - * @event ReplSet#serverHeartbeatStarted - * @type {object} - */ - -/** - * A topology serverHeartbeatFailed SDAM event - * - * @event ReplSet#serverHeartbeatFailed - * @type {object} - */ - -/** - * A topology serverHeartbeatSucceeded SDAM change event - * - * @event ReplSet#serverHeartbeatSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event ReplSet#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event ReplSet#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event ReplSet#commandFailed - * @type {object} - */ - -module.exports = ReplSet; diff --git a/lib/core/topologies/replset_state.js b/lib/core/topologies/replset_state.js deleted file mode 100644 index 24c16d6d71e..00000000000 --- a/lib/core/topologies/replset_state.js +++ /dev/null @@ -1,1121 +0,0 @@ -'use strict'; - -var inherits = 
require('util').inherits, - f = require('util').format, - diff = require('./shared').diff, - EventEmitter = require('events').EventEmitter, - Logger = require('../connection/logger'), - ReadPreference = require('./read_preference'), - MongoError = require('../error').MongoError, - Buffer = require('safe-buffer').Buffer; - -var TopologyType = { - Single: 'Single', - ReplicaSetNoPrimary: 'ReplicaSetNoPrimary', - ReplicaSetWithPrimary: 'ReplicaSetWithPrimary', - Sharded: 'Sharded', - Unknown: 'Unknown' -}; - -var ServerType = { - Standalone: 'Standalone', - Mongos: 'Mongos', - PossiblePrimary: 'PossiblePrimary', - RSPrimary: 'RSPrimary', - RSSecondary: 'RSSecondary', - RSArbiter: 'RSArbiter', - RSOther: 'RSOther', - RSGhost: 'RSGhost', - Unknown: 'Unknown' -}; - -var ReplSetState = function(options) { - options = options || {}; - // Add event listener - EventEmitter.call(this); - // Topology state - this.topologyType = TopologyType.ReplicaSetNoPrimary; - this.setName = options.setName; - - // Server set - this.set = {}; - - // Unpacked options - this.id = options.id; - this.setName = options.setName; - - // Replicaset logger - this.logger = options.logger || Logger('ReplSet', options); - - // Server selection index - this.index = 0; - // Acceptable latency - this.acceptableLatency = options.acceptableLatency || 15; - - // heartbeatFrequencyMS - this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; - - // Server side - this.primary = null; - this.secondaries = []; - this.arbiters = []; - this.passives = []; - this.ghosts = []; - // Current unknown hosts - this.unknownServers = []; - // In set status - this.set = {}; - // Status - this.maxElectionId = null; - this.maxSetVersion = 0; - // Description of the Replicaset - this.replicasetDescription = { - topologyType: 'Unknown', - servers: [] - }; - - this.logicalSessionTimeoutMinutes = undefined; -}; - -inherits(ReplSetState, EventEmitter); - -ReplSetState.prototype.hasPrimaryAndSecondary = function() { - return this.primary != null && this.secondaries.length > 0; -}; - -ReplSetState.prototype.hasPrimaryOrSecondary = function() { - return this.hasPrimary() || this.hasSecondary(); -}; - -ReplSetState.prototype.hasPrimary = function() { - return this.primary != null; -}; - -ReplSetState.prototype.hasSecondary = function() { - return this.secondaries.length > 0; -}; - -ReplSetState.prototype.get = function(host) { - var servers = this.allServers(); - - for (var i = 0; i < servers.length; i++) { - if (servers[i].name.toLowerCase() === host.toLowerCase()) { - return servers[i]; - } - } - - return null; -}; - -ReplSetState.prototype.allServers = function(options) { - options = options || {}; - var servers = this.primary ? 
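Before the member-tracking methods continue below, it may help to see the shape `ReplSetState` maintains: one slot for the primary plus per-role buckets that `allServers` flattens back into a single list. A toy mirror with stub entries (real entries are Server instances):

// Toy mirror of the ReplSetState buckets
var state = {
  primary: { name: 'host1:27017' },
  secondaries: [{ name: 'host2:27017' }],
  arbiters: [{ name: 'host3:27017' }],
  passives: []
};

function allServers(state, ignoreArbiters) {
  var servers = state.primary ? [state.primary] : [];
  servers = servers.concat(state.secondaries);
  if (!ignoreArbiters) servers = servers.concat(state.arbiters);
  return servers.concat(state.passives);
}

allServers(state, true).map(function(s) { return s.name; }); // ['host1:27017', 'host2:27017']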
[this.primary] : []; - servers = servers.concat(this.secondaries); - if (!options.ignoreArbiters) servers = servers.concat(this.arbiters); - servers = servers.concat(this.passives); - return servers; -}; - -ReplSetState.prototype.destroy = function(options, callback) { - const serversToDestroy = this.secondaries - .concat(this.arbiters) - .concat(this.passives) - .concat(this.ghosts); - if (this.primary) serversToDestroy.push(this.primary); - - let serverCount = serversToDestroy.length; - const serverDestroyed = () => { - serverCount--; - if (serverCount > 0) { - return; - } - - // Clear out the complete state - this.secondaries = []; - this.arbiters = []; - this.passives = []; - this.ghosts = []; - this.unknownServers = []; - this.set = {}; - this.primary = null; - - // Emit the topology changed - emitTopologyDescriptionChanged(this); - - if (typeof callback === 'function') { - callback(null, null); - } - }; - - if (serverCount === 0) { - serverDestroyed(); - return; - } - - serversToDestroy.forEach(server => server.destroy(options, serverDestroyed)); -}; - -ReplSetState.prototype.remove = function(server, options) { - options = options || {}; - - // Get the server name and lowerCase it - var serverName = server.name.toLowerCase(); - - // Only remove if the current server is not connected - var servers = this.primary ? [this.primary] : []; - servers = servers.concat(this.secondaries); - servers = servers.concat(this.arbiters); - servers = servers.concat(this.passives); - - // Check if it's active and this is just a failed connection attempt - for (var i = 0; i < servers.length; i++) { - if ( - !options.force && - servers[i].equals(server) && - servers[i].isConnected && - servers[i].isConnected() - ) { - return; - } - } - - // If we have it in the set remove it - if (this.set[serverName]) { - this.set[serverName].type = ServerType.Unknown; - this.set[serverName].electionId = null; - this.set[serverName].setName = null; - this.set[serverName].setVersion = null; - } - - // Remove type - var removeType = null; - - // Remove from any lists - if (this.primary && this.primary.equals(server)) { - this.primary = null; - this.topologyType = TopologyType.ReplicaSetNoPrimary; - removeType = 'primary'; - } - - // Remove from any other server lists - removeType = removeFrom(server, this.secondaries) ? 'secondary' : removeType; - removeType = removeFrom(server, this.arbiters) ? 'arbiter' : removeType; - removeType = removeFrom(server, this.passives) ? 'secondary' : removeType; - removeFrom(server, this.ghosts); - removeFrom(server, this.unknownServers); - - // Push to unknownServers - this.unknownServers.push(serverName); - - // Do we have a removeType - if (removeType) { - this.emit('left', removeType, server); - } -}; - -const isArbiter = ismaster => ismaster.arbiterOnly && ismaster.setName; - -ReplSetState.prototype.update = function(server) { - var self = this; - // Get the current ismaster - var ismaster = server.lastIsMaster(); - - // Get the server name and lowerCase it - var serverName = server.name.toLowerCase(); - - // - // Add any hosts - // - if (ismaster) { - // Join all the possible new hosts - var hosts = Array.isArray(ismaster.hosts) ? ismaster.hosts : []; - hosts = hosts.concat(Array.isArray(ismaster.arbiters) ? ismaster.arbiters : []); - hosts = hosts.concat(Array.isArray(ismaster.passives) ? 
ismaster.passives : []); - hosts = hosts.map(function(s) { - return s.toLowerCase(); - }); - - // Add all hosts as unknownServers - for (var i = 0; i < hosts.length; i++) { - // Add to the list of unknown server - if ( - this.unknownServers.indexOf(hosts[i]) === -1 && - (!this.set[hosts[i]] || this.set[hosts[i]].type === ServerType.Unknown) - ) { - this.unknownServers.push(hosts[i].toLowerCase()); - } - - if (!this.set[hosts[i]]) { - this.set[hosts[i]] = { - type: ServerType.Unknown, - electionId: null, - setName: null, - setVersion: null - }; - } - } - } - - // - // Unknown server - // - if (!ismaster && !inList(ismaster, server, this.unknownServers)) { - self.set[serverName] = { - type: ServerType.Unknown, - setVersion: null, - electionId: null, - setName: null - }; - // Update set information about the server instance - self.set[serverName].type = ServerType.Unknown; - self.set[serverName].electionId = ismaster ? ismaster.electionId : ismaster; - self.set[serverName].setName = ismaster ? ismaster.setName : ismaster; - self.set[serverName].setVersion = ismaster ? ismaster.setVersion : ismaster; - - if (self.unknownServers.indexOf(server.name) === -1) { - self.unknownServers.push(serverName); - } - - // Set the topology - return false; - } - - // Update logicalSessionTimeoutMinutes - if (ismaster.logicalSessionTimeoutMinutes !== undefined && !isArbiter(ismaster)) { - if ( - self.logicalSessionTimeoutMinutes === undefined || - ismaster.logicalSessionTimeoutMinutes === null - ) { - self.logicalSessionTimeoutMinutes = ismaster.logicalSessionTimeoutMinutes; - } else { - self.logicalSessionTimeoutMinutes = Math.min( - self.logicalSessionTimeoutMinutes, - ismaster.logicalSessionTimeoutMinutes - ); - } - } - - // - // Is this a mongos - // - if (ismaster && ismaster.msg === 'isdbgrid') { - if (this.primary && this.primary.name === serverName) { - this.primary = null; - this.topologyType = TopologyType.ReplicaSetNoPrimary; - } - - return false; - } - - // A RSGhost instance - if (ismaster.isreplicaset) { - self.set[serverName] = { - type: ServerType.RSGhost, - setVersion: null, - electionId: null, - setName: ismaster.setName - }; - - if (this.primary && this.primary.name === serverName) { - this.primary = null; - } - - // Set the topology - this.topologyType = this.primary - ? TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - - // Set the topology - return false; - } - - // A RSOther instance - if ( - (ismaster.setName && ismaster.hidden) || - (ismaster.setName && - !ismaster.ismaster && - !ismaster.secondary && - !ismaster.arbiterOnly && - !ismaster.passive) - ) { - self.set[serverName] = { - type: ServerType.RSOther, - setVersion: null, - electionId: null, - setName: ismaster.setName - }; - - // Set the topology - this.topologyType = this.primary - ? TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - return false; - } - - // - // Standalone server, destroy and return - // - if (ismaster && ismaster.ismaster && !ismaster.setName) { - this.topologyType = this.primary ? 
TopologyType.ReplicaSetWithPrimary : TopologyType.Unknown;
-    this.remove(server, { force: true });
-    return false;
-  }
-
-  //
-  // Server in maintenance mode
-  //
-  if (ismaster && !ismaster.ismaster && !ismaster.secondary && !ismaster.arbiterOnly) {
-    this.remove(server, { force: true });
-    return false;
-  }
-
-  //
-  // If the .me field does not match the passed-in server
-  //
-  if (ismaster.me && ismaster.me.toLowerCase() !== serverName) {
-    if (this.logger.isWarn()) {
-      this.logger.warn(
-        f(
-          'the seedlist server was removed due to its address %s not matching its ismaster.me address %s',
-          server.name,
-          ismaster.me
-        )
-      );
-    }
-
-    // Delete from the set
-    delete this.set[serverName];
-    // Delete unknown servers
-    removeFrom(server, self.unknownServers);
-
-    // Destroy the instance
-    server.destroy({ force: true });
-
-    // Set the type of topology we have
-    if (this.primary && !this.primary.equals(server)) {
-      this.topologyType = TopologyType.ReplicaSetWithPrimary;
-    } else {
-      this.topologyType = TopologyType.ReplicaSetNoPrimary;
-    }
-
-    //
-    // We have a potential primary
-    //
-    if (!this.primary && ismaster.primary) {
-      this.set[ismaster.primary.toLowerCase()] = {
-        type: ServerType.PossiblePrimary,
-        setName: null,
-        electionId: null,
-        setVersion: null
-      };
-    }
-
-    return false;
-  }
-
-  //
-  // Primary handling
-  //
-  if (!this.primary && ismaster.ismaster && ismaster.setName) {
-    var ismasterElectionId = server.lastIsMaster().electionId;
-    if (this.setName && this.setName !== ismaster.setName) {
-      this.topologyType = TopologyType.ReplicaSetNoPrimary;
-      return new MongoError(
-        f(
-          'setName from ismaster does not match provided connection setName [%s] != [%s]',
-          ismaster.setName,
-          this.setName
-        )
-      );
-    }
-
-    if (!this.maxElectionId && ismasterElectionId) {
-      this.maxElectionId = ismasterElectionId;
-    } else if (this.maxElectionId && ismasterElectionId) {
-      var result = compareObjectIds(this.maxElectionId, ismasterElectionId);
-      // Get the electionIds
-      var ismasterSetVersion = server.lastIsMaster().setVersion;
-
-      if (result === 1) {
-        this.topologyType = TopologyType.ReplicaSetNoPrimary;
-        return false;
-      } else if (result === 0 && ismasterSetVersion) {
-        if (ismasterSetVersion < this.maxSetVersion) {
-          this.topologyType = TopologyType.ReplicaSetNoPrimary;
-          return false;
-        }
-      }
-
-      this.maxSetVersion = ismasterSetVersion;
-      this.maxElectionId = ismasterElectionId;
-    }
-
-    // Handle normalization of server names
-    var normalizedHosts = ismaster.hosts.map(function(x) {
-      return x.toLowerCase();
-    });
-    var locationIndex = normalizedHosts.indexOf(serverName);
-
-    // Validate that the server exists in the host list
-    if (locationIndex !== -1) {
-      self.primary = server;
-      self.set[serverName] = {
-        type: ServerType.RSPrimary,
-        setVersion: ismaster.setVersion,
-        electionId: ismaster.electionId,
-        setName: ismaster.setName
-      };
-
-      // Set the topology
-      this.topologyType = TopologyType.ReplicaSetWithPrimary;
-      if (ismaster.setName) this.setName = ismaster.setName;
-      removeFrom(server, self.unknownServers);
-      removeFrom(server, self.secondaries);
-      removeFrom(server, self.passives);
-      self.emit('joined', 'primary', server);
-    } else {
-      this.topologyType = TopologyType.ReplicaSetNoPrimary;
-    }
-
-    emitTopologyDescriptionChanged(self);
-    return true;
-  } else if (ismaster.ismaster && ismaster.setName) {
-    // Get the electionIds
-    var currentElectionId = self.set[self.primary.name.toLowerCase()].electionId;
-    var currentSetVersion =
self.set[self.primary.name.toLowerCase()].setVersion; - var currentSetName = self.set[self.primary.name.toLowerCase()].setName; - ismasterElectionId = server.lastIsMaster().electionId; - ismasterSetVersion = server.lastIsMaster().setVersion; - var ismasterSetName = server.lastIsMaster().setName; - - // Is it the same server instance - if (this.primary.equals(server) && currentSetName === ismasterSetName) { - return false; - } - - // If we do not have the same rs name - if (currentSetName && currentSetName !== ismasterSetName) { - if (!this.primary.equals(server)) { - this.topologyType = TopologyType.ReplicaSetWithPrimary; - } else { - this.topologyType = TopologyType.ReplicaSetNoPrimary; - } - - return false; - } - - // Check if we need to replace the server - if (currentElectionId && ismasterElectionId) { - result = compareObjectIds(currentElectionId, ismasterElectionId); - - if (result === 1) { - return false; - } else if (result === 0 && currentSetVersion > ismasterSetVersion) { - return false; - } - } else if (!currentElectionId && ismasterElectionId && ismasterSetVersion) { - if (ismasterSetVersion < this.maxSetVersion) { - return false; - } - } - - if (!this.maxElectionId && ismasterElectionId) { - this.maxElectionId = ismasterElectionId; - } else if (this.maxElectionId && ismasterElectionId) { - result = compareObjectIds(this.maxElectionId, ismasterElectionId); - - if (result === 1) { - return false; - } else if (result === 0 && currentSetVersion && ismasterSetVersion) { - if (ismasterSetVersion < this.maxSetVersion) { - return false; - } - } else { - if (ismasterSetVersion < this.maxSetVersion) { - return false; - } - } - - this.maxElectionId = ismasterElectionId; - this.maxSetVersion = ismasterSetVersion; - } else { - this.maxSetVersion = ismasterSetVersion; - } - - // Modify the entry to unknown - self.set[self.primary.name.toLowerCase()] = { - type: ServerType.Unknown, - setVersion: null, - electionId: null, - setName: null - }; - - // Signal primary left - self.emit('left', 'primary', this.primary); - // Destroy the instance - self.primary.destroy({ force: true }); - // Set the new instance - self.primary = server; - // Set the set information - self.set[serverName] = { - type: ServerType.RSPrimary, - setVersion: ismaster.setVersion, - electionId: ismaster.electionId, - setName: ismaster.setName - }; - - // Set the topology - this.topologyType = TopologyType.ReplicaSetWithPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - removeFrom(server, self.secondaries); - removeFrom(server, self.passives); - self.emit('joined', 'primary', server); - emitTopologyDescriptionChanged(self); - return true; - } - - // A possible instance - if (!this.primary && ismaster.primary) { - self.set[ismaster.primary.toLowerCase()] = { - type: ServerType.PossiblePrimary, - setVersion: null, - electionId: null, - setName: null - }; - } - - // - // Secondary handling - // - if ( - ismaster.secondary && - ismaster.setName && - !inList(ismaster, server, this.secondaries) && - this.setName && - this.setName === ismaster.setName - ) { - addToList(self, ServerType.RSSecondary, ismaster, server, this.secondaries); - // Set the topology - this.topologyType = this.primary - ? 
TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - - // Remove primary - if (this.primary && this.primary.name.toLowerCase() === serverName) { - server.destroy({ force: true }); - this.primary = null; - self.emit('left', 'primary', server); - } - - // Emit secondary joined replicaset - self.emit('joined', 'secondary', server); - emitTopologyDescriptionChanged(self); - return true; - } - - // - // Arbiter handling - // - if ( - isArbiter(ismaster) && - !inList(ismaster, server, this.arbiters) && - this.setName && - this.setName === ismaster.setName - ) { - addToList(self, ServerType.RSArbiter, ismaster, server, this.arbiters); - // Set the topology - this.topologyType = this.primary - ? TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - self.emit('joined', 'arbiter', server); - emitTopologyDescriptionChanged(self); - return true; - } - - // - // Passive handling - // - if ( - ismaster.passive && - ismaster.setName && - !inList(ismaster, server, this.passives) && - this.setName && - this.setName === ismaster.setName - ) { - addToList(self, ServerType.RSSecondary, ismaster, server, this.passives); - // Set the topology - this.topologyType = this.primary - ? TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - - // Remove primary - if (this.primary && this.primary.name.toLowerCase() === serverName) { - server.destroy({ force: true }); - this.primary = null; - self.emit('left', 'primary', server); - } - - self.emit('joined', 'secondary', server); - emitTopologyDescriptionChanged(self); - return true; - } - - // - // Remove the primary - // - if (this.set[serverName] && this.set[serverName].type === ServerType.RSPrimary) { - self.emit('left', 'primary', this.primary); - this.primary.destroy({ force: true }); - this.primary = null; - this.topologyType = TopologyType.ReplicaSetNoPrimary; - return false; - } - - this.topologyType = this.primary - ? 
TopologyType.ReplicaSetWithPrimary
-    : TopologyType.ReplicaSetNoPrimary;
-  return false;
-};
-
-/**
- * Recalculate a single server's max staleness
- * @method
- */
-ReplSetState.prototype.updateServerMaxStaleness = function(server, haInterval) {
-  // Locate the max secondary lastWriteDate
-  var max = 0;
-  // Go over all secondaries
-  for (var i = 0; i < this.secondaries.length; i++) {
-    max = Math.max(max, this.secondaries[i].lastWriteDate);
-  }
-
-  // Perform this server's staleness calculation
-  if (server.ismaster.maxWireVersion >= 5 && server.ismaster.secondary && this.hasPrimary()) {
-    server.staleness =
-      server.lastUpdateTime -
-      server.lastWriteDate -
-      (this.primary.lastUpdateTime - this.primary.lastWriteDate) +
-      haInterval;
-  } else if (server.ismaster.maxWireVersion >= 5 && server.ismaster.secondary) {
-    server.staleness = max - server.lastWriteDate + haInterval;
-  }
-};
-
-/**
- * Recalculate all the staleness values for secondaries
- * @method
- */
-ReplSetState.prototype.updateSecondariesMaxStaleness = function(haInterval) {
-  for (var i = 0; i < this.secondaries.length; i++) {
-    this.updateServerMaxStaleness(this.secondaries[i], haInterval);
-  }
-};
-
-/**
- * Pick a server by the passed-in ReadPreference
- * @method
- * @param {ReadPreference} readPreference The ReadPreference instance to use
- */
-ReplSetState.prototype.pickServer = function(readPreference) {
-  // If no readPreference is set, default to primary
-  readPreference = readPreference || ReadPreference.primary;
-
-  // maxStalenessSeconds is not allowed with a primary read
-  if (readPreference.preference === 'primary' && readPreference.maxStalenessSeconds != null) {
-    return new MongoError('primary readPreference incompatible with maxStalenessSeconds');
-  }
-
-  // Check if we have any incompatible servers for maxStalenessSeconds
-  var allservers = this.primary ? [this.primary] : [];
-  allservers = allservers.concat(this.secondaries);
-
-  // Does any of the servers not support the right wire protocol version
-  // for maxStalenessSeconds when maxStalenessSeconds specified on readPreference.
Then error out - if (readPreference.maxStalenessSeconds != null) { - for (var i = 0; i < allservers.length; i++) { - if (allservers[i].ismaster.maxWireVersion < 5) { - return new MongoError( - 'maxStalenessSeconds not supported by at least one of the replicaset members' - ); - } - } - } - - // Do we have the nearest readPreference - if (readPreference.preference === 'nearest' && readPreference.maxStalenessSeconds == null) { - return pickNearest(this, readPreference); - } else if ( - readPreference.preference === 'nearest' && - readPreference.maxStalenessSeconds != null - ) { - return pickNearestMaxStalenessSeconds(this, readPreference); - } - - // Get all the secondaries - var secondaries = this.secondaries; - - // Check if we can satisfy and of the basic read Preferences - if (readPreference.equals(ReadPreference.secondary) && secondaries.length === 0) { - return new MongoError('no secondary server available'); - } - - if ( - readPreference.equals(ReadPreference.secondaryPreferred) && - secondaries.length === 0 && - this.primary == null - ) { - return new MongoError('no secondary or primary server available'); - } - - if (readPreference.equals(ReadPreference.primary) && this.primary == null) { - return new MongoError('no primary server available'); - } - - // Secondary preferred or just secondaries - if ( - readPreference.equals(ReadPreference.secondaryPreferred) || - readPreference.equals(ReadPreference.secondary) - ) { - if (secondaries.length > 0 && readPreference.maxStalenessSeconds == null) { - // Pick nearest of any other servers available - var server = pickNearest(this, readPreference); - // No server in the window return primary - if (server) { - return server; - } - } else if (secondaries.length > 0 && readPreference.maxStalenessSeconds != null) { - // Pick nearest of any other servers available - server = pickNearestMaxStalenessSeconds(this, readPreference); - // No server in the window return primary - if (server) { - return server; - } - } - - if (readPreference.equals(ReadPreference.secondaryPreferred)) { - return this.primary; - } - - return null; - } - - // Primary preferred - if (readPreference.equals(ReadPreference.primaryPreferred)) { - server = null; - - // We prefer the primary if it's available - if (this.primary) { - return this.primary; - } - - // Pick a secondary - if (secondaries.length > 0 && readPreference.maxStalenessSeconds == null) { - server = pickNearest(this, readPreference); - } else if (secondaries.length > 0 && readPreference.maxStalenessSeconds != null) { - server = pickNearestMaxStalenessSeconds(this, readPreference); - } - - // Did we find a server - if (server) return server; - } - - // Return the primary - return this.primary; -}; - -// -// Filter serves by tags -var filterByTags = function(readPreference, servers) { - if (readPreference.tags == null) return servers; - var filteredServers = []; - var tagsArray = Array.isArray(readPreference.tags) ? 
readPreference.tags : [readPreference.tags];
-
-  // Iterate over the tags
-  for (var j = 0; j < tagsArray.length; j++) {
-    var tags = tagsArray[j];
-
-    // Iterate over all the servers
-    for (var i = 0; i < servers.length; i++) {
-      var serverTag = servers[i].lastIsMaster().tags || {};
-
-      // Did we find a matching server
-      var found = true;
-      // Check if the server is valid
-      for (var name in tags) {
-        if (serverTag[name] !== tags[name]) {
-          found = false;
-        }
-      }
-
-      // Add to candidate list
-      if (found) {
-        filteredServers.push(servers[i]);
-      }
-    }
-  }
-
-  // Return the filtered servers
-  return filteredServers;
-};
-
-function pickNearestMaxStalenessSeconds(self, readPreference) {
-  // Only get primary and secondaries as seeds
-  var servers = [];
-
-  // Get the maxStalenessMS
-  var maxStalenessMS = readPreference.maxStalenessSeconds * 1000;
-
-  // Error out unless maxStalenessMS is at least 90 seconds
-  if (maxStalenessMS < 90 * 1000) {
-    return new MongoError('maxStalenessSeconds must be set to at least 90 seconds');
-  }
-
-  // Add primary to list if not a secondary read preference
-  if (
-    self.primary &&
-    readPreference.preference !== 'secondary' &&
-    readPreference.preference !== 'secondaryPreferred'
-  ) {
-    servers.push(self.primary);
-  }
-
-  // Add all the secondaries
-  for (var i = 0; i < self.secondaries.length; i++) {
-    servers.push(self.secondaries[i]);
-  }
-
-  // If no servers were seeded and this is not a secondaryPreferred read, add the primary
-  if (self.primary && servers.length === 0 && readPreference.preference !== 'secondaryPreferred') {
-    servers.push(self.primary);
-  }
-
-  // Filter by tags
-  servers = filterByTags(readPreference, servers);
-
-  // Filter by staleness
-  servers = servers.filter(function(s) {
-    return s.staleness <= maxStalenessMS;
-  });
-
-  // Sort by round-trip time
-  servers.sort(function(a, b) {
-    return a.lastIsMasterMS - b.lastIsMasterMS;
-  });
-
-  // No servers remain; the caller decides the fallback
-  if (servers.length === 0) {
-    return null;
-  }
-
-  // Ensure index does not overflow the number of available servers
-  self.index = self.index % servers.length;
-
-  // Get the server
-  var server = servers[self.index];
-  // Add to the index
-  self.index = self.index + 1;
-  // Return the selected server from the sorted and filtered list
-  return server;
-}
-
-function pickNearest(self, readPreference) {
-  // Only get primary and secondaries as seeds
-  var servers = [];
-
-  // Add primary to list if not a secondary read preference
-  if (
-    self.primary &&
-    readPreference.preference !== 'secondary' &&
-    readPreference.preference !== 'secondaryPreferred'
-  ) {
-    servers.push(self.primary);
-  }
-
-  // Add all the secondaries
-  for (var i = 0; i < self.secondaries.length; i++) {
-    servers.push(self.secondaries[i]);
-  }
-
-  // If no servers were seeded and this is not a secondaryPreferred read, add the primary
-  if (servers.length === 0 && self.primary && readPreference.preference !== 'secondaryPreferred') {
-    servers.push(self.primary);
-  }
-
-  // Filter by tags
-  servers = filterByTags(readPreference, servers);
-
-  // Sort by round-trip time
-  servers.sort(function(a, b) {
-    return a.lastIsMasterMS - b.lastIsMasterMS;
-  });
-
-  // Locate lowest time (picked servers are lowest time + acceptable latency margin)
-  var lowest = servers.length > 0 ?
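The `staleness` values filtered on above come from `updateServerMaxStaleness` earlier in this file: with a primary, a secondary's staleness is how much further its last write lags behind its last check than the primary's does, plus the heartbeat interval; without a primary, it is the lag behind the freshest secondary. As plain functions (all times in milliseconds; a sketch, not the driver's code):

// Staleness estimate when a primary is known
function stalenessWithPrimary(s, primary, haIntervalMs) {
  return (
    s.lastUpdateTime - s.lastWriteDate -
    (primary.lastUpdateTime - primary.lastWriteDate) +
    haIntervalMs
  );
}

// Staleness estimate without a primary: lag behind the freshest secondary
function stalenessNoPrimary(s, maxLastWriteDate, haIntervalMs) {
  return maxLastWriteDate - s.lastWriteDate + haIntervalMs;
}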
servers[0].lastIsMasterMS : 0; - - // Filter by latency - servers = servers.filter(function(s) { - return s.lastIsMasterMS <= lowest + self.acceptableLatency; - }); - - // No servers, default to primary - if (servers.length === 0) { - return null; - } - - // Ensure index does not overflow the number of available servers - self.index = self.index % servers.length; - // Get the server - var server = servers[self.index]; - // Add to the index - self.index = self.index + 1; - // Return the first server of the sorted and filtered list - return server; -} - -function inList(ismaster, server, list) { - for (var i = 0; i < list.length; i++) { - if (list[i] && list[i].name && list[i].name.toLowerCase() === server.name.toLowerCase()) - return true; - } - - return false; -} - -function addToList(self, type, ismaster, server, list) { - var serverName = server.name.toLowerCase(); - // Update set information about the server instance - self.set[serverName].type = type; - self.set[serverName].electionId = ismaster ? ismaster.electionId : ismaster; - self.set[serverName].setName = ismaster ? ismaster.setName : ismaster; - self.set[serverName].setVersion = ismaster ? ismaster.setVersion : ismaster; - // Add to the list - list.push(server); -} - -function compareObjectIds(id1, id2) { - var a = Buffer.from(id1.toHexString(), 'hex'); - var b = Buffer.from(id2.toHexString(), 'hex'); - - if (a === b) { - return 0; - } - - if (typeof Buffer.compare === 'function') { - return Buffer.compare(a, b); - } - - var x = a.length; - var y = b.length; - var len = Math.min(x, y); - - for (var i = 0; i < len; i++) { - if (a[i] !== b[i]) { - break; - } - } - - if (i !== len) { - x = a[i]; - y = b[i]; - } - - return x < y ? -1 : y < x ? 1 : 0; -} - -function removeFrom(server, list) { - for (var i = 0; i < list.length; i++) { - if (list[i].equals && list[i].equals(server)) { - list.splice(i, 1); - return true; - } else if (typeof list[i] === 'string' && list[i].toLowerCase() === server.name.toLowerCase()) { - list.splice(i, 1); - return true; - } - } - - return false; -} - -function emitTopologyDescriptionChanged(self) { - if (self.listeners('topologyDescriptionChanged').length > 0) { - var topology = 'Unknown'; - var setName = self.setName; - - if (self.hasPrimaryAndSecondary()) { - topology = 'ReplicaSetWithPrimary'; - } else if (!self.hasPrimary() && self.hasSecondary()) { - topology = 'ReplicaSetNoPrimary'; - } - - // Generate description - var description = { - topologyType: topology, - setName: setName, - servers: [] - }; - - // Add the primary to the list - if (self.hasPrimary()) { - var desc = self.primary.getDescription(); - desc.type = 'RSPrimary'; - description.servers.push(desc); - } - - // Add all the secondaries - description.servers = description.servers.concat( - self.secondaries.map(function(x) { - var description = x.getDescription(); - description.type = 'RSSecondary'; - return description; - }) - ); - - // Add all the arbiters - description.servers = description.servers.concat( - self.arbiters.map(function(x) { - var description = x.getDescription(); - description.type = 'RSArbiter'; - return description; - }) - ); - - // Add all the passives - description.servers = description.servers.concat( - self.passives.map(function(x) { - var description = x.getDescription(); - description.type = 'RSSecondary'; - return description; - }) - ); - - // Get the diff - var diffResult = diff(self.replicasetDescription, description); - - // Create the result - var result = { - topologyId: self.id, - previousDescription: 
self.replicasetDescription, - newDescription: description, - diff: diffResult - }; - - // Emit the topologyDescription change - // if(diffResult.servers.length > 0) { - self.emit('topologyDescriptionChanged', result); - // } - - // Set the new description - self.replicasetDescription = description; - } -} - -module.exports = ReplSetState; diff --git a/lib/core/topologies/server.js b/lib/core/topologies/server.js deleted file mode 100644 index 6f6de12eaa7..00000000000 --- a/lib/core/topologies/server.js +++ /dev/null @@ -1,990 +0,0 @@ -'use strict'; - -var inherits = require('util').inherits, - f = require('util').format, - EventEmitter = require('events').EventEmitter, - ReadPreference = require('./read_preference'), - Logger = require('../connection/logger'), - debugOptions = require('../connection/utils').debugOptions, - retrieveBSON = require('../connection/utils').retrieveBSON, - Pool = require('../connection/pool'), - MongoError = require('../error').MongoError, - MongoNetworkError = require('../error').MongoNetworkError, - wireProtocol = require('../wireprotocol'), - CoreCursor = require('../cursor').CoreCursor, - sdam = require('./shared'), - createCompressionInfo = require('./shared').createCompressionInfo, - resolveClusterTime = require('./shared').resolveClusterTime, - SessionMixins = require('./shared').SessionMixins, - relayEvents = require('../utils').relayEvents; - -const collationNotSupported = require('../utils').collationNotSupported; -const makeClientMetadata = require('../utils').makeClientMetadata; - -// Used for filtering out fields for loggin -var debugFields = [ - 'reconnect', - 'reconnectTries', - 'reconnectInterval', - 'emitError', - 'cursorFactory', - 'host', - 'port', - 'size', - 'keepAlive', - 'keepAliveInitialDelay', - 'noDelay', - 'connectionTimeout', - 'checkServerIdentity', - 'socketTimeout', - 'ssl', - 'ca', - 'crl', - 'cert', - 'key', - 'rejectUnauthorized', - 'promoteLongs', - 'promoteValues', - 'promoteBuffers', - 'servername' -]; - -// Server instance id -var id = 0; -var serverAccounting = false; -var servers = {}; -var BSON = retrieveBSON(); - -function topologyId(server) { - return server.s.parent == null ? server.id : server.s.parent.id; -} - -/** - * Creates a new Server instance - * @class - * @param {boolean} [options.reconnect=true] Server will attempt to reconnect on loss of connection - * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times - * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries - * @param {number} [options.monitoring=true] Enable the server state monitoring (calling ismaster at monitoringInterval) - * @param {number} [options.monitoringInterval=5000] The interval of calling ismaster when monitoring is enabled. 
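Interrupting the options list briefly: the `debugFields` array near the top of this deleted file feeds the `debugOptions` helper so that log output carries only the listed connection settings rather than the full options object (keys, certificates, and so on). A sketch of that projection, assuming it is a plain key whitelist:

// Copy only the whitelisted keys from `options` (assumed behavior of the
// debugOptions helper; illustrative, not the driver's implementation).
function projectOptions(fields, options) {
  var projected = {};
  fields.forEach(function(key) {
    if (key in options) projected[key] = options[key];
  });
  return projected;
}

projectOptions(['host', 'port'], { host: 'localhost', port: 27017, key: '<pem>' });
// => { host: 'localhost', port: 27017 }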
- * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors - * @param {string} options.host The server host - * @param {number} options.port The server port - * @param {number} [options.size=5] Server connection pool size - * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled - * @param {boolean} [options.noDelay=true] TCP Connection no delay - * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting - * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting - * @param {boolean} [options.ssl=false] Use SSL for connection - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {Buffer} [options.ca] SSL Certificate store binary buffer - * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer - * @param {Buffer} [options.cert] SSL Certificate binary buffer - * @param {Buffer} [options.key] SSL Key file binary buffer - * @param {string} [options.passphrase] SSL Certificate pass phrase - * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates - * @param {string} [options.servername=null] String containing the server name requested via TLS SNI. - * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits - * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. - * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. - * @param {string} [options.appname=null] Application name, passed in on ismaster call and logged in mongod server logs. Maximum size 128 bytes. - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. - * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @return {Server} A cursor instance - * @fires Server#connect - * @fires Server#close - * @fires Server#error - * @fires Server#timeout - * @fires Server#parseError - * @fires Server#reconnect - * @fires Server#reconnectFailed - * @fires Server#serverHeartbeatStarted - * @fires Server#serverHeartbeatSucceeded - * @fires Server#serverHeartbeatFailed - * @fires Server#topologyOpening - * @fires Server#topologyClosed - * @fires Server#topologyDescriptionChanged - * @property {string} type the topology type. - * @property {string} parserType the parser type used (c++ or js). 
- */
-var Server = function(options) {
-  options = options || {};
-
-  // Add event listener
-  EventEmitter.call(this);
-
-  // Server instance id
-  this.id = id++;
-
-  // Internal state
-  this.s = {
-    // Options
-    options: Object.assign({ metadata: makeClientMetadata(options) }, options),
-    // Logger
-    logger: Logger('Server', options),
-    // Factory overrides
-    Cursor: options.cursorFactory || CoreCursor,
-    // BSON instance
-    bson:
-      options.bson ||
-      new BSON([
-        BSON.Binary,
-        BSON.Code,
-        BSON.DBRef,
-        BSON.Decimal128,
-        BSON.Double,
-        BSON.Int32,
-        BSON.Long,
-        BSON.Map,
-        BSON.MaxKey,
-        BSON.MinKey,
-        BSON.ObjectId,
-        BSON.BSONRegExp,
-        BSON.Symbol,
-        BSON.Timestamp
-      ]),
-    // Pool
-    pool: null,
-    // Disconnect handler
-    disconnectHandler: options.disconnectHandler,
-    // Monitor thread (keeps the connection alive)
-    monitoring: typeof options.monitoring === 'boolean' ? options.monitoring : true,
-    // Is the server in a topology
-    inTopology: !!options.parent,
-    // Monitoring timeout
-    monitoringInterval:
-      typeof options.monitoringInterval === 'number' ? options.monitoringInterval : 5000,
-    compression: { compressors: createCompressionInfo(options) },
-    // Optional parent topology
-    parent: options.parent
-  };
-
-  // If this is a single deployment we need to track the clusterTime here
-  if (!this.s.parent) {
-    this.s.clusterTime = null;
-  }
-
-  // Current ismaster
-  this.ismaster = null;
-  // Current ping time
-  this.lastIsMasterMS = -1;
-  // The monitoringProcessId
-  this.monitoringProcessId = null;
-  // Initial connection
-  this.initialConnect = true;
-  // Default type
-  this._type = 'server';
-
-  // Max staleness values
-  // Last time we updated the ismaster state
-  this.lastUpdateTime = 0;
-  // Last write time
-  this.lastWriteDate = 0;
-  // Staleness
-  this.staleness = 0;
-};
-
-inherits(Server, EventEmitter);
-Object.assign(Server.prototype, SessionMixins);
-
-Object.defineProperty(Server.prototype, 'type', {
-  enumerable: true,
-  get: function() {
-    return this._type;
-  }
-});
-
-Object.defineProperty(Server.prototype, 'parserType', {
-  enumerable: true,
-  get: function() {
-    return BSON.native ? 'c++' : 'js';
-  }
-});
-
-Object.defineProperty(Server.prototype, 'logicalSessionTimeoutMinutes', {
-  enumerable: true,
-  get: function() {
-    if (!this.ismaster) return null;
-    return this.ismaster.logicalSessionTimeoutMinutes || null;
-  }
-});
-
-Object.defineProperty(Server.prototype, 'clientMetadata', {
-  enumerable: true,
-  get: function() {
-    return this.s.options.metadata;
-  }
-});
-
-// In single server deployments we track the clusterTime directly on the topology, however
-// in Mongos and ReplSet deployments we instead need to delegate the clusterTime up to the
-// tracking objects so we can ensure we are gossiping the maximum time received from the
-// server.
-Object.defineProperty(Server.prototype, 'clusterTime', {
-  enumerable: true,
-  set: function(clusterTime) {
-    const settings = this.s.parent ? this.s.parent : this.s;
-    resolveClusterTime(settings, clusterTime);
-  },
-  get: function() {
-    const settings = this.s.parent ?
-
-Server.enableServerAccounting = function() {
-  serverAccounting = true;
-  servers = {};
-};
-
-Server.disableServerAccounting = function() {
-  serverAccounting = false;
-};
-
-Server.servers = function() {
-  return servers;
-};
-
-Object.defineProperty(Server.prototype, 'name', {
-  enumerable: true,
-  get: function() {
-    return this.s.options.host + ':' + this.s.options.port;
-  }
-});
-
-function disconnectHandler(self, type, ns, cmd, options, callback) {
-  // Topology is not connected, save the call in the provided store to be
-  // executed once the handler deems the topology has reconnected
-  if (
-    !self.s.pool.isConnected() &&
-    self.s.options.reconnect &&
-    self.s.disconnectHandler != null &&
-    !options.monitoring
-  ) {
-    self.s.disconnectHandler.add(type, ns, cmd, options, callback);
-    return true;
-  }
-
-  // If we have no connection, error
-  if (!self.s.pool.isConnected()) {
-    callback(new MongoError(f('no connection available to server %s', self.name)));
-    return true;
-  }
-}
-
-function monitoringProcess(self) {
-  return function() {
-    // Pool was destroyed, do not continue the process
-    if (self.s.pool.isDestroyed()) return;
-    // Emit monitoring process event
-    self.emit('monitoring', self);
-    // Perform ismaster call
-    // Get start time
-    var start = new Date().getTime();
-
-    // Execute the ismaster query
-    self.command(
-      'admin.$cmd',
-      { ismaster: true },
-      {
-        socketTimeout:
-          typeof self.s.options.connectionTimeout !== 'number'
-            ? 2000
-            : self.s.options.connectionTimeout,
-        monitoring: true
-      },
-      (err, result) => {
-        // Set initial lastIsMasterMS
-        self.lastIsMasterMS = new Date().getTime() - start;
-        if (self.s.pool.isDestroyed()) return;
-        // Update the ismaster view if we have a result
-        if (result) {
-          self.ismaster = result.result;
-        }
-        // Re-schedule the monitoring process
-        self.monitoringProcessId = setTimeout(monitoringProcess(self), self.s.monitoringInterval);
-      }
-    );
-  };
-}
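The monitor above re-arms itself with `setTimeout` after each `ismaster` round trip. A self-contained sketch of the same pattern, where `pingServer` is a hypothetical stand-in for the `ismaster` command:

```js
const { EventEmitter } = require('events');

// Hedged sketch of the self-rescheduling monitor pattern used above;
// `pingServer` is a hypothetical stand-in for the ismaster command.
function startMonitor(emitter, pingServer, intervalMS) {
  let timer;
  function monitor() {
    emitter.emit('monitoring');
    const start = Date.now();
    pingServer(() => {
      emitter.emit('heartbeat', Date.now() - start); // round-trip time, like lastIsMasterMS
      timer = setTimeout(monitor, intervalMS); // re-arm, like monitoringProcessId
    });
  }
  monitor();
  return () => clearTimeout(timer); // stop function, mirrors clearTimeout in destroy()
}

const events = new EventEmitter();
events.on('heartbeat', ms => console.log(`heartbeat took ${ms}ms`));
const stop = startMonitor(events, cb => setImmediate(cb), 5000);
setTimeout(stop, 12000); // let a couple of heartbeats fire, then stop
```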
-
-var eventHandler = function(self, event) {
-  return function(err, conn) {
-    // Log the received event if the logger is in info mode
-    if (self.s.logger.isInfo()) {
-      var object = err instanceof MongoError ? JSON.stringify(err) : {};
-      self.s.logger.info(
-        f('server %s fired event %s with message %s', self.name, event, object)
-      );
-    }
-
-    // Handle connect event
-    if (event === 'connect') {
-      self.initialConnect = false;
-      self.ismaster = conn.ismaster;
-      self.lastIsMasterMS = conn.lastIsMasterMS;
-      if (conn.agreedCompressor) {
-        self.s.pool.options.agreedCompressor = conn.agreedCompressor;
-      }
-
-      if (conn.zlibCompressionLevel) {
-        self.s.pool.options.zlibCompressionLevel = conn.zlibCompressionLevel;
-      }
-
-      if (conn.ismaster.$clusterTime) {
-        const $clusterTime = conn.ismaster.$clusterTime;
-        self.clusterTime = $clusterTime;
-      }
-
-      // It's a proxy; change the type so the wire protocol
-      // will send $readPreference
-      if (self.ismaster.msg === 'isdbgrid') {
-        self._type = 'mongos';
-      }
-
-      // Start the monitoring process if enabled
-      if (self.s.monitoring) {
-        self.monitoringProcessId = setTimeout(monitoringProcess(self), self.s.monitoringInterval);
-      }
-
-      // Emit server description changed if something listening
-      sdam.emitServerDescriptionChanged(self, {
-        address: self.name,
-        arbiters: [],
-        hosts: [],
-        passives: [],
-        type: sdam.getTopologyType(self)
-      });
-
-      if (!self.s.inTopology) {
-        // Emit topology description changed if something listening
-        sdam.emitTopologyDescriptionChanged(self, {
-          topologyType: 'Single',
-          servers: [
-            {
-              address: self.name,
-              arbiters: [],
-              hosts: [],
-              passives: [],
-              type: sdam.getTopologyType(self)
-            }
-          ]
-        });
-      }
-
-      // Log the ismaster if available
-      if (self.s.logger.isInfo()) {
-        self.s.logger.info(
-          f('server %s connected with ismaster [%s]', self.name, JSON.stringify(self.ismaster))
-        );
-      }
-
-      // Emit connect
-      self.emit('connect', self);
-    } else if (
-      event === 'error' ||
-      event === 'parseError' ||
-      event === 'close' ||
-      event === 'timeout' ||
-      event === 'reconnect' ||
-      event === 'attemptReconnect' ||
-      event === 'reconnectFailed'
-    ) {
-      // Remove server instance from accounting
-      if (
-        serverAccounting &&
-        ['close', 'timeout', 'error', 'parseError', 'reconnectFailed'].indexOf(event) !== -1
-      ) {
-        // Emit topology opening event if not in topology
-        if (!self.s.inTopology) {
-          self.emit('topologyOpening', { topologyId: self.id });
-        }
-
-        delete servers[self.id];
-      }
-
-      if (event === 'close') {
-        // Closing emits a server description changed event going to unknown.
-        sdam.emitServerDescriptionChanged(self, {
-          address: self.name,
-          arbiters: [],
-          hosts: [],
-          passives: [],
-          type: 'Unknown'
-        });
-      }
-
-      // Reconnect failed, return error
-      if (event === 'reconnectFailed') {
-        self.emit('reconnectFailed', err);
-        // Emit error if any listeners
-        if (self.listeners('error').length > 0) {
-          self.emit('error', err);
-        }
-        // Terminate
-        return;
-      }
-
-      // On first connect fail
-      if (
-        ['disconnected', 'connecting'].indexOf(self.s.pool.state) !== -1 &&
-        self.initialConnect &&
-        ['close', 'timeout', 'error', 'parseError'].indexOf(event) !== -1
-      ) {
-        self.initialConnect = false;
-        return self.emit(
-          'error',
-          new MongoNetworkError(
-            f('failed to connect to server [%s] on first connect [%s]', self.name, err)
-          )
-        );
-      }
-
-      // Reconnect event, emit the server
-      if (event === 'reconnect') {
-        // Reconnecting emits a server description changed event going from unknown to the
-        // current server type.
-        sdam.emitServerDescriptionChanged(self, {
-          address: self.name,
-          arbiters: [],
-          hosts: [],
-          passives: [],
-          type: sdam.getTopologyType(self)
-        });
-        return self.emit(event, self);
-      }
-
-      // Emit the event
-      self.emit(event, err);
-    }
-  };
-};
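The SDAM-style events wired up above can be observed from user code. A hedged sketch, assuming `server` is an instance of this Server class; payload shapes are illustrative, see the `@fires` list in the constructor docs:

```js
// Hedged sketch: `server` is assumed to be an instance of this Server class.
['serverOpening', 'serverDescriptionChanged', 'topologyOpening', 'topologyClosed'].forEach(name => {
  server.on(name, event => console.log(`SDAM event ${name}:`, event));
});
```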
-
-/**
- * Initiate server connect
- */
-Server.prototype.connect = function(options) {
-  var self = this;
-  options = options || {};
-
-  // Set the connections
-  if (serverAccounting) servers[this.id] = this;
-
-  // Do not allow connect to be called on anything that's not disconnected
-  if (self.s.pool && !self.s.pool.isDisconnected() && !self.s.pool.isDestroyed()) {
-    throw new MongoError(f('server instance in invalid state %s', self.s.pool.state));
-  }
-
-  // Create a pool
-  self.s.pool = new Pool(this, Object.assign(self.s.options, options, { bson: this.s.bson }));
-
-  // Set up listeners
-  self.s.pool.on('close', eventHandler(self, 'close'));
-  self.s.pool.on('error', eventHandler(self, 'error'));
-  self.s.pool.on('timeout', eventHandler(self, 'timeout'));
-  self.s.pool.on('parseError', eventHandler(self, 'parseError'));
-  self.s.pool.on('connect', eventHandler(self, 'connect'));
-  self.s.pool.on('reconnect', eventHandler(self, 'reconnect'));
-  self.s.pool.on('reconnectFailed', eventHandler(self, 'reconnectFailed'));
-
-  // Set up listeners for command monitoring
-  relayEvents(self.s.pool, self, ['commandStarted', 'commandSucceeded', 'commandFailed']);
-
-  // Emit topology opening event if not in topology
-  if (!self.s.inTopology) {
-    this.emit('topologyOpening', { topologyId: topologyId(self) });
-  }
-
-  // Emit opening server event
-  self.emit('serverOpening', { topologyId: topologyId(self), address: self.name });
-
-  self.s.pool.connect();
-};
-
-/**
- * Authenticate the topology.
- * @method
- * @param {MongoCredentials} credentials The credentials for authentication we are using
- * @param {authResultCallback} callback A callback function
- */
-Server.prototype.auth = function(credentials, callback) {
-  if (typeof callback === 'function') callback(null, null);
-};
-
-/**
- * Get the server description
- * @method
- * @return {object}
- */
-Server.prototype.getDescription = function() {
-  var ismaster = this.ismaster || {};
-  var description = {
-    type: sdam.getTopologyType(this),
-    address: this.name
-  };
-
-  // Add fields if available
-  if (ismaster.hosts) description.hosts = ismaster.hosts;
-  if (ismaster.arbiters) description.arbiters = ismaster.arbiters;
-  if (ismaster.passives) description.passives = ismaster.passives;
-  if (ismaster.setName) description.setName = ismaster.setName;
-  return description;
-};
-
-/**
- * Returns the last known ismaster document for this server
- * @method
- * @return {object}
- */
-Server.prototype.lastIsMaster = function() {
-  return this.ismaster;
-};
-
-/**
- * Unref all connections belonging to this server
- * @method
- */
-Server.prototype.unref = function() {
-  this.s.pool.unref();
-};
-
-/**
- * Figure out if the server is connected
- * @method
- * @return {boolean}
- */
-Server.prototype.isConnected = function() {
-  if (!this.s.pool) return false;
-  return this.s.pool.isConnected();
-};
-
-/**
- * Figure out if the server instance was destroyed by calling destroy
- * @method
- * @return {boolean}
- */
-Server.prototype.isDestroyed = function() {
-  if (!this.s.pool) return false;
-  return this.s.pool.isDestroyed();
-};
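A hedged usage sketch of the connect flow above; `Server` is the topology class in this file, and the host/port values are placeholders:

```js
// Hedged sketch: host and port are placeholder values.
const server = new Server({ host: 'localhost', port: 27017 });

server.on('connect', s => {
  console.log(`connected to ${s.name}`); // e.g. "localhost:27017"
});

server.on('error', err => {
  console.error('first connect failed', err);
});

server.connect();
```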
-
-function basicWriteValidations(self) {
-  if (!self.s.pool) return new MongoError('server instance is not connected');
-  if (self.s.pool.isDestroyed()) return new MongoError('server instance pool was destroyed');
-}
-
-function basicReadValidations(self, options) {
-  const result = basicWriteValidations(self, options);
-  if (result) {
-    return result;
-  }
-
-  if (options.readPreference && !(options.readPreference instanceof ReadPreference)) {
-    throw new Error('readPreference must be an instance of ReadPreference');
-  }
-}
-
-/**
- * Execute a command
- * @method
- * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
- * @param {object} cmd The command hash
- * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
- * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
- * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys.
- * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
- * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document.
- * @param {ClientSession} [options.session=null] Session to use for the operation
- * @param {opResultCallback} callback A callback function
- */
-Server.prototype.command = function(ns, cmd, options, callback) {
-  var self = this;
-  if (typeof options === 'function') {
-    (callback = options), (options = {});
-  }
-
-  var result = basicReadValidations(self, options);
-  if (result) return callback(result);
-
-  // Clone the options
-  options = Object.assign({}, options, { wireProtocolCommand: false });
-
-  // Debug log
-  if (self.s.logger.isDebug())
-    self.s.logger.debug(
-      f(
-        'executing command [%s] against %s',
-        JSON.stringify({
-          ns: ns,
-          cmd: cmd,
-          options: debugOptions(debugFields, options)
-        }),
-        self.name
-      )
-    );
-
-  // If we are not connected or have a disconnectHandler specified
-  if (disconnectHandler(self, 'command', ns, cmd, options, callback)) return;
-
-  // error if collation not supported
-  if (collationNotSupported(this, cmd)) {
-    return callback(new MongoError(`server ${this.name} does not support collation`));
-  }
-
-  wireProtocol.command(self, ns, cmd, options, callback);
-};
-
-/**
- * Execute a query against the server
- *
- * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
- * @param {object} cmd The command document for the query
- * @param {object} options Optional settings
- * @param {function} callback
- */
-Server.prototype.query = function(ns, cmd, cursorState, options, callback) {
-  wireProtocol.query(this, ns, cmd, cursorState, options, callback);
-};
-
-/**
- * Execute a `getMore` against the server
- *
- * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
- * @param {object} cursorState State data associated with the cursor calling this method
- * @param {object} options Optional settings
- * @param {function} callback
- */
-Server.prototype.getMore = function(ns, cursorState, batchSize, options, callback) {
-  wireProtocol.getMore(this, ns, cursorState, batchSize, options, callback);
-};
-
-/**
- * Execute a `killCursors` command against the server
- *
- * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
- * @param {object} cursorState State data associated with the cursor calling this method
- * @param {function} callback
- */
-Server.prototype.killCursors = function(ns, cursorState, callback) {
-  wireProtocol.killCursors(this, ns, cursorState, callback);
-};
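A hedged sketch of issuing a raw command through the method above, once the server has emitted `connect` (the monitoring loop earlier uses the same call shape):

```js
// Hedged sketch: assumes `server` is a connected instance of this Server class.
server.command('admin.$cmd', { ismaster: true }, (err, result) => {
  if (err) throw err;
  // result.result holds the server reply document
  console.log('maxWireVersion:', result.result.maxWireVersion);
});
```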
-
-/**
- * Insert one or more documents
- * @method
- * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
- * @param {array} ops An array of documents to insert
- * @param {boolean} [options.ordered=true] Execute in order or out of order
- * @param {object} [options.writeConcern={}] Write concern for the operation
- * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
- * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
- * @param {ClientSession} [options.session=null] Session to use for the operation
- * @param {opResultCallback} callback A callback function
- */
-Server.prototype.insert = function(ns, ops, options, callback) {
-  var self = this;
-  if (typeof options === 'function') {
-    (callback = options), (options = {});
-  }
-
-  var result = basicWriteValidations(self, options);
-  if (result) return callback(result);
-
-  // If we are not connected or have a disconnectHandler specified
-  if (disconnectHandler(self, 'insert', ns, ops, options, callback)) return;
-
-  // Setup the docs as an array
-  ops = Array.isArray(ops) ? ops : [ops];
-
-  // Execute write
-  return wireProtocol.insert(self, ns, ops, options, callback);
-};
-
-/**
- * Perform one or more update operations
- * @method
- * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
- * @param {array} ops An array of updates
- * @param {boolean} [options.ordered=true] Execute in order or out of order
- * @param {object} [options.writeConcern={}] Write concern for the operation
- * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
- * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
- * @param {ClientSession} [options.session=null] Session to use for the operation
- * @param {opResultCallback} callback A callback function
- */
-Server.prototype.update = function(ns, ops, options, callback) {
-  var self = this;
-  if (typeof options === 'function') {
-    (callback = options), (options = {});
-  }
-
-  var result = basicWriteValidations(self, options);
-  if (result) return callback(result);
-
-  // If we are not connected or have a disconnectHandler specified
-  if (disconnectHandler(self, 'update', ns, ops, options, callback)) return;
-
-  // error if collation not supported
-  if (collationNotSupported(this, options)) {
-    return callback(new MongoError(`server ${this.name} does not support collation`));
-  }
-
-  // Setup the docs as an array
-  ops = Array.isArray(ops) ? ops : [ops];
-  // Execute write
-  return wireProtocol.update(self, ns, ops, options, callback);
-};
-
-/**
- * Perform one or more remove operations
- * @method
- * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
- * @param {array} ops An array of removes
- * @param {boolean} [options.ordered=true] Execute in order or out of order
- * @param {object} [options.writeConcern={}] Write concern for the operation
- * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
- * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
- * @param {ClientSession} [options.session=null] Session to use for the operation
- * @param {opResultCallback} callback A callback function
- */
-Server.prototype.remove = function(ns, ops, options, callback) {
-  var self = this;
-  if (typeof options === 'function') {
-    (callback = options), (options = {});
-  }
-
-  var result = basicWriteValidations(self, options);
-  if (result) return callback(result);
-
-  // If we are not connected or have a disconnectHandler specified
-  if (disconnectHandler(self, 'remove', ns, ops, options, callback)) return;
-
-  // error if collation not supported
-  if (collationNotSupported(this, options)) {
-    return callback(new MongoError(`server ${this.name} does not support collation`));
-  }
-
-  // Setup the docs as an array
-  ops = Array.isArray(ops) ? ops : [ops];
-  // Execute write
-  return wireProtocol.remove(self, ns, ops, options, callback);
-};
-
-/**
- * Get a new cursor
- * @method
- * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
- * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId
- * @param {object} [options] Options for the cursor
- * @param {object} [options.batchSize=0] Batchsize for the operation
- * @param {array} [options.documents=[]] Initial documents list for cursor
- * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
- * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
- * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
- * @param {ClientSession} [options.session=null] Session to use for the operation
- * @param {object} [options.topology] The internal topology of the created cursor
- * @returns {Cursor}
- */
-Server.prototype.cursor = function(ns, cmd, options) {
-  options = options || {};
-  const topology = options.topology || this;
-
-  // Set up final cursor type
-  var FinalCursor = options.cursorFactory || this.s.Cursor;
-
-  // Return the cursor
-  return new FinalCursor(topology, ns, cmd, options);
-};
-
-/**
- * Compare two server instances
- * @method
- * @param {Server} server Server to compare equality against
- * @return {boolean}
- */
-Server.prototype.equals = function(server) {
-  if (typeof server === 'string') return this.name.toLowerCase() === server.toLowerCase();
-  if (server.name) return this.name.toLowerCase() === server.name.toLowerCase();
-  return false;
-};
-
-/**
- * All raw connections
- * @method
- * @return {Connection[]}
- */
-Server.prototype.connections = function() {
-  return this.s.pool.allConnections();
-};
-
-/**
- * Selects a server
- * @method
- * @param {function} selector Unused
- * @param {ReadPreference} [options.readPreference] Unused
- * @param {ClientSession} [options.session] Unused
- * @return {Server}
- */
-Server.prototype.selectServer = function(selector, options, callback) {
-  if (typeof selector === 'function' && typeof callback === 'undefined')
-    (callback = selector), (selector = undefined), (options = {});
-  if (typeof options === 'function')
-    (callback = options), (options = selector), (selector = undefined);
-
-  callback(null, this);
-};
-
-var listeners = ['close', 'error', 'timeout', 'parseError', 'connect'];
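A hedged sketch of creating a cursor via the factory method above; the namespace and find command are placeholder values, and iteration assumes the core cursor's callback-style `next`:

```js
// Hedged sketch: assumes a connected `server`; the namespace and the
// find command document are placeholder values.
const cursor = server.cursor('db1.collection1', {
  find: 'collection1',
  filter: { a: 1 },
  batchSize: 2
});

cursor.next((err, doc) => {
  if (err) throw err;
  console.log('first document:', doc);
});
```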
-
-/**
- * Destroy the server connection
- * @method
- * @param {boolean} [options.emitClose=false] Emit close event on destroy
- * @param {boolean} [options.emitDestroy=false] Emit destroy event on destroy
- * @param {boolean} [options.force=false] Force destroy the pool
- */
-Server.prototype.destroy = function(options, callback) {
-  if (this._destroyed) {
-    if (typeof callback === 'function') callback(null, null);
-    return;
-  }
-
-  if (typeof options === 'function') {
-    callback = options;
-    options = {};
-  }
-
-  options = options || {};
-  var self = this;
-
-  // Remove from server accounting
-  if (serverAccounting) delete servers[this.id];
-
-  // Destroy the monitoring process if any
-  if (this.monitoringProcessId) {
-    clearTimeout(this.monitoringProcessId);
-  }
-
-  // No pool, return
-  if (!self.s.pool) {
-    this._destroyed = true;
-    if (typeof callback === 'function') callback(null, null);
-    return;
-  }
-
-  // Emit close event
-  if (options.emitClose) {
-    self.emit('close', self);
-  }
-
-  // Emit destroy event
-  if (options.emitDestroy) {
-    self.emit('destroy', self);
-  }
-
-  // Remove all listeners
-  listeners.forEach(function(event) {
-    self.s.pool.removeAllListeners(event);
-  });
-
-  // Emit server closed event
-  if (self.listeners('serverClosed').length > 0)
-    self.emit('serverClosed', { topologyId: topologyId(self), address: self.name });
-
-  // Emit topology closed event if not in topology
-  if (self.listeners('topologyClosed').length > 0 && !self.s.inTopology) {
-    self.emit('topologyClosed', { topologyId: topologyId(self) });
-  }
-
-  if (self.s.logger.isDebug()) {
-    self.s.logger.debug(f('destroy called on server %s', self.name));
-  }
-
-  // Destroy the pool
-  this.s.pool.destroy(options.force, callback);
-  this._destroyed = true;
-};
-
-/**
- * A server connect event, used to verify that the connection is up and running
- *
- * @event Server#connect
- * @type {Server}
- */
-
-/**
- * A server reconnect event, used to verify that the server topology has reconnected
- *
- * @event Server#reconnect
- * @type {Server}
- */
-
-/**
- * A server opening SDAM monitoring event
- *
- * @event Server#serverOpening
- * @type {object}
- */
-
-/**
- * A server closed SDAM monitoring event
- *
- * @event Server#serverClosed
- * @type {object}
- */
-
-/**
- * A server description SDAM change monitoring event
- *
- * @event Server#serverDescriptionChanged
- * @type {object}
- */
-
-/**
- * A topology open SDAM event
- *
- * @event Server#topologyOpening
- * @type {object}
- */
-
-/**
- * A topology closed SDAM event
- *
- * @event Server#topologyClosed
- * @type {object}
- */
-
-/**
- * A topology structure SDAM change event
- *
- * @event Server#topologyDescriptionChanged
- * @type {object}
- */
-
-/**
- * Server reconnect failed
- *
- * @event Server#reconnectFailed
- * @type {Error}
- */
-
-/**
- * Server connection pool closed
- *
- * @event Server#close
- * @type {object}
- */
-
-/**
- * Server connection pool caused an error
- *
- * @event Server#error
- * @type {Error}
- */
-
-/**
- * Server destroyed was called
- *
- * @event Server#destroy
- * @type {Server}
- */
-
-module.exports = Server;
diff --git a/lib/core/utils.js b/lib/core/utils.js
deleted file mode 100644
index ab778bf8db2..00000000000
--- a/lib/core/utils.js
+++ /dev/null
@@ -1,277 +0,0 @@
-'use strict';
-const os = require('os');
-const crypto = require('crypto');
-const requireOptional = require('require_optional');
-
-/**
- * Generate a UUIDv4
- */
-const uuidV4 = () => {
-  const result = crypto.randomBytes(16);
-  result[6] = (result[6] & 0x0f) | 0x40;
-  result[8] = (result[8] & 0x3f) | 0x80;
-  return result;
-};
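Note that `uuidV4` above returns the raw 16 bytes rather than a formatted string. A hedged sketch of the same technique with canonical formatting left to the caller:

```js
const crypto = require('crypto');

// Same technique as uuidV4 above: set the version (4) and variant bits.
const bytes = crypto.randomBytes(16);
bytes[6] = (bytes[6] & 0x0f) | 0x40;
bytes[8] = (bytes[8] & 0x3f) | 0x80;

// Formatting into the canonical 8-4-4-4-12 string is up to the caller.
const hex = bytes.toString('hex');
const uuid = `${hex.slice(0, 8)}-${hex.slice(8, 12)}-${hex.slice(12, 16)}-${hex.slice(16, 20)}-${hex.slice(20)}`;
console.log(uuid);
```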
-
-/**
- * Returns the duration calculated from two high resolution timers in milliseconds
- *
- * @param {Object} started A high resolution timestamp created from `process.hrtime()`
- * @returns {Number} The duration in milliseconds
- */
-const calculateDurationInMs = started => {
-  const hrtime = process.hrtime(started);
-  return (hrtime[0] * 1e9 + hrtime[1]) / 1e6;
-};
-
-/**
- * Relays events for a given listener and emitter
- *
- * @param {EventEmitter} listener the EventEmitter to listen to the events from
- * @param {EventEmitter} emitter the EventEmitter to relay the events to
- */
-function relayEvents(listener, emitter, events) {
-  events.forEach(eventName => listener.on(eventName, event => emitter.emit(eventName, event)));
-}
-
-function retrieveKerberos() {
-  let kerberos;
-
-  try {
-    kerberos = requireOptional('kerberos');
-  } catch (err) {
-    if (err.code === 'MODULE_NOT_FOUND') {
-      throw new Error('The `kerberos` module was not found. Please install it and try again.');
-    }
-
-    throw err;
-  }
-
-  return kerberos;
-}
-
-// Throw an error if an attempt to use EJSON is made when it is not installed
-const noEJSONError = function() {
-  throw new Error('The `mongodb-extjson` module was not found. Please install it and try again.');
-};
-
-// Facilitate loading EJSON optionally
-function retrieveEJSON() {
-  let EJSON = null;
-  try {
-    EJSON = requireOptional('mongodb-extjson');
-  } catch (error) {} // eslint-disable-line
-  if (!EJSON) {
-    EJSON = {
-      parse: noEJSONError,
-      deserialize: noEJSONError,
-      serialize: noEJSONError,
-      stringify: noEJSONError,
-      setBSONModule: noEJSONError,
-      BSON: noEJSONError
-    };
-  }
-
-  return EJSON;
-}
-
-/**
- * A helper function for determining `maxWireVersion` between legacy and new topology
- * instances
- *
- * @private
- * @param {(Topology|Server)} topologyOrServer
- */
-function maxWireVersion(topologyOrServer) {
-  if (topologyOrServer.ismaster) {
-    return topologyOrServer.ismaster.maxWireVersion;
-  }
-
-  if (typeof topologyOrServer.lastIsMaster === 'function') {
-    const lastIsMaster = topologyOrServer.lastIsMaster();
-    if (lastIsMaster) {
-      return lastIsMaster.maxWireVersion;
-    }
-  }
-
-  if (topologyOrServer.description) {
-    return topologyOrServer.description.maxWireVersion;
-  }
-
-  return null;
-}
-
-/**
- * Checks whether collation is unsupported by the server.
- *
- * @param {Server} [server] to check against
- * @param {object} [cmd] object where collation may be specified
- * @param {function} [callback] callback function
- * @return true if the server does not support collation
- */
-function collationNotSupported(server, cmd) {
-  return cmd && cmd.collation && maxWireVersion(server) < 5;
-}
-
-/**
- * Checks if a given value is a Promise
- *
- * @param {*} maybePromise
- * @return true if the provided value is a Promise
- */
-function isPromiseLike(maybePromise) {
-  return maybePromise && typeof maybePromise.then === 'function';
-}
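`relayEvents` above simply re-emits named events from one emitter on another; a self-contained hedged sketch, with the pool and server emitters as stand-ins:

```js
const { EventEmitter } = require('events');

// Same pattern as relayEvents above: forward named events between emitters.
function relayEvents(listener, emitter, events) {
  events.forEach(eventName => listener.on(eventName, event => emitter.emit(eventName, event)));
}

const pool = new EventEmitter(); // stands in for the connection pool
const server = new EventEmitter(); // stands in for the Server topology

relayEvents(pool, server, ['commandStarted', 'commandSucceeded']);
server.on('commandStarted', evt => console.log('started:', evt.commandName));
pool.emit('commandStarted', { commandName: 'ismaster' }); // logs "started: ismaster"
```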
-
-/**
- * Applies the function `eachFn` to each item in `arr`, in parallel.
- *
- * @param {array} arr an array of items to asynchronously iterate over
- * @param {function} eachFn A function to call on each item of the array. The callback signature is `(item, callback)`, where the callback indicates iteration is complete.
- * @param {function} callback The callback called after every item has been iterated
- */
-function eachAsync(arr, eachFn, callback) {
-  arr = arr || [];
-
-  let idx = 0;
-  let awaiting = 0;
-  for (idx = 0; idx < arr.length; ++idx) {
-    awaiting++;
-    eachFn(arr[idx], eachCallback);
-  }
-
-  if (awaiting === 0) {
-    callback();
-    return;
-  }
-
-  function eachCallback(err) {
-    awaiting--;
-    if (err) {
-      callback(err);
-      return;
-    }
-
-    if (idx === arr.length && awaiting <= 0) {
-      callback();
-    }
-  }
-}
-
-function isUnifiedTopology(topology) {
-  return topology.description != null;
-}
-
-function arrayStrictEqual(arr, arr2) {
-  if (!Array.isArray(arr) || !Array.isArray(arr2)) {
-    return false;
-  }
-
-  return arr.length === arr2.length && arr.every((elt, idx) => elt === arr2[idx]);
-}
-
-function tagsStrictEqual(tags, tags2) {
-  const tagsKeys = Object.keys(tags);
-  const tags2Keys = Object.keys(tags2);
-  return tagsKeys.length === tags2Keys.length && tagsKeys.every(key => tags2[key] === tags[key]);
-}
-
-function errorStrictEqual(lhs, rhs) {
-  if (lhs === rhs) {
-    return true;
-  }
-
-  if ((lhs == null && rhs != null) || (lhs != null && rhs == null)) {
-    return false;
-  }
-
-  if (lhs.constructor.name !== rhs.constructor.name) {
-    return false;
-  }
-
-  if (lhs.message !== rhs.message) {
-    return false;
-  }
-
-  return true;
-}
-
-function makeStateMachine(stateTable) {
-  return function stateTransition(target, newState) {
-    const legalStates = stateTable[target.s.state];
-    if (legalStates && legalStates.indexOf(newState) < 0) {
-      throw new TypeError(
-        `illegal state transition from [${target.s.state}] => [${newState}], allowed: [${legalStates}]`
-      );
-    }
-
-    target.emit('stateChanged', target.s.state, newState);
-    target.s.state = newState;
-  };
-}
-
-function makeClientMetadata(options) {
-  options = options || {};
-
-  const metadata = {
-    driver: {
-      name: 'nodejs',
-      version: require('../../package.json').version
-    },
-    os: {
-      type: os.type(),
-      name: process.platform,
-      architecture: process.arch,
-      version: os.release()
-    },
-    platform: `Node.js ${process.version}, ${os.endianness()} (${
-      options.useUnifiedTopology ? 'unified' : 'legacy'
-    })`
-  };
-
-  // support optionally provided wrapping driver info
-  if (options.driverInfo) {
-    if (options.driverInfo.name) {
-      metadata.driver.name = `${metadata.driver.name}|${options.driverInfo.name}`;
-    }
-
-    if (options.driverInfo.version) {
-      metadata.driver.version = `${metadata.driver.version}|${options.driverInfo.version}`;
-    }
-
-    if (options.driverInfo.platform) {
-      metadata.platform = `${metadata.platform}|${options.driverInfo.platform}`;
-    }
-  }
-
-  if (options.appname) {
-    // MongoDB requires the appname not exceed a byte length of 128
-    const buffer = Buffer.from(options.appname);
-    metadata.application = {
-      name: buffer.length > 128 ?
buffer.slice(0, 128).toString('utf8') : options.appname - }; - } - - return metadata; -} - -const noop = () => {}; - -module.exports = { - uuidV4, - calculateDurationInMs, - relayEvents, - collationNotSupported, - retrieveEJSON, - retrieveKerberos, - maxWireVersion, - isPromiseLike, - eachAsync, - isUnifiedTopology, - arrayStrictEqual, - tagsStrictEqual, - errorStrictEqual, - makeStateMachine, - makeClientMetadata, - noop -}; diff --git a/lib/core/wireprotocol/constants.js b/lib/core/wireprotocol/constants.js deleted file mode 100644 index df2293b515f..00000000000 --- a/lib/core/wireprotocol/constants.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict'; - -const MIN_SUPPORTED_SERVER_VERSION = '2.6'; -const MAX_SUPPORTED_SERVER_VERSION = '4.2'; -const MIN_SUPPORTED_WIRE_VERSION = 2; -const MAX_SUPPORTED_WIRE_VERSION = 8; - -module.exports = { - MIN_SUPPORTED_SERVER_VERSION, - MAX_SUPPORTED_SERVER_VERSION, - MIN_SUPPORTED_WIRE_VERSION, - MAX_SUPPORTED_WIRE_VERSION -}; diff --git a/lib/aggregation_cursor.js b/lib/cursor/aggregation_cursor.js similarity index 98% rename from lib/aggregation_cursor.js rename to lib/cursor/aggregation_cursor.js index b0977c69866..bb464e73146 100644 --- a/lib/aggregation_cursor.js +++ b/lib/cursor/aggregation_cursor.js @@ -1,9 +1,9 @@ 'use strict'; -const MongoError = require('./core').MongoError; +const { MongoError } = require('../error'); const Cursor = require('./cursor'); -const CursorState = require('./core/cursor').CursorState; -const deprecate = require('util').deprecate; +const { CursorState } = require('./core_cursor'); +const { deprecate } = require('util'); /** * @fileOverview The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB diff --git a/lib/command_cursor.js b/lib/cursor/command_cursor.js similarity index 97% rename from lib/command_cursor.js rename to lib/cursor/command_cursor.js index cd0f9d7a852..aea189f23c6 100644 --- a/lib/command_cursor.js +++ b/lib/cursor/command_cursor.js @@ -1,9 +1,9 @@ 'use strict'; -const ReadPreference = require('./core').ReadPreference; -const MongoError = require('./core').MongoError; +const ReadPreference = require('../read_preference'); +const { MongoError } = require('../error'); const Cursor = require('./cursor'); -const CursorState = require('./core/cursor').CursorState; +const { CursorState } = require('./core_cursor'); /** * @fileOverview The **CommandCursor** class is an internal class that embodies a diff --git a/lib/core/cursor.js b/lib/cursor/core_cursor.js similarity index 96% rename from lib/core/cursor.js rename to lib/cursor/core_cursor.js index 8b6aa3dfc71..903b57535e3 100644 --- a/lib/core/cursor.js +++ b/lib/cursor/core_cursor.js @@ -1,18 +1,12 @@ 'use strict'; -const Logger = require('./connection/logger'); -const retrieveBSON = require('./connection/utils').retrieveBSON; -const MongoError = require('./error').MongoError; -const MongoNetworkError = require('./error').MongoNetworkError; -const mongoErrorContextSymbol = require('./error').mongoErrorContextSymbol; -const collationNotSupported = require('./utils').collationNotSupported; -const ReadPreference = require('./topologies/read_preference'); -const isUnifiedTopology = require('./utils').isUnifiedTopology; +const Logger = require('../logger'); +const ReadPreference = require('../read_preference'); +const { retrieveBSON, collationNotSupported, SUPPORTS, MongoDBNamespace } = require('../utils'); const executeOperation = require('../operations/execute_operation'); -const Readable = 
require('stream').Readable; -const SUPPORTS = require('../utils').SUPPORTS; -const MongoDBNamespace = require('../utils').MongoDBNamespace; -const OperationBase = require('../operations/operation').OperationBase; +const { Readable } = require('stream'); +const { OperationBase } = require('../operations/operation'); +const { MongoError, MongoNetworkError, mongoErrorContextSymbol } = require('../error'); const BSON = retrieveBSON(); const Long = BSON.Long; @@ -428,7 +422,7 @@ class CoreCursor extends Readable { const cursor = this; // NOTE: this goes away once cursors use `executeOperation` - if (isUnifiedTopology(cursor.topology) && cursor.topology.shouldCheckForSessionSupport()) { + if (cursor.topology.shouldCheckForSessionSupport()) { cursor.topology.selectServer(ReadPreference.primaryPreferred, err => { if (err) { callback(err); diff --git a/lib/cursor.js b/lib/cursor/cursor.js similarity index 97% rename from lib/cursor.js rename to lib/cursor/cursor.js index 5115a15fc0f..0e6b19bcd65 100644 --- a/lib/cursor.js +++ b/lib/cursor/cursor.js @@ -1,20 +1,15 @@ 'use strict'; -const Transform = require('stream').Transform; -const PassThrough = require('stream').PassThrough; -const deprecate = require('util').deprecate; -const handleCallback = require('./utils').handleCallback; -const ReadPreference = require('./core').ReadPreference; -const MongoError = require('./core').MongoError; -const CoreCursor = require('./core/cursor').CoreCursor; -const CursorState = require('./core/cursor').CursorState; -const Map = require('./core').BSON.Map; -const maybePromise = require('./utils').maybePromise; -const executeOperation = require('./operations/execute_operation'); -const formattedOrderClause = require('./utils').formattedOrderClause; - -const each = require('./operations/cursor_ops').each; -const CountOperation = require('./operations/count'); +const ReadPreference = require('../read_preference'); +const { Transform, PassThrough } = require('stream'); +const { deprecate } = require('util'); +const { MongoError } = require('../error'); +const { CoreCursor, CursorState } = require('./core_cursor'); +const { Map } = require('../utils').retrieveBSON(); +const { handleCallback, maybePromise, formattedOrderClause } = require('../utils'); +const executeOperation = require('../operations/execute_operation'); +const { each } = require('../operations/cursor_ops'); +const CountOperation = require('../operations/count'); /** * @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB @@ -81,7 +76,6 @@ const fields = ['numberOfRetries', 'tailableRetryInterval']; * collection.find({}).filter({a:1}) // Set query on the cursor * collection.find({}).comment('add a comment') // Add a comment to the query, allowing to correlate queries * collection.find({}).addCursorFlag('tailable', true) // Set cursor as tailable - * collection.find({}).addCursorFlag('oplogReplay', true) // Set cursor as oplogReplay * collection.find({}).addCursorFlag('noCursorTimeout', true) // Set cursor as noCursorTimeout * collection.find({}).addCursorFlag('awaitData', true) // Set cursor as awaitData * collection.find({}).addCursorFlag('partial', true) // Set cursor as partial diff --git a/lib/cursor/index.js b/lib/cursor/index.js new file mode 100644 index 00000000000..8ca1c52aa82 --- /dev/null +++ b/lib/cursor/index.js @@ -0,0 +1,13 @@ +'use strict'; + +const { CoreCursor, CursorState } = require('./core_cursor'); + +module.exports = { + Cursor: require('./cursor'), + CommandCursor: require('./command_cursor'), + 
AggregationCursor: require('./aggregation_cursor'), + + // internal + CoreCursor, + CursorState +}; diff --git a/lib/db.js b/lib/db.js index e28d25a90f8..e638db335f5 100644 --- a/lib/db.js +++ b/lib/db.js @@ -1,35 +1,37 @@ 'use strict'; -const EventEmitter = require('events').EventEmitter; -const inherits = require('util').inherits; -const getSingleProperty = require('./utils').getSingleProperty; -const CommandCursor = require('./command_cursor'); -const handleCallback = require('./utils').handleCallback; -const filterOptions = require('./utils').filterOptions; -const toError = require('./utils').toError; -const ReadPreference = require('./core').ReadPreference; -const MongoError = require('./core').MongoError; -const ObjectID = require('./core').ObjectID; -const Logger = require('./core').Logger; +const EventEmitter = require('events'); +const { inherits, deprecate } = require('util'); +const { AggregationCursor, CommandCursor } = require('./cursor'); +const { ObjectID } = require('./utils').retrieveBSON(); +const ReadPreference = require('./read_preference'); +const { MongoError } = require('./error'); const Collection = require('./collection'); -const mergeOptionsAndWriteConcern = require('./utils').mergeOptionsAndWriteConcern; -const executeLegacyOperation = require('./utils').executeLegacyOperation; -const resolveReadPreference = require('./utils').resolveReadPreference; const ChangeStream = require('./change_stream'); -const deprecate = require('util').deprecate; -const deprecateOptions = require('./utils').deprecateOptions; -const MongoDBNamespace = require('./utils').MongoDBNamespace; const CONSTANTS = require('./constants'); const WriteConcern = require('./write_concern'); const ReadConcern = require('./read_concern'); -const AggregationCursor = require('./aggregation_cursor'); +const Logger = require('./logger'); +const { + getSingleProperty, + handleCallback, + filterOptions, + toError, + mergeOptionsAndWriteConcern, + executeLegacyOperation, + resolveReadPreference, + deprecateOptions, + MongoDBNamespace +} = require('./utils'); // Operations -const createListener = require('./operations/db_ops').createListener; -const ensureIndex = require('./operations/db_ops').ensureIndex; -const evaluate = require('./operations/db_ops').evaluate; -const profilingInfo = require('./operations/db_ops').profilingInfo; -const validateDatabaseName = require('./operations/db_ops').validateDatabaseName; +const { + createListener, + ensureIndex, + evaluate, + profilingInfo, + validateDatabaseName +} = require('./operations/db_ops'); const AggregateOperation = require('./operations/aggregate'); const AddUserOperation = require('./operations/add_user'); @@ -37,8 +39,7 @@ const CollectionsOperation = require('./operations/collections'); const CommandOperation = require('./operations/command'); const CreateCollectionOperation = require('./operations/create_collection'); const CreateIndexOperation = require('./operations/create_index'); -const DropCollectionOperation = require('./operations/drop').DropCollectionOperation; -const DropDatabaseOperation = require('./operations/drop').DropDatabaseOperation; +const { DropCollectionOperation, DropDatabaseOperation } = require('./operations/drop'); const ExecuteDbAdminCommandOperation = require('./operations/execute_db_admin_command'); const IndexInformationOperation = require('./operations/index_information'); const ListCollectionsOperation = require('./operations/list_collections'); diff --git a/lib/error.js b/lib/error.js index 4d104e9be8e..8c6ab42dbcf 
100644
--- a/lib/error.js
+++ b/lib/error.js
@@ -1,14 +1,317 @@
 'use strict';
-const MongoNetworkError = require('./core').MongoNetworkError;
-const mongoErrorContextSymbol = require('./core').mongoErrorContextSymbol;
-
+const mongoErrorContextSymbol = Symbol('mongoErrorContextSymbol');
+const kErrorLabels = Symbol('errorLabels');
 const GET_MORE_NON_RESUMABLE_CODES = new Set([
   136, // CappedPositionLost
   237, // CursorKilled
   11601 // Interrupted
 ]);
+/**
+ * Creates a new MongoError
+ *
+ * @augments Error
+ * @param {Error|string|object} message The error message
+ * @property {string} message The error message
+ * @property {string} stack The error call stack
+ */
+class MongoError extends Error {
+  constructor(message) {
+    if (message instanceof Error) {
+      super(message.message);
+      this.stack = message.stack;
+    } else {
+      if (typeof message === 'string') {
+        super(message);
+      } else {
+        super(message.message || message.errmsg || message.$err || 'n/a');
+        for (let name in message) {
+          if (name === 'errorLabels') {
+            this[kErrorLabels] = new Set(message[name]);
+            continue;
+          }
+
+          this[name] = message[name];
+        }
+      }
+
+      Error.captureStackTrace(this, this.constructor);
+    }
+
+    this.name = 'MongoError';
+    this[mongoErrorContextSymbol] = this[mongoErrorContextSymbol] || {};
+  }
+
+  /**
+   * Creates a new MongoError object
+   *
+   * @param {Error|string|object} options The options used to create the error.
+   * @return {MongoError} A MongoError instance
+   * @deprecated Use `new MongoError()` instead.
+   */
+  static create(options) {
+    return new MongoError(options);
+  }
+
+  /**
+   * Checks the error to see if it has an error label
+   * @param {string} label The error label to check for
+   * @returns {boolean} returns true if the error has the provided error label
+   */
+  hasErrorLabel(label) {
+    if (this[kErrorLabels] == null) {
+      return false;
+    }
+
+    return this[kErrorLabels].has(label);
+  }
+
+  addErrorLabel(label) {
+    if (this[kErrorLabels] == null) {
+      this[kErrorLabels] = new Set();
+    }
+
+    this[kErrorLabels].add(label);
+  }
+
+  get errorLabels() {
+    return this[kErrorLabels] ? Array.from(this[kErrorLabels]) : [];
+  }
+}
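A hedged sketch of the error-label API introduced on the class above:

```js
// Hedged sketch exercising the label API of the MongoError class above.
const err = new MongoError({
  message: 'write failed',
  errorLabels: ['TransientTransactionError']
});

console.log(err.hasErrorLabel('TransientTransactionError')); // true
err.addErrorLabel('UnknownTransactionCommitResult');
console.log(err.errorLabels); // ['TransientTransactionError', 'UnknownTransactionCommitResult']
```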
+
+/**
+ * An error indicating an issue with the network, including TCP errors and timeouts.
+ *
+ * @param {Error|string|object} message The error message
+ * @property {string} message The error message
+ * @property {string} stack The error call stack
+ * @extends MongoError
+ */
+class MongoNetworkError extends MongoError {
+  constructor(message) {
+    super(message);
+    this.name = 'MongoNetworkError';
+  }
+}
+
+/**
+ * An error used when attempting to parse a value (like a connection string)
+ *
+ * @param {Error|string|object} message The error message
+ * @property {string} message The error message
+ * @extends MongoError
+ */
+class MongoParseError extends MongoError {
+  constructor(message) {
+    super(message);
+    this.name = 'MongoParseError';
+  }
+}
+
+/**
+ * An error signifying a client-side timeout event
+ *
+ * @param {Error|string|object} message The error message
+ * @param {string|object} [reason] The reason the timeout occurred
+ * @property {string} message The error message
+ * @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers
+ * @extends MongoError
+ */
+class MongoTimeoutError extends MongoError {
+  constructor(message, reason) {
+    if (reason && reason.error) {
+      super(reason.error.message || reason.error);
+    } else {
+      super(message);
+    }
+
+    this.name = 'MongoTimeoutError';
+    if (reason) {
+      this.reason = reason;
+    }
+  }
+}
+
+/**
+ * An error signifying a client-side server selection error
+ *
+ * @param {Error|string|object} message The error message
+ * @param {string|object} [reason] The reason the timeout occurred
+ * @property {string} message The error message
+ * @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers
+ * @extends MongoError
+ */
+class MongoServerSelectionError extends MongoTimeoutError {
+  constructor(message, reason) {
+    super(message, reason);
+    this.name = 'MongoServerSelectionError';
+  }
+}
+
+function makeWriteConcernResultObject(input) {
+  const output = Object.assign({}, input);
+
+  if (output.ok === 0) {
+    output.ok = 1;
+    delete output.errmsg;
+    delete output.code;
+    delete output.codeName;
+  }
+
+  return output;
+}
+
+/**
+ * An error thrown when the server reports a writeConcernError
+ *
+ * @param {Error|string|object} message The error message
+ * @param {object} result The result document (provided if ok: 1)
+ * @property {string} message The error message
+ * @property {object} [result] The result document (provided if ok: 1)
+ * @extends MongoError
+ */
+class MongoWriteConcernError extends MongoError {
+  constructor(message, result) {
+    super(message);
+    this.name = 'MongoWriteConcernError';
+
+    if (result && Array.isArray(result.errorLabels)) {
+      this[kErrorLabels] = new Set(result.errorLabels);
+    }
+
+    if (result != null) {
+      this.result = makeWriteConcernResultObject(result);
+    }
+  }
+}
+
+// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms
+const RETRYABLE_ERROR_CODES = new Set([
+  6, // HostUnreachable
+  7, // HostNotFound
+  89, // NetworkTimeout
+  91, // ShutdownInProgress
+  189, // PrimarySteppedDown
+  9001, // SocketException
+  10107, // NotMaster
+  11600, // InterruptedAtShutdown
+  11602, // InterruptedDueToReplStateChange
+  13435, // NotMasterNoSlaveOk
+  13436 // NotMasterOrSecondary
+]);
+
+const RETRYABLE_WRITE_ERROR_CODES = new Set([
+  11600, // InterruptedAtShutdown
+  11602, // InterruptedDueToReplStateChange
+  10107, // NotMaster
+  13435, // NotMasterNoSlaveOk
+  13436, // NotMasterOrSecondary
+  189, // PrimarySteppedDown
+  91, // ShutdownInProgress
+  7, // HostNotFound
+  6, // HostUnreachable
+  89, // NetworkTimeout
+  9001, // SocketException
+  262 // ExceededTimeLimit
+]);
+
+function isRetryableWriteError(error) {
+  if (error instanceof MongoWriteConcernError) {
+    return (
+      RETRYABLE_WRITE_ERROR_CODES.has(error.code) ||
+      RETRYABLE_WRITE_ERROR_CODES.has(error.result.code)
+    );
+  }
+
+  return RETRYABLE_WRITE_ERROR_CODES.has(error.code);
+}
+
+/**
+ * Determines whether an error is something the driver should attempt to retry
+ *
+ * @ignore
+ * @param {MongoError|Error} error
+ */
+function isRetryableError(error) {
+  return (
+    RETRYABLE_ERROR_CODES.has(error.code) ||
+    error instanceof MongoNetworkError ||
+    error.message.match(/not master/) ||
+    error.message.match(/node is recovering/)
+  );
+}
+
+const SDAM_RECOVERING_CODES = new Set([
+  91, // ShutdownInProgress
+  189, // PrimarySteppedDown
+  11600, // InterruptedAtShutdown
+  11602, // InterruptedDueToReplStateChange
+  13436 // NotMasterOrSecondary
+]);
+
+const SDAM_NOTMASTER_CODES = new Set([
+  10107, // NotMaster
+  13435 // NotMasterNoSlaveOk
+]);
+
+const SDAM_NODE_SHUTTING_DOWN_ERROR_CODES = new Set([
+  11600, // InterruptedAtShutdown
+  91 // ShutdownInProgress
+]);
+
+function isRecoveringError(err) {
+  if (err.code && SDAM_RECOVERING_CODES.has(err.code)) {
+    return true;
+  }
+
+  return err.message.match(/not master or secondary/) || err.message.match(/node is recovering/);
+}
+
+function isNotMasterError(err) {
+  if (err.code && SDAM_NOTMASTER_CODES.has(err.code)) {
+    return true;
+  }
+
+  if (isRecoveringError(err)) {
+    return false;
+  }
+
+  return err.message.match(/not master/);
+}
+
+function isNodeShuttingDownError(err) {
+  return err.code && SDAM_NODE_SHUTTING_DOWN_ERROR_CODES.has(err.code);
+}
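A hedged sketch of how these predicates classify errors, using the MongoError class added earlier in this file:

```js
// Hedged sketch using the predicates above with illustrative server codes.
const stepdown = new MongoError({ message: 'not master', code: 10107 });
console.log(isRetryableWriteError(stepdown)); // true (NotMaster is in the write-retry set)
console.log(isRetryableError(stepdown)); // true (code is in RETRYABLE_ERROR_CODES)

const diskFailure = new MongoError({ message: 'disk failure', code: 1 });
console.log(isRetryableWriteError(diskFailure)); // false
```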
+
+/**
+ * Determines whether SDAM can recover from a given error. If it cannot
+ * then the pool will be cleared, and server state will completely reset
+ * locally.
+ *
+ * @ignore
+ * @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-master-and-node-is-recovering
+ * @param {MongoError|Error} error
+ */
+function isSDAMUnrecoverableError(error) {
+  // NOTE: null check is here for a strictly pre-CMAP world, a timeout or
+  // close event are considered unrecoverable
+  if (error instanceof MongoParseError || error == null) {
+    return true;
+  }
+
+  if (isRecoveringError(error) || isNotMasterError(error)) {
+    return true;
+  }
+
+  return false;
+}
+
+function isNetworkTimeoutError(err) {
+  return err instanceof MongoNetworkError && err.message.match(/timed out/);
+}
+
 // From spec@https://github.com/mongodb/specifications/blob/7a2e93d85935ee4b1046a8d2ad3514c657dc74fa/source/change-streams/change-streams.rst#resumable-error:
 //
 // An error is considered resumable if it meets any of the following criteria:
@@ -42,4 +345,19 @@ function isResumableError(error) {
   );
 }
 
-module.exports = { GET_MORE_NON_RESUMABLE_CODES, isResumableError };
+module.exports = {
+  GET_MORE_NON_RESUMABLE_CODES,
+  MongoError,
+  MongoNetworkError,
+  MongoParseError,
+  MongoTimeoutError,
+  MongoServerSelectionError,
+  MongoWriteConcernError,
+  mongoErrorContextSymbol,
+  isRetryableError,
+  isSDAMUnrecoverableError,
+  isNodeShuttingDownError,
+  isNetworkTimeoutError,
+  isRetryableWriteError,
+  isResumableError
+};
diff --git a/lib/gridfs-stream/index.js b/lib/gridfs-stream/index.js
index 65098395187..b44f72a6505 100644
--- a/lib/gridfs-stream/index.js
+++ b/lib/gridfs-stream/index.js
@@ -3,7 +3,6 @@
 var Emitter = require('events').EventEmitter;
 var GridFSBucketReadStream = require('./download');
 var GridFSBucketWriteStream = require('./upload');
-var shallowClone = require('../utils').shallowClone;
 var toError = require('../utils').toError;
 var util = require('util');
 var executeLegacyOperation = require('../utils').executeLegacyOperation;
@@ -33,7 +32,7 @@ function GridFSBucket(db, options) {
   this.setMaxListeners(0);
 
   if (options && typeof options === 'object') {
-    options = shallowClone(options);
+    options = Object.assign({}, options);
     var keys = Object.keys(DEFAULT_GRIDFS_BUCKET_OPTIONS);
     for (var i = 0; i < keys.length; ++i) {
       if (!options[keys[i]]) {
@@ -85,7 +84,7 @@ util.inherits(GridFSBucket, Emitter);
 
 GridFSBucket.prototype.openUploadStream = function(filename, options) {
   if (options) {
-    options = shallowClone(options);
+    options = Object.assign({}, options);
   } else {
     options = {};
   }
@@ -113,7 +112,7 @@ GridFSBucket.prototype.openUploadStream = function(filename, options) {
 
 GridFSBucket.prototype.openUploadStreamWithId = function(id, filename, options) {
   if (options) {
-    options = shallowClone(options);
+    options = Object.assign({}, options);
   } else {
     options = {};
   }
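The diff above swaps the removed `shallowClone` helper for the built-in `Object.assign`; a hedged sketch of the equivalence (the option names are placeholders):

```js
// Object.assign({}, src) copies own enumerable properties one level deep,
// which is all the removed shallowClone helper provided.
const defaults = { bucketName: 'fs', chunkSizeBytes: 255 * 1024 };
const options = Object.assign({}, defaults, { chunkSizeBytes: 1024 });

console.log(options.chunkSizeBytes); // 1024 — the caller override wins
console.log(defaults.chunkSizeBytes); // 261120 — the original object is untouched
```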
diff --git a/lib/gridfs-stream/upload.js b/lib/gridfs-stream/upload.js
index 578949a53d7..e39de81fe20 100644
--- a/lib/gridfs-stream/upload.js
+++ b/lib/gridfs-stream/upload.js
@@ -1,10 +1,10 @@
 'use strict';
 
-var core = require('../core');
 var crypto = require('crypto');
 var stream = require('stream');
 var util = require('util');
 var Buffer = require('safe-buffer').Buffer;
+const { ObjectId } = require('../utils').retrieveBSON();
 
 var ERROR_NAMESPACE_NOT_FOUND = 26;
@@ -40,7 +40,7 @@ function GridFSBucketWriteStream(bucket, filename, options) {
   // Signals the write is all done
   this.done = false;
 
-  this.id = options.id ? options.id : core.BSON.ObjectId();
+  this.id = options.id ? options.id : ObjectId();
   this.chunkSizeBytes = this.options.chunkSizeBytes;
   this.bufToStore = Buffer.alloc(this.chunkSizeBytes);
   this.length = 0;
@@ -193,7 +193,7 @@ function __handleError(_this, error, callback) {
 
 function createChunkDoc(filesId, n, data) {
   return {
-    _id: core.BSON.ObjectId(),
+    _id: ObjectId(),
     files_id: filesId,
     n: n,
     data: data
diff --git a/lib/gridfs/chunk.js b/lib/gridfs/chunk.js
index d276d720476..ae95f2d653c 100644
--- a/lib/gridfs/chunk.js
+++ b/lib/gridfs/chunk.js
@@ -1,9 +1,7 @@
 'use strict';
 
-var Binary = require('../core').BSON.Binary,
-  ObjectID = require('../core').BSON.ObjectID;
-
-var Buffer = require('safe-buffer').Buffer;
+const { Binary, ObjectID } = require('../utils').retrieveBSON();
+const { Buffer } = require('safe-buffer');
 
 /**
  * Class for representing a single chunk in GridFS.
diff --git a/lib/gridfs/grid_store.js b/lib/gridfs/grid_store.js
index 9d9ff25f3af..8880fb550c9 100644
--- a/lib/gridfs/grid_store.js
+++ b/lib/gridfs/grid_store.js
@@ -36,18 +36,14 @@
  * });
  */
 const Chunk = require('./chunk');
-const ObjectID = require('../core').BSON.ObjectID;
-const ReadPreference = require('../core').ReadPreference;
-const Buffer = require('safe-buffer').Buffer;
+const ReadPreference = require('../read_preference');
+const { ObjectID } = require('../utils').retrieveBSON();
+const { Buffer } = require('safe-buffer');
 const fs = require('fs');
-const f = require('util').format;
-const util = require('util');
-const MongoError = require('../core').MongoError;
-const inherits = util.inherits;
-const Duplex = require('stream').Duplex;
-const shallowClone = require('../utils').shallowClone;
-const executeLegacyOperation = require('../utils').executeLegacyOperation;
-const deprecate = require('util').deprecate;
+const { format: f, inherits, deprecate } = require('util');
+const { MongoError } = require('../error');
+const { Duplex } = require('stream');
+const { executeLegacyOperation } = require('../utils');
 
 var REFERENCE_BY_FILENAME = 0,
   REFERENCE_BY_ID = 1;
@@ -221,7 +217,7 @@ var open = function(self, options, callback) {
   // Get chunk collection
   var chunkCollection = self.chunkCollection();
   // Make a unique index for compatibility with mongo-cxx-driver:legacy
-  var chunkIndexOptions = shallowClone(writeConcern);
+  var chunkIndexOptions = Object.assign({}, writeConcern);
   chunkIndexOptions.unique = true;
   // Ensure index on chunk collection
   chunkCollection.ensureIndex(
diff --git a/lib/core/connection/logger.js b/lib/logger.js
similarity index 98%
rename from lib/core/connection/logger.js
rename to lib/logger.js
index 3b0be90d6fd..d5e34f134fb 100644
--- a/lib/core/connection/logger.js
+++ b/lib/logger.js
@@ -1,7 +1,7 @@
 'use strict';
 
-var f = require('util').format,
-  MongoError = require('../error').MongoError;
+const { format: f } = require('util');
+const { MongoError } = require('./error');
 
 // Filters for classes
 var classFilters = {};
diff --git a/lib/mongo_client.js b/lib/mongo_client.js
index 4a0d88c914d..0369f5256d2 100644
--- a/lib/mongo_client.js
+++ b/lib/mongo_client.js
@@ -1,18 +1,15 @@
 'use strict';
 
-const ChangeStream = require('./change_stream');
 const Db = require('./db');
-const EventEmitter = require('events').EventEmitter;
-const inherits = require('util').inherits;
-const MongoError = require('./core').MongoError;
-const deprecate = require('util').deprecate;
+const EventEmitter = require('events');
+const ChangeStream = require('./change_stream');
+const ReadPreference = require('./read_preference');
+const { MongoError } =
require('./error'); const WriteConcern = require('./write_concern'); -const MongoDBNamespace = require('./utils').MongoDBNamespace; -const ReadPreference = require('./core/topologies/read_preference'); -const maybePromise = require('./utils').maybePromise; const NativeTopology = require('./topologies/native_topology'); -const connect = require('./operations/connect').connect; -const validOptions = require('./operations/connect').validOptions; +const { maybePromise, MongoDBNamespace } = require('./utils'); +const { inherits, deprecate } = require('util'); +const { connect, validOptions } = require('./operations/connect'); /** * @fileOverview The **MongoClient** class is a class that allows for making Connections to MongoDB. @@ -142,7 +139,6 @@ const validOptions = require('./operations/connect').validOptions; * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this client * @param {number} [options.minSize] If present, the connection pool will be initialized with minSize connections, and will never dip below minSize connections * @param {boolean} [options.useNewUrlParser=true] Determines whether or not to use the new url parser. Enables the new, spec-compliant, url parser shipped in the core driver. This url parser fixes a number of problems with the original parser, and aims to outright replace that parser in the near future. Defaults to true, and must be explicitly set to false to use the legacy url parser. - * @param {boolean} [options.useUnifiedTopology] Enables the new unified topology layer * @param {AutoEncrypter~AutoEncryptionOptions} [options.autoEncryption] Optionally enable client side auto encryption * @param {DriverInfoOptions} [options.driverInfo] Allows a wrapping driver to amend the client metadata generated by the driver to include information about the wrapping driver * @param {MongoClient~connectCallback} [callback] The command result callback diff --git a/lib/operations/aggregate.js b/lib/operations/aggregate.js index e0f2da84e10..430cd4a4f03 100644 --- a/lib/operations/aggregate.js +++ b/lib/operations/aggregate.js @@ -1,11 +1,10 @@ 'use strict'; const CommandOperationV2 = require('./command_v2'); -const MongoError = require('../core').MongoError; -const maxWireVersion = require('../core/utils').maxWireVersion; -const ReadPreference = require('../core').ReadPreference; -const Aspect = require('./operation').Aspect; -const defineAspects = require('./operation').defineAspects; +const ReadPreference = require('../read_preference'); +const { MongoError } = require('../error'); +const { maxWireVersion } = require('../utils'); +const { Aspect, defineAspects } = require('./operation'); const DB_AGGREGATE_COLLECTION = 1; const MIN_WIRE_VERSION_$OUT_READ_CONCERN_SUPPORT = 8; diff --git a/lib/operations/bulk_write.js b/lib/operations/bulk_write.js index 8f14f0217c4..943ba3ba58a 100644 --- a/lib/operations/bulk_write.js +++ b/lib/operations/bulk_write.js @@ -1,9 +1,8 @@ 'use strict'; -const applyRetryableWrites = require('../utils').applyRetryableWrites; -const applyWriteConcern = require('../utils').applyWriteConcern; -const MongoError = require('../core').MongoError; -const OperationBase = require('./operation').OperationBase; +const { applyRetryableWrites, applyWriteConcern } = require('../utils'); +const { MongoError } = require('../error'); +const { OperationBase } = require('./operation'); class BulkWriteOperation extends OperationBase { constructor(collection, operations, options) { diff --git a/lib/operations/collection_ops.js 
diff --git a/lib/operations/aggregate.js b/lib/operations/aggregate.js
index e0f2da84e10..430cd4a4f03 100644
--- a/lib/operations/aggregate.js
+++ b/lib/operations/aggregate.js
@@ -1,11 +1,10 @@
 'use strict';

 const CommandOperationV2 = require('./command_v2');
-const MongoError = require('../core').MongoError;
-const maxWireVersion = require('../core/utils').maxWireVersion;
-const ReadPreference = require('../core').ReadPreference;
-const Aspect = require('./operation').Aspect;
-const defineAspects = require('./operation').defineAspects;
+const ReadPreference = require('../read_preference');
+const { MongoError } = require('../error');
+const { maxWireVersion } = require('../utils');
+const { Aspect, defineAspects } = require('./operation');

 const DB_AGGREGATE_COLLECTION = 1;
 const MIN_WIRE_VERSION_$OUT_READ_CONCERN_SUPPORT = 8;
diff --git a/lib/operations/bulk_write.js b/lib/operations/bulk_write.js
index 8f14f0217c4..943ba3ba58a 100644
--- a/lib/operations/bulk_write.js
+++ b/lib/operations/bulk_write.js
@@ -1,9 +1,8 @@
 'use strict';

-const applyRetryableWrites = require('../utils').applyRetryableWrites;
-const applyWriteConcern = require('../utils').applyWriteConcern;
-const MongoError = require('../core').MongoError;
-const OperationBase = require('./operation').OperationBase;
+const { applyRetryableWrites, applyWriteConcern } = require('../utils');
+const { MongoError } = require('../error');
+const { OperationBase } = require('./operation');

 class BulkWriteOperation extends OperationBase {
   constructor(collection, operations, options) {
diff --git a/lib/operations/collection_ops.js b/lib/operations/collection_ops.js
index df5995d766e..d27ba8deaa4 100644
--- a/lib/operations/collection_ops.js
+++ b/lib/operations/collection_ops.js
@@ -1,22 +1,24 @@
 'use strict';

-const applyWriteConcern = require('../utils').applyWriteConcern;
-const Code = require('../core').BSON.Code;
-const createIndexDb = require('./db_ops').createIndex;
-const decorateWithCollation = require('../utils').decorateWithCollation;
-const decorateWithReadConcern = require('../utils').decorateWithReadConcern;
-const ensureIndexDb = require('./db_ops').ensureIndex;
-const evaluate = require('./db_ops').evaluate;
-const executeCommand = require('./db_ops').executeCommand;
-const resolveReadPreference = require('../utils').resolveReadPreference;
-const handleCallback = require('../utils').handleCallback;
-const indexInformationDb = require('./db_ops').indexInformation;
-const Long = require('../core').BSON.Long;
-const MongoError = require('../core').MongoError;
-const ReadPreference = require('../core').ReadPreference;
-const toError = require('../utils').toError;
-const insertDocuments = require('./common_functions').insertDocuments;
-const updateDocuments = require('./common_functions').updateDocuments;
+const ReadPreference = require('../read_preference');
+const { Code, Long } = require('../utils').retrieveBSON();
+const { MongoError } = require('../error');
+const { insertDocuments, updateDocuments } = require('./common_functions');
+const {
+  applyWriteConcern,
+  decorateWithCollation,
+  decorateWithReadConcern,
+  resolveReadPreference,
+  handleCallback,
+  toError
+} = require('../utils');
+const {
+  createIndex: createIndexDb,
+  ensureIndex: ensureIndexDb,
+  evaluate,
+  executeCommand,
+  indexInformation: indexInformationDb
+} = require('./db_ops');

 /**
  * Group function helper
diff --git a/lib/operations/command.js b/lib/operations/command.js
index 3c795bef79e..8df9572958e 100644
--- a/lib/operations/command.js
+++ b/lib/operations/command.js
@@ -1,14 +1,15 @@
 'use strict';

-const Aspect = require('./operation').Aspect;
-const OperationBase = require('./operation').OperationBase;
-const applyWriteConcern = require('../utils').applyWriteConcern;
-const debugOptions = require('../utils').debugOptions;
-const handleCallback = require('../utils').handleCallback;
-const MongoError = require('../core').MongoError;
-const ReadPreference = require('../core').ReadPreference;
-const resolveReadPreference = require('../utils').resolveReadPreference;
-const MongoDBNamespace = require('../utils').MongoDBNamespace;
+const ReadPreference = require('../read_preference');
+const { Aspect, OperationBase } = require('./operation');
+const { MongoError } = require('../error');
+const {
+  applyWriteConcern,
+  debugOptions,
+  handleCallback,
+  resolveReadPreference,
+  MongoDBNamespace
+} = require('../utils');

 const debugFields = [
   'authSource',
diff --git a/lib/operations/command_v2.js b/lib/operations/command_v2.js
index 8081d90c2b6..96f91dddfb5 100644
--- a/lib/operations/command_v2.js
+++ b/lib/operations/command_v2.js
@@ -1,13 +1,11 @@
 'use strict';

-const Aspect = require('./operation').Aspect;
-const OperationBase = require('./operation').OperationBase;
-const resolveReadPreference = require('../utils').resolveReadPreference;
+const { Aspect, OperationBase } = require('./operation');
 const ReadConcern = require('../read_concern');
 const WriteConcern = require('../write_concern');
-const maxWireVersion = require('../core/utils').maxWireVersion;
-const commandSupportsReadConcern = require('../core/sessions').commandSupportsReadConcern;
-const MongoError = require('../error').MongoError;
+const { maxWireVersion, resolveReadPreference } = require('../utils');
+const { commandSupportsReadConcern } = require('../sessions');
+const { MongoError } = require('../error');

 const SUPPORTS_WRITE_CONCERN_AND_COLLATION = 5;
diff --git a/lib/operations/common_functions.js b/lib/operations/common_functions.js
index c027697526b..330a3643e77 100644
--- a/lib/operations/common_functions.js
+++ b/lib/operations/common_functions.js
@@ -1,16 +1,18 @@
 'use strict';

-const applyRetryableWrites = require('../utils').applyRetryableWrites;
-const applyWriteConcern = require('../utils').applyWriteConcern;
-const decorateWithCollation = require('../utils').decorateWithCollation;
-const decorateWithReadConcern = require('../utils').decorateWithReadConcern;
-const executeCommand = require('./db_ops').executeCommand;
-const formattedOrderClause = require('../utils').formattedOrderClause;
-const handleCallback = require('../utils').handleCallback;
-const MongoError = require('../core').MongoError;
-const ReadPreference = require('../core').ReadPreference;
-const toError = require('../utils').toError;
-const CursorState = require('../core/cursor').CursorState;
+const ReadPreference = require('../read_preference');
+const { executeCommand } = require('./db_ops');
+const { MongoError } = require('../error');
+const { CursorState } = require('../cursor');
+const {
+  applyRetryableWrites,
+  applyWriteConcern,
+  decorateWithCollation,
+  decorateWithReadConcern,
+  formattedOrderClause,
+  handleCallback,
+  toError
+} = require('../utils');

 /**
  * Build the count command.
diff --git a/lib/operations/connect.js b/lib/operations/connect.js
index bcdd3eb5b50..64005b0af1b 100644
--- a/lib/operations/connect.js
+++ b/lib/operations/connect.js
@@ -1,74 +1,34 @@
 'use strict';

-const deprecate = require('util').deprecate;
-const Logger = require('../core').Logger;
-const MongoCredentials = require('../core').MongoCredentials;
-const MongoError = require('../core').MongoError;
-const Mongos = require('../topologies/mongos');
+const fs = require('fs');
+const Logger = require('../logger');
+const ReadPreference = require('../read_preference');
+const { MongoError } = require('../error');
 const NativeTopology = require('../topologies/native_topology');
-const parse = require('../core').parseConnectionString;
+const { parseConnectionString } = require('../connection_string');
 const ReadConcern = require('../read_concern');
-const ReadPreference = require('../core').ReadPreference;
-const ReplSet = require('../topologies/replset');
-const Server = require('../topologies/server');
-const ServerSessionPool = require('../core').Sessions.ServerSessionPool;
-const emitDeprecationWarning = require('../utils').emitDeprecationWarning;
-const fs = require('fs');
-const BSON = require('../core/connection/utils').retrieveBSON();
-const CMAP_EVENT_NAMES = require('../cmap/events').CMAP_EVENT_NAMES;
-
-let client;
-function loadClient() {
-  if (!client) {
-    client = require('../mongo_client');
-  }
-  return client;
-}
-
-const legacyParse = deprecate(
-  require('../url_parser'),
-  'current URL string parser is deprecated, and will be removed in a future version. ' +
-    'To use the new parser, pass option { useNewUrlParser: true } to MongoClient.connect.'
-);
+const { ServerSessionPool } = require('../sessions');
+const { emitDeprecationWarning } = require('../utils');
+const BSON = require('../utils').retrieveBSON();
+const { CMAP_EVENT_NAMES } = require('../cmap/events');
+const { MongoCredentials } = require('../cmap/auth/mongo_credentials');

 const AUTH_MECHANISM_INTERNAL_MAP = {
   DEFAULT: 'default',
-  'MONGODB-CR': 'mongocr',
   PLAIN: 'plain',
+  'MONGODB-CR': 'mongocr',
   'MONGODB-X509': 'x509',
+  'MONGODB-AWS': 'mongodb-aws',
   'SCRAM-SHA-1': 'scram-sha-1',
   'SCRAM-SHA-256': 'scram-sha-256'
 };

-const monitoringEvents = [
-  'timeout',
-  'close',
-  'serverOpening',
-  'serverDescriptionChanged',
-  'serverHeartbeatStarted',
-  'serverHeartbeatSucceeded',
-  'serverHeartbeatFailed',
-  'serverClosed',
-  'topologyOpening',
-  'topologyClosed',
-  'topologyDescriptionChanged',
-  'commandStarted',
-  'commandSucceeded',
-  'commandFailed',
-  'joined',
-  'left',
-  'ping',
-  'ha',
-  'all',
-  'fullsetup',
-  'open'
-];
-
 const VALID_AUTH_MECHANISMS = new Set([
   'DEFAULT',
-  'MONGODB-CR',
   'PLAIN',
+  'MONGODB-CR',
   'MONGODB-X509',
+  'MONGODB-AWS',
   'SCRAM-SHA-1',
   'SCRAM-SHA-256',
   'GSSAPI'
@@ -135,7 +95,6 @@ const validOptionNames = [
   'retryWrites',
   'retryReads',
   'useNewUrlParser',
-  'useUnifiedTopology',
   'serverSelectionTimeoutMS',
   'useRecoveryToken',
   'autoEncryption',
@@ -208,31 +167,6 @@ function assignTopology(client, topology) {
   }
 }

-// Clear out all events
-function clearAllEvents(topology) {
-  monitoringEvents.forEach(event => topology.removeAllListeners(event));
-}
-
-// Collect all events in order from SDAM
-function collectEvents(mongoClient, topology) {
-  let MongoClient = loadClient();
-  const collectedEvents = [];
-
-  if (mongoClient instanceof MongoClient) {
-    monitoringEvents.forEach(event => {
-      topology.on(event, (object1, object2) => {
-        if (event === 'open') {
-          collectedEvents.push({ event: event, object1: mongoClient });
-        } else {
-          collectedEvents.push({ event: event, object1: object1, object2: object2 });
-        }
-      });
-    });
-  }
-
-  return collectedEvents;
-}
-
 function resolveTLSOptions(options) {
   if (options.tls == null) {
     return;
@@ -245,9 +179,6 @@ function resolveTLSOptions(options) {
   });
 }

-const emitDeprecationForNonUnifiedTopology = deprecate(() => {},
-'current Server Discovery and Monitoring engine is deprecated, and will be removed in a future version. ' + 'To use the new Server Discover and Monitoring engine, pass option { useUnifiedTopology: true } to the MongoClient constructor.');
-
 function connect(mongoClient, url, options, callback) {
   options = Object.assign({}, options);
@@ -259,22 +190,12 @@ function connect(mongoClient, url, options, callback) {
   let didRequestAuthentication = false;
   const logger = Logger('MongoClient', options);

-  // Did we pass in a Server/ReplSet/Mongos
-  if (url instanceof Server || url instanceof ReplSet || url instanceof Mongos) {
-    return connectWithUrl(mongoClient, url, options, connectCallback);
-  }
-
-  const useNewUrlParser = options.useNewUrlParser !== false;
-
-  const parseFn = useNewUrlParser ? parse : legacyParse;
-  const transform = useNewUrlParser ? transformUrlOptions : legacyTransformUrlOptions;
-
-  parseFn(url, options, (err, _object) => {
+  parseConnectionString(url, options, (err, _object) => {
     // Do not attempt to connect if parsing error
     if (err) return callback(err);

     // Flatten
-    const object = transform(_object);
+    const object = transformUrlOptions(_object);

     // Parse the string
     const _finalOptions = createUnifiedOptions(object, options);
@@ -315,20 +236,7 @@ function connect(mongoClient, url, options, callback) {
       }
     }

-    if (_finalOptions.useUnifiedTopology) {
-      return createTopology(mongoClient, 'unified', _finalOptions, connectCallback);
-    }
-
-    emitDeprecationForNonUnifiedTopology();
-
-    // Do we have a replicaset then skip discovery and go straight to connectivity
-    if (_finalOptions.replicaSet || _finalOptions.rs_name) {
-      return createTopology(mongoClient, 'replicaset', _finalOptions, connectCallback);
-    } else if (object.servers.length > 1) {
-      return createTopology(mongoClient, 'mongos', _finalOptions, connectCallback);
-    } else {
-      return createServer(mongoClient, _finalOptions, connectCallback);
-    }
+    return createTopology(mongoClient, _finalOptions, connectCallback);
   });

   function connectCallback(err, topology) {
@@ -351,42 +259,6 @@ function connect(mongoClient, url, options, callback) {
   }
 }

-function connectWithUrl(mongoClient, url, options, connectCallback) {
-  // Set the topology
-  assignTopology(mongoClient, url);
-
-  // Add listeners
-  addListeners(mongoClient, url);
-
-  // Propagate the events to the client
-  relayEvents(mongoClient, url);
-
-  let finalOptions = Object.assign({}, options);
-
-  // If we have a readPreference passed in by the db options, convert it from a string
-  if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') {
-    finalOptions.readPreference = new ReadPreference(
-      options.readPreference || options.read_preference
-    );
-  }
-
-  const isDoingAuth = finalOptions.user || finalOptions.password || finalOptions.authMechanism;
-  if (isDoingAuth && !finalOptions.credentials) {
-    try {
-      finalOptions.credentials = generateCredentials(
-        mongoClient,
-        finalOptions.user,
-        finalOptions.password,
-        finalOptions
-      );
-    } catch (err) {
-      return connectCallback(err, url);
-    }
-  }
-
-  return url.connect(finalOptions, connectCallback);
-}
-
 function createListener(mongoClient, event) {
   const eventSet = new Set(['all', 'fullsetup', 'open', 'reconnect']);
   return (v1, v2) => {
@@ -398,52 +270,6 @@ function createListener(mongoClient, event) {
   };
 }

-function createServer(mongoClient, options, callback) {
-  // Pass in the promise library
-  options.promiseLibrary = mongoClient.s.promiseLibrary;
-
-  // Set default options
-  const servers = translateOptions(options);
-
-  const server = servers[0];
-
-  // Propagate the events to the client
-  const collectedEvents = collectEvents(mongoClient, server);
-
-  // Connect to topology
-  server.connect(options, (err, topology) => {
-    if (err) {
-      server.close(true);
-      return callback(err);
-    }
-    // Clear out all the collected event listeners
-    clearAllEvents(server);
-
-    // Relay all the events
-    relayEvents(mongoClient, server);
-    // Add listeners
-    addListeners(mongoClient, server);
-    // Check if we are really speaking to a mongos
-    const ismaster = topology.lastIsMaster();
-
-    // Set the topology
-    assignTopology(mongoClient, topology);
-
-    // Do we actually have a mongos
-    if (ismaster && ismaster.msg === 'isdbgrid') {
-      // Destroy the current connection
-      topology.close();
-      // Create mongos connection instead
-      return createTopology(mongoClient, 'mongos', options, callback);
-    }
-
-    // Fire all the events
-    replayEvents(mongoClient, collectedEvents);
-    // Otherwise callback
-    callback(err, topology);
-  });
-}
-
 const DEPRECATED_UNIFIED_EVENTS = new Set([
   'reconnect',
   'reconnectFailed',
@@ -468,15 +294,16 @@ function registerDeprecatedEventNotifiers(client) {
   });
 }

-function createTopology(mongoClient, topologyType, options, callback) {
+function createTopology(mongoClient, options, callback) {
   // Pass in the promise library
   options.promiseLibrary = mongoClient.s.promiseLibrary;

-  const translationOptions = {};
-  if (topologyType === 'unified') translationOptions.createServers = false;
+  const translationOptions = {
+    createServers: false
+  };

   // Set default options
-  const servers = translateOptions(options, translationOptions);
+  translateOptions(options, translationOptions);

   // determine CSFLE support
   if (options.autoEncryption != null) {
@@ -535,15 +362,8 @@ function createTopology(mongoClient, topologyType, options, callback) {
   }

   // Create the topology
-  let topology;
-  if (topologyType === 'mongos') {
-    topology = new Mongos(servers, options);
-  } else if (topologyType === 'replicaset') {
-    topology = new ReplSet(servers, options);
-  } else if (topologyType === 'unified') {
-    topology = new NativeTopology(options.servers, options);
-    registerDeprecatedEventNotifiers(mongoClient);
-  }
+  const topology = new NativeTopology(options.servers, options);
+  registerDeprecatedEventNotifiers(mongoClient);

   // Add listeners
   addListeners(mongoClient, topology);
@@ -633,6 +453,7 @@ function generateCredentials(client, username, password, options) {
   // authMechanism
   const authMechanismRaw = options.authMechanism || 'DEFAULT';
   const authMechanism = authMechanismRaw.toUpperCase();
+  const mechanismProperties = options.authMechanismProperties;

   if (!VALID_AUTH_MECHANISMS.has(authMechanism)) {
     throw MongoError.create({
@@ -653,16 +474,13 @@ function generateCredentials(client, username, password, options) {

   return new MongoCredentials({
     mechanism: AUTH_MECHANISM_INTERNAL_MAP[authMechanism],
+    mechanismProperties,
     source,
     username,
     password
   });
 }

-function legacyTransformUrlOptions(object) {
-  return mergeOptions(createUnifiedOptions({}, object), object, false);
-}
-
 function mergeOptions(target, source, flatten) {
   for (const name in source) {
     if (source[name] && typeof source[name] === 'object' && flatten) {
@@ -707,15 +525,6 @@ function relayEvents(mongoClient, topology) {
   });
 }

-//
-// Replay any events due to single server connection switching to Mongos
-//
-function replayEvents(mongoClient, events) {
-  for (let i = 0; i < events.length; i++) {
-    mongoClient.emit(events[i].event, events[i].object1, events[i].object2);
-  }
-}
-
 function transformUrlOptions(_object) {
   let object = Object.assign({ servers: _object.hosts }, _object.options);
   for (let name in object) {
@@ -761,9 +570,7 @@ function transformUrlOptions(_object) {
   return object;
 }

-function translateOptions(options, translationOptions) {
-  translationOptions = Object.assign({}, { createServers: true }, translationOptions);
-
+function translateOptions(options) {
   // If we have a readPreference passed in by the db options
   if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') {
     options.readPreference = new ReadPreference(options.readPreference || options.read_preference);
@@ -782,17 +589,6 @@ function translateOptions(options, translationOptions) {
   // Set the socket and connection timeouts
   if (options.socketTimeoutMS == null) options.socketTimeoutMS = 360000;
   if (options.connectTimeoutMS == null) options.connectTimeoutMS = 10000;
-
-  if (!translationOptions.createServers) {
-    return;
-  }
-
-  // Create server instances
-  return options.servers.map(serverObj => {
-    return serverObj.domain_socket
-      ? new Server(serverObj.domain_socket, 27017, options)
-      : new Server(serverObj.host, serverObj.port, options);
-  });
 }

 module.exports = { validOptions, connect };
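With MONGODB-AWS registered in both VALID_AUTH_MECHANISMS and AUTH_MECHANISM_INTERNAL_MAP, and mechanismProperties now threaded through generateCredentials into MongoCredentials, a client can select the mechanism from the connection string. A hedged sketch (host and credential values are placeholders and must be URL-encoded in practice):

const MongoClient = require('mongodb').MongoClient;

const uri =
  'mongodb://AKIAEXAMPLEKEY:exampleSecret@cluster-host:27017/aws' +
  '?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:exampleToken';

// generateCredentials() maps MONGODB-AWS to the internal 'mongodb-aws' mechanism
// and carries AWS_SESSION_TOKEN along as a mechanism property
MongoClient.connect(uri, (err, client) => {
  if (err) throw err;
  client.close();
});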
diff --git a/lib/operations/create_collection.js b/lib/operations/create_collection.js
index d8ab4aebf8d..31bc83e4d61 100644
--- a/lib/operations/create_collection.js
+++ b/lib/operations/create_collection.js
@@ -1,13 +1,11 @@
 'use strict';

-const Aspect = require('./operation').Aspect;
-const defineAspects = require('./operation').defineAspects;
 const CommandOperation = require('./command');
-const applyWriteConcern = require('../utils').applyWriteConcern;
-const handleCallback = require('../utils').handleCallback;
-const loadCollection = require('../dynamic_loaders').loadCollection;
-const MongoError = require('../core').MongoError;
-const ReadPreference = require('../core').ReadPreference;
+const ReadPreference = require('../read_preference');
+const { Aspect, defineAspects } = require('./operation');
+const { applyWriteConcern, handleCallback } = require('../utils');
+const { loadCollection } = require('../dynamic_loaders');
+const { MongoError } = require('../error');

 // Filter out any write concern options
 const illegalCommandFields = [
diff --git a/lib/operations/create_index.js b/lib/operations/create_index.js
index 98bba71e54a..d1e10aa7bb0 100644
--- a/lib/operations/create_index.js
+++ b/lib/operations/create_index.js
@@ -1,11 +1,9 @@
 'use strict';

-const Aspect = require('./operation').Aspect;
 const CommandOperation = require('./command');
-const defineAspects = require('./operation').defineAspects;
-const handleCallback = require('../utils').handleCallback;
-const MongoError = require('../core').MongoError;
-const parseIndexOptions = require('../utils').parseIndexOptions;
+const { Aspect, defineAspects } = require('./operation');
+const { handleCallback, parseIndexOptions } = require('../utils');
+const { MongoError } = require('../error');

 const keysToOmit = new Set([
   'name',
diff --git a/lib/operations/create_indexes.js b/lib/operations/create_indexes.js
index 46228e8cfbe..f71a5d75e6f 100644
--- a/lib/operations/create_indexes.js
+++ b/lib/operations/create_indexes.js
@@ -1,11 +1,9 @@
 'use strict';

-const Aspect = require('./operation').Aspect;
-const defineAspects = require('./operation').defineAspects;
-const OperationBase = require('./operation').OperationBase;
-const executeCommand = require('./db_ops').executeCommand;
-const MongoError = require('../core').MongoError;
-const ReadPreference = require('../core').ReadPreference;
+const ReadPreference = require('../read_preference');
+const { Aspect, defineAspects, OperationBase } = require('./operation');
+const { executeCommand } = require('./db_ops');
+const { MongoError } = require('../error');

 class CreateIndexesOperation extends OperationBase {
   constructor(collection, indexSpecs, options) {
diff --git a/lib/operations/cursor_ops.js b/lib/operations/cursor_ops.js
index 98df606d8f8..66e728be9cf 100644
--- a/lib/operations/cursor_ops.js
+++ b/lib/operations/cursor_ops.js
@@ -1,10 +1,10 @@
 'use strict';

-const buildCountCommand = require('./collection_ops').buildCountCommand;
-const handleCallback = require('../utils').handleCallback;
-const MongoError = require('../core').MongoError;
+const { buildCountCommand } = require('./collection_ops');
+const { handleCallback } = require('../utils');
+const { MongoError } = require('../error');
+const { CursorState } = require('../cursor/core_cursor');
 const push = Array.prototype.push;
-const CursorState = require('../core/cursor').CursorState;

 /**
  * Get the count of documents for this cursor.
diff --git a/lib/operations/db_ops.js b/lib/operations/db_ops.js
index ada6c9aced4..c1e6abef236 100644
--- a/lib/operations/db_ops.js
+++ b/lib/operations/db_ops.js
@@ -1,22 +1,20 @@
 'use strict';

-const applyWriteConcern = require('../utils').applyWriteConcern;
-const Code = require('../core').BSON.Code;
-const resolveReadPreference = require('../utils').resolveReadPreference;
 const crypto = require('crypto');
-const debugOptions = require('../utils').debugOptions;
-const handleCallback = require('../utils').handleCallback;
-const MongoError = require('../core').MongoError;
-const parseIndexOptions = require('../utils').parseIndexOptions;
-const ReadPreference = require('../core').ReadPreference;
-const toError = require('../utils').toError;
+const ReadPreference = require('../read_preference');
+const { Code } = require('../utils').retrieveBSON();
+const { MongoError } = require('../error');
 const CONSTANTS = require('../constants');
-const MongoDBNamespace = require('../utils').MongoDBNamespace;
-
-const count = require('./collection_ops').count;
-const findOne = require('./collection_ops').findOne;
-const remove = require('./collection_ops').remove;
-const updateOne = require('./collection_ops').updateOne;
+const { count, findOne, remove, updateOne } = require('./collection_ops');
+const {
+  applyWriteConcern,
+  resolveReadPreference,
+  debugOptions,
+  handleCallback,
+  parseIndexOptions,
+  toError,
+  MongoDBNamespace
+} = require('../utils');

 let collection;
 function loadCollection() {
diff --git a/lib/operations/execute_db_admin_command.js b/lib/operations/execute_db_admin_command.js
index d15fc8e660e..01126366fee 100644
--- a/lib/operations/execute_db_admin_command.js
+++ b/lib/operations/execute_db_admin_command.js
@@ -1,9 +1,8 @@
 'use strict';

-const OperationBase = require('./operation').OperationBase;
-const handleCallback = require('../utils').handleCallback;
-const MongoError = require('../core').MongoError;
-const MongoDBNamespace = require('../utils').MongoDBNamespace;
+const { OperationBase } = require('./operation');
+const { handleCallback, MongoDBNamespace } = require('../utils');
+const { MongoError } = require('../error');

 class ExecuteDbAdminCommandOperation extends OperationBase {
   constructor(db, selector, options) {
diff --git a/lib/operations/execute_operation.js b/lib/operations/execute_operation.js
index 80d57857e86..661755bd7dc 100644
--- a/lib/operations/execute_operation.js
+++ b/lib/operations/execute_operation.js
@@ -1,12 +1,9 @@
 'use strict';

-const MongoError = require('../core/error').MongoError;
-const Aspect = require('./operation').Aspect;
-const OperationBase = require('./operation').OperationBase;
-const ReadPreference = require('../core/topologies/read_preference');
-const isRetryableError = require('../core/error').isRetryableError;
-const maxWireVersion = require('../core/utils').maxWireVersion;
-const isUnifiedTopology = require('../core/utils').isUnifiedTopology;
+const ReadPreference = require('../read_preference');
+const { MongoError, isRetryableError } = require('../error');
+const { Aspect, OperationBase } = require('./operation');
+const { maxWireVersion } = require('../utils');

 /**
  * Executes the given operation with provided arguments.
@@ -30,7 +27,7 @@ function executeOperation(topology, operation, callback) {
     throw new TypeError('This method requires a valid operation instance');
   }

-  if (isUnifiedTopology(topology) && topology.shouldCheckForSessionSupport()) {
+  if (topology.shouldCheckForSessionSupport()) {
     return selectServerForSessionSupport(topology, operation, callback);
   }
diff --git a/lib/operations/find_and_modify.js b/lib/operations/find_and_modify.js
index 8965eb4857c..389197e4f68 100644
--- a/lib/operations/find_and_modify.js
+++ b/lib/operations/find_and_modify.js
@@ -1,13 +1,17 @@
 'use strict';

-const OperationBase = require('./operation').OperationBase;
-const applyRetryableWrites = require('../utils').applyRetryableWrites;
-const applyWriteConcern = require('../utils').applyWriteConcern;
-const decorateWithCollation = require('../utils').decorateWithCollation;
-const executeCommand = require('./db_ops').executeCommand;
-const formattedOrderClause = require('../utils').formattedOrderClause;
-const handleCallback = require('../utils').handleCallback;
-const ReadPreference = require('../core').ReadPreference;
+const ReadPreference = require('../read_preference');
+const { OperationBase } = require('./operation');
+const {
+  maxWireVersion,
+  applyRetryableWrites,
+  decorateWithCollation,
+  applyWriteConcern,
+  formattedOrderClause,
+  handleCallback
+} = require('../utils');
+const { executeCommand } = require('./db_ops');
+const { MongoError } = require('../error');

 class FindAndModifyOperation extends OperationBase {
   constructor(collection, query, sort, doc, options) {
@@ -86,6 +90,21 @@ class FindAndModifyOperation extends OperationBase {
       return callback(err, null);
     }

+    if (options.hint) {
+      // TODO: once this method becomes a CommandOperationV2 we will have the server
+      // in place to check.
+      const topology = coll.s.topology;
+      if (maxWireVersion(topology) < 8) {
+        callback(
+          new MongoError('The current topology does not support a hint on findAndModify commands')
+        );
+
+        return;
+      }
+
+      queryObject.hint = options.hint;
+    }
+
     // Execute the command
     executeCommand(coll.s.db, queryObject, options, (err, result) => {
       if (err) return handleCallback(callback, err, null);
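The new hint branch in FindAndModifyOperation fails fast on topologies below wire version 8 (MongoDB 4.2), where the server would otherwise ignore or reject the field. A usage sketch, assuming findOneAndUpdate forwards options.hint to this operation (collection, filter, and index are placeholders):

// an index matching the hint is assumed to exist
collection.findOneAndUpdate(
  { borough: 'Brooklyn' },
  { $set: { verified: true } },
  { hint: { borough: 1 } },
  (err, result) => {
    // against a pre-4.2 server this yields:
    // MongoError: The current topology does not support a hint on findAndModify commands
    if (err) return console.error(err);
    console.log(result.value);
  }
);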
diff --git a/lib/operations/insert_many.js b/lib/operations/insert_many.js
index 460a535d665..76eaee09e24 100644
--- a/lib/operations/insert_many.js
+++ b/lib/operations/insert_many.js
@@ -1,9 +1,9 @@
 'use strict';

-const OperationBase = require('./operation').OperationBase;
+const { OperationBase } = require('./operation');
 const BulkWriteOperation = require('./bulk_write');
-const MongoError = require('../core').MongoError;
-const prepareDocs = require('./common_functions').prepareDocs;
+const { MongoError } = require('../error');
+const { prepareDocs } = require('./common_functions');

 class InsertManyOperation extends OperationBase {
   constructor(collection, docs, options) {
diff --git a/lib/operations/insert_one.js b/lib/operations/insert_one.js
index 5e708801d53..298ac4c981a 100644
--- a/lib/operations/insert_one.js
+++ b/lib/operations/insert_one.js
@@ -1,8 +1,8 @@
 'use strict';

-const MongoError = require('../core').MongoError;
-const OperationBase = require('./operation').OperationBase;
-const insertDocuments = require('./common_functions').insertDocuments;
+const { MongoError } = require('../error');
+const { OperationBase } = require('./operation');
+const { insertDocuments } = require('./common_functions');

 class InsertOneOperation extends OperationBase {
   constructor(collection, doc, options) {
diff --git a/lib/operations/list_collections.js b/lib/operations/list_collections.js
index ee01d31e85c..3f331267978 100644
--- a/lib/operations/list_collections.js
+++ b/lib/operations/list_collections.js
@@ -1,9 +1,8 @@
 'use strict';

 const CommandOperationV2 = require('./command_v2');
-const Aspect = require('./operation').Aspect;
-const defineAspects = require('./operation').defineAspects;
-const maxWireVersion = require('../core/utils').maxWireVersion;
+const { Aspect, defineAspects } = require('./operation');
+const { maxWireVersion } = require('../utils');
 const CONSTANTS = require('../constants');

 const LIST_COLLECTIONS_WIRE_VERSION = 3;
diff --git a/lib/operations/list_indexes.js b/lib/operations/list_indexes.js
index 302a31b7c8c..84953e89fa8 100644
--- a/lib/operations/list_indexes.js
+++ b/lib/operations/list_indexes.js
@@ -1,9 +1,8 @@
 'use strict';

 const CommandOperationV2 = require('./command_v2');
-const Aspect = require('./operation').Aspect;
-const defineAspects = require('./operation').defineAspects;
-const maxWireVersion = require('../core/utils').maxWireVersion;
+const { Aspect, defineAspects } = require('./operation');
+const { maxWireVersion } = require('../utils');

 const LIST_INDEXES_WIRE_VERSION = 3;
diff --git a/lib/operations/map_reduce.js b/lib/operations/map_reduce.js
index 4ea2ac79ae6..8436de88a34 100644
--- a/lib/operations/map_reduce.js
+++ b/lib/operations/map_reduce.js
@@ -1,16 +1,18 @@
 'use strict';

-const applyWriteConcern = require('../utils').applyWriteConcern;
-const Code = require('../core').BSON.Code;
-const decorateWithCollation = require('../utils').decorateWithCollation;
-const decorateWithReadConcern = require('../utils').decorateWithReadConcern;
-const executeCommand = require('./db_ops').executeCommand;
-const handleCallback = require('../utils').handleCallback;
-const isObject = require('../utils').isObject;
-const loadDb = require('../dynamic_loaders').loadDb;
-const OperationBase = require('./operation').OperationBase;
-const resolveReadPreference = require('../utils').resolveReadPreference;
-const toError = require('../utils').toError;
+const { Code } = require('../utils').retrieveBSON();
+const { executeCommand } = require('./db_ops');
+const { loadDb } = require('../dynamic_loaders');
+const { OperationBase } = require('./operation');
+const {
+  applyWriteConcern,
+  decorateWithCollation,
+  decorateWithReadConcern,
+  handleCallback,
+  isObject,
+  resolveReadPreference,
+  toError
+} = require('../utils');

 const exclusionList = [
   'readPreference',
@@ -60,7 +62,7 @@ class MapReduceOperation extends OperationBase {
     let options = this.options;

     const mapCommandHash = {
-      mapreduce: coll.collectionName,
+      mapReduce: coll.collectionName,
       map: map,
       reduce: reduce
     };
diff --git a/lib/operations/options_operation.js b/lib/operations/options_operation.js
index 9a739a51932..204f2738045 100644
--- a/lib/operations/options_operation.js
+++ b/lib/operations/options_operation.js
@@ -1,8 +1,8 @@
 'use strict';

-const OperationBase = require('./operation').OperationBase;
-const handleCallback = require('../utils').handleCallback;
-const MongoError = require('../core').MongoError;
+const { OperationBase } = require('./operation');
+const { handleCallback } = require('../utils');
+const { MongoError } = require('../error');

 class OptionsOperation extends OperationBase {
   constructor(collection, options) {
diff --git a/lib/operations/validate_collection.js b/lib/operations/validate_collection.js
index 133c6c4b566..5c578d430e8 100644
--- a/lib/operations/validate_collection.js
+++ b/lib/operations/validate_collection.js
@@ -14,8 +14,7 @@ class ValidateCollectionOperation extends CommandOperation {
     }

     super(admin.s.db, options, null, command);
-
-    this.collectionName;
+    this.collectionName = collectionName;
   }

   execute(callback) {
diff --git a/lib/core/topologies/read_preference.js b/lib/read_preference.js
similarity index 100%
rename from lib/core/topologies/read_preference.js
rename to lib/read_preference.js
diff --git a/lib/core/sdam/common.js b/lib/sdam/common.js
similarity index 72%
rename from lib/core/sdam/common.js
rename to lib/sdam/common.js
index c99b01c5698..e5602e40440 100644
--- a/lib/core/sdam/common.js
+++ b/lib/sdam/common.js
@@ -29,7 +29,6 @@ const ServerType = {
 };

 const TOPOLOGY_DEFAULTS = {
-  useUnifiedTopology: true,
   localThresholdMS: 15,
   serverSelectionTimeoutMS: 30000,
   heartbeatFrequencyMS: 10000,
@@ -46,6 +45,22 @@ function clearAndRemoveTimerFrom(timer, timers) {
   return timers.delete(timer);
 }

+/**
+ * Shared function to determine clusterTime for a given topology
+ *
+ * @param {*} topology
+ * @param {*} clusterTime
+ */
+function resolveClusterTime(topology, $clusterTime) {
+  if (topology.clusterTime == null) {
+    topology.clusterTime = $clusterTime;
+  } else {
+    if ($clusterTime.clusterTime.greaterThan(topology.clusterTime.clusterTime)) {
+      topology.clusterTime = $clusterTime;
+    }
+  }
+}
+
 module.exports = {
   STATE_CLOSING,
   STATE_CLOSED,
@@ -55,5 +70,6 @@ module.exports = {
   TopologyType,
   ServerType,
   drainTimerQueue,
-  clearAndRemoveTimerFrom
+  clearAndRemoveTimerFrom,
+  resolveClusterTime
 };
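resolveClusterTime only ever advances topology.clusterTime; the greaterThan comparison operates on the BSON Timestamp inside the $clusterTime document. A small illustration of the monotonic behavior (the Timestamp values are invented for the example):

const { Timestamp } = require('bson');
const { resolveClusterTime } = require('./lib/sdam/common');

const topology = { clusterTime: null }; // stand-in object; only this field matters

resolveClusterTime(topology, { clusterTime: new Timestamp(0, 10) }); // adopted: first value seen
resolveClusterTime(topology, { clusterTime: new Timestamp(0, 5) });  // ignored: older
resolveClusterTime(topology, { clusterTime: new Timestamp(0, 12) }); // adopted: newer
// topology.clusterTime.clusterTime is now Timestamp(0, 12)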
diff --git a/lib/core/sdam/events.js b/lib/sdam/events.js
similarity index 100%
rename from lib/core/sdam/events.js
rename to lib/sdam/events.js
diff --git a/lib/core/sdam/monitor.js b/lib/sdam/monitor.js
similarity index 83%
rename from lib/core/sdam/monitor.js
rename to lib/sdam/monitor.js
index a5ddff85a39..c2883b2c51c 100644
--- a/lib/core/sdam/monitor.js
+++ b/lib/sdam/monitor.js
@@ -1,18 +1,16 @@
 'use strict';

-const ServerType = require('./common').ServerType;
-const calculateDurationInMs = require('../utils').calculateDurationInMs;
+const { ServerType, STATE_CLOSED, STATE_CLOSING } = require('./common');
+const { makeStateMachine, calculateDurationInMs } = require('../utils');
 const EventEmitter = require('events');
-const connect = require('../connection/connect');
-const Connection = require('../../cmap/connection').Connection;
-const common = require('./common');
-const makeStateMachine = require('../utils').makeStateMachine;
-const MongoError = require('../error').MongoError;
-
-const sdamEvents = require('./events');
-const ServerHeartbeatStartedEvent = sdamEvents.ServerHeartbeatStartedEvent;
-const ServerHeartbeatSucceededEvent = sdamEvents.ServerHeartbeatSucceededEvent;
-const ServerHeartbeatFailedEvent = sdamEvents.ServerHeartbeatFailedEvent;
+const connect = require('../cmap/connect');
+const { Connection } = require('../cmap/connection');
+const { MongoError } = require('../error');
+const {
+  ServerHeartbeatStartedEvent,
+  ServerHeartbeatSucceededEvent,
+  ServerHeartbeatFailedEvent
+} = require('./events');

 const kServer = Symbol('server');
 const kMonitorId = Symbol('monitorId');
@@ -20,8 +18,6 @@ const kConnection = Symbol('connection');
 const kCancellationToken = Symbol('cancellationToken');
 const kLastCheckTime = Symbol('lastCheckTime');

-const STATE_CLOSED = common.STATE_CLOSED;
-const STATE_CLOSING = common.STATE_CLOSING;
 const STATE_IDLE = 'idle';
 const STATE_MONITORING = 'monitoring';
 const stateTransition = makeStateMachine({
@@ -61,27 +57,29 @@ class Monitor extends EventEmitter {
     // TODO: refactor this to pull it directly from the pool, requires new ConnectionPool integration
     const addressParts = server.description.address.split(':');

-    this.connectOptions = Object.freeze(
-      Object.assign(
-        {
-          id: '',
-          host: addressParts[0],
-          port: parseInt(addressParts[1], 10),
-          bson: server.s.bson,
-          connectionType: Connection
-        },
-        server.s.options,
-        this.options,
-
-        // force BSON serialization options
-        {
-          raw: false,
-          promoteLongs: true,
-          promoteValues: true,
-          promoteBuffers: true
-        }
-      )
+    const connectOptions = Object.assign(
+      {
+        id: '',
+        host: addressParts[0],
+        port: parseInt(addressParts[1], 10),
+        bson: server.s.bson,
+        connectionType: Connection
+      },
+      server.s.options,
+      this.options,
+
+      // force BSON serialization options
+      {
+        raw: false,
+        promoteLongs: true,
+        promoteValues: true,
+        promoteBuffers: true
+      }
     );
+
+    // ensure no authentication is used for monitoring
+    delete connectOptions.credentials;
+    this.connectOptions = Object.freeze(connectOptions);
   }

   connect() {
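Deleting credentials before freezing connectOptions keeps heartbeat connections unauthenticated, which matches the SDAM expectation that a monitor only runs the handshake and should never depend on auth state. The same idea in isolation (names here are illustrative, not driver API):

// derive options for a monitoring socket from regular server options
function makeMonitoringOptions(serverOptions) {
  const options = Object.assign({}, serverOptions);
  delete options.credentials; // heartbeats never authenticate
  return Object.freeze(options);
}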
diff --git a/lib/core/sdam/server.js b/lib/sdam/server.js
similarity index 89%
rename from lib/core/sdam/server.js
rename to lib/sdam/server.js
index b371a035d7b..b44974cd79c 100644
--- a/lib/core/sdam/server.js
+++ b/lib/sdam/server.js
@@ -1,23 +1,34 @@
 'use strict';

 const EventEmitter = require('events');
-const ConnectionPool = require('../../cmap/connection_pool').ConnectionPool;
-const CMAP_EVENT_NAMES = require('../../cmap/events').CMAP_EVENT_NAMES;
-const MongoError = require('../error').MongoError;
-const relayEvents = require('../utils').relayEvents;
-const BSON = require('../connection/utils').retrieveBSON();
-const Logger = require('../connection/logger');
-const ServerDescription = require('./server_description').ServerDescription;
-const ReadPreference = require('../topologies/read_preference');
-const Monitor = require('./monitor').Monitor;
-const MongoNetworkError = require('../error').MongoNetworkError;
-const collationNotSupported = require('../utils').collationNotSupported;
-const debugOptions = require('../connection/utils').debugOptions;
-const isSDAMUnrecoverableError = require('../error').isSDAMUnrecoverableError;
-const isNetworkTimeoutError = require('../error').isNetworkTimeoutError;
-const isNodeShuttingDownError = require('../error').isNodeShuttingDownError;
-const maxWireVersion = require('../utils').maxWireVersion;
-const makeStateMachine = require('../utils').makeStateMachine;
-const common = require('./common');
+const BSON = require('../utils').retrieveBSON();
+const Logger = require('../logger');
+const ReadPreference = require('../read_preference');
+const { ConnectionPool } = require('../cmap/connection_pool');
+const { CMAP_EVENT_NAMES } = require('../cmap/events');
+const { ServerDescription } = require('./server_description');
+const { Monitor } = require('./monitor');
+const {
+  relayEvents,
+  collationNotSupported,
+  debugOptions,
+  makeStateMachine,
+  maxWireVersion
+} = require('../utils');
+const {
+  ServerType,
+  STATE_CLOSED,
+  STATE_CLOSING,
+  STATE_CONNECTING,
+  STATE_CONNECTED
+} = require('./common');
+const {
+  MongoError,
+  MongoNetworkError,
+  isSDAMUnrecoverableError,
+  isNetworkTimeoutError,
+  isRetryableWriteError,
+  isNodeShuttingDownError
+} = require('../error');

 // Used for filtering out fields for logging
 const DEBUG_FIELDS = [
@@ -47,10 +58,6 @@ const DEBUG_FIELDS = [
   'servername'
 ];

-const STATE_CLOSING = common.STATE_CLOSING;
-const STATE_CLOSED = common.STATE_CLOSED;
-const STATE_CONNECTING = common.STATE_CONNECTING;
-const STATE_CONNECTED = common.STATE_CONNECTED;
 const stateTransition = makeStateMachine({
   [STATE_CLOSED]: [STATE_CLOSED, STATE_CONNECTING],
   [STATE_CONNECTING]: [STATE_CONNECTING, STATE_CLOSING, STATE_CONNECTED, STATE_CLOSED],
@@ -414,6 +421,14 @@ Object.defineProperty(Server.prototype, 'clusterTime', {
   }
 });

+function supportsRetryableWrites(server) {
+  return (
+    server.description.maxWireVersion >= 6 &&
+    server.description.logicalSessionTimeoutMinutes &&
+    server.description.type !== ServerType.Standalone
+  );
+}
+
 function calculateRoundTripTime(oldRtt, duration) {
   const alpha = 0.2;
   return alpha * duration + (1 - alpha) * oldRtt;
@@ -470,17 +485,28 @@ function makeOperationHandler(server, options, callback) {
         options.session.serverSession.isDirty = true;
       }

+      if (supportsRetryableWrites(server)) {
+        err.addErrorLabel('RetryableWriteError');
+      }
+
       if (!isNetworkTimeoutError(err)) {
         markServerUnknown(server, err);
         server.s.pool.clear();
       }
-    } else if (isSDAMUnrecoverableError(err)) {
-      if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(err)) {
-        server.s.pool.clear();
+    } else {
+      // if pre-4.4 server, then add error label if it's a retryable write error
+      if (maxWireVersion(server) < 9 && isRetryableWriteError(err)) {
+        err.addErrorLabel('RetryableWriteError');
       }

-      markServerUnknown(server, err);
-      process.nextTick(() => server.requestCheck());
+      if (isSDAMUnrecoverableError(err)) {
+        if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(err)) {
+          server.s.pool.clear();
+        }
+
+        markServerUnknown(server, err);
+        process.nextTick(() => server.requestCheck());
+      }
     }
   }
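makeOperationHandler now tags qualifying failures with the RetryableWriteError label: always for network errors against servers that support retryable writes, and on pre-4.4 (wire version < 9) servers whenever the raw error is retryable. Retry decisions downstream key off the label alone, as topology.js does further below. A sketch of the label API this leans on (MongoError here is the class from this repo's lib/error):

const { MongoError } = require('./lib/error');

function shouldRetry(err) {
  // mirrors shouldRetryOperation() in lib/sdam/topology.js
  return err instanceof MongoError && err.hasErrorLabel('RetryableWriteError');
}

const err = new MongoError('connection reset');
err.addErrorLabel('RetryableWriteError'); // what the server layer does
console.log(shouldRetry(err)); // true -> the write is attempted one more time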
diff --git a/lib/core/sdam/server_description.js b/lib/sdam/server_description.js
similarity index 95%
rename from lib/core/sdam/server_description.js
rename to lib/sdam/server_description.js
index 1a26c0609fc..c485a1b48e4 100644
--- a/lib/core/sdam/server_description.js
+++ b/lib/sdam/server_description.js
@@ -1,9 +1,7 @@
 'use strict';

-const arrayStrictEqual = require('../utils').arrayStrictEqual;
-const tagsStrictEqual = require('../utils').tagsStrictEqual;
-const errorStrictEqual = require('../utils').errorStrictEqual;
-const ServerType = require('./common').ServerType;
+const { arrayStrictEqual, tagsStrictEqual, errorStrictEqual } = require('../utils');
+const { ServerType } = require('./common');

 const WRITABLE_SERVER_TYPES = new Set([
   ServerType.RSPrimary,
diff --git a/lib/core/sdam/server_selection.js b/lib/sdam/server_selection.js
similarity index 97%
rename from lib/core/sdam/server_selection.js
rename to lib/sdam/server_selection.js
index 5647f941c8d..4247006e908 100644
--- a/lib/core/sdam/server_selection.js
+++ b/lib/sdam/server_selection.js
@@ -1,8 +1,7 @@
 'use strict';

-const ServerType = require('./common').ServerType;
-const TopologyType = require('./common').TopologyType;
-const ReadPreference = require('../topologies/read_preference');
-const MongoError = require('../error').MongoError;
+const { ServerType, TopologyType } = require('./common');
+const ReadPreference = require('../read_preference');
+const { MongoError } = require('../error');

 // max staleness constants
 const IDLE_WRITE_PERIOD = 10000;
diff --git a/lib/core/sdam/srv_polling.js b/lib/sdam/srv_polling.js
similarity index 96%
rename from lib/core/sdam/srv_polling.js
rename to lib/sdam/srv_polling.js
index 2c0b6ee2ef3..b63993c6341 100644
--- a/lib/core/sdam/srv_polling.js
+++ b/lib/sdam/srv_polling.js
@@ -1,7 +1,7 @@
 'use strict';

-const Logger = require('../connection/logger');
-const EventEmitter = require('events').EventEmitter;
+const Logger = require('../logger');
+const EventEmitter = require('events');
 const dns = require('dns');

 /**
  * Determines whether a provided address matches the provided parent domain in order
diff --git a/lib/core/sdam/topology.js b/lib/sdam/topology.js
similarity index 87%
rename from lib/core/sdam/topology.js
rename to lib/sdam/topology.js
index ed1df2d3f00..56c07691d10 100644
--- a/lib/core/sdam/topology.js
+++ b/lib/sdam/topology.js
@@ -1,40 +1,44 @@
 'use strict';

 const Denque = require('denque');
 const EventEmitter = require('events');
-const ServerDescription = require('./server_description').ServerDescription;
-const ServerType = require('./common').ServerType;
-const TopologyDescription = require('./topology_description').TopologyDescription;
-const TopologyType = require('./common').TopologyType;
-const events = require('./events');
-const Server = require('./server').Server;
-const relayEvents = require('../utils').relayEvents;
-const ReadPreference = require('../topologies/read_preference');
-const isRetryableWritesSupported = require('../topologies/shared').isRetryableWritesSupported;
-const CoreCursor = require('../cursor').CoreCursor;
-const deprecate = require('util').deprecate;
-const BSON = require('../connection/utils').retrieveBSON();
-const createCompressionInfo = require('../topologies/shared').createCompressionInfo;
-const isRetryableError = require('../error').isRetryableError;
-const ClientSession = require('../sessions').ClientSession;
-const MongoError = require('../error').MongoError;
-const MongoServerSelectionError = require('../error').MongoServerSelectionError;
-const resolveClusterTime = require('../topologies/shared').resolveClusterTime;
-const SrvPoller = require('./srv_polling').SrvPoller;
-const getMMAPError = require('../topologies/shared').getMMAPError;
-const makeStateMachine = require('../utils').makeStateMachine;
-const eachAsync = require('../utils').eachAsync;
-const emitDeprecationWarning = require('../../utils').emitDeprecationWarning;
-const ServerSessionPool = require('../sessions').ServerSessionPool;
-const makeClientMetadata = require('../utils').makeClientMetadata;
-const CMAP_EVENT_NAMES = require('../../cmap/events').CMAP_EVENT_NAMES;
-
-const common = require('./common');
-const drainTimerQueue = common.drainTimerQueue;
-const clearAndRemoveTimerFrom = common.clearAndRemoveTimerFrom;
-
-const serverSelection = require('./server_selection');
-const readPreferenceServerSelector = serverSelection.readPreferenceServerSelector;
-const writableServerSelector = serverSelection.writableServerSelector;
+const BSON = require('../utils').retrieveBSON();
+const ReadPreference = require('../read_preference');
+const { TopologyType, ServerType } = require('./common');
+const { ServerDescription } = require('./server_description');
+const { TopologyDescription } = require('./topology_description');
+const { Server } = require('./server');
+const { CoreCursor } = require('../cursor');
+const { ClientSession, ServerSessionPool } = require('../sessions');
+const { SrvPoller } = require('./srv_polling');
+const { CMAP_EVENT_NAMES } = require('../cmap/events');
+const { MongoError, MongoServerSelectionError } = require('../error');
+const { readPreferenceServerSelector, writableServerSelector } = require('./server_selection');
+const { deprecate } = require('util');
+const {
+  relayEvents,
+  makeStateMachine,
+  eachAsync,
+  makeClientMetadata,
+  emitDeprecationWarning
+} = require('../utils');
+const {
+  resolveClusterTime,
+  drainTimerQueue,
+  clearAndRemoveTimerFrom,
+  STATE_CLOSED,
+  STATE_CLOSING,
+  STATE_CONNECTING,
+  STATE_CONNECTED,
+  TOPOLOGY_DEFAULTS
+} = require('./common');
+const {
+  ServerOpeningEvent,
+  ServerClosedEvent,
+  ServerDescriptionChangedEvent,
+  TopologyOpeningEvent,
+  TopologyClosedEvent,
+  TopologyDescriptionChangedEvent
+} = require('./events');

 // Global state
 let globalTopologyCounter = 0;
@@ -55,10 +59,6 @@ const SERVER_RELAY_EVENTS = [
 // all events we listen to from `Server` instances
 const LOCAL_SERVER_EVENTS = ['connect', 'descriptionReceived', 'close', 'ended'];

-const STATE_CLOSING = common.STATE_CLOSING;
-const STATE_CLOSED = common.STATE_CLOSED;
-const STATE_CONNECTING = common.STATE_CONNECTING;
-const STATE_CONNECTED = common.STATE_CONNECTED;
 const stateTransition = makeStateMachine({
   [STATE_CLOSED]: [STATE_CLOSED, STATE_CONNECTING],
   [STATE_CONNECTING]: [STATE_CONNECTING, STATE_CLOSING, STATE_CONNECTED, STATE_CLOSED],
@@ -73,6 +73,10 @@ const DEPRECATED_OPTIONS = new Set([
   'bufferMaxEntries'
 ]);

+const MMAPv1_RETRY_WRITES_ERROR_CODE = 20;
+const MMAPv1_RETRY_WRITES_ERROR_MESSAGE =
+  'This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.';
+
 const kCancelled = Symbol('cancelled');
 const kWaitQueue = Symbol('waitQueue');
@@ -116,11 +120,11 @@ class Topology extends EventEmitter {
       seedlist = parseStringSeedlist(seedlist);
     }

-    options = Object.assign({}, common.TOPOLOGY_DEFAULTS, options);
+    options = Object.assign({}, TOPOLOGY_DEFAULTS, options);
     options = Object.freeze(
       Object.assign(options, {
         metadata: makeClientMetadata(options),
-        compression: { compressors: createCompressionInfo(options) }
+        compression: { compressors: makeCompressionInfo(options) }
       })
     );
@@ -260,12 +264,12 @@ class Topology extends EventEmitter {
     stateTransition(this, STATE_CONNECTING);

     // emit SDAM monitoring events
-    this.emit('topologyOpening', new events.TopologyOpeningEvent(this.s.id));
+    this.emit('topologyOpening', new TopologyOpeningEvent(this.s.id));

     // emit an event for the topology change
     this.emit(
       'topologyDescriptionChanged',
-      new events.TopologyDescriptionChangedEvent(
+      new TopologyDescriptionChangedEvent(
         this.s.id,
         new TopologyDescription(TopologyType.Unknown), // initial is always Unknown
         this.s.description
@@ -277,7 +281,7 @@ class Topology extends EventEmitter {
     translateReadPreference(options);
     const readPreference = options.readPreference || ReadPreference.primary;

-    this.selectServer(readPreferenceServerSelector(readPreference), options, err => {
+    const connectHandler = err => {
       if (err) {
         this.close();
@@ -295,7 +299,15 @@ class Topology extends EventEmitter {
       this.emit('connect', this);
       if (typeof callback === 'function') callback(err, this);
-    });
+    };
+
+    // TODO: NODE-2471
+    if (this.s.credentials) {
+      this.command('admin.$cmd', { ping: 1 }, { readPreference }, connectHandler);
+      return;
+    }
+
+    this.selectServer(readPreferenceServerSelector(readPreference), options, connectHandler);
   }

   /**
@@ -347,7 +359,7 @@ class Topology extends EventEmitter {
     this.s.servers.clear();

     // emit an event for close
-    this.emit('topologyClosed', new events.TopologyClosedEvent(this.s.id));
+    this.emit('topologyClosed', new TopologyClosedEvent(this.s.id));

     stateTransition(this, STATE_CLOSED);
     this.emit('close');
@@ -519,10 +531,10 @@ class Topology extends EventEmitter {
     }

     // If we already know all the information contained in this updated description, then
-    // we don't need to update anything or emit SDAM events
-    if (previousServerDescription && previousServerDescription.equals(serverDescription)) {
-      return;
-    }
+    // we don't need to emit SDAM events, but still need to update the description, in order
+    // to keep client-tracked attributes like last update time and round trip time up to date
+    const equalDescriptions =
+      previousServerDescription && previousServerDescription.equals(serverDescription);

     // first update the TopologyDescription
     this.s.description = this.s.description.update(serverDescription);
@@ -532,15 +544,17 @@ class Topology extends EventEmitter {
     }

     // emit monitoring events for this change
-    this.emit(
-      'serverDescriptionChanged',
-      new events.ServerDescriptionChangedEvent(
-        this.s.id,
-        serverDescription.address,
-        previousServerDescription,
-        this.s.description.servers.get(serverDescription.address)
-      )
-    );
+    if (!equalDescriptions) {
+      this.emit(
+        'serverDescriptionChanged',
+        new ServerDescriptionChangedEvent(
+          this.s.id,
+          serverDescription.address,
+          previousServerDescription,
+          this.s.description.servers.get(serverDescription.address)
+        )
+      );
+    }

     // update server list from updated descriptions
     updateServers(this, serverDescription);
@@ -550,14 +564,16 @@ class Topology extends EventEmitter {
       processWaitQueue(this);
     }

-    this.emit(
-      'topologyDescriptionChanged',
-      new events.TopologyDescriptionChangedEvent(
-        this.s.id,
-        previousTopologyDescription,
-        this.s.description
-      )
-    );
+    if (!equalDescriptions) {
+      this.emit(
+        'topologyDescriptionChanged',
+        new TopologyDescriptionChangedEvent(
+          this.s.id,
+          previousTopologyDescription,
+          this.s.description
+        )
+      );
+    }
   }

   auth(credentials, callback) {
@@ -660,7 +676,7 @@ class Topology extends EventEmitter {
     const cb = (err, result) => {
       if (!err) return callback(null, result);

-      if (!isRetryableError(err)) {
+      if (!shouldRetryOperation(err)) {
         return callback(err);
       }
@@ -775,10 +791,7 @@ function destroyServer(server, topology, options, callback) {
   LOCAL_SERVER_EVENTS.forEach(event => server.removeAllListeners(event));

   server.destroy(options, () => {
-    topology.emit(
-      'serverClosed',
-      new events.ServerClosedEvent(topology.s.id, server.description.address)
-    );
+    topology.emit('serverClosed', new ServerClosedEvent(topology.s.id, server.description.address));

     SERVER_RELAY_EVENTS.forEach(event => server.removeAllListeners(event));
     if (typeof callback === 'function') {
@@ -811,10 +824,7 @@ function randomSelection(array) {
 }

 function createAndConnectServer(topology, serverDescription, connectDelay) {
-  topology.emit(
-    'serverOpening',
-    new events.ServerOpeningEvent(topology.s.id, serverDescription.address)
-  );
+  topology.emit('serverOpening', new ServerOpeningEvent(topology.s.id, serverDescription.address));

   const server = new Server(serverDescription, topology.s.options, topology);
   relayEvents(server, topology, SERVER_RELAY_EVENTS);
@@ -905,8 +915,22 @@ function executeWriteOperation(args, options, callback) {
   const handler = (err, result) => {
     if (!err) return callback(null, result);

-    if (!isRetryableError(err)) {
-      err = getMMAPError(err);
+    if (!shouldRetryOperation(err)) {
+      if (
+        err.code === MMAPv1_RETRY_WRITES_ERROR_CODE &&
+        err.errmsg.includes('Transaction numbers')
+      ) {
+        callback(
+          new MongoError({
+            message: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
+            errmsg: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
+            originalError: err
+          })
+        );
+
+        return;
+      }
+
       return callback(err);
     }
@@ -933,6 +957,10 @@ function executeWriteOperation(args, options, callback) {
   });
 }

+function shouldRetryOperation(err) {
+  return err instanceof MongoError && err.hasErrorLabel('RetryableWriteError');
+}
+
 function translateReadPreference(options) {
   if (options.readPreference == null) {
     return;
@@ -968,7 +996,7 @@ function srvPollingHandler(topology) {
     topology.emit(
       'topologyDescriptionChanged',
-      new events.TopologyDescriptionChangedEvent(
+      new TopologyDescriptionChangedEvent(
         topology.s.id,
         previousTopologyDescription,
         topology.s.description
@@ -1036,6 +1064,45 @@ function processWaitQueue(topology) {
   }
 }

+function makeCompressionInfo(options) {
+  if (!options.compression || !options.compression.compressors) {
+    return [];
+  }
+
+  // Check that all supplied compressors are valid
+  options.compression.compressors.forEach(function(compressor) {
+    if (compressor !== 'snappy' && compressor !== 'zlib') {
+      throw new Error('compressors must be at least one of snappy or zlib');
+    }
+  });
+
+  return options.compression.compressors;
+}
+
+const RETRYABLE_WIRE_VERSION = 6;
+
+/**
+ * Determines whether the provided topology supports retryable writes
+ *
+ * @param {Mongos|Replset} topology
+ */
+const isRetryableWritesSupported = function(topology) {
+  const maxWireVersion = topology.lastIsMaster().maxWireVersion;
+  if (maxWireVersion < RETRYABLE_WIRE_VERSION) {
+    return false;
+  }
+
+  if (!topology.logicalSessionTimeoutMinutes) {
+    return false;
+  }
+
+  if (topology.description.type === TopologyType.Single) {
+    return false;
+  }
+
+  return true;
+};
+
 /**
  * A server opening SDAM monitoring event
 *
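When a write fails on an MMAPv1 deployment (server error code 20 with a 'Transaction numbers' message), executeWriteOperation now replaces the cryptic server error with the actionable MMAPv1_RETRY_WRITES_ERROR_MESSAGE. The remedy on the application side is exactly what that message says (URI is a placeholder):

const MongoClient = require('mongodb').MongoClient;

// disable retryable writes for MMAPv1 deployments, which cannot support them
MongoClient.connect('mongodb://localhost:27017/test?retryWrites=false', (err, client) => {
  if (err) throw err;
  client.close();
});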
diff --git a/lib/core/sdam/topology_description.js b/lib/sdam/topology_description.js
similarity index 98%
rename from lib/core/sdam/topology_description.js
rename to lib/sdam/topology_description.js
index d1beb22c7cd..b8f4efd7a05 100644
--- a/lib/core/sdam/topology_description.js
+++ b/lib/sdam/topology_description.js
@@ -1,8 +1,7 @@
 'use strict';

-const ServerType = require('./common').ServerType;
-const ServerDescription = require('./server_description').ServerDescription;
-const WIRE_CONSTANTS = require('../wireprotocol/constants');
-const TopologyType = require('./common').TopologyType;
+const { ServerDescription } = require('./server_description');
+const WIRE_CONSTANTS = require('../cmap/wire_protocol/constants');
+const { TopologyType, ServerType } = require('./common');

 // contstants related to compatability checks
 const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION;
diff --git a/lib/core/sessions.js b/lib/sessions.js
similarity index 93%
rename from lib/core/sessions.js
rename to lib/sessions.js
index fcd3384645e..9cf52283374 100644
--- a/lib/core/sessions.js
+++ b/lib/sessions.js
@@ -1,22 +1,19 @@
 'use strict';

-const retrieveBSON = require('./connection/utils').retrieveBSON;
 const EventEmitter = require('events');
-const BSON = retrieveBSON();
+const BSON = require('./utils').retrieveBSON();
 const Binary = BSON.Binary;

-const uuidV4 = require('./utils').uuidV4;
-const MongoError = require('./error').MongoError;
-const isRetryableError = require('././error').isRetryableError;
-const MongoNetworkError = require('./error').MongoNetworkError;
-const MongoWriteConcernError = require('./error').MongoWriteConcernError;
-const Transaction = require('./transactions').Transaction;
-const TxnState = require('./transactions').TxnState;
-const isPromiseLike = require('./utils').isPromiseLike;
-const ReadPreference = require('./topologies/read_preference');
-const isTransactionCommand = require('./transactions').isTransactionCommand;
-const resolveClusterTime = require('./topologies/shared').resolveClusterTime;
-const isSharded = require('./wireprotocol/shared').isSharded;
-const maxWireVersion = require('./utils').maxWireVersion;
+const ReadPreference = require('./read_preference');
+const { isTransactionCommand, TxnState, Transaction } = require('./transactions');
+const { resolveClusterTime } = require('./sdam/common');
+const { isSharded } = require('./cmap/wire_protocol/shared');
+const { isPromiseLike, uuidV4, maxWireVersion } = require('./utils');
+const {
+  MongoError,
+  isRetryableError,
+  MongoNetworkError,
+  MongoWriteConcernError
+} = require('./error');

 const minWireVersionForShardedTransactions = 8;

@@ -374,10 +371,7 @@ function attemptTransaction(session, startTime, fn, options) {
     }

     if (isMaxTimeMSExpiredError(err)) {
-      if (err.errorLabels == null) {
-        err.errorLabels = [];
-      }
-      err.errorLabels.push('UnknownTransactionCommitResult');
+      err.addErrorLabel('UnknownTransactionCommitResult');
     }

     throw err;
@@ -476,17 +470,8 @@ function endTransaction(session, commandName, callback) {
         isRetryableError(e) ||
         isMaxTimeMSExpiredError(e))
     ) {
-      if (e.errorLabels) {
-        const idx = e.errorLabels.indexOf('TransientTransactionError');
-        if (idx !== -1) {
-          e.errorLabels.splice(idx, 1);
-        }
-      } else {
-        e.errorLabels = [];
-      }
-
       if (isUnknownTransactionCommitResult(e)) {
-        e.errorLabels.push('UnknownTransactionCommitResult');
+        e.addErrorLabel('UnknownTransactionCommitResult');

         // per txns spec, must unpin session in this case
         session.transaction.unpinServer();
@@ -673,7 +658,12 @@ function commandSupportsReadConcern(command, options) {
     return true;
   }

-  if (command.mapReduce && options.out && (options.out.inline === 1 || options.out === 'inline')) {
+  if (
+    command.mapReduce &&
+    options &&
+    options.out &&
+    (options.out.inline === 1 || options.out === 'inline')
+  ) {
     return true;
   }

@@ -704,7 +694,7 @@ function applySession(session, command, options) {
   // first apply non-transaction-specific sessions data
   const inTransaction = session.inTransaction() || isTransactionCommand(command);
   const isRetryableWrite = options.willRetryWrite;
-  const shouldApplyReadConcern = commandSupportsReadConcern(command);
+  const shouldApplyReadConcern = commandSupportsReadConcern(command, options);

   if (serverSession.txnNumber && (isRetryableWrite || inTransaction)) {
     command.txnNumber = BSON.Long.fromNumber(serverSession.txnNumber);
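The `addErrorLabel` calls above subsume the hand-rolled `errorLabels` array bookkeeping with the idempotent helper sketched earlier. Separately, the added `options &&` guard makes `commandSupportsReadConcern` safe when no options object reaches it, which presumably matters now that `applySession` forwards its own `options` through. A trimmed restatement of the guarded `mapReduce` branch (not the full driver function; name and shapes follow the hunk above):

```js
// Trimmed restatement of the guarded mapReduce branch from
// commandSupportsReadConcern, for illustration only.
function mapReduceSupportsReadConcern(command, options) {
  return Boolean(
    command.mapReduce &&
      options &&
      options.out &&
      (options.out.inline === 1 || options.out === 'inline')
  );
}

// Without the `options &&` guard the first call would throw a TypeError.
console.log(mapReduceSupportsReadConcern({ mapReduce: 'coll' }, undefined)); // false
console.log(mapReduceSupportsReadConcern({ mapReduce: 'coll' }, { out: { inline: 1 } })); // true
```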
diff --git a/lib/topologies/mongos.js b/lib/topologies/mongos.js
deleted file mode 100644
index 10e66d2151b..00000000000
--- a/lib/topologies/mongos.js
+++ /dev/null
@@ -1,445 +0,0 @@
-'use strict';
-
-const TopologyBase = require('./topology_base').TopologyBase;
-const MongoError = require('../core').MongoError;
-const CMongos = require('../core').Mongos;
-const Cursor = require('../cursor');
-const Server = require('./server');
-const Store = require('./topology_base').Store;
-const MAX_JS_INT = require('../utils').MAX_JS_INT;
-const translateOptions = require('../utils').translateOptions;
-const filterOptions = require('../utils').filterOptions;
-const mergeOptions = require('../utils').mergeOptions;
-
-/**
- * @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is
- * used to construct connections.
- *
- * **Mongos Should not be used, use MongoClient.connect**
- */
-
-// Allowed parameters
-var legalOptionNames = [
-  'ha',
-  'haInterval',
-  'acceptableLatencyMS',
-  'poolSize',
-  'ssl',
-  'checkServerIdentity',
-  'sslValidate',
-  'sslCA',
-  'sslCRL',
-  'sslCert',
-  'ciphers',
-  'ecdhCurve',
-  'sslKey',
-  'sslPass',
-  'socketOptions',
-  'bufferMaxEntries',
-  'store',
-  'auto_reconnect',
-  'autoReconnect',
-  'emitError',
-  'keepAlive',
-  'keepAliveInitialDelay',
-  'noDelay',
-  'connectTimeoutMS',
-  'socketTimeoutMS',
-  'loggerLevel',
-  'logger',
-  'reconnectTries',
-  'appname',
-  'domainsEnabled',
-  'servername',
-  'promoteLongs',
-  'promoteValues',
-  'promoteBuffers',
-  'promiseLibrary',
-  'monitorCommands'
-];
-
-/**
- * Creates a new Mongos instance
- * @class
- * @deprecated
- * @param {Server[]} servers A seedlist of servers participating in the replicaset.
- * @param {object} [options] Optional settings.
- * @param {booelan} [options.ha=true] Turn on high availability monitoring.
- * @param {number} [options.haInterval=5000] Time between each replicaset status check.
- * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
- * @param {number} [options.acceptableLatencyMS=15] Cutoff latency point in MS for MongoS proxy selection
- * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
- * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
- * @param {boolean} [options.sslValidate=false] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
- * @param {array} [options.sslCA] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
- * @param {array} [options.sslCRL] Array of revocation certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
- * @param {string} [options.ciphers] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info.
- * @param {string} [options.ecdhCurve] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info.
- * @param {(Buffer|string)} [options.sslCert] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
- * @param {(Buffer|string)} [options.sslKey] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
- * @param {(Buffer|string)} [options.sslPass] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
- * @param {string} [options.servername] String containing the server name requested via TLS SNI.
- * @param {object} [options.socketOptions] Socket options
- * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
- * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled
- * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket
- * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out
- * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out
- * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit.
- * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology
- * @fires Mongos#connect
- * @fires Mongos#ha
- * @fires Mongos#joined
- * @fires Mongos#left
- * @fires Mongos#fullsetup
- * @fires Mongos#open
- * @fires Mongos#close
- * @fires Mongos#error
- * @fires Mongos#timeout
- * @fires Mongos#parseError
- * @fires Mongos#commandStarted
- * @fires Mongos#commandSucceeded
- * @fires Mongos#commandFailed
- * @property {string} parserType the parser type used (c++ or js).
- * @return {Mongos} a Mongos instance.
- */ -class Mongos extends TopologyBase { - constructor(servers, options) { - super(); - - options = options || {}; - var self = this; - - // Filter the options - options = filterOptions(options, legalOptionNames); - - // Ensure all the instances are Server - for (var i = 0; i < servers.length; i++) { - if (!(servers[i] instanceof Server)) { - throw MongoError.create({ - message: 'all seed list instances must be of the Server type', - driver: true - }); - } - } - - // Stored options - var storeOptions = { - force: false, - bufferMaxEntries: - typeof options.bufferMaxEntries === 'number' ? options.bufferMaxEntries : MAX_JS_INT - }; - - // Shared global store - var store = options.store || new Store(self, storeOptions); - - // Build seed list - var seedlist = servers.map(function(x) { - return { host: x.host, port: x.port }; - }); - - // Get the reconnect option - var reconnect = typeof options.auto_reconnect === 'boolean' ? options.auto_reconnect : true; - reconnect = typeof options.autoReconnect === 'boolean' ? options.autoReconnect : reconnect; - - // Clone options - var clonedOptions = mergeOptions( - {}, - { - disconnectHandler: store, - cursorFactory: Cursor, - reconnect: reconnect, - emitError: typeof options.emitError === 'boolean' ? options.emitError : true, - size: typeof options.poolSize === 'number' ? options.poolSize : 5, - monitorCommands: - typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false - } - ); - - // Translate any SSL options and other connectivity options - clonedOptions = translateOptions(clonedOptions, options); - - // Socket options - var socketOptions = - options.socketOptions && Object.keys(options.socketOptions).length > 0 - ? options.socketOptions - : options; - - // Translate all the options to the core types - clonedOptions = translateOptions(clonedOptions, socketOptions); - - // Internal state - this.s = { - // Create the Mongos - coreTopology: new CMongos(seedlist, clonedOptions), - // Server capabilities - sCapabilities: null, - // Debug turned on - debug: clonedOptions.debug, - // Store option defaults - storeOptions: storeOptions, - // Cloned options - clonedOptions: clonedOptions, - // Actual store of callbacks - store: store, - // Options - options: options, - // Server Session Pool - sessionPool: null, - // Active client sessions - sessions: new Set(), - // Promise library - promiseLibrary: options.promiseLibrary || Promise - }; - } - - // Connect - connect(_options, callback) { - var self = this; - if ('function' === typeof _options) (callback = _options), (_options = {}); - if (_options == null) _options = {}; - if (!('function' === typeof callback)) callback = null; - _options = Object.assign({}, this.s.clonedOptions, _options); - self.s.options = _options; - - // Update bufferMaxEntries - self.s.storeOptions.bufferMaxEntries = - typeof _options.bufferMaxEntries === 'number' ? 
_options.bufferMaxEntries : -1; - - // Error handler - var connectErrorHandler = function() { - return function(err) { - // Remove all event handlers - var events = ['timeout', 'error', 'close']; - events.forEach(function(e) { - self.removeListener(e, connectErrorHandler); - }); - - self.s.coreTopology.removeListener('connect', connectErrorHandler); - // Force close the topology - self.close(true); - - // Try to callback - try { - callback(err); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - }; - - // Actual handler - var errorHandler = function(event) { - return function(err) { - if (event !== 'error') { - self.emit(event, err); - } - }; - }; - - // Error handler - var reconnectHandler = function() { - self.emit('reconnect'); - self.s.store.execute(); - }; - - // relay the event - var relay = function(event) { - return function(t, server) { - self.emit(event, t, server); - }; - }; - - // Connect handler - var connectHandler = function() { - // Clear out all the current handlers left over - var events = ['timeout', 'error', 'close', 'fullsetup']; - events.forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // Set up listeners - self.s.coreTopology.on('timeout', errorHandler('timeout')); - self.s.coreTopology.on('error', errorHandler('error')); - self.s.coreTopology.on('close', errorHandler('close')); - - // Set up serverConfig listeners - self.s.coreTopology.on('fullsetup', function() { - self.emit('fullsetup', self); - }); - - // Emit open event - self.emit('open', null, self); - - // Return correctly - try { - callback(null, self); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - - // Clear out all the current handlers left over - var events = [ - 'timeout', - 'error', - 'close', - 'serverOpening', - 'serverDescriptionChanged', - 'serverHeartbeatStarted', - 'serverHeartbeatSucceeded', - 'serverHeartbeatFailed', - 'serverClosed', - 'topologyOpening', - 'topologyClosed', - 'topologyDescriptionChanged', - 'commandStarted', - 'commandSucceeded', - 'commandFailed' - ]; - events.forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // Set up SDAM listeners - self.s.coreTopology.on('serverDescriptionChanged', relay('serverDescriptionChanged')); - self.s.coreTopology.on('serverHeartbeatStarted', relay('serverHeartbeatStarted')); - self.s.coreTopology.on('serverHeartbeatSucceeded', relay('serverHeartbeatSucceeded')); - self.s.coreTopology.on('serverHeartbeatFailed', relay('serverHeartbeatFailed')); - self.s.coreTopology.on('serverOpening', relay('serverOpening')); - self.s.coreTopology.on('serverClosed', relay('serverClosed')); - self.s.coreTopology.on('topologyOpening', relay('topologyOpening')); - self.s.coreTopology.on('topologyClosed', relay('topologyClosed')); - self.s.coreTopology.on('topologyDescriptionChanged', relay('topologyDescriptionChanged')); - self.s.coreTopology.on('commandStarted', relay('commandStarted')); - self.s.coreTopology.on('commandSucceeded', relay('commandSucceeded')); - self.s.coreTopology.on('commandFailed', relay('commandFailed')); - - // Set up listeners - self.s.coreTopology.once('timeout', connectErrorHandler('timeout')); - self.s.coreTopology.once('error', connectErrorHandler('error')); - self.s.coreTopology.once('close', connectErrorHandler('close')); - self.s.coreTopology.once('connect', connectHandler); - // Join and leave events - self.s.coreTopology.on('joined', relay('joined')); - self.s.coreTopology.on('left', relay('left')); - - // Reconnect 
server - self.s.coreTopology.on('reconnect', reconnectHandler); - - // Start connection - self.s.coreTopology.connect(_options); - } -} - -Object.defineProperty(Mongos.prototype, 'haInterval', { - enumerable: true, - get: function() { - return this.s.coreTopology.s.haInterval; - } -}); - -/** - * A mongos connect event, used to verify that the connection is up and running - * - * @event Mongos#connect - * @type {Mongos} - */ - -/** - * The mongos high availability event - * - * @event Mongos#ha - * @type {function} - * @param {string} type The stage in the high availability event (start|end) - * @param {boolean} data.norepeat This is a repeating high availability process or a single execution only - * @param {number} data.id The id for this high availability request - * @param {object} data.state An object containing the information about the current replicaset - */ - -/** - * A server member left the mongos set - * - * @event Mongos#left - * @type {function} - * @param {string} type The type of member that left (primary|secondary|arbiter) - * @param {Server} server The server object that left - */ - -/** - * A server member joined the mongos set - * - * @event Mongos#joined - * @type {function} - * @param {string} type The type of member that joined (primary|secondary|arbiter) - * @param {Server} server The server object that joined - */ - -/** - * Mongos fullsetup event, emitted when all proxies in the topology have been connected to. - * - * @event Mongos#fullsetup - * @type {Mongos} - */ - -/** - * Mongos open event, emitted when mongos can start processing commands. - * - * @event Mongos#open - * @type {Mongos} - */ - -/** - * Mongos close event - * - * @event Mongos#close - * @type {object} - */ - -/** - * Mongos error event, emitted if there is an error listener. 
- * - * @event Mongos#error - * @type {MongoError} - */ - -/** - * Mongos timeout event - * - * @event Mongos#timeout - * @type {object} - */ - -/** - * Mongos parseError event - * - * @event Mongos#parseError - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event Mongos#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event Mongos#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event Mongos#commandFailed - * @type {object} - */ - -module.exports = Mongos; diff --git a/lib/topologies/native_topology.js b/lib/topologies/native_topology.js index 778ddc9fab7..b171ad6de6d 100644 --- a/lib/topologies/native_topology.js +++ b/lib/topologies/native_topology.js @@ -1,9 +1,9 @@ 'use strict'; -const Topology = require('../core').Topology; -const ServerCapabilities = require('./topology_base').ServerCapabilities; -const Cursor = require('../cursor'); -const translateOptions = require('../utils').translateOptions; +const { Topology } = require('../sdam/topology'); +const { ServerCapabilities } = require('./topology_base'); +const { Cursor } = require('../cursor'); +const { translateOptions } = require('../utils'); class NativeTopology extends Topology { constructor(servers, options) { diff --git a/lib/topologies/replset.js b/lib/topologies/replset.js deleted file mode 100644 index 69df26d19e0..00000000000 --- a/lib/topologies/replset.js +++ /dev/null @@ -1,489 +0,0 @@ -'use strict'; - -const Server = require('./server'); -const Cursor = require('../cursor'); -const MongoError = require('../core').MongoError; -const TopologyBase = require('./topology_base').TopologyBase; -const Store = require('./topology_base').Store; -const CReplSet = require('../core').ReplSet; -const MAX_JS_INT = require('../utils').MAX_JS_INT; -const translateOptions = require('../utils').translateOptions; -const filterOptions = require('../utils').filterOptions; -const mergeOptions = require('../utils').mergeOptions; - -/** - * @fileOverview The **ReplSet** class is a class that represents a Replicaset topology and is - * used to construct connections. - * - * **ReplSet Should not be used, use MongoClient.connect** - */ - -// Allowed parameters -var legalOptionNames = [ - 'ha', - 'haInterval', - 'replicaSet', - 'rs_name', - 'secondaryAcceptableLatencyMS', - 'connectWithNoPrimary', - 'poolSize', - 'ssl', - 'checkServerIdentity', - 'sslValidate', - 'sslCA', - 'sslCert', - 'ciphers', - 'ecdhCurve', - 'sslCRL', - 'sslKey', - 'sslPass', - 'socketOptions', - 'bufferMaxEntries', - 'store', - 'auto_reconnect', - 'autoReconnect', - 'emitError', - 'keepAlive', - 'keepAliveInitialDelay', - 'noDelay', - 'connectTimeoutMS', - 'socketTimeoutMS', - 'strategy', - 'debug', - 'family', - 'loggerLevel', - 'logger', - 'reconnectTries', - 'appname', - 'domainsEnabled', - 'servername', - 'promoteLongs', - 'promoteValues', - 'promoteBuffers', - 'maxStalenessSeconds', - 'promiseLibrary', - 'minSize', - 'monitorCommands' -]; - -/** - * Creates a new ReplSet instance - * @class - * @deprecated - * @param {Server[]} servers A seedlist of servers participating in the replicaset. - * @param {object} [options] Optional settings. - * @param {boolean} [options.ha=true] Turn on high availability monitoring. - * @param {number} [options.haInterval=10000] Time between each replicaset status check. 
- * @param {string} [options.replicaSet] The name of the replicaset to connect to. - * @param {number} [options.secondaryAcceptableLatencyMS=15] Sets the range of servers to pick when using NEAREST (lowest ping ms + the latency fence, ex: range of 1 to (1 + 15) ms) - * @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available - * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons. - * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support) - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {boolean} [options.sslValidate=false] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {array} [options.sslCA] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {array} [options.sslCRL] Array of revocation certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslCert] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher. - * @param {string} [options.ciphers] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {string} [options.ecdhCurve] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {(Buffer|string)} [options.sslKey] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslPass] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {string} [options.servername] String containing the server name requested via TLS SNI. - * @param {object} [options.socketOptions] Socket options - * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. - * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out - * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. 
- * @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed); - * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @fires ReplSet#connect - * @fires ReplSet#ha - * @fires ReplSet#joined - * @fires ReplSet#left - * @fires ReplSet#fullsetup - * @fires ReplSet#open - * @fires ReplSet#close - * @fires ReplSet#error - * @fires ReplSet#timeout - * @fires ReplSet#parseError - * @fires ReplSet#commandStarted - * @fires ReplSet#commandSucceeded - * @fires ReplSet#commandFailed - * @property {string} parserType the parser type used (c++ or js). - * @return {ReplSet} a ReplSet instance. - */ -class ReplSet extends TopologyBase { - constructor(servers, options) { - super(); - - options = options || {}; - var self = this; - - // Filter the options - options = filterOptions(options, legalOptionNames); - - // Ensure all the instances are Server - for (var i = 0; i < servers.length; i++) { - if (!(servers[i] instanceof Server)) { - throw MongoError.create({ - message: 'all seed list instances must be of the Server type', - driver: true - }); - } - } - - // Stored options - var storeOptions = { - force: false, - bufferMaxEntries: - typeof options.bufferMaxEntries === 'number' ? options.bufferMaxEntries : MAX_JS_INT - }; - - // Shared global store - var store = options.store || new Store(self, storeOptions); - - // Build seed list - var seedlist = servers.map(function(x) { - return { host: x.host, port: x.port }; - }); - - // Clone options - var clonedOptions = mergeOptions( - {}, - { - disconnectHandler: store, - cursorFactory: Cursor, - reconnect: false, - emitError: typeof options.emitError === 'boolean' ? options.emitError : true, - size: typeof options.poolSize === 'number' ? options.poolSize : 5, - monitorCommands: - typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false - } - ); - - // Translate any SSL options and other connectivity options - clonedOptions = translateOptions(clonedOptions, options); - - // Socket options - var socketOptions = - options.socketOptions && Object.keys(options.socketOptions).length > 0 - ? 
options.socketOptions - : options; - - // Translate all the options to the core types - clonedOptions = translateOptions(clonedOptions, socketOptions); - - // Create the ReplSet - var coreTopology = new CReplSet(seedlist, clonedOptions); - - // Listen to reconnect event - coreTopology.on('reconnect', function() { - self.emit('reconnect'); - store.execute(); - }); - - // Internal state - this.s = { - // Replicaset - coreTopology: coreTopology, - // Server capabilities - sCapabilities: null, - // Debug tag - tag: options.tag, - // Store options - storeOptions: storeOptions, - // Cloned options - clonedOptions: clonedOptions, - // Store - store: store, - // Options - options: options, - // Server Session Pool - sessionPool: null, - // Active client sessions - sessions: new Set(), - // Promise library - promiseLibrary: options.promiseLibrary || Promise - }; - - // Debug - if (clonedOptions.debug) { - // Last ismaster - Object.defineProperty(this, 'replset', { - enumerable: true, - get: function() { - return coreTopology; - } - }); - } - } - - // Connect method - connect(_options, callback) { - var self = this; - if ('function' === typeof _options) (callback = _options), (_options = {}); - if (_options == null) _options = {}; - if (!('function' === typeof callback)) callback = null; - _options = Object.assign({}, this.s.clonedOptions, _options); - self.s.options = _options; - - // Update bufferMaxEntries - self.s.storeOptions.bufferMaxEntries = - typeof _options.bufferMaxEntries === 'number' ? _options.bufferMaxEntries : -1; - - // Actual handler - var errorHandler = function(event) { - return function(err) { - if (event !== 'error') { - self.emit(event, err); - } - }; - }; - - // Clear out all the current handlers left over - var events = [ - 'timeout', - 'error', - 'close', - 'serverOpening', - 'serverDescriptionChanged', - 'serverHeartbeatStarted', - 'serverHeartbeatSucceeded', - 'serverHeartbeatFailed', - 'serverClosed', - 'topologyOpening', - 'topologyClosed', - 'topologyDescriptionChanged', - 'commandStarted', - 'commandSucceeded', - 'commandFailed', - 'joined', - 'left', - 'ping', - 'ha' - ]; - events.forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // relay the event - var relay = function(event) { - return function(t, server) { - self.emit(event, t, server); - }; - }; - - // Replset events relay - var replsetRelay = function(event) { - return function(t, server) { - self.emit(event, t, server.lastIsMaster(), server); - }; - }; - - // Relay ha - var relayHa = function(t, state) { - self.emit('ha', t, state); - - if (t === 'start') { - self.emit('ha_connect', t, state); - } else if (t === 'end') { - self.emit('ha_ismaster', t, state); - } - }; - - // Set up serverConfig listeners - self.s.coreTopology.on('joined', replsetRelay('joined')); - self.s.coreTopology.on('left', relay('left')); - self.s.coreTopology.on('ping', relay('ping')); - self.s.coreTopology.on('ha', relayHa); - - // Set up SDAM listeners - self.s.coreTopology.on('serverDescriptionChanged', relay('serverDescriptionChanged')); - self.s.coreTopology.on('serverHeartbeatStarted', relay('serverHeartbeatStarted')); - self.s.coreTopology.on('serverHeartbeatSucceeded', relay('serverHeartbeatSucceeded')); - self.s.coreTopology.on('serverHeartbeatFailed', relay('serverHeartbeatFailed')); - self.s.coreTopology.on('serverOpening', relay('serverOpening')); - self.s.coreTopology.on('serverClosed', relay('serverClosed')); - self.s.coreTopology.on('topologyOpening', relay('topologyOpening')); - 
self.s.coreTopology.on('topologyClosed', relay('topologyClosed')); - self.s.coreTopology.on('topologyDescriptionChanged', relay('topologyDescriptionChanged')); - self.s.coreTopology.on('commandStarted', relay('commandStarted')); - self.s.coreTopology.on('commandSucceeded', relay('commandSucceeded')); - self.s.coreTopology.on('commandFailed', relay('commandFailed')); - - self.s.coreTopology.on('fullsetup', function() { - self.emit('fullsetup', self, self); - }); - - self.s.coreTopology.on('all', function() { - self.emit('all', null, self); - }); - - // Connect handler - var connectHandler = function() { - // Set up listeners - self.s.coreTopology.once('timeout', errorHandler('timeout')); - self.s.coreTopology.once('error', errorHandler('error')); - self.s.coreTopology.once('close', errorHandler('close')); - - // Emit open event - self.emit('open', null, self); - - // Return correctly - try { - callback(null, self); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - - // Error handler - var connectErrorHandler = function() { - return function(err) { - ['timeout', 'error', 'close'].forEach(function(e) { - self.s.coreTopology.removeListener(e, connectErrorHandler); - }); - - self.s.coreTopology.removeListener('connect', connectErrorHandler); - // Destroy the replset - self.s.coreTopology.destroy(); - - // Try to callback - try { - callback(err); - } catch (err) { - if (!self.s.coreTopology.isConnected()) - process.nextTick(function() { - throw err; - }); - } - }; - }; - - // Set up listeners - self.s.coreTopology.once('timeout', connectErrorHandler('timeout')); - self.s.coreTopology.once('error', connectErrorHandler('error')); - self.s.coreTopology.once('close', connectErrorHandler('close')); - self.s.coreTopology.once('connect', connectHandler); - - // Start connection - self.s.coreTopology.connect(_options); - } - - close(forceClosed, callback) { - ['timeout', 'error', 'close', 'joined', 'left'].forEach(e => this.removeAllListeners(e)); - super.close(forceClosed, callback); - } -} - -Object.defineProperty(ReplSet.prototype, 'haInterval', { - enumerable: true, - get: function() { - return this.s.coreTopology.s.haInterval; - } -}); - -/** - * A replset connect event, used to verify that the connection is up and running - * - * @event ReplSet#connect - * @type {ReplSet} - */ - -/** - * The replset high availability event - * - * @event ReplSet#ha - * @type {function} - * @param {string} type The stage in the high availability event (start|end) - * @param {boolean} data.norepeat This is a repeating high availability process or a single execution only - * @param {number} data.id The id for this high availability request - * @param {object} data.state An object containing the information about the current replicaset - */ - -/** - * A server member left the replicaset - * - * @event ReplSet#left - * @type {function} - * @param {string} type The type of member that left (primary|secondary|arbiter) - * @param {Server} server The server object that left - */ - -/** - * A server member joined the replicaset - * - * @event ReplSet#joined - * @type {function} - * @param {string} type The type of member that joined (primary|secondary|arbiter) - * @param {Server} server The server object that joined - */ - -/** - * ReplSet open event, emitted when replicaset can start processing commands. - * - * @event ReplSet#open - * @type {Replset} - */ - -/** - * ReplSet fullsetup event, emitted when all servers in the topology have been connected to. 
- * - * @event ReplSet#fullsetup - * @type {Replset} - */ - -/** - * ReplSet close event - * - * @event ReplSet#close - * @type {object} - */ - -/** - * ReplSet error event, emitted if there is an error listener. - * - * @event ReplSet#error - * @type {MongoError} - */ - -/** - * ReplSet timeout event - * - * @event ReplSet#timeout - * @type {object} - */ - -/** - * ReplSet parseError event - * - * @event ReplSet#parseError - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event ReplSet#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event ReplSet#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event ReplSet#commandFailed - * @type {object} - */ - -module.exports = ReplSet; diff --git a/lib/topologies/server.js b/lib/topologies/server.js deleted file mode 100644 index 3079cb9953e..00000000000 --- a/lib/topologies/server.js +++ /dev/null @@ -1,448 +0,0 @@ -'use strict'; - -const CServer = require('../core').Server; -const Cursor = require('../cursor'); -const TopologyBase = require('./topology_base').TopologyBase; -const Store = require('./topology_base').Store; -const MongoError = require('../core').MongoError; -const MAX_JS_INT = require('../utils').MAX_JS_INT; -const translateOptions = require('../utils').translateOptions; -const filterOptions = require('../utils').filterOptions; -const mergeOptions = require('../utils').mergeOptions; - -/** - * @fileOverview The **Server** class is a class that represents a single server topology and is - * used to construct connections. - * - * **Server Should not be used, use MongoClient.connect** - */ - -// Allowed parameters -var legalOptionNames = [ - 'ha', - 'haInterval', - 'acceptableLatencyMS', - 'poolSize', - 'ssl', - 'checkServerIdentity', - 'sslValidate', - 'sslCA', - 'sslCRL', - 'sslCert', - 'ciphers', - 'ecdhCurve', - 'sslKey', - 'sslPass', - 'socketOptions', - 'bufferMaxEntries', - 'store', - 'auto_reconnect', - 'autoReconnect', - 'emitError', - 'keepAlive', - 'keepAliveInitialDelay', - 'noDelay', - 'connectTimeoutMS', - 'socketTimeoutMS', - 'family', - 'loggerLevel', - 'logger', - 'reconnectTries', - 'reconnectInterval', - 'monitoring', - 'appname', - 'domainsEnabled', - 'servername', - 'promoteLongs', - 'promoteValues', - 'promoteBuffers', - 'compression', - 'promiseLibrary', - 'monitorCommands' -]; - -/** - * Creates a new Server instance - * @class - * @deprecated - * @param {string} host The host for the server, can be either an IP4, IP6 or domain socket style host. - * @param {number} [port] The server port if IP4. - * @param {object} [options] Optional settings. - * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons. - * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support) - * @param {boolean} [options.sslValidate=false] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. 
- * @param {array} [options.sslCA] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {array} [options.sslCRL] Array of revocation certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslCert] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {string} [options.ciphers] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {string} [options.ecdhCurve] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {(Buffer|string)} [options.sslKey] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslPass] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {string} [options.servername] String containing the server name requested via TLS SNI. - * @param {object} [options.socketOptions] Socket options - * @param {boolean} [options.socketOptions.autoReconnect=true] Reconnect on error. - * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. - * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out - * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out - * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times - * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries - * @param {boolean} [options.monitoring=true] Triggers the server instance to call ismaster - * @param {number} [options.haInterval=10000] The interval of calling ismaster when monitoring is enabled. - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. - * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @fires Server#connect - * @fires Server#close - * @fires Server#error - * @fires Server#timeout - * @fires Server#parseError - * @fires Server#reconnect - * @fires Server#commandStarted - * @fires Server#commandSucceeded - * @fires Server#commandFailed - * @property {string} parserType the parser type used (c++ or js). - * @return {Server} a Server instance. - */ -class Server extends TopologyBase { - constructor(host, port, options) { - super(); - var self = this; - - // Filter the options - options = filterOptions(options, legalOptionNames); - - // Promise library - const promiseLibrary = options.promiseLibrary; - - // Stored options - var storeOptions = { - force: false, - bufferMaxEntries: - typeof options.bufferMaxEntries === 'number' ? 
options.bufferMaxEntries : MAX_JS_INT - }; - - // Shared global store - var store = options.store || new Store(self, storeOptions); - - // Detect if we have a socket connection - if (host.indexOf('/') !== -1) { - if (port != null && typeof port === 'object') { - options = port; - port = null; - } - } else if (port == null) { - throw MongoError.create({ message: 'port must be specified', driver: true }); - } - - // Get the reconnect option - var reconnect = typeof options.auto_reconnect === 'boolean' ? options.auto_reconnect : true; - reconnect = typeof options.autoReconnect === 'boolean' ? options.autoReconnect : reconnect; - - // Clone options - var clonedOptions = mergeOptions( - {}, - { - host: host, - port: port, - disconnectHandler: store, - cursorFactory: Cursor, - reconnect: reconnect, - emitError: typeof options.emitError === 'boolean' ? options.emitError : true, - size: typeof options.poolSize === 'number' ? options.poolSize : 5, - monitorCommands: - typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false - } - ); - - // Translate any SSL options and other connectivity options - clonedOptions = translateOptions(clonedOptions, options); - - // Socket options - var socketOptions = - options.socketOptions && Object.keys(options.socketOptions).length > 0 - ? options.socketOptions - : options; - - // Translate all the options to the core types - clonedOptions = translateOptions(clonedOptions, socketOptions); - - // Define the internal properties - this.s = { - // Create an instance of a server instance from core module - coreTopology: new CServer(clonedOptions), - // Server capabilities - sCapabilities: null, - // Cloned options - clonedOptions: clonedOptions, - // Reconnect - reconnect: clonedOptions.reconnect, - // Emit error - emitError: clonedOptions.emitError, - // Pool size - poolSize: clonedOptions.size, - // Store Options - storeOptions: storeOptions, - // Store - store: store, - // Host - host: host, - // Port - port: port, - // Options - options: options, - // Server Session Pool - sessionPool: null, - // Active client sessions - sessions: new Set(), - // Promise library - promiseLibrary: promiseLibrary || Promise - }; - } - - // Connect - connect(_options, callback) { - var self = this; - if ('function' === typeof _options) (callback = _options), (_options = {}); - if (_options == null) _options = this.s.clonedOptions; - if (!('function' === typeof callback)) callback = null; - _options = Object.assign({}, this.s.clonedOptions, _options); - self.s.options = _options; - - // Update bufferMaxEntries - self.s.storeOptions.bufferMaxEntries = - typeof _options.bufferMaxEntries === 'number' ? 
_options.bufferMaxEntries : -1; - - // Error handler - var connectErrorHandler = function() { - return function(err) { - // Remove all event handlers - var events = ['timeout', 'error', 'close']; - events.forEach(function(e) { - self.s.coreTopology.removeListener(e, connectHandlers[e]); - }); - - self.s.coreTopology.removeListener('connect', connectErrorHandler); - - // Try to callback - try { - callback(err); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - }; - - // Actual handler - var errorHandler = function(event) { - return function(err) { - if (event !== 'error') { - self.emit(event, err); - } - }; - }; - - // Error handler - var reconnectHandler = function() { - self.emit('reconnect', self); - self.s.store.execute(); - }; - - // Reconnect failed - var reconnectFailedHandler = function(err) { - self.emit('reconnectFailed', err); - self.s.store.flush(err); - }; - - // Destroy called on topology, perform cleanup - var destroyHandler = function() { - self.s.store.flush(); - }; - - // relay the event - var relay = function(event) { - return function(t, server) { - self.emit(event, t, server); - }; - }; - - // Connect handler - var connectHandler = function() { - // Clear out all the current handlers left over - ['timeout', 'error', 'close', 'destroy'].forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // Set up listeners - self.s.coreTopology.on('timeout', errorHandler('timeout')); - self.s.coreTopology.once('error', errorHandler('error')); - self.s.coreTopology.on('close', errorHandler('close')); - // Only called on destroy - self.s.coreTopology.on('destroy', destroyHandler); - - // Emit open event - self.emit('open', null, self); - - // Return correctly - try { - callback(null, self); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - - // Set up listeners - var connectHandlers = { - timeout: connectErrorHandler('timeout'), - error: connectErrorHandler('error'), - close: connectErrorHandler('close') - }; - - // Clear out all the current handlers left over - [ - 'timeout', - 'error', - 'close', - 'serverOpening', - 'serverDescriptionChanged', - 'serverHeartbeatStarted', - 'serverHeartbeatSucceeded', - 'serverHeartbeatFailed', - 'serverClosed', - 'topologyOpening', - 'topologyClosed', - 'topologyDescriptionChanged', - 'commandStarted', - 'commandSucceeded', - 'commandFailed' - ].forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // Add the event handlers - self.s.coreTopology.once('timeout', connectHandlers.timeout); - self.s.coreTopology.once('error', connectHandlers.error); - self.s.coreTopology.once('close', connectHandlers.close); - self.s.coreTopology.once('connect', connectHandler); - // Reconnect server - self.s.coreTopology.on('reconnect', reconnectHandler); - self.s.coreTopology.on('reconnectFailed', reconnectFailedHandler); - - // Set up SDAM listeners - self.s.coreTopology.on('serverDescriptionChanged', relay('serverDescriptionChanged')); - self.s.coreTopology.on('serverHeartbeatStarted', relay('serverHeartbeatStarted')); - self.s.coreTopology.on('serverHeartbeatSucceeded', relay('serverHeartbeatSucceeded')); - self.s.coreTopology.on('serverHeartbeatFailed', relay('serverHeartbeatFailed')); - self.s.coreTopology.on('serverOpening', relay('serverOpening')); - self.s.coreTopology.on('serverClosed', relay('serverClosed')); - self.s.coreTopology.on('topologyOpening', relay('topologyOpening')); - self.s.coreTopology.on('topologyClosed', relay('topologyClosed')); - 
self.s.coreTopology.on('topologyDescriptionChanged', relay('topologyDescriptionChanged')); - self.s.coreTopology.on('commandStarted', relay('commandStarted')); - self.s.coreTopology.on('commandSucceeded', relay('commandSucceeded')); - self.s.coreTopology.on('commandFailed', relay('commandFailed')); - self.s.coreTopology.on('attemptReconnect', relay('attemptReconnect')); - self.s.coreTopology.on('monitoring', relay('monitoring')); - - // Start connection - self.s.coreTopology.connect(_options); - } -} - -Object.defineProperty(Server.prototype, 'poolSize', { - enumerable: true, - get: function() { - return this.s.coreTopology.connections().length; - } -}); - -Object.defineProperty(Server.prototype, 'autoReconnect', { - enumerable: true, - get: function() { - return this.s.reconnect; - } -}); - -Object.defineProperty(Server.prototype, 'host', { - enumerable: true, - get: function() { - return this.s.host; - } -}); - -Object.defineProperty(Server.prototype, 'port', { - enumerable: true, - get: function() { - return this.s.port; - } -}); - -/** - * Server connect event - * - * @event Server#connect - * @type {object} - */ - -/** - * Server close event - * - * @event Server#close - * @type {object} - */ - -/** - * Server reconnect event - * - * @event Server#reconnect - * @type {object} - */ - -/** - * Server error event - * - * @event Server#error - * @type {MongoError} - */ - -/** - * Server timeout event - * - * @event Server#timeout - * @type {object} - */ - -/** - * Server parseError event - * - * @event Server#parseError - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event Server#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event Server#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event Server#commandFailed - * @type {object} - */ - -module.exports = Server; diff --git a/lib/topologies/topology_base.js b/lib/topologies/topology_base.js index 967b4cd4627..c6619150d8d 100644 --- a/lib/topologies/topology_base.js +++ b/lib/topologies/topology_base.js @@ -1,10 +1,10 @@ 'use strict'; -const EventEmitter = require('events'), - MongoError = require('../core').MongoError, - f = require('util').format, - translateReadPreference = require('../utils').translateReadPreference, - ClientSession = require('../core').Sessions.ClientSession; +const EventEmitter = require('events'); +const { MongoError } = require('../error'); +const { format: f } = require('util'); +const { translateReadPreference } = require('../utils'); +const { ClientSession } = require('../sessions'); // The store of ops var Store = function(topology, storeOptions) { diff --git a/lib/core/transactions.js b/lib/transactions.js similarity index 95% rename from lib/core/transactions.js rename to lib/transactions.js index d0b0b73d6e7..0ad05f87716 100644 --- a/lib/core/transactions.js +++ b/lib/transactions.js @@ -1,8 +1,8 @@ 'use strict'; -const MongoError = require('./error').MongoError; -const ReadPreference = require('./topologies/read_preference'); -const ReadConcern = require('../read_concern'); -const WriteConcern = require('../write_concern'); +const ReadPreference = require('./read_preference'); +const { MongoError } = require('./error'); +const ReadConcern = require('./read_concern'); +const WriteConcern = require('./write_concern'); let TxnState; let 
stateMachine; diff --git a/lib/url_parser.js b/lib/url_parser.js deleted file mode 100644 index c0f10b467d4..00000000000 --- a/lib/url_parser.js +++ /dev/null @@ -1,623 +0,0 @@ -'use strict'; - -const ReadPreference = require('./core').ReadPreference, - parser = require('url'), - f = require('util').format, - Logger = require('./core').Logger, - dns = require('dns'); -const ReadConcern = require('./read_concern'); - -module.exports = function(url, options, callback) { - if (typeof options === 'function') (callback = options), (options = {}); - options = options || {}; - - let result; - try { - result = parser.parse(url, true); - } catch (e) { - return callback(new Error('URL malformed, cannot be parsed')); - } - - if (result.protocol !== 'mongodb:' && result.protocol !== 'mongodb+srv:') { - return callback(new Error('Invalid schema, expected `mongodb` or `mongodb+srv`')); - } - - if (result.protocol === 'mongodb:') { - return parseHandler(url, options, callback); - } - - // Otherwise parse this as an SRV record - if (result.hostname.split('.').length < 3) { - return callback(new Error('URI does not have hostname, domain name and tld')); - } - - result.domainLength = result.hostname.split('.').length; - - if (result.pathname && result.pathname.match(',')) { - return callback(new Error('Invalid URI, cannot contain multiple hostnames')); - } - - if (result.port) { - return callback(new Error('Ports not accepted with `mongodb+srv` URIs')); - } - - let srvAddress = `_mongodb._tcp.${result.host}`; - dns.resolveSrv(srvAddress, function(err, addresses) { - if (err) return callback(err); - - if (addresses.length === 0) { - return callback(new Error('No addresses found at host')); - } - - for (let i = 0; i < addresses.length; i++) { - if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) { - return callback(new Error('Server record does not share hostname with parent URI')); - } - } - - let base = result.auth ? 
`mongodb://${result.auth}@` : `mongodb://`; - let connectionStrings = addresses.map(function(address, i) { - if (i === 0) return `${base}${address.name}:${address.port}`; - else return `${address.name}:${address.port}`; - }); - - let connectionString = connectionStrings.join(',') + '/'; - let connectionStringOptions = []; - - // Add the default database if needed - if (result.path) { - let defaultDb = result.path.slice(1); - if (defaultDb.indexOf('?') !== -1) { - defaultDb = defaultDb.slice(0, defaultDb.indexOf('?')); - } - - connectionString += defaultDb; - } - - // Default to SSL true - if (!options.ssl && !result.search) { - connectionStringOptions.push('ssl=true'); - } else if (!options.ssl && result.search && !result.search.match('ssl')) { - connectionStringOptions.push('ssl=true'); - } - - // Keep original uri options - if (result.search) { - connectionStringOptions.push(result.search.replace('?', '')); - } - - dns.resolveTxt(result.host, function(err, record) { - if (err && err.code !== 'ENODATA') return callback(err); - if (err && err.code === 'ENODATA') record = null; - - if (record) { - if (record.length > 1) { - return callback(new Error('Multiple text records not allowed')); - } - - record = record[0]; - if (record.length > 1) record = record.join(''); - else record = record[0]; - - if (!record.includes('authSource') && !record.includes('replicaSet')) { - return callback(new Error('Text record must only set `authSource` or `replicaSet`')); - } - - connectionStringOptions.push(record); - } - - // Add any options to the connection string - if (connectionStringOptions.length) { - connectionString += `?${connectionStringOptions.join('&')}`; - } - - parseHandler(connectionString, options, callback); - }); - }); -}; - -function matchesParentDomain(srvAddress, parentDomain) { - let regex = /^.*?\./; - let srv = `.${srvAddress.replace(regex, '')}`; - let parent = `.${parentDomain.replace(regex, '')}`; - if (srv.endsWith(parent)) return true; - else return false; -} - -function parseHandler(address, options, callback) { - let result, err; - try { - result = parseConnectionString(address, options); - } catch (e) { - err = e; - } - - return err ? 
callback(err, null) : callback(null, result); -} - -function parseConnectionString(url, options) { - // Variables - let connection_part = ''; - let auth_part = ''; - let query_string_part = ''; - let dbName = 'admin'; - - // Url parser result - let result = parser.parse(url, true); - if ((result.hostname == null || result.hostname === '') && url.indexOf('.sock') === -1) { - throw new Error('No hostname or hostnames provided in connection string'); - } - - if (result.port === '0') { - throw new Error('Invalid port (zero) with hostname'); - } - - if (!isNaN(parseInt(result.port, 10)) && parseInt(result.port, 10) > 65535) { - throw new Error('Invalid port (larger than 65535) with hostname'); - } - - if ( - result.path && - result.path.length > 0 && - result.path[0] !== '/' && - url.indexOf('.sock') === -1 - ) { - throw new Error('Missing delimiting slash between hosts and options'); - } - - if (result.query) { - for (let name in result.query) { - if (name.indexOf('::') !== -1) { - throw new Error('Double colon in host identifier'); - } - - if (result.query[name] === '') { - throw new Error('Query parameter ' + name + ' is an incomplete value pair'); - } - } - } - - if (result.auth) { - let parts = result.auth.split(':'); - if (url.indexOf(result.auth) !== -1 && parts.length > 2) { - throw new Error('Username with password containing an unescaped colon'); - } - - if (url.indexOf(result.auth) !== -1 && result.auth.indexOf('@') !== -1) { - throw new Error('Username containing an unescaped at-sign'); - } - } - - // Remove query - let clean = url.split('?').shift(); - - // Extract the list of hosts - let strings = clean.split(','); - let hosts = []; - - for (let i = 0; i < strings.length; i++) { - let hostString = strings[i]; - - if (hostString.indexOf('mongodb') !== -1) { - if (hostString.indexOf('@') !== -1) { - hosts.push(hostString.split('@').pop()); - } else { - hosts.push(hostString.substr('mongodb://'.length)); - } - } else if (hostString.indexOf('/') !== -1) { - hosts.push(hostString.split('/').shift()); - } else if (hostString.indexOf('/') === -1) { - hosts.push(hostString.trim()); - } - } - - for (let i = 0; i < hosts.length; i++) { - let r = parser.parse(f('mongodb://%s', hosts[i].trim())); - if (r.path && r.path.indexOf('.sock') !== -1) continue; - if (r.path && r.path.indexOf(':') !== -1) { - // Not connecting to a socket so check for an extra slash in the hostname. - // Using String#split as perf is better than match. - if (r.path.split('/').length > 1 && r.path.indexOf('::') === -1) { - throw new Error('Slash in host identifier'); - } else { - throw new Error('Double colon in host identifier'); - } - } - } - - // If we have a ? 
mark cut the query elements off - if (url.indexOf('?') !== -1) { - query_string_part = url.substr(url.indexOf('?') + 1); - connection_part = url.substring('mongodb://'.length, url.indexOf('?')); - } else { - connection_part = url.substring('mongodb://'.length); - } - - // Check if we have auth params - if (connection_part.indexOf('@') !== -1) { - auth_part = connection_part.split('@')[0]; - connection_part = connection_part.split('@')[1]; - } - - // Check there is not more than one unescaped slash - if (connection_part.split('/').length > 2) { - throw new Error( - "Unsupported host '" + - connection_part.split('?')[0] + - "', hosts must be URL encoded and contain at most one unencoded slash" - ); - } - - // Check if the connection string has a db - if (connection_part.indexOf('.sock') !== -1) { - if (connection_part.indexOf('.sock/') !== -1) { - dbName = connection_part.split('.sock/')[1]; - // Check if multiple database names provided, or just an illegal trailing backslash - if (dbName.indexOf('/') !== -1) { - if (dbName.split('/').length === 2 && dbName.split('/')[1].length === 0) { - throw new Error('Illegal trailing backslash after database name'); - } - throw new Error('More than 1 database name in URL'); - } - connection_part = connection_part.split( - '/', - connection_part.indexOf('.sock') + '.sock'.length - ); - } - } else if (connection_part.indexOf('/') !== -1) { - // Check if multiple database names provided, or just an illegal trailing backslash - if (connection_part.split('/').length > 2) { - if (connection_part.split('/')[2].length === 0) { - throw new Error('Illegal trailing backslash after database name'); - } - throw new Error('More than 1 database name in URL'); - } - dbName = connection_part.split('/')[1]; - connection_part = connection_part.split('/')[0]; - } - - // URI decode the host information - connection_part = decodeURIComponent(connection_part); - - // Result object - let object = {}; - - // Pick apart the authentication part of the string - let authPart = auth_part || ''; - let auth = authPart.split(':', 2); - - // Decode the authentication URI components and verify integrity - let user = decodeURIComponent(auth[0]); - if (auth[0] !== encodeURIComponent(user)) { - throw new Error('Username contains an illegal unescaped character'); - } - auth[0] = user; - - if (auth[1]) { - let pass = decodeURIComponent(auth[1]); - if (auth[1] !== encodeURIComponent(pass)) { - throw new Error('Password contains an illegal unescaped character'); - } - auth[1] = pass; - } - - // Add auth to final object if we have 2 elements - if (auth.length === 2) object.auth = { user: auth[0], password: auth[1] }; - // if user provided auth options, use that - if (options && options.auth != null) object.auth = options.auth; - - // Variables used for temporary storage - let hostPart; - let urlOptions; - let servers; - let compression; - let serverOptions = { socketOptions: {} }; - let dbOptions = { read_preference_tags: [] }; - let replSetServersOptions = { socketOptions: {} }; - let mongosOptions = { socketOptions: {} }; - // Add server options to final object - object.server_options = serverOptions; - object.db_options = dbOptions; - object.rs_options = replSetServersOptions; - object.mongos_options = mongosOptions; - - // Let's check if we are using a domain socket - if (url.match(/\.sock/)) { - // Split out the socket part - let domainSocket = url.substring( - url.indexOf('mongodb://') + 'mongodb://'.length, - url.lastIndexOf('.sock') + '.sock'.length - ); - // Clean out any auth stuff if 
any - if (domainSocket.indexOf('@') !== -1) domainSocket = domainSocket.split('@')[1]; - domainSocket = decodeURIComponent(domainSocket); - servers = [{ domain_socket: domainSocket }]; - } else { - // Split up the db - hostPart = connection_part; - // Deduplicate servers - let deduplicatedServers = {}; - - // Parse all server results - servers = hostPart - .split(',') - .map(function(h) { - let _host, _port, ipv6match; - //check if it matches [IPv6]:port, where the port number is optional - if ((ipv6match = /\[([^\]]+)\](?::(.+))?/.exec(h))) { - _host = ipv6match[1]; - _port = parseInt(ipv6match[2], 10) || 27017; - } else { - //otherwise assume it's IPv4, or plain hostname - let hostPort = h.split(':', 2); - _host = hostPort[0] || 'localhost'; - _port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017; - // Check for localhost?safe=true style case - if (_host.indexOf('?') !== -1) _host = _host.split(/\?/)[0]; - } - - // No entry returned for duplicate server - if (deduplicatedServers[_host + '_' + _port]) return null; - deduplicatedServers[_host + '_' + _port] = 1; - - // Return the mapped object - return { host: _host, port: _port }; - }) - .filter(function(x) { - return x != null; - }); - } - - // Get the db name - object.dbName = dbName || 'admin'; - // Split up all the options - urlOptions = (query_string_part || '').split(/[&;]/); - // Ugh, we have to figure out which options go to which constructor manually. - urlOptions.forEach(function(opt) { - if (!opt) return; - var splitOpt = opt.split('='), - name = splitOpt[0], - value = splitOpt[1]; - - // Options implementations - switch (name) { - case 'slaveOk': - case 'slave_ok': - serverOptions.slave_ok = value === 'true'; - dbOptions.slaveOk = value === 'true'; - break; - case 'maxPoolSize': - case 'poolSize': - serverOptions.poolSize = parseInt(value, 10); - replSetServersOptions.poolSize = parseInt(value, 10); - break; - case 'appname': - object.appname = decodeURIComponent(value); - break; - case 'autoReconnect': - case 'auto_reconnect': - serverOptions.auto_reconnect = value === 'true'; - break; - case 'ssl': - if (value === 'prefer') { - serverOptions.ssl = value; - replSetServersOptions.ssl = value; - mongosOptions.ssl = value; - break; - } - serverOptions.ssl = value === 'true'; - replSetServersOptions.ssl = value === 'true'; - mongosOptions.ssl = value === 'true'; - break; - case 'sslValidate': - serverOptions.sslValidate = value === 'true'; - replSetServersOptions.sslValidate = value === 'true'; - mongosOptions.sslValidate = value === 'true'; - break; - case 'replicaSet': - case 'rs_name': - replSetServersOptions.rs_name = value; - break; - case 'reconnectWait': - replSetServersOptions.reconnectWait = parseInt(value, 10); - break; - case 'retries': - replSetServersOptions.retries = parseInt(value, 10); - break; - case 'readSecondary': - case 'read_secondary': - replSetServersOptions.read_secondary = value === 'true'; - break; - case 'fsync': - dbOptions.fsync = value === 'true'; - break; - case 'journal': - dbOptions.j = value === 'true'; - break; - case 'safe': - dbOptions.safe = value === 'true'; - break; - case 'nativeParser': - case 'native_parser': - dbOptions.native_parser = value === 'true'; - break; - case 'readConcernLevel': - dbOptions.readConcern = new ReadConcern(value); - break; - case 'connectTimeoutMS': - serverOptions.socketOptions.connectTimeoutMS = parseInt(value, 10); - replSetServersOptions.socketOptions.connectTimeoutMS = parseInt(value, 10); - mongosOptions.socketOptions.connectTimeoutMS = 
parseInt(value, 10); - break; - case 'socketTimeoutMS': - serverOptions.socketOptions.socketTimeoutMS = parseInt(value, 10); - replSetServersOptions.socketOptions.socketTimeoutMS = parseInt(value, 10); - mongosOptions.socketOptions.socketTimeoutMS = parseInt(value, 10); - break; - case 'w': - dbOptions.w = parseInt(value, 10); - if (isNaN(dbOptions.w)) dbOptions.w = value; - break; - case 'authSource': - dbOptions.authSource = value; - break; - case 'gssapiServiceName': - dbOptions.gssapiServiceName = value; - break; - case 'authMechanism': - if (value === 'GSSAPI') { - // If no password provided decode only the principal - if (object.auth == null) { - let urlDecodeAuthPart = decodeURIComponent(authPart); - if (urlDecodeAuthPart.indexOf('@') === -1) - throw new Error('GSSAPI requires a provided principal'); - object.auth = { user: urlDecodeAuthPart, password: null }; - } else { - object.auth.user = decodeURIComponent(object.auth.user); - } - } else if (value === 'MONGODB-X509') { - object.auth = { user: decodeURIComponent(authPart) }; - } - - // Only support GSSAPI or MONGODB-CR for now - if ( - value !== 'GSSAPI' && - value !== 'MONGODB-X509' && - value !== 'MONGODB-CR' && - value !== 'DEFAULT' && - value !== 'SCRAM-SHA-1' && - value !== 'SCRAM-SHA-256' && - value !== 'PLAIN' - ) - throw new Error( - 'Only DEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1 is supported by authMechanism' - ); - - // Authentication mechanism - dbOptions.authMechanism = value; - break; - case 'authMechanismProperties': - { - // Split up into key, value pairs - let values = value.split(','); - let o = {}; - // For each value split into key, value - values.forEach(function(x) { - let v = x.split(':'); - o[v[0]] = v[1]; - }); - - // Set all authMechanismProperties - dbOptions.authMechanismProperties = o; - // Set the service name value - if (typeof o.SERVICE_NAME === 'string') dbOptions.gssapiServiceName = o.SERVICE_NAME; - if (typeof o.SERVICE_REALM === 'string') dbOptions.gssapiServiceRealm = o.SERVICE_REALM; - if (typeof o.CANONICALIZE_HOST_NAME === 'string') - dbOptions.gssapiCanonicalizeHostName = - o.CANONICALIZE_HOST_NAME === 'true' ? 
true : false; - } - break; - case 'wtimeoutMS': - dbOptions.wtimeout = parseInt(value, 10); - break; - case 'readPreference': - if (!ReadPreference.isValid(value)) - throw new Error( - 'readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest' - ); - dbOptions.readPreference = value; - break; - case 'maxStalenessSeconds': - dbOptions.maxStalenessSeconds = parseInt(value, 10); - break; - case 'readPreferenceTags': - { - // Decode the value - value = decodeURIComponent(value); - // Contains the tag object - let tagObject = {}; - if (value == null || value === '') { - dbOptions.read_preference_tags.push(tagObject); - break; - } - - // Split up the tags - let tags = value.split(/,/); - for (let i = 0; i < tags.length; i++) { - let parts = tags[i].trim().split(/:/); - tagObject[parts[0]] = parts[1]; - } - - // Set the preferences tags - dbOptions.read_preference_tags.push(tagObject); - } - break; - case 'compressors': - { - compression = serverOptions.compression || {}; - let compressors = value.split(','); - if ( - !compressors.every(function(compressor) { - return compressor === 'snappy' || compressor === 'zlib'; - }) - ) { - throw new Error('Compressors must be at least one of snappy or zlib'); - } - - compression.compressors = compressors; - serverOptions.compression = compression; - } - break; - case 'zlibCompressionLevel': - { - compression = serverOptions.compression || {}; - let zlibCompressionLevel = parseInt(value, 10); - if (zlibCompressionLevel < -1 || zlibCompressionLevel > 9) { - throw new Error('zlibCompressionLevel must be an integer between -1 and 9'); - } - - compression.zlibCompressionLevel = zlibCompressionLevel; - serverOptions.compression = compression; - } - break; - case 'retryWrites': - dbOptions.retryWrites = value === 'true'; - break; - case 'minSize': - dbOptions.minSize = parseInt(value, 10); - break; - default: - { - let logger = Logger('URL Parser'); - logger.warn(`${name} is not supported as a connection string option`); - } - break; - } - }); - - // No tags: should be null (not []) - if (dbOptions.read_preference_tags.length === 0) { - dbOptions.read_preference_tags = null; - } - - // Validate if there are an invalid write concern combinations - if ( - (dbOptions.w === -1 || dbOptions.w === 0) && - (dbOptions.journal === true || dbOptions.fsync === true || dbOptions.safe === true) - ) - throw new Error('w set to -1 or 0 cannot be combined with safe/w/journal/fsync'); - - // If no read preference set it to primary - if (!dbOptions.readPreference) { - dbOptions.readPreference = 'primary'; - } - - // make sure that user-provided options are applied with priority - dbOptions = Object.assign(dbOptions, options); - - // Add servers to result - object.servers = servers; - - // Returned parsed object - return object; -} diff --git a/lib/utils.js b/lib/utils.js index ed50a5d9abb..44eb28c77c7 100644 --- a/lib/utils.js +++ b/lib/utils.js @@ -1,14 +1,10 @@ 'use strict'; -const MongoError = require('./core/error').MongoError; -const ReadPreference = require('./core/topologies/read_preference'); +const os = require('os'); +const crypto = require('crypto'); +const { MongoError } = require('./error'); +const ReadPreference = require('./read_preference'); const WriteConcern = require('./write_concern'); -var shallowClone = function(obj) { - var copy = {}; - for (var name in obj) copy[name] = obj[name]; - return copy; -}; - // Figure out the read preference var translateReadPreference = function(options) { var r = null; @@ -729,11 +725,297 @@ 
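Aside: the legacy connection-string parser deleted above is gone wholesale, so its host-list behavior is easy to lose track of. Here is a minimal, self-contained sketch of that behavior for reference (a re-creation for illustration, not the driver's exported API; `parseHosts` is a hypothetical name):

```js
// Hypothetical re-creation of the removed parser's host handling:
// accepts "host[:port]" and "[ipv6][:port]" entries, defaults the port to
// 27017, and silently drops duplicate host:port pairs.
function parseHosts(hostPart) {
  const seen = {};
  return hostPart
    .split(',')
    .map(h => {
      let host, port;
      const ipv6match = /\[([^\]]+)\](?::(.+))?/.exec(h);
      if (ipv6match) {
        host = ipv6match[1];
        port = parseInt(ipv6match[2], 10) || 27017;
      } else {
        const hostPort = h.split(':', 2);
        host = hostPort[0] || 'localhost';
        port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017;
      }
      if (seen[`${host}_${port}`]) return null; // duplicate server entry
      seen[`${host}_${port}`] = 1;
      return { host, port };
    })
    .filter(x => x != null);
}

console.log(parseHosts('localhost,localhost:27018,[::1]:27019,localhost'));
// [ { host: 'localhost', port: 27017 },
//   { host: 'localhost', port: 27018 },
//   { host: '::1', port: 27019 } ]
```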
function maybePromise(callback, fn) {
   return result;
 }
+function databaseNamespace(ns) {
+  return ns.split('.')[0];
+}
+function collectionNamespace(ns) {
+  return ns
+    .split('.')
+    .slice(1)
+    .join('.');
+}
+
+/**
+ * Generate a UUIDv4 (as a raw 16-byte Buffer)
+ */
+const uuidV4 = () => {
+  const result = crypto.randomBytes(16);
+  result[6] = (result[6] & 0x0f) | 0x40;
+  result[8] = (result[8] & 0x3f) | 0x80;
+  return result;
+};
+
+/**
+ * Returns the duration calculated from two high resolution timers in milliseconds
+ *
+ * @param {Object} started A high resolution timestamp created from `process.hrtime()`
+ * @returns {Number} The duration in milliseconds
+ */
+const calculateDurationInMs = started => {
+  const hrtime = process.hrtime(started);
+  return (hrtime[0] * 1e9 + hrtime[1]) / 1e6;
+};
+
+/**
+ * Relays events for a given listener and emitter
+ *
+ * @param {EventEmitter} listener the EventEmitter to listen to the events from
+ * @param {EventEmitter} emitter the EventEmitter to relay the events to
+ * @param {Array<string>} events the names of the events to relay
+ */
+function relayEvents(listener, emitter, events) {
+  events.forEach(eventName => listener.on(eventName, event => emitter.emit(eventName, event)));
+}
+
+function retrieveKerberos() {
+  let kerberos;
+
+  try {
+    kerberos = require('kerberos');
+  } catch (err) {
+    if (err.code === 'MODULE_NOT_FOUND') {
+      throw new Error('The `kerberos` module was not found. Please install it and try again.');
+    }
+
+    throw err;
+  }
+
+  return kerberos;
+}
+
+// Throw an error if an attempt to use EJSON is made when it is not installed
+const noEJSONError = function() {
+  throw new Error('The `mongodb-extjson` module was not found. Please install it and try again.');
+};
+
+// Facilitate loading EJSON optionally
+function retrieveEJSON() {
+  let EJSON = null;
+  try {
+    EJSON = require('mongodb-extjson');
+  } catch (error) {} // eslint-disable-line
+  if (!EJSON) {
+    EJSON = {
+      parse: noEJSONError,
+      deserialize: noEJSONError,
+      serialize: noEJSONError,
+      stringify: noEJSONError,
+      setBSONModule: noEJSONError,
+      BSON: noEJSONError
+    };
+  }
+
+  return EJSON;
+}
+
+function retrieveBSON() {
+  var BSON = require('bson');
+  BSON.native = false;
+
+  try {
+    var optionalBSON = require('bson-ext');
+    if (optionalBSON) {
+      optionalBSON.native = true;
+      return optionalBSON;
+    }
+  } catch (err) {} // eslint-disable-line
+
+  return BSON;
+}
+
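`relayEvents` above is a one-liner worth pausing on: it forwards named events from one emitter to another, which is presumably how internal topology events surface on public-facing objects. A quick usage sketch (both emitters here are stand-ins, not driver classes):

```js
const { EventEmitter } = require('events');

// relayEvents as defined above: pipe named events from one emitter to another.
function relayEvents(listener, emitter, events) {
  events.forEach(eventName => listener.on(eventName, event => emitter.emit(eventName, event)));
}

const internal = new EventEmitter(); // e.g. a topology's private emitter
const publicApi = new EventEmitter(); // e.g. a client-facing facade

relayEvents(internal, publicApi, ['serverHeartbeatSucceeded']);
publicApi.on('serverHeartbeatSucceeded', ev => console.log('relayed:', ev));
internal.emit('serverHeartbeatSucceeded', { durationMS: 3 }); // relayed: { durationMS: 3 }
```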
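`calculateDurationInMs` pairs with `process.hrtime()`: capture a starting timestamp, then hand it back to get elapsed wall time in milliseconds with sub-millisecond resolution. For example:

```js
// Measuring an operation with process.hrtime, as calculateDurationInMs expects.
const calculateDurationInMs = started => {
  const hrtime = process.hrtime(started);
  return (hrtime[0] * 1e9 + hrtime[1]) / 1e6;
};

const start = process.hrtime(); // high-resolution [seconds, nanoseconds] pair
setTimeout(() => {
  console.log(`elapsed: ${calculateDurationInMs(start).toFixed(1)}ms`); // roughly 25ms
}, 25);
```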
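`retrieveEJSON` and `retrieveBSON` above, and `retrieveSnappy` just below, all share one pattern: attempt to `require` an optional package and, on failure, fall back to a default implementation or to a stub whose methods throw a descriptive error on first use. The pattern, condensed (the function name and argument shape here are my own, not the driver's):

```js
// Generic form of the optional-require pattern used by the retrieve* helpers.
function retrieveOptional(moduleName, methodNames) {
  try {
    return require(moduleName);
  } catch (error) {
    // Not installed: return a stub that throws a helpful error on first use.
    const fail = () => {
      throw new Error(`The \`${moduleName}\` module was not found. Please install it and try again.`);
    };
    return methodNames.reduce((stub, name) => Object.assign(stub, { [name]: fail }), {});
  }
}

const snappy = retrieveOptional('snappy', ['compress', 'uncompress']);
// snappy.compress(...) now throws a clear error if the package is missing.
```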
+// Throw an error if an attempt to use Snappy is made when Snappy is not installed
+function noSnappyWarning() {
+  throw new Error(
+    'Attempted to use Snappy compression, but Snappy is not installed. Install or disable Snappy compression and try again.'
+  );
+}
+
+// Facilitate loading Snappy optionally
+function retrieveSnappy() {
+  var snappy = null;
+  try {
+    snappy = require('snappy');
+  } catch (error) {} // eslint-disable-line
+  if (!snappy) {
+    snappy = {
+      compress: noSnappyWarning,
+      uncompress: noSnappyWarning,
+      compressSync: noSnappyWarning,
+      uncompressSync: noSnappyWarning
+    };
+  }
+  return snappy;
+}
+
+/**
+ * A helper function for determining `maxWireVersion` between legacy and new topology
+ * instances
+ *
+ * @private
+ * @param {(Topology|Server)} topologyOrServer
+ */
+function maxWireVersion(topologyOrServer) {
+  if (topologyOrServer.ismaster) {
+    return topologyOrServer.ismaster.maxWireVersion;
+  }
+
+  if (typeof topologyOrServer.lastIsMaster === 'function') {
+    const lastIsMaster = topologyOrServer.lastIsMaster();
+    if (lastIsMaster) {
+      return lastIsMaster.maxWireVersion;
+    }
+  }
+
+  if (topologyOrServer.description) {
+    return topologyOrServer.description.maxWireVersion;
+  }
+
+  return null;
+}
+
+/*
+ * Checks whether collation is NOT supported by the given server.
+ *
+ * @param {Server} [server] to check against
+ * @param {object} [cmd] object where collation may be specified
+ * @return true if the server does not support collation
+ */
+function collationNotSupported(server, cmd) {
+  return cmd && cmd.collation && maxWireVersion(server) < 5;
+}
+
+/**
+ * Applies the function `eachFn` to each item in `arr`, in parallel.
+ *
+ * @param {array} arr an array of items to asynchronously iterate over
+ * @param {function} eachFn A function to call on each item of the array. The callback signature is `(item, callback)`, where the callback indicates iteration is complete.
+ * @param {function} callback The callback called after every item has been iterated
+ */
+function eachAsync(arr, eachFn, callback) {
+  arr = arr || [];
+
+  let idx = 0;
+  let awaiting = 0;
+  for (idx = 0; idx < arr.length; ++idx) {
+    awaiting++;
+    eachFn(arr[idx], eachCallback);
+  }
+
+  if (awaiting === 0) {
+    callback();
+    return;
+  }
+
+  function eachCallback(err) {
+    awaiting--;
+    if (err) {
+      callback(err);
+      return;
+    }
+
+    if (idx === arr.length && awaiting <= 0) {
+      callback();
+    }
+  }
+}
+
+function arrayStrictEqual(arr, arr2) {
+  if (!Array.isArray(arr) || !Array.isArray(arr2)) {
+    return false;
+  }
+
+  return arr.length === arr2.length && arr.every((elt, idx) => elt === arr2[idx]);
+}
+
+function tagsStrictEqual(tags, tags2) {
+  const tagsKeys = Object.keys(tags);
+  const tags2Keys = Object.keys(tags2);
+  return tagsKeys.length === tags2Keys.length && tagsKeys.every(key => tags2[key] === tags[key]);
+}
+
+function errorStrictEqual(lhs, rhs) {
+  if (lhs === rhs) {
+    return true;
+  }
+
+  if ((lhs == null && rhs != null) || (lhs != null && rhs == null)) {
+    return false;
+  }
+
+  if (lhs.constructor.name !== rhs.constructor.name) {
+    return false;
+  }
+
+  if (lhs.message !== rhs.message) {
+    return false;
+  }
+
+  return true;
+}
+
+function makeStateMachine(stateTable) {
+  return function stateTransition(target, newState) {
+    const legalStates = stateTable[target.s.state];
+    if (legalStates && legalStates.indexOf(newState) < 0) {
+      throw new TypeError(
+        `illegal state transition from [${target.s.state}] => [${newState}], allowed: [${legalStates}]`
+      );
+    }
+
+    target.emit('stateChanged', target.s.state, newState);
+    target.s.state = newState;
+  };
+}
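`eachAsync` above kicks off `eachFn` for every item up front, so all items are in flight concurrently, and it reports to `callback` either the first error encountered or completion once everything has settled. A small demonstration with a toy async step (the timings are arbitrary):

```js
// eachAsync as defined above, exercised with a made-up async operation.
function eachAsync(arr, eachFn, callback) {
  arr = arr || [];
  let idx = 0;
  let awaiting = 0;
  for (idx = 0; idx < arr.length; ++idx) {
    awaiting++;
    eachFn(arr[idx], eachCallback);
  }
  if (awaiting === 0) {
    callback();
    return;
  }
  function eachCallback(err) {
    awaiting--;
    if (err) {
      callback(err);
      return;
    }
    if (idx === arr.length && awaiting <= 0) {
      callback();
    }
  }
}

eachAsync(
  [1, 2, 3],
  (item, done) =>
    setTimeout(() => {
      console.log('finished', item); // items complete independently, in parallel
      done();
    }, 10 * item),
  err => console.log('all done, err =', err)
);
```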
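`makeStateMachine` closes over a table of legal transitions and returns a guard for objects that follow the driver's `{ s: { state } }` convention, emitting `stateChanged` on every move. A usage sketch; the states and table below are illustrative, not taken from the driver:

```js
const { EventEmitter } = require('events');

// makeStateMachine as defined above.
function makeStateMachine(stateTable) {
  return function stateTransition(target, newState) {
    const legalStates = stateTable[target.s.state];
    if (legalStates && legalStates.indexOf(newState) < 0) {
      throw new TypeError(
        `illegal state transition from [${target.s.state}] => [${newState}], allowed: [${legalStates}]`
      );
    }
    target.emit('stateChanged', target.s.state, newState);
    target.s.state = newState;
  };
}

// Hypothetical lifecycle: closed -> connecting -> connected -> closed.
const stateTransition = makeStateMachine({
  closed: ['connecting'],
  connecting: ['connected', 'closed'],
  connected: ['closed']
});

const pool = new EventEmitter();
pool.s = { state: 'closed' };
pool.on('stateChanged', (from, to) => console.log(`${from} -> ${to}`));

stateTransition(pool, 'connecting'); // closed -> connecting
stateTransition(pool, 'connected'); // connecting -> connected
// stateTransition(pool, 'connecting'); // would throw: illegal state transition
```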
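The last helper in this hunk, `makeClientMetadata` (immediately below), assembles the client document the driver sends to the server during its initial handshake. For orientation, representative output on a hypothetical Linux x64 host; every value here is illustrative, not a fixed contract:

```js
// Roughly what makeClientMetadata({ appname: 'myApp' }) evaluates to:
const exampleMetadata = {
  driver: { name: 'nodejs', version: '3.5.4' }, // hypothetical package.json version
  os: { type: 'Linux', name: 'linux', architecture: 'x64', version: '5.4.0' },
  platform: 'Node.js v12.16.1, LE (unified)',
  application: { name: 'myApp' } // appname, truncated to 128 bytes if longer
};
```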
+
+function makeClientMetadata(options) {
+  options = options || {};
+
+  const metadata = {
+    driver: {
+      name: 'nodejs',
+      version: require('../package.json').version
+    },
+    os: {
+      type: os.type(),
+      name: process.platform,
+      architecture: process.arch,
+      version: os.release()
+    },
+    platform: `Node.js ${process.version}, ${os.endianness()} (unified)`
+  };
+
+  // support optionally provided wrapping driver info
+  if (options.driverInfo) {
+    if (options.driverInfo.name) {
+      metadata.driver.name = `${metadata.driver.name}|${options.driverInfo.name}`;
+    }
+
+    if (options.driverInfo.version) {
+      metadata.driver.version = `${metadata.driver.version}|${options.driverInfo.version}`;
+    }
+
+    if (options.driverInfo.platform) {
+      metadata.platform = `${metadata.platform}|${options.driverInfo.platform}`;
+    }
+  }
+
+  if (options.appname) {
+    // MongoDB requires the appname not exceed a byte length of 128
+    const buffer = Buffer.from(options.appname);
+    metadata.application = {
+      name: buffer.length > 128 ? buffer.slice(0, 128).toString('utf8') : options.appname
+    };
+  }
+
+  return metadata;
+}
+
+const noop = () => {};
+
 module.exports = {
   filterOptions,
   mergeOptions,
   translateOptions,
-  shallowClone,
   getSingleProperty,
   checkCollectionName,
   toError,
@@ -759,5 +1041,23 @@ module.exports = {
   resolveReadPreference,
   emitDeprecationWarning,
   makeCounter,
-  maybePromise
+  maybePromise,
+  databaseNamespace,
+  collectionNamespace,
+  uuidV4,
+  calculateDurationInMs,
+  relayEvents,
+  collationNotSupported,
+  retrieveEJSON,
+  retrieveKerberos,
+  retrieveBSON,
+  retrieveSnappy,
+  maxWireVersion,
+  eachAsync,
+  arrayStrictEqual,
+  tagsStrictEqual,
+  errorStrictEqual,
+  makeStateMachine,
+  makeClientMetadata,
+  noop
 };
diff --git a/lib/write_concern.js b/lib/write_concern.js
index 79b0f092552..1ba0380c5f9 100644
--- a/lib/write_concern.js
+++ b/lib/write_concern.js
@@ -1,5 +1,7 @@
 'use strict';
 
+const kWriteConcernKeys = new Set(['w', 'wtimeout', 'j', 'fsync']);
+
 /**
  * The **WriteConcern** class is a class that represents a MongoDB WriteConcern.
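The write_concern.js hunk continuing below gives `WriteConcern.fromOptions` two new behaviors: a bare string is treated as the `w` value, and an `options.writeConcern` object containing none of the recognized keys now yields `undefined` rather than an empty write concern. A sketch of just that logic, with a minimal stand-in for the class (the real method also handles top-level options not shown here):

```js
const kWriteConcernKeys = new Set(['w', 'wtimeout', 'j', 'fsync']);

// Minimal stand-in for WriteConcern, just to exercise the new fromOptions logic.
class WriteConcern {
  constructor(w, wtimeout, j, fsync) {
    Object.assign(this, { w, wtimeout, j, fsync });
  }

  static fromOptions(options) {
    if (options == null || options.writeConcern == null) return;
    if (typeof options.writeConcern === 'string') {
      return new WriteConcern(options.writeConcern); // e.g. 'majority' becomes `w`
    }
    if (!Object.keys(options.writeConcern).some(key => kWriteConcernKeys.has(key))) {
      return; // nothing write-concern-shaped in the object: yield undefined
    }
    const wc = options.writeConcern;
    return new WriteConcern(wc.w, wc.wtimeout, wc.j, wc.fsync);
  }
}

console.log(WriteConcern.fromOptions({ writeConcern: 'majority' })); // { w: 'majority', ... }
console.log(WriteConcern.fromOptions({ writeConcern: { w: 1, wtimeout: 500 } })); // { w: 1, wtimeout: 500, ... }
console.log(WriteConcern.fromOptions({ writeConcern: { foo: 'bar' } })); // undefined
```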
* @class @@ -51,6 +53,14 @@ class WriteConcern { } if (options.writeConcern) { + if (typeof options.writeConcern === 'string') { + return new WriteConcern(options.writeConcern); + } + + if (!Object.keys(options.writeConcern).some(key => kWriteConcernKeys.has(key))) { + return; + } + return new WriteConcern( options.writeConcern.w, options.writeConcern.wtimeout, diff --git a/package-lock.json b/package-lock.json index c55e34b217f..396ef5fedf6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -4,6 +4,11 @@ "lockfileVersion": 1, "requires": true, "dependencies": { + "@babel/parser": { + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.8.7.tgz", + "integrity": "sha512-9JWls8WilDXFGxs0phaXAZgpxTZhSk/yOYH2hTHC0X1yC7Z78IJfvR1vJ+rmJKq3I35td2XzXzN6ZLYlna+r/A==" + }, "@sinonjs/commons": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.6.0.tgz", @@ -39,6 +44,40 @@ "integrity": "sha512-+iTbntw2IZPb/anVDbypzfQa+ay64MW0Zo8aJ8gZPWMMK6/OubMVb6lUPMagqjOPnmtauXnFCACVl3O7ogjeqQ==", "dev": true }, + "@types/color-name": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", + "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" + }, + "@typescript-eslint/typescript-estree": { + "version": "2.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-2.22.0.tgz", + "integrity": "sha512-2HFZW2FQc4MhIBB8WhDm9lVFaBDy6h9jGrJ4V2Uzxe/ON29HCHBTj3GkgcsgMWfsl2U5as+pTOr30Nibaw7qRQ==", + "requires": { + "debug": "^4.1.1", + "eslint-visitor-keys": "^1.1.0", + "glob": "^7.1.6", + "is-glob": "^4.0.1", + "lodash": "^4.17.15", + "semver": "^6.3.0", + "tsutils": "^3.17.1" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + }, + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, "JSONStream": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", @@ -100,7 +139,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/amdefine/-/amdefine-1.0.1.tgz", "integrity": "sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU=", - "dev": true, "optional": true }, "ansi-escapes": { @@ -119,11 +157,15 @@ "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, "requires": { "color-convert": "^1.9.0" } }, + "app-module-path": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/app-module-path/-/app-module-path-2.2.0.tgz", + "integrity": "sha1-ZBqlXft9am8KgUHEucCqULbCTdU=" + }, "aproba": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", @@ -194,6 +236,11 @@ "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", "dev": true }, + "ast-module-types": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/ast-module-types/-/ast-module-types-2.6.0.tgz", + "integrity": 
"sha512-zXSoVaMrf2R+r+ISid5/9a8SXm1LLdkhHzh6pSRhj9jklzruOOl1hva1YmFT33wAstg/f9ZndJAlq1BSrFLSGA==" + }, "async": { "version": "1.5.2", "resolved": "https://registry.npmjs.org/async/-/async-1.5.2.tgz", @@ -265,8 +312,7 @@ "balanced-match": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", - "dev": true + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" }, "bcrypt-pbkdf": { "version": "1.0.2", @@ -314,7 +360,6 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -408,7 +453,6 @@ "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -448,6 +492,11 @@ "restore-cursor": "^2.0.0" } }, + "cli-spinners": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.2.0.tgz", + "integrity": "sha512-tgU3fKwzYjiLEQgPMD9Jt+JjHVL9kW93FiIMX/l7rivvOD4/LL0Mf7gda3+4U2KJBloybwgj5KEoQgGRioMiKQ==" + }, "cli-width": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.0.tgz", @@ -487,6 +536,11 @@ } } }, + "clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha1-2jCcwmPfFZlMaIypAheco8fNfH4=" + }, "co": { "version": "4.6.0", "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", @@ -503,7 +557,6 @@ "version": "1.9.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, "requires": { "color-name": "1.1.3" } @@ -511,8 +564,7 @@ "color-name": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, "combined-stream": { "version": "1.0.8", @@ -526,8 +578,12 @@ "commander": { "version": "2.20.3", "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", - "dev": true + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=" }, "compare-func": { "version": "1.3.2", @@ -542,8 +598,7 @@ "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "dev": true + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" }, "concat-stream": { "version": "1.6.2", @@ -938,6 +993,21 @@ } } }, + "decomment": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/decomment/-/decomment-0.9.2.tgz", + "integrity": "sha512-sblyUmOJZxiL7oJ2ogJS6jtl/67+CTOW87SrYE/96u3PhDYikYoLCdLzcnceToiQejOLlqNnLCkaxx/+nE/ehg==", + "requires": { + "esprima": 
"4.0.1" + }, + "dependencies": { + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + } + } + }, "decompress-response": { "version": "4.2.1", "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-4.2.1.tgz", @@ -959,14 +1029,20 @@ "deep-extend": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "dev": true + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==" }, "deep-is": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", - "dev": true + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=" + }, + "defaults": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz", + "integrity": "sha1-xlYFHpgX2f8I7YgUd/P+QBnz730=", + "requires": { + "clone": "^1.0.2" + } }, "delayed-stream": { "version": "1.0.0", @@ -985,12 +1061,159 @@ "resolved": "https://registry.npmjs.org/denque/-/denque-1.4.1.tgz", "integrity": "sha512-OfzPuSZKGcgr96rf1oODnfjqBFmr1DVoc/TrItj3Ohe0Ah1C5WX5Baquw/9U9KovnQ88EqmJbD66rKYUQYN1tQ==" }, + "dependency-tree": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/dependency-tree/-/dependency-tree-7.2.1.tgz", + "integrity": "sha512-nBxnjkqDW4LqAzBazy60V4lE0mAtIQ+oers/GIIvVvGYVdCD9+RNNd4G9jjstyz7ZFVg/j/OiYCvK5MjoVqA2w==", + "requires": { + "commander": "^2.19.0", + "debug": "^4.1.1", + "filing-cabinet": "^2.5.1", + "precinct": "^6.2.0", + "typescript": "^3.7.5" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + } + } + }, "detect-libc": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", "integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=", "dev": true }, + "detective-amd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/detective-amd/-/detective-amd-3.0.0.tgz", + "integrity": "sha512-kOpKHyabdSKF9kj7PqYHLeHPw+TJT8q2u48tZYMkIcas28el1CYeLEJ42Nm+563/Fq060T5WknfwDhdX9+kkBQ==", + "requires": { + "ast-module-types": "^2.3.1", + "escodegen": "^1.8.0", + "get-amd-module-type": "^3.0.0", + "node-source-walk": "^4.0.0" + } + }, + "detective-cjs": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/detective-cjs/-/detective-cjs-3.1.1.tgz", + "integrity": "sha512-JQtNTBgFY6h8uT6pgph5QpV3IyxDv+z3qPk/FZRDT9TlFfm5dnRtpH39WtQEr1khqsUxVqXzKjZHpdoQvQbllg==", + "requires": { + "ast-module-types": "^2.4.0", + "node-source-walk": "^4.0.0" + } + }, + "detective-es6": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detective-es6/-/detective-es6-2.1.0.tgz", + "integrity": "sha512-QSHqKGOp/YBIfmIqKXaXeq2rlL+bp3bcIQMfZ+0PvKzRlELSOSZxKRvpxVcxlLuocQv4QnOfuWGniGrmPbz8MQ==", + "requires": { + "node-source-walk": "^4.0.0" + } + }, + "detective-less": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/detective-less/-/detective-less-1.0.2.tgz", + "integrity": 
"sha512-Rps1xDkEEBSq3kLdsdnHZL1x2S4NGDcbrjmd4q+PykK5aJwDdP5MBgrJw1Xo+kyUHuv3JEzPqxr+Dj9ryeDRTA==", + "requires": { + "debug": "^4.0.0", + "gonzales-pe": "^4.2.3", + "node-source-walk": "^4.0.0" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + } + } + }, + "detective-postcss": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/detective-postcss/-/detective-postcss-3.0.1.tgz", + "integrity": "sha512-tfTS2GdpUal5NY0aCqI4dpEy8Xfr88AehYKB0iBIZvo8y2g3UsrcDnrp9PR2FbzoW7xD5Rip3NJW7eCSvtqdUw==", + "requires": { + "debug": "^4.1.1", + "is-url": "^1.2.4", + "postcss": "^7.0.2", + "postcss-values-parser": "^1.5.0" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + } + } + }, + "detective-sass": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/detective-sass/-/detective-sass-3.0.1.tgz", + "integrity": "sha512-oSbrBozRjJ+QFF4WJFbjPQKeakoaY1GiR380NPqwdbWYd5wfl5cLWv0l6LsJVqrgWfFN1bjFqSeo32Nxza8Lbw==", + "requires": { + "debug": "^4.1.1", + "gonzales-pe": "^4.2.3", + "node-source-walk": "^4.0.0" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + } + } + }, + "detective-scss": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/detective-scss/-/detective-scss-2.0.1.tgz", + "integrity": "sha512-VveyXW4WQE04s05KlJ8K0bG34jtHQVgTc9InspqoQxvnelj/rdgSAy7i2DXAazyQNFKlWSWbS+Ro2DWKFOKTPQ==", + "requires": { + "debug": "^4.1.1", + "gonzales-pe": "^4.2.3", + "node-source-walk": "^4.0.0" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + } + } + }, + "detective-stylus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/detective-stylus/-/detective-stylus-1.0.0.tgz", + "integrity": "sha1-UK7n24uruZA4HwEMY/q7pbWOVM0=" + }, + "detective-typescript": { + "version": "5.7.0", + "resolved": "https://registry.npmjs.org/detective-typescript/-/detective-typescript-5.7.0.tgz", + "integrity": "sha512-4SQeACXWAjIOsd2kJykPL8gWC9nVA+z8w7KtAdtd/7BCpDfrpI2ZA7pdhsmHv/zxf3ofeqpYi72vCkZ65bAjtA==", + "requires": { + "@typescript-eslint/typescript-estree": "^2.4.0", + "ast-module-types": "^2.5.0", + "node-source-walk": "^4.2.0", + "typescript": "^3.6.4" + } + }, "diff": { "version": "3.5.0", "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", @@ -1050,11 +1273,20 @@ "once": "^1.4.0" } }, + "enhanced-resolve": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz", + "integrity": "sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==", + "requires": { + "graceful-fs": "^4.1.2", + "memory-fs": "^0.5.0", + "tapable": "^1.0.0" + } + }, "errno": { "version": "0.1.7", 
"resolved": "https://registry.npmjs.org/errno/-/errno-0.1.7.tgz", "integrity": "sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg==", - "dev": true, "requires": { "prr": "~1.0.1" } @@ -1071,14 +1303,12 @@ "escape-string-regexp": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" }, "escodegen": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.8.1.tgz", "integrity": "sha1-WltTr0aTEQvrsIZ6o0MN07cKEBg=", - "dev": true, "requires": { "esprima": "^2.7.1", "estraverse": "^1.9.1", @@ -1090,8 +1320,7 @@ "estraverse": { "version": "1.9.3", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-1.9.3.tgz", - "integrity": "sha1-r2fy3JIlgkFZUJJgkaQAXSnJu0Q=", - "dev": true + "integrity": "sha1-r2fy3JIlgkFZUJJgkaQAXSnJu0Q=" } } }, @@ -1197,8 +1426,7 @@ "eslint-visitor-keys": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz", - "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==", - "dev": true + "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==" }, "espree": { "version": "3.5.4", @@ -1213,8 +1441,7 @@ "esprima": { "version": "2.7.3", "resolved": "https://registry.npmjs.org/esprima/-/esprima-2.7.3.tgz", - "integrity": "sha1-luO3DVd59q1JzQMmc9HDEnZ7pYE=", - "dev": true + "integrity": "sha1-luO3DVd59q1JzQMmc9HDEnZ7pYE=" }, "esquery": { "version": "1.0.1", @@ -1243,8 +1470,7 @@ "esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" }, "execa": { "version": "0.7.0", @@ -1311,8 +1537,7 @@ "fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", - "dev": true + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=" }, "figures": { "version": "2.0.0", @@ -1333,12 +1558,63 @@ "object-assign": "^4.0.1" } }, + "file-exists-dazinatorfork": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/file-exists-dazinatorfork/-/file-exists-dazinatorfork-1.0.2.tgz", + "integrity": "sha512-r70c72ln2YHzQINNfxDp02hAhbGkt1HffZ+Du8oetWDLjDtFja/Lm10lUaSh9e+wD+7VDvPee0b0C9SAy8pWZg==" + }, "file-uri-to-path": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", "dev": true }, + "filing-cabinet": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/filing-cabinet/-/filing-cabinet-2.5.1.tgz", + "integrity": "sha512-GWOdObzou2L0HrJUk8MpJa01q0ZOwuTwTssM2+P+ABJWEGlVWd6ueEatANFdin94/3rdkVSdqpH14VqCNqp3RA==", + "requires": { + "app-module-path": "^2.2.0", + "commander": "^2.13.0", + "debug": "^4.1.1", + "decomment": "^0.9.2", + "enhanced-resolve": "^4.1.0", + "is-relative-path": "^1.0.2", + "module-definition": "^3.0.0", + "module-lookup-amd": "^6.1.0", + "resolve": "^1.11.1", + 
"resolve-dependency-path": "^2.0.0", + "sass-lookup": "^3.0.0", + "stylus-lookup": "^3.0.1", + "typescript": "^3.0.3" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + }, + "resolve": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.1.tgz", + "integrity": "sha512-84oo6ZTtoTUpjgNEr5SJyzQhzL72gaRodsSfyxC/AXRvwu0Yse9H8eF9IpGo7b8YetZhlI6v7ZQ6bKBFV/6S7w==", + "requires": { + "path-parse": "^1.0.6" + } + } + } + }, + "find": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/find/-/find-0.3.0.tgz", + "integrity": "sha512-iSd+O4OEYV/I36Zl8MdYJO0xD82wH528SaCieTVHhclgiYNe9y+yPKSwK+A7/WsmHL1EZ+pYUJBXWTL5qofksw==", + "requires": { + "traverse-chain": "~0.1.0" + } + }, "find-up": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", @@ -1360,6 +1636,11 @@ "write": "^0.2.1" } }, + "flatten": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.3.tgz", + "integrity": "sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg==" + }, "forever-agent": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", @@ -1395,8 +1676,7 @@ "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "functional-red-black-tree": { "version": "1.0.1", @@ -1460,6 +1740,15 @@ "is-property": "^1.0.0" } }, + "get-amd-module-type": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/get-amd-module-type/-/get-amd-module-type-3.0.0.tgz", + "integrity": "sha512-99Q7COuACPfVt18zH9N4VAMyb81S6TUgJm2NgV6ERtkh9VIkAaByZkW530wl3lLN5KTtSrK9jVLxYsoP5hQKsw==", + "requires": { + "ast-module-types": "^2.3.2", + "node-source-walk": "^4.0.0" + } + }, "get-caller-file": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-1.0.3.tgz", @@ -1472,6 +1761,11 @@ "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", "dev": true }, + "get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + }, "get-pkg-repo": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/get-pkg-repo/-/get-pkg-repo-1.4.0.tgz", @@ -1650,7 +1944,6 @@ "version": "7.1.6", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -1666,11 +1959,33 @@ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true }, + "gonzales-pe": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/gonzales-pe/-/gonzales-pe-4.2.4.tgz", + "integrity": "sha512-v0Ts/8IsSbh9n1OJRnSfa7Nlxi4AkXIsWB6vPept8FDbL4bXn3FNuxjYtO/nmBGu7GDkL9MFeGebeSu6l55EPQ==", + "requires": { + "minimist": "1.1.x" + }, + "dependencies": { + 
"minimist": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.1.3.tgz", + "integrity": "sha1-O+39kaktOQFvz6ocaB6Pqhoe/ag=" + } + } + }, "graceful-fs": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", - "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==", - "dev": true + "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==" + }, + "graphviz": { + "version": "0.0.9", + "resolved": "https://registry.npmjs.org/graphviz/-/graphviz-0.0.9.tgz", + "integrity": "sha512-SmoY2pOtcikmMCqCSy2NO1YsRfu9OO0wpTlOYW++giGjfX1a6gax/m1Fo8IdUd0/3H15cTOfR1SMKwohj4LKsg==", + "requires": { + "temp": "~0.4.0" + } }, "growl": { "version": "1.10.5", @@ -1749,8 +2064,7 @@ "has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" }, "has-unicode": { "version": "2.0.1", @@ -1826,11 +2140,15 @@ "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=", "dev": true }, + "indexes-of": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", + "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc=" + }, "inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dev": true, "requires": { "once": "^1.3.0", "wrappy": "1" @@ -1844,8 +2162,7 @@ "ini": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", - "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==", - "dev": true + "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==" }, "inquirer": { "version": "3.3.0", @@ -1898,6 +2215,11 @@ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", "dev": true }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=" + }, "is-finite": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.0.2.tgz", @@ -1913,6 +2235,19 @@ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", "dev": true }, + "is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==" + }, "is-my-ip-valid": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-my-ip-valid/-/is-my-ip-valid-1.0.0.tgz", @@ -1935,8 +2270,7 @@ "is-obj": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=", - "dev": true + "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=" }, "is-plain-obj": { "version": "1.1.0", @@ -1956,6 +2290,16 @@ "integrity": "sha1-V/4cTkhHTt1lsJkR8msc1Ald2oQ=", "dev": true }, + "is-regexp": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha1-/S2INUXEa6xaYz57mgnof6LLUGk=" + }, + "is-relative-path": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-relative-path/-/is-relative-path-1.0.2.tgz", + "integrity": "sha1-CRtGoNZ8HtD+hfH4z93gBrslHUY=" + }, "is-resolvable": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", @@ -1989,6 +2333,11 @@ "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", "dev": true }, + "is-url": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/is-url/-/is-url-1.2.4.tgz", + "integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==" + }, "is-utf8": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", @@ -2217,7 +2566,6 @@ "version": "0.3.0", "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", - "dev": true, "requires": { "prelude-ls": "~1.1.2", "type-check": "~0.3.2" @@ -2248,8 +2596,7 @@ "lodash": { "version": "4.17.15", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", - "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==", - "dev": true + "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==" }, "lodash._reinterpolate": { "version": "3.0.0", @@ -2294,6 +2641,14 @@ "integrity": "sha1-euTsJXMC/XkNVXyxDJcQDYV7AFY=", "dev": true }, + "log-symbols": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-3.0.0.tgz", + "integrity": "sha512-dSkNGuI7iG3mfvDzUuYZyvk5dD9ocYCYzNU6CYDE6+Xqd+gwme6Z00NS3dUh8mq/73HaEtT7m6W+yUPtU6BZnQ==", + "requires": { + "chalk": "^2.4.2" + } + }, "lolex": { "version": "2.7.5", "resolved": "https://registry.npmjs.org/lolex/-/lolex-2.7.5.tgz", @@ -2320,6 +2675,119 @@ "yallist": "^2.1.2" } }, + "madge": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/madge/-/madge-3.7.0.tgz", + "integrity": "sha512-Qxv0lLgarPIptxD8PHS2nGyuYcyu21Y2dOSBH8O7hF8UEAEkMjjYpECYquBTOMoKNe6hKo1VnC3mCE/fn313XA==", + "requires": { + "chalk": "^3.0.0", + "commander": "^4.0.1", + "commondir": "^1.0.1", + "debug": "^4.0.1", + "dependency-tree": "^7.0.2", + "detective-amd": "^3.0.0", + "detective-cjs": "^3.1.1", + "detective-es6": "^2.1.0", + "detective-less": "^1.0.2", + "detective-postcss": "^3.0.1", + "detective-sass": "^3.0.1", + "detective-scss": "^2.0.1", + "detective-stylus": "^1.0.0", + "detective-typescript": "^5.7.0", + "graphviz": "0.0.9", + "ora": "^4.0.2", + "pify": "^4.0.0", + "pluralize": "^8.0.0", + "pretty-ms": "^5.0.0", + "rc": "^1.2.7", + "walkdir": "^0.4.1" + }, + "dependencies": { + "ansi-styles": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", + "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "requires": { + "@types/color-name": "^1.1.1", + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==" + }, + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + }, + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==" + }, + "pluralize": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", + "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==" + }, + "supports-color": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", + "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "requires": { + "pify": "^4.0.1", + "semver": "^5.6.0" + }, + "dependencies": { + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==" + } + } + }, "map-obj": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-2.0.0.tgz", @@ -2341,6 +2809,15 @@ "mimic-fn": "^1.0.0" } }, + "memory-fs": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz", + "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==", + "requires": { + "errno": "^0.1.3", + "readable-stream": "^2.0.1" + } + }, "memory-pager": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/memory-pager/-/memory-pager-1.5.0.tgz", @@ -2418,7 +2895,6 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dev": true, "requires": { "brace-expansion": "^1.1.7" } @@ -2426,8 +2902,7 @@ "minimist": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", - "dev": true + "integrity": 
"sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" }, "minimist-options": { "version": "3.0.2", @@ -2533,6 +3008,38 @@ "integrity": "sha512-xV2bxeN6F7oYjZWTe/YPAy6MN2M+sL4u/Rlm2AHCIVGfo2p1yGmBHQ6vHehl4bRTZBdHu3TSkWdYgkwpYzAGSw==", "dev": true }, + "module-definition": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/module-definition/-/module-definition-3.3.0.tgz", + "integrity": "sha512-HTplA9xwDzH67XJFC1YvZMUElWJD28DV0dUq7lhTs+JKJamUOWA/CcYWSlhW5amJO66uWtY7XdltT+LfX0wIVg==", + "requires": { + "ast-module-types": "^2.6.0", + "node-source-walk": "^4.0.0" + } + }, + "module-lookup-amd": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/module-lookup-amd/-/module-lookup-amd-6.2.0.tgz", + "integrity": "sha512-uxHCj5Pw9psZiC1znjU2qPsubt6haCSsN9m7xmIdoTciEgfxUkE1vhtDvjHPuOXEZrVJhjKgkmkP+w73rRuelQ==", + "requires": { + "commander": "^2.8.1", + "debug": "^4.1.0", + "file-exists-dazinatorfork": "^1.0.2", + "find": "^0.3.0", + "requirejs": "^2.3.5", + "requirejs-config-file": "^3.1.1" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + } + } + }, "mongodb-extjson": { "version": "2.1.4", "resolved": "https://registry.npmjs.org/mongodb-extjson/-/mongodb-extjson-2.1.4.tgz", @@ -2562,8 +3069,7 @@ "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "mute-stream": { "version": "0.0.7", @@ -2635,6 +3141,14 @@ "semver": "^5.4.1" } }, + "node-source-walk": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/node-source-walk/-/node-source-walk-4.2.0.tgz", + "integrity": "sha512-hPs/QMe6zS94f5+jG3kk9E7TNm4P2SulrKiLWMzKszBfNZvL/V6wseHlTd7IvfW0NZWqPtK3+9yYNr+3USGteA==", + "requires": { + "@babel/parser": "^7.0.0" + } + }, "noop-logger": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/noop-logger/-/noop-logger-0.1.1.tgz", @@ -2722,7 +3236,6 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dev": true, "requires": { "wrappy": "1" } @@ -2764,7 +3277,6 @@ "version": "0.8.3", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", - "dev": true, "requires": { "deep-is": "~0.1.3", "fast-levenshtein": "~2.0.6", @@ -2774,6 +3286,115 @@ "word-wrap": "~1.2.3" } }, + "ora": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/ora/-/ora-4.0.3.tgz", + "integrity": "sha512-fnDebVFyz309A73cqCipVL1fBZewq4vwgSHfxh43vVy31mbyoQ8sCH3Oeaog/owYOs/lLlGVPCISQonTneg6Pg==", + "requires": { + "chalk": "^3.0.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.2.0", + "is-interactive": "^1.0.0", + "log-symbols": "^3.0.0", + "mute-stream": "0.0.8", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" + }, + 
"ansi-styles": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", + "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "requires": { + "@types/color-name": "^1.1.1", + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "requires": { + "restore-cursor": "^3.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==" + }, + "mute-stream": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==" + }, + "onetime": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz", + "integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==", + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "requires": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + } + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "supports-color": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", + "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, "os-locale": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-2.1.0.tgz", @@ -2837,6 +3458,11 @@ "json-parse-better-errors": "^1.0.1" } }, + "parse-ms": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/parse-ms/-/parse-ms-2.1.0.tgz", + "integrity": "sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA==" + }, "path-exists": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", @@ -2846,8 +3472,7 @@ "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "dev": true + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" }, "path-is-inside": { "version": "1.0.2", @@ -2864,8 +3489,7 @@ "path-parse": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==", - "dev": true + "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" }, "path-to-regexp": { "version": "1.8.0", @@ -2926,6 +3550,41 @@ "integrity": "sha512-ARhBOdzS3e41FbkW/XWrTEtukqqLoK5+Z/4UeDaLuSW+39JPeFgs4gCGqsrJHVZX0fUrx//4OF0K1CUGwlIFow==", "dev": true }, + "postcss": { + "version": "7.0.27", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.27.tgz", + "integrity": "sha512-WuQETPMcW9Uf1/22HWUWP9lgsIC+KEHg2kozMflKjbeUtw9ujvFX6QmIfozaErDkmLWS9WEnEdEe6Uo9/BNTdQ==", + "requires": { + "chalk": "^2.4.2", + "source-map": "^0.6.1", + "supports-color": "^6.1.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + }, + "supports-color": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", + "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "postcss-values-parser": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/postcss-values-parser/-/postcss-values-parser-1.5.0.tgz", + "integrity": "sha512-3M3p+2gMp0AH3da530TlX8kiO1nxdTnc3C6vr8dMxRLIlh8UYkz0/wcwptSXjhtx2Fr0TySI7a+BHDQ8NL7LaQ==", + "requires": { + "flatten": "^1.0.2", + "indexes-of": "^1.0.1", + "uniq": "^1.0.1" + } + }, "prebuild-install": { "version": "5.3.3", "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-5.3.3.tgz", @@ -2960,11 +3619,40 @@ } } }, + "precinct": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/precinct/-/precinct-6.2.0.tgz", + "integrity": "sha512-BCAmnOxZzobF3H1/h/gq70pEyvX/BVLWCrzi8beFD22dqu5Z14qOghNUsI24Wg8oaTsGFcIjOGtFX5L9ttmjVg==", + "requires": { + "commander": "^2.19.0", + "debug": "^4.1.1", + "detective-amd": "^3.0.0", + "detective-cjs": "^3.1.1", + "detective-es6": "^2.0.0", + "detective-less": "^1.0.2", + "detective-postcss": "^3.0.0", + "detective-sass": "^3.0.0", + "detective-scss": "^2.0.0", + "detective-stylus": "^1.0.0", + "detective-typescript": "^5.1.1", + "module-definition": "^3.3.0", + "node-source-walk": "^4.2.0" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + } + } + }, "prelude-ls": { "version": "1.1.2", "resolved": 
"https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", - "dev": true + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=" }, "prettier": { "version": "1.19.1", @@ -2972,6 +3660,14 @@ "integrity": "sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==", "dev": true }, + "pretty-ms": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/pretty-ms/-/pretty-ms-5.1.0.tgz", + "integrity": "sha512-4gaK1skD2gwscCfkswYQRmddUb2GJZtzDGRjHWadVHtK/DIKFufa12MvES6/xu1tVbUYeia5bmLcwJtZJQUqnw==", + "requires": { + "parse-ms": "^2.1.0" + } + }, "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", @@ -2986,8 +3682,7 @@ "prr": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz", - "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=", - "dev": true + "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=" }, "pseudomap": { "version": "1.0.2", @@ -3033,7 +3728,6 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dev": true, "requires": { "deep-extend": "^0.6.0", "ini": "~1.3.0", @@ -3237,13 +3931,26 @@ } } }, - "require_optional": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/require_optional/-/require_optional-1.0.1.tgz", - "integrity": "sha512-qhM/y57enGWHAe3v/NcwML6a3/vfESLe/sGM2dII+gEO0BpKRUkWZow/tyloNqJyN6kXSl3RyyM8Ll5D/sJP8g==", + "requirejs": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/requirejs/-/requirejs-2.3.6.tgz", + "integrity": "sha512-ipEzlWQe6RK3jkzikgCupiTbTvm4S0/CAU5GlgptkN5SO6F3u0UD0K18wy6ErDqiCyP4J4YYe1HuAShvsxePLg==" + }, + "requirejs-config-file": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/requirejs-config-file/-/requirejs-config-file-3.1.2.tgz", + "integrity": "sha512-sdLWywcDuNz7EIOhenSbRfT4YF84nItDv90coN2htbokjmU2QeyQuSBZILQUKNksepl8UPVU+hgYySFaDxbJPQ==", "requires": { - "resolve-from": "^2.0.0", - "semver": "^5.1.0" + "esprima": "^4.0.0", + "make-dir": "^2.1.0", + "stringify-object": "^3.2.1" + }, + "dependencies": { + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + } } }, "requizzle": { @@ -3261,10 +3968,10 @@ "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=", "dev": true }, - "resolve-from": { + "resolve-dependency-path": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-2.0.0.tgz", - "integrity": "sha1-lICrIOlP+h2egKgEx+oUdhGWa1c=" + "resolved": "https://registry.npmjs.org/resolve-dependency-path/-/resolve-dependency-path-2.0.0.tgz", + "integrity": "sha512-DIgu+0Dv+6v2XwRaNWnumKu7GPufBBOr5I1gRPJHkvghrfCGOooJODFvgFimX/KRxk9j0whD2MnKHzM1jYvk9w==" }, "restore-cursor": { "version": "2.0.0", @@ -3335,6 +4042,14 @@ "sparse-bitfield": "^3.0.3" } }, + "sass-lookup": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/sass-lookup/-/sass-lookup-3.0.0.tgz", + "integrity": "sha512-TTsus8CfFRn1N44bvdEai1no6PqdmDiQUiqW5DlpmtT+tYnIt1tXtDIph5KA1efC+LmioJXSnCtUVpcK9gaKIg==", + "requires": { + "commander": "^2.16.0" + } + }, "semver": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", @@ -3364,8 +4079,7 @@ 
"signal-exit": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", - "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", - "dev": true + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=" }, "simple-concat": { "version": "1.0.0", @@ -3438,7 +4152,6 @@ "version": "0.2.0", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.2.0.tgz", "integrity": "sha1-2rc/vPwrqBm03gO9b26qSBZLP50=", - "dev": true, "optional": true, "requires": { "amdefine": ">=0.0.4" @@ -3707,6 +4420,16 @@ } } }, + "stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "requires": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + } + }, "stringstream": { "version": "0.0.6", "resolved": "https://registry.npmjs.org/stringstream/-/stringstream-0.0.6.tgz", @@ -3743,14 +4466,31 @@ "strip-json-comments": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", - "dev": true + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=" + }, + "stylus-lookup": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/stylus-lookup/-/stylus-lookup-3.0.2.tgz", + "integrity": "sha512-oEQGHSjg/AMaWlKe7gqsnYzan8DLcGIHe0dUaFkucZZ14z4zjENRlQMCHT4FNsiWnJf17YN9OvrCfCoi7VvOyg==", + "requires": { + "commander": "^2.8.1", + "debug": "^4.1.0" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + } + } }, "supports-color": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, "requires": { "has-flag": "^3.0.0" } @@ -3775,6 +4515,11 @@ "integrity": "sha1-fLy2S1oUG2ou/CxdLGe04VCyomg=", "dev": true }, + "tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==" + }, "tar-fs": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.0.0.tgz", @@ -3822,6 +4567,11 @@ } } }, + "temp": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/temp/-/temp-0.4.0.tgz", + "integrity": "sha1-ZxrWPVe+D+nXKUZks/xABjZnimA=" + }, "text-extensions": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/text-extensions/-/text-extensions-1.9.0.tgz", @@ -3868,6 +4618,11 @@ "punycode": "^1.4.1" } }, + "traverse-chain": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/traverse-chain/-/traverse-chain-0.1.0.tgz", + "integrity": "sha1-YdvC1Ttp/2CRoSoWj9fUMxB+QPE=" + }, "trim-newlines": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-2.0.0.tgz", @@ -3880,6 +4635,19 @@ "integrity": "sha1-n5up2e+odkw4dpi8v+sshI8RrbM=", "dev": true }, + "tslib": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.11.1.tgz", + "integrity": 
"sha512-aZW88SY8kQbU7gpV19lN24LtXh/yD4ZZg6qieAJDDg+YBsJcSmLGK9QpnUjAKVG/xefmvJGd1WUmfpT/g6AJGA==" + }, + "tsutils": { + "version": "3.17.1", + "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.17.1.tgz", + "integrity": "sha512-kzeQ5B8H3w60nFY2g8cJIuH7JDpsALXySGtwGJ0p2LSjLgay3NdIpqq5SoOBe46bKDW2iq25irHCr8wjomUS2g==", + "requires": { + "tslib": "^1.8.1" + } + }, "tunnel-agent": { "version": "0.4.3", "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.4.3.tgz", @@ -3896,7 +4664,6 @@ "version": "0.3.2", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", - "dev": true, "requires": { "prelude-ls": "~1.1.2" } @@ -3913,6 +4680,11 @@ "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", "dev": true }, + "typescript": { + "version": "3.8.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.8.3.tgz", + "integrity": "sha512-MYlEfn5VrLNsgudQTVJeNaQFUAI7DkhnOjdpAp4T+ku1TfQClewlbSuTVHiA+8skNBgaf02TL/kLOvig4y3G8w==" + }, "uglify-js": { "version": "3.7.1", "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.7.1.tgz", @@ -3939,6 +4711,11 @@ "integrity": "sha1-Tz+1OxBuYJf8+ctBCfKl6b36UCI=", "dev": true }, + "uniq": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", + "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=" + }, "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -3979,6 +4756,19 @@ } } }, + "walkdir": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/walkdir/-/walkdir-0.4.1.tgz", + "integrity": "sha512-3eBwRyEln6E1MSzcxcVpQIhRG8Q1jLvEqRmCZqS3dsfXEDR/AhOF4d+jHg1qvDCpYaVRZjENPQyrVxAkQqxPgQ==" + }, + "wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g=", + "requires": { + "defaults": "^1.0.3" + } + }, "which": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", @@ -4012,8 +4802,7 @@ "word-wrap": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==" }, "wordwrap": { "version": "1.0.0", @@ -4065,8 +4854,7 @@ "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "write": { "version": "0.2.1", diff --git a/package.json b/package.json index a9c37cd52c1..698e60684ce 100644 --- a/package.json +++ b/package.json @@ -27,7 +27,7 @@ "bl": "^2.2.0", "bson": "^1.1.1", "denque": "^1.4.1", - "require_optional": "^1.0.1", + "madge": "^3.7.0", "safe-buffer": "^5.1.2" }, "devDependencies": { diff --git a/test/functional/authentication.test.js b/test/disabled/authentication.test.js similarity index 100% rename from test/functional/authentication.test.js rename to test/disabled/authentication.test.js diff --git a/test/functional/core/basic_single_server_auth.test.js b/test/disabled/basic_single_server_auth.test.js similarity index 99% rename from test/functional/core/basic_single_server_auth.test.js rename to test/disabled/basic_single_server_auth.test.js index 4ef4ff0ae15..9109d9bc684 
100644 --- a/test/functional/core/basic_single_server_auth.test.js +++ b/test/disabled/basic_single_server_auth.test.js @@ -3,7 +3,7 @@ var expect = require('chai').expect, locateAuthMethod = require('./shared').locateAuthMethod, executeCommand = require('./shared').executeCommand, - Connection = require('../../../lib/core/connection/connection'), + Connection = require('../../../lib/cmap/connection'), Bson = require('bson'); const MongoCredentials = require('../../../lib/core/auth/mongo_credentials').MongoCredentials; diff --git a/test/functional/core/connection.test.js b/test/disabled/connection.test.js similarity index 100% rename from test/functional/core/connection.test.js rename to test/disabled/connection.test.js diff --git a/test/functional/disconnect_handler.test.js b/test/disabled/disconnect_handler.test.js similarity index 100% rename from test/functional/disconnect_handler.test.js rename to test/disabled/disconnect_handler.test.js diff --git a/test/functional/jira_bug.test.js b/test/disabled/jira_bug.test.js similarity index 100% rename from test/functional/jira_bug.test.js rename to test/disabled/jira_bug.test.js diff --git a/test/functional/kerberos.test.js b/test/disabled/kerberos.test.js similarity index 100% rename from test/functional/kerberos.test.js rename to test/disabled/kerberos.test.js diff --git a/test/unit/core/mongos/events.test.js b/test/disabled/mongos/events.test.js similarity index 100% rename from test/unit/core/mongos/events.test.js rename to test/disabled/mongos/events.test.js diff --git a/test/unit/core/mongos/reconnect.test.js b/test/disabled/mongos/reconnect.test.js similarity index 100% rename from test/unit/core/mongos/reconnect.test.js rename to test/disabled/mongos/reconnect.test.js diff --git a/test/unit/core/mongos/retryable_writes.test.js b/test/disabled/mongos/retryable_writes.test.js similarity index 98% rename from test/unit/core/mongos/retryable_writes.test.js rename to test/disabled/mongos/retryable_writes.test.js index 6c03ea9f9ca..7fe48e933da 100644 --- a/test/unit/core/mongos/retryable_writes.test.js +++ b/test/disabled/mongos/retryable_writes.test.js @@ -152,7 +152,7 @@ describe('Retryable Writes (Mongos)', function() { } else if (doc.insert) { insertCount++; if (insertCount === 1) { - request.reply({ ok: 0, errmsg: 'not master' }); // simulate a stepdown + request.reply({ ok: 0, errmsg: 'not master', code: 10107 }); // simulate a stepdown } else { command = doc; request.reply({ ok: 1 }); diff --git a/test/unit/core/mongos/sessions.test.js b/test/disabled/mongos/sessions.test.js similarity index 100% rename from test/unit/core/mongos/sessions.test.js rename to test/disabled/mongos/sessions.test.js diff --git a/test/functional/core/mongos_mocks/mixed_seed_list.test.js b/test/disabled/mongos_mocks/mixed_seed_list.test.js similarity index 100% rename from test/functional/core/mongos_mocks/mixed_seed_list.test.js rename to test/disabled/mongos_mocks/mixed_seed_list.test.js diff --git a/test/functional/core/mongos_mocks/multiple_proxies.test.js b/test/disabled/mongos_mocks/multiple_proxies.test.js similarity index 100% rename from test/functional/core/mongos_mocks/multiple_proxies.test.js rename to test/disabled/mongos_mocks/multiple_proxies.test.js diff --git a/test/functional/core/mongos_mocks/proxy_failover.test.js b/test/disabled/mongos_mocks/proxy_failover.test.js similarity index 100% rename from test/functional/core/mongos_mocks/proxy_failover.test.js rename to test/disabled/mongos_mocks/proxy_failover.test.js diff --git 
a/test/functional/core/mongos_mocks/proxy_read_preference.test.js b/test/disabled/mongos_mocks/proxy_read_preference.test.js similarity index 100% rename from test/functional/core/mongos_mocks/proxy_read_preference.test.js rename to test/disabled/mongos_mocks/proxy_read_preference.test.js diff --git a/test/functional/core/mongos_mocks/single_proxy_connection.test.js b/test/disabled/mongos_mocks/single_proxy_connection.test.js similarity index 100% rename from test/functional/core/mongos_mocks/single_proxy_connection.test.js rename to test/disabled/mongos_mocks/single_proxy_connection.test.js diff --git a/test/unit/core/pool.test.js b/test/disabled/pool.test.js similarity index 96% rename from test/unit/core/pool.test.js rename to test/disabled/pool.test.js index 81fa43fa2a4..80dcc643f8b 100644 --- a/test/unit/core/pool.test.js +++ b/test/disabled/pool.test.js @@ -3,7 +3,7 @@ const expect = require('chai').expect; const mock = require('mongodb-mock-server'); const Server = require('../../../lib/core/topologies/server'); -const MongoWriteConcernError = require('../../../lib/core/error').MongoWriteConcernError; +const { MongoWriteConcernError } = require('../../../lib/error'); const sinon = require('sinon'); const test = {}; diff --git a/test/functional/reconnect.test.js b/test/disabled/reconnect.test.js similarity index 100% rename from test/functional/reconnect.test.js rename to test/disabled/reconnect.test.js diff --git a/test/functional/core/replset.test.js b/test/disabled/replset.test.js similarity index 100% rename from test/functional/core/replset.test.js rename to test/disabled/replset.test.js diff --git a/test/unit/core/replset/auth.test.js b/test/disabled/replset/auth.test.js similarity index 100% rename from test/unit/core/replset/auth.test.js rename to test/disabled/replset/auth.test.js diff --git a/test/unit/core/replset/compression.test.js b/test/disabled/replset/compression.test.js similarity index 100% rename from test/unit/core/replset/compression.test.js rename to test/disabled/replset/compression.test.js diff --git a/test/unit/core/replset/read_preference.test.js b/test/disabled/replset/read_preference.test.js similarity index 100% rename from test/unit/core/replset/read_preference.test.js rename to test/disabled/replset/read_preference.test.js diff --git a/test/unit/core/replset/retryable_writes.test.js b/test/disabled/replset/retryable_writes.test.js similarity index 98% rename from test/unit/core/replset/retryable_writes.test.js rename to test/disabled/replset/retryable_writes.test.js index 947e10bcc65..e1c4e7820c5 100644 --- a/test/unit/core/replset/retryable_writes.test.js +++ b/test/disabled/replset/retryable_writes.test.js @@ -146,7 +146,7 @@ describe('Retryable Writes (ReplSet)', function() { } else if (doc.insert) { insertCount++; if (insertCount === 1) { - request.reply({ ok: 0, errmsg: 'not master' }); // simulate a stepdown + request.reply({ ok: 0, errmsg: 'not master', code: 10107 }); // simulate a stepdown } else { command = doc; request.reply({ ok: 1 }); diff --git a/test/unit/core/replset/sessions.test.js b/test/disabled/replset/sessions.test.js similarity index 100% rename from test/unit/core/replset/sessions.test.js rename to test/disabled/replset/sessions.test.js diff --git a/test/unit/core/replset/step_down.test.js b/test/disabled/replset/step_down.test.js similarity index 98% rename from test/unit/core/replset/step_down.test.js rename to test/disabled/replset/step_down.test.js index 3fde1b28e84..9d6e485f4c0 100644 --- 
a/test/unit/core/replset/step_down.test.js +++ b/test/disabled/replset/step_down.test.js @@ -68,7 +68,8 @@ describe('Step Down (ReplSet)', function() { } else { return request.reply({ ok: 0, - errmsg: 'not master' + errmsg: 'not master', + code: 10107 }); } } diff --git a/test/unit/core/replset/transactions_feature_decoration.test.js b/test/disabled/replset/transactions_feature_decoration.test.js similarity index 100% rename from test/unit/core/replset/transactions_feature_decoration.test.js rename to test/disabled/replset/transactions_feature_decoration.test.js diff --git a/test/disabled/replset/utils.test.js b/test/disabled/replset/utils.test.js new file mode 100644 index 00000000000..6d61cdf6487 --- /dev/null +++ b/test/disabled/replset/utils.test.js @@ -0,0 +1,16 @@ +'use strict'; +const Timeout = require('../../../../lib/core/topologies/shared').Timeout; +const expect = require('chai').expect; + +describe('', function() { + it('should detect when a timer is finished running', function(done) { + let timeout; + function timeoutHandler() { + expect(timeout.isRunning()).to.be.false; + done(); + } + + timeout = new Timeout(timeoutHandler, 100); + timeout.start(); + }); +}); diff --git a/test/functional/replset_connection.test.js b/test/disabled/replset_connection.test.js similarity index 100% rename from test/functional/replset_connection.test.js rename to test/disabled/replset_connection.test.js diff --git a/test/functional/replset_failover.test.js b/test/disabled/replset_failover.test.js similarity index 100% rename from test/functional/replset_failover.test.js rename to test/disabled/replset_failover.test.js diff --git a/test/functional/replset_operations.test.js b/test/disabled/replset_operations.test.js similarity index 100% rename from test/functional/replset_operations.test.js rename to test/disabled/replset_operations.test.js diff --git a/test/functional/replset_read_preference.test.js b/test/disabled/replset_read_preference.test.js similarity index 100% rename from test/functional/replset_read_preference.test.js rename to test/disabled/replset_read_preference.test.js diff --git a/test/functional/core/rs_mocks/add_remove.test.js b/test/disabled/rs_mocks/add_remove.test.js similarity index 100% rename from test/functional/core/rs_mocks/add_remove.test.js rename to test/disabled/rs_mocks/add_remove.test.js diff --git a/test/functional/core/rs_mocks/all_servers_close.test.js b/test/disabled/rs_mocks/all_servers_close.test.js similarity index 100% rename from test/functional/core/rs_mocks/all_servers_close.test.js rename to test/disabled/rs_mocks/all_servers_close.test.js diff --git a/test/functional/core/rs_mocks/connection.test.js b/test/disabled/rs_mocks/connection.test.js similarity index 100% rename from test/functional/core/rs_mocks/connection.test.js rename to test/disabled/rs_mocks/connection.test.js diff --git a/test/functional/core/rs_mocks/failover.test.js b/test/disabled/rs_mocks/failover.test.js similarity index 100% rename from test/functional/core/rs_mocks/failover.test.js rename to test/disabled/rs_mocks/failover.test.js diff --git a/test/functional/core/rs_mocks/maintanance_mode.test.js b/test/disabled/rs_mocks/maintanance_mode.test.js similarity index 100% rename from test/functional/core/rs_mocks/maintanance_mode.test.js rename to test/disabled/rs_mocks/maintanance_mode.test.js diff --git a/test/functional/core/rs_mocks/monitoring.test.js b/test/disabled/rs_mocks/monitoring.test.js similarity index 100% rename from test/functional/core/rs_mocks/monitoring.test.js 
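The retryable-writes and step-down hunks above all attach code: 10107 to the mocked 'not master' replies. 10107 is the server's NotMaster error code, and the driver's newer error handling classifies stepdowns by numeric code rather than by matching the errmsg string, so a mock reply without the code would no longer exercise the retry path. The shape of the pattern, condensed from the handlers used in these tests (the one-failure-then-success policy is illustrative):

// sketch: simulate a single stepdown with mongodb-mock-server
const mock = require('mongodb-mock-server');

mock.createServer().then(server => {
  let steppedDown = false;
  server.setMessageHandler(request => {
    const doc = request.document;
    if (doc.ismaster) {
      request.reply(mock.DEFAULT_ISMASTER);
    } else if (doc.insert && !steppedDown) {
      steppedDown = true;
      // the numeric code is what marks this reply as a retryable stepdown
      request.reply({ ok: 0, errmsg: 'not master', code: 10107 });
    } else if (doc.insert) {
      request.reply({ ok: 1 });
    }
  });
});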
rename to test/disabled/rs_mocks/monitoring.test.js diff --git a/test/functional/core/rs_mocks/no_primary_found.test.js b/test/disabled/rs_mocks/no_primary_found.test.js similarity index 100% rename from test/functional/core/rs_mocks/no_primary_found.test.js rename to test/disabled/rs_mocks/no_primary_found.test.js diff --git a/test/functional/core/rs_mocks/operation.test.js b/test/disabled/rs_mocks/operation.test.js similarity index 100% rename from test/functional/core/rs_mocks/operation.test.js rename to test/disabled/rs_mocks/operation.test.js diff --git a/test/functional/core/rs_mocks/primary_loses_network.test.js b/test/disabled/rs_mocks/primary_loses_network.test.js similarity index 100% rename from test/functional/core/rs_mocks/primary_loses_network.test.js rename to test/disabled/rs_mocks/primary_loses_network.test.js diff --git a/test/functional/core/rs_mocks/read_preferences.test.js b/test/disabled/rs_mocks/read_preferences.test.js similarity index 100% rename from test/functional/core/rs_mocks/read_preferences.test.js rename to test/disabled/rs_mocks/read_preferences.test.js diff --git a/test/functional/core/rs_mocks/step_down.test.js b/test/disabled/rs_mocks/step_down.test.js similarity index 100% rename from test/functional/core/rs_mocks/step_down.test.js rename to test/disabled/rs_mocks/step_down.test.js diff --git a/test/functional/sdam.test.js b/test/disabled/sdam.test.js similarity index 100% rename from test/functional/sdam.test.js rename to test/disabled/sdam.test.js diff --git a/test/functional/core/sdam_monitoring_mocks/mongos_topology.test.js b/test/disabled/sdam_monitoring_mocks/mongos_topology.test.js similarity index 100% rename from test/functional/core/sdam_monitoring_mocks/mongos_topology.test.js rename to test/disabled/sdam_monitoring_mocks/mongos_topology.test.js diff --git a/test/functional/core/sdam_monitoring_mocks/replset_topology.test.js b/test/disabled/sdam_monitoring_mocks/replset_topology.test.js similarity index 100% rename from test/functional/core/sdam_monitoring_mocks/replset_topology.test.js rename to test/disabled/sdam_monitoring_mocks/replset_topology.test.js diff --git a/test/functional/core/sdam_monitoring_mocks/single_topology.test.js b/test/disabled/sdam_monitoring_mocks/single_topology.test.js similarity index 100% rename from test/functional/core/sdam_monitoring_mocks/single_topology.test.js rename to test/disabled/sdam_monitoring_mocks/single_topology.test.js diff --git a/test/functional/core/server.test.js b/test/disabled/server.test.js similarity index 74% rename from test/functional/core/server.test.js rename to test/disabled/server.test.js index c758ad4279f..aef0dc2696b 100644 --- a/test/functional/core/server.test.js +++ b/test/disabled/server.test.js @@ -403,147 +403,6 @@ describe('Server tests', function() { } }); - it('should correctly reconnect to server with automatic reconnect enabled', { - metadata: { requires: { topology: 'single' } }, - test: function(done) { - const config = this.configuration; - if (config.usingUnifiedTopology()) { - // The new SDAM layer always reconnects, so this test is no longer relevant - return this.skip(); - } - - var server = config.newTopology(this.configuration.host, this.configuration.port, { - reconnect: true, - size: 1, - reconnectInterval: 50 - }); - - // Test flags - var emittedClose = false; - - // Add event listeners - server.on('connect', function(_server) { - // Execute the command - _server.command( - 'system.$cmd', - { ismaster: true }, - { readPreference: new 
ReadPreference('primary') }, - function(err, result) { - expect(err).to.be.null; - _server.s.currentReconnectRetry = 10; - - // Write garbage, force socket closure - try { - var a = Buffer.alloc(100); - for (var i = 0; i < 100; i++) a[i] = i; - result.connection.write(a); - } catch (loopErr) { - console.log(loopErr); - } - - // Ensure the server died - setTimeout(function() { - // Attempt a proper command - _server.command( - 'system.$cmd', - { ismaster: true }, - { readPreference: new ReadPreference('primary') }, - function(cmdErr) { - expect(cmdErr).to.not.be.null; - } - ); - }, 100); - } - ); - }); - - server.once('close', function() { - emittedClose = true; - }); - - server.once('reconnect', function() { - expect(emittedClose).to.be.true; - expect(server.isConnected()).to.be.true; - expect(server.s.pool.retriesLeft).to.equal(30); - server.destroy(); - done(); - }); - - // Start connection - server.connect(); - } - }); - - it('should correctly reconnect to server with automatic reconnect disabled', { - metadata: { - requires: { - topology: 'single' - } - // ignore: { travis:true } - }, - - test: function(done) { - const config = this.configuration; - if (config.usingUnifiedTopology()) { - // The new SDAM layer always reconnects, so this test is no longer relevant - return this.skip(); - } - - var server = config.newTopology(this.configuration.host, this.configuration.port, { - reconnect: false, - size: 1 - }); - - // Test flags - var emittedClose = false; - - // Add event listeners - server.on('connect', function(_server) { - // Execute the command - _server.command( - 'system.$cmd', - { ismaster: true }, - { readPreference: new ReadPreference('primary') }, - function(err, result) { - expect(err).to.be.null; - // Write garbage, force socket closure - try { - result.connection.destroy(); - } catch (destroyErr) { - console.log(destroyErr); - } - - process.nextTick(function() { - // Attempt a proper command - _server.command( - 'system.$cmd', - { ismaster: true }, - { readPreference: new ReadPreference('primary') }, - function(cmdErr) { - expect(cmdErr).to.not.be.null; - } - ); - }); - } - ); - }); - - server.on('close', function() { - emittedClose = true; - }); - - setTimeout(function() { - expect(emittedClose).to.be.true; - expect(server.isConnected()).to.be.false; - server.destroy(); - done(); - }, 500); - - // Start connection - server.connect(); - } - }); - it('should reconnect when initial connection failed', { metadata: { requires: { @@ -586,72 +445,6 @@ describe('Server tests', function() { } }); - it('should correctly place new connections in available list on reconnect', { - metadata: { requires: { topology: 'single' } }, - test: function(done) { - const config = this.configuration; - if (config.usingUnifiedTopology()) { - // The new SDAM layer always reconnects, so this test is no longer relevant - return this.skip(); - } - - var server = config.newTopology(this.configuration.host, this.configuration.port, { - reconnect: true, - size: 1, - reconnectInterval: 50 - }); - - // Add event listeners - server.on('connect', function(_server) { - // Execute the command - _server.command( - 'system.$cmd', - { ismaster: true }, - { readPreference: new ReadPreference('primary') }, - function(err, result) { - expect(err).to.be.null; - _server.s.currentReconnectRetry = 10; - - // Write garbage, force socket closure - try { - var a = Buffer.alloc(100); - for (var i = 0; i < 100; i++) a[i] = i; - result.connection.write(a); - } catch (garbageErr) { - console.log(garbageErr); - } - } - ); - }); 
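// Hedged aside on the deletions in this hunk: these tests exercised the legacy
// topology's explicit reconnect options (reconnect, reconnectInterval) and its
// 'reconnect' event, plus internals like server.s.pool.retriesLeft. As the
// skip-guards above already note, "The new SDAM layer always reconnects", so
// under the unified topology there is no event or pool counter to assert on;
// a failed operation simply re-enters server selection. A minimal sketch of
// the equivalent modern expectation (package name and options are assumptions):
const { MongoClient } = require('mongodb');

async function operationSurvivesBlip(uri) {
  const client = new MongoClient(uri, { serverSelectionTimeoutMS: 5000 });
  await client.connect();
  // even if the underlying socket drops here, the next operation waits on
  // server selection internally instead of emitting a 'reconnect' event
  return client.db('test').collection('coll').findOne({});
}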
- - server.once('reconnect', function() { - for (var i = 0; i < 100; i++) { - server.command('system.$cmd', { ismaster: true }, function(err, result) { - expect(err).to.be.null; - expect(result).to.exist; - }); - } - - server.command('system.$cmd', { ismaster: true }, function(err, result) { - expect(err).to.be.null; - expect(result).to.exist; - - setTimeout(function() { - expect(server.s.pool.availableConnections.length).to.be.above(0); - expect(server.s.pool.inUseConnections.length).to.equal(0); - expect(server.s.pool.connectingConnections).to.equal(0); - - server.destroy(); - done(); - }, 1000); - }); - }); - - // Start connection - server.connect(); - } - }); - it('should not overflow the poolSize due to concurrent operations', { metadata: { requires: { @@ -931,55 +724,6 @@ describe('Server tests', function() { } ); - it( - 'should correctly connect server to single instance and execute insert with snappy compression', - { - metadata: { requires: { topology: ['single'], mongodb: '>=3.5.x' } }, - - test: function(done) { - const config = this.configuration; - if (config.usingUnifiedTopology()) { - // Disabled for inspection of properties only relevant to legacy topology - return this.skip(); - } - - var server = config.newTopology(this.configuration.host, this.configuration.port, { - bson: new Bson(), - compression: { - compressors: ['snappy', 'zlib'] - } - }); - - // Add event listeners - server.on('connect', function() { - // Check compression has been negotiated - expect(server.s.pool.options.agreedCompressor).to.equal('snappy'); - - server.insert('integration_tests.inserts', { a: 1 }, function(insertOneErr, insertOneR) { - expect(insertOneErr).to.be.null; - expect(insertOneR.result.n).to.equal(1); - expect(insertOneR.message.fromCompressed).to.be.true; - - server.insert('integration_tests.inserts', { a: 2 }, { ordered: false }, function( - err, - r - ) { - expect(err).to.be.null; - expect(r.result.n).to.equal(1); - expect(r.message.fromCompressed).to.be.true; - - server.destroy(); - done(); - }); - }); - }); - - // Start connection - server.connect(); - } - } - ); - describe('Unsupported wire protocols', function() { let server; beforeEach(() => mock.createServer().then(_server => (server = _server))); diff --git a/test/functional/sharding_failover.test.js b/test/disabled/sharding_failover.test.js similarity index 100% rename from test/functional/sharding_failover.test.js rename to test/disabled/sharding_failover.test.js diff --git a/test/functional/sharding_read_preference.test.js b/test/disabled/sharding_read_preference.test.js similarity index 100% rename from test/functional/sharding_read_preference.test.js rename to test/disabled/sharding_read_preference.test.js diff --git a/test/unit/core/single/sessions.test.js b/test/disabled/single/sessions.test.js similarity index 100% rename from test/unit/core/single/sessions.test.js rename to test/disabled/single/sessions.test.js diff --git a/test/functional/core/single_mocks/compression.test.js b/test/disabled/single_mocks/compression.test.js similarity index 100% rename from test/functional/core/single_mocks/compression.test.js rename to test/disabled/single_mocks/compression.test.js diff --git a/test/functional/sni.test.js b/test/disabled/sni.test.js similarity index 100% rename from test/functional/sni.test.js rename to test/disabled/sni.test.js diff --git a/test/functional/ssl_mongoclient.test.js b/test/disabled/ssl_mongoclient.test.js similarity index 100% rename from test/functional/ssl_mongoclient.test.js rename to 
test/disabled/ssl_mongoclient.test.js diff --git a/test/functional/ssl_validation.test.js b/test/disabled/ssl_validation.test.js similarity index 100% rename from test/functional/ssl_validation.test.js rename to test/disabled/ssl_validation.test.js diff --git a/test/unit/core/wire_protocol_test.js.test.js b/test/disabled/wire_protocol.test.js similarity index 84% rename from test/unit/core/wire_protocol_test.js.test.js rename to test/disabled/wire_protocol.test.js index 0a0cca1d570..c38cf78efb9 100644 --- a/test/unit/core/wire_protocol_test.js.test.js +++ b/test/disabled/wire_protocol.test.js @@ -4,8 +4,8 @@ const chai = require('chai'); const expect = chai.expect; const bson = require('bson'); const sinon = require('sinon'); -const Pool = require('../../../lib/core/connection/pool.js'); -const wireProtocol = require('../../../lib/core/wireprotocol'); +const { ConnectionPool } = require('../../../lib/cmap/connection_pool.js'); +const wireProtocol = require('../../../lib/cmap/wire_protocol'); describe('WireProtocol', function() { it('should only set bypassDocumentValidation to true if explicitly set by user to true', function() { @@ -17,7 +17,7 @@ describe('WireProtocol', function() { }); function testPoolWrite(bypassDocumentValidation, expected) { - const pool = sinon.createStubInstance(Pool); + const pool = sinon.createStubInstance(ConnectionPool); const fakeServer = { s: { pool, bson } }; const ns = 'fake.namespace'; const ops = [{ a: 1 }, { b: 2 }]; diff --git a/test/examples/transactions.js b/test/examples/transactions.js index 846d8197599..0376d981f31 100644 --- a/test/examples/transactions.js +++ b/test/examples/transactions.js @@ -42,7 +42,7 @@ describe('examples(transactions):', function() { console.log('Transaction aborted. Caught exception during transaction.'); // If transient error, retry the whole transaction - if (error.errorLabels && error.errorLabels.indexOf('TransientTransactionError') >= 0) { + if (error.hasErrorLabel('TransientTransactionError')) { console.log('TransientTransactionError, retrying transaction ...'); await runTransactionWithRetry(txnFunc, client, session); } else { @@ -98,10 +98,7 @@ describe('examples(transactions):', function() { await session.commitTransaction(); console.log('Transaction committed.'); } catch (error) { - if ( - error.errorLabels && - error.errorLabels.indexOf('UnknownTransactionCommitResult') >= 0 - ) { + if (error.hasErrorLabel('UnknownTransactionCommitResult')) { console.log('UnknownTransactionCommitResult, retrying commit operation ...'); await commitWithRetry(session); } else { @@ -156,10 +153,7 @@ describe('examples(transactions):', function() { await session.commitTransaction(); console.log('Transaction committed.'); } catch (error) { - if ( - error.errorLabels && - error.errorLabels.indexOf('UnknownTransactionCommitResult') >= 0 - ) { + if (error.hasErrorLabel('UnknownTransactionCommitResult')) { console.log('UnknownTransactionCommitResult, retrying commit operation ...'); await commitWithRetry(session); } else { @@ -176,7 +170,7 @@ describe('examples(transactions):', function() { console.log('Transaction aborted. 
Caught exception during transaction.'); // If transient error, retry the whole transaction - if (error.errorLabels && error.errorLabels.indexOf('TransientTransactionError') >= 0) { + if (error.hasErrorLabel('TransientTransactionError')) { console.log('TransientTransactionError, retrying transaction ...'); await runTransactionWithRetry(txnFunc, client, session); } else { diff --git a/test/functional/aggregation.test.js b/test/functional/aggregation.test.js index 1ab06ad801c..bfde7e026ff 100644 --- a/test/functional/aggregation.test.js +++ b/test/functional/aggregation.test.js @@ -53,6 +53,7 @@ describe('Aggregation', function() { var collection = db.collection('shouldCorrectlyExecuteSimpleAggregationPipelineUsingArray'); // Insert the docs collection.insert(docs, { w: 1 }, function(err, result) { + if (err) console.dir({ err }); expect(result).to.exist; expect(err).to.be.null; diff --git a/test/functional/apm.test.js b/test/functional/apm.test.js index 3f229b751af..665f56fb31e 100644 --- a/test/functional/apm.test.js +++ b/test/functional/apm.test.js @@ -1,14 +1,15 @@ 'use strict'; const instrument = require('../..').instrument; -const shared = require('./shared'); -const setupDatabase = shared.setupDatabase; -const filterForCommands = shared.filterForCommands; -const filterOutCommands = shared.filterOutCommands; -const ignoreNsNotFound = shared.ignoreNsNotFound; -const loadSpecTests = require('../spec').loadSpecTests; -const chai = require('chai'); -const expect = chai.expect; +const { + setupDatabase, + filterForCommands, + filterOutCommands, + ignoreNsNotFound +} = require('./shared'); +const { loadSpecTests } = require('../spec'); +const { expect } = require('chai'); +const ReadPreference = require('../../lib/read_preference'); describe('APM', function() { before(function() { @@ -197,7 +198,6 @@ describe('APM', function() { // The actual test we wish to run test: function() { const self = this; - const ReadPreference = self.configuration.require.ReadPreference; const started = []; const succeeded = []; const client = self.configuration.newClient( @@ -223,15 +223,9 @@ describe('APM', function() { ) .then(() => { expect(started).to.have.lengthOf(2); - - if (self.configuration.usingUnifiedTopology()) { - expect(started[0]) - .property('address') - .to.not.equal(started[1].address); - } else { - // Ensure command was not sent to the primary - expect(started[0].connectionId).to.not.equal(started[1].connectionId); - } + expect(started[0]) + .property('address') + .to.not.equal(started[1].address); return client.close(); }); @@ -245,7 +239,6 @@ describe('APM', function() { // The actual test we wish to run test: function() { const self = this; - const ReadPreference = self.configuration.require.ReadPreference; const started = []; const succeeded = []; const client = self.configuration.newClient( @@ -279,15 +272,9 @@ describe('APM', function() { ) .then(() => { expect(started).to.have.lengthOf(2); - - // Ensure command was not sent to the primary - if (self.configuration.usingUnifiedTopology()) { - expect(started[0]) - .property('address') - .to.not.equal(started[1].address); - } else { - expect(started[0].connectionId).to.not.equal(started[1].connectionId); - } + expect(started[0]) + .property('address') + .to.not.equal(started[1].address); return client.close(); }); @@ -371,7 +358,6 @@ describe('APM', function() { // The actual test we wish to run test: function() { const self = this; - const ReadPreference = self.configuration.require.ReadPreference; const started = []; const succeeded = []; 
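The test/examples/transactions.js hunks above replace the manual error.errorLabels.indexOf(label) >= 0 checks with the driver's error.hasErrorLabel(label) helper, which folds the existence check on errorLabels into a single call. Condensed from the example itself:

// retry pattern as it reads after this change
async function runTransactionWithRetry(txnFunc, client, session) {
  try {
    await txnFunc(client, session);
  } catch (error) {
    if (error.hasErrorLabel('TransientTransactionError')) {
      // transient errors are safe to retry as a whole transaction
      await runTransactionWithRetry(txnFunc, client, session);
    } else {
      throw error;
    }
  }
}

The surrounding apm.test.js hunk makes a related cleanup, importing ReadPreference directly from the driver instead of reaching through configuration.require.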
const failed = []; @@ -446,7 +432,6 @@ describe('APM', function() { // The actual test we wish to run test: function() { const self = this; - const ReadPreference = self.configuration.require.ReadPreference; const started = []; const succeeded = []; const failed = []; diff --git a/test/functional/buffering_proxy.test.js b/test/functional/buffering_proxy.test.js index 933f3653d0a..fd4ef67803b 100644 --- a/test/functional/buffering_proxy.test.js +++ b/test/functional/buffering_proxy.test.js @@ -2,6 +2,7 @@ var test = require('./shared').assert; var co = require('co'); var mock = require('mongodb-mock-server'); +const { ReadPreference, ObjectId } = require('../..'); var extend = function(template, fields) { var object = {}; @@ -28,10 +29,7 @@ describe.skip('Buffering Proxy', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId, - ReadPreference = configuration.require.ReadPreference; - + var configuration = this.configuration; var currentIsMasterIndex = 0; var electionIds = [new ObjectId(0), new ObjectId(1)]; @@ -249,10 +247,7 @@ describe.skip('Buffering Proxy', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId, - ReadPreference = configuration.require.ReadPreference; - + var configuration = this.configuration; var currentIsMasterIndex = 0; var electionIds = [new ObjectId(0), new ObjectId(1)]; diff --git a/test/functional/change_stream.test.js b/test/functional/change_stream.test.js index 61b823e7b59..8a08667ba5f 100644 --- a/test/functional/change_stream.test.js +++ b/test/functional/change_stream.test.js @@ -1,15 +1,14 @@ 'use strict'; -var assert = require('assert'); -var Transform = require('stream').Transform; -const MongoError = require('../../lib/core').MongoError; -var MongoNetworkError = require('../../lib/core').MongoNetworkError; -var setupDatabase = require('./shared').setupDatabase; -var delay = require('./shared').delay; -var co = require('co'); -var mock = require('mongodb-mock-server'); +const assert = require('assert'); +const { Transform } = require('stream'); +const { MongoError, MongoNetworkError } = require('../../lib/error'); +const { setupDatabase, delay } = require('./shared'); +const co = require('co'); +const mock = require('mongodb-mock-server'); const chai = require('chai'); const expect = chai.expect; const sinon = require('sinon'); +const { ObjectId, Timestamp, Long, ReadPreference } = require('../..'); chai.use(require('chai-subset')); @@ -709,7 +708,6 @@ describe('Change Streams', function() { test: function(done) { var configuration = this.configuration; - const ObjectId = configuration.require.ObjectId; // Contain mock server var primaryServer = null; @@ -806,7 +804,6 @@ describe('Change Streams', function() { }, test: function(done) { var configuration = this.configuration; - const ObjectId = configuration.require.ObjectId; // Contain mock server var primaryServer = null; @@ -901,9 +898,6 @@ describe('Change Streams', function() { }, test: function(done) { var configuration = this.configuration; - const ObjectId = configuration.require.ObjectId; - const Timestamp = configuration.require.Timestamp; - const Long = configuration.require.Long; // Contain mock server var primaryServer = null; @@ -1249,7 +1243,6 @@ describe('Change Streams', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var ReadPreference = configuration.require.ReadPreference; const client = 
configuration.newClient(); return client.connect().then(client => { @@ -1347,9 +1340,6 @@ describe('Change Streams', function() { }, test: function(done) { var configuration = this.configuration; - const ObjectId = configuration.require.ObjectId; - const Timestamp = configuration.require.Timestamp; - const Long = configuration.require.Long; // Contain mock server var primaryServer = null; @@ -1636,9 +1626,6 @@ describe('Change Streams', function() { metadata: { requires: { topology: 'replicaset', mongodb: '>=3.7.3' } }, test: function(done) { const configuration = this.configuration; - const ObjectId = configuration.require.ObjectId; - const Timestamp = configuration.require.Timestamp; - const Long = configuration.require.Long; const OPERATION_TIME = new Timestamp(4, 1501511802); @@ -2047,7 +2034,7 @@ describe('Change Streams', function() { this.collection = 'test_coll'; this.ns = `${this.database}.${this.collection}`; this._timestampCounter = 0; - this.cursorId = new this.config.require.Long('9064341847921713401'); + this.cursorId = new Long('9064341847921713401'); this.commandIterators = commandIterators; this.promise = this.init(); } @@ -2174,7 +2161,7 @@ describe('Change Streams', function() { // Helpers timestamp() { - return new this.config.require.Timestamp(this._timestampCounter++, Date.now()); + return new Timestamp(this._timestampCounter++, Date.now()); } applyOpTime(obj) { @@ -2223,7 +2210,7 @@ describe('Change Streams', function() { return { ts: this.timestamp(), ns: this.namespace, - _id: new this.config.require.ObjectId() + _id: new ObjectId() }; } } diff --git a/test/functional/client_side_encryption/corpus.test.js b/test/functional/client_side_encryption/corpus.test.js index 46e65597c83..e4d075635d2 100644 --- a/test/functional/client_side_encryption/corpus.test.js +++ b/test/functional/client_side_encryption/corpus.test.js @@ -128,10 +128,7 @@ describe('Client Side Encryption Corpus', function() { before(function() { // 1. Create a MongoClient without encryption enabled (referred to as ``client``). 
- client = this.configuration.newClient({ - useNewUrlParser: true, - useUnifiedTopology: true - }); + client = this.configuration.newClient(); return Promise.resolve() .then(() => client.connect()) @@ -192,14 +189,7 @@ describe('Client Side Encryption Corpus', function() { [dataNamespace]: corpusSchema }; } - clientEncrypted = this.configuration.newClient( - {}, - { - useNewUrlParser: true, - useUnifiedTopology: true, - autoEncryption - } - ); + clientEncrypted = this.configuration.newClient({}, { autoEncryption }); return clientEncrypted.connect().then(() => { clientEncryption = new mongodbClientEncryption.ClientEncryption(client, { diff --git a/test/functional/client_side_encryption/driver.test.js b/test/functional/client_side_encryption/driver.test.js index ff36cd1a415..dd30606da28 100644 --- a/test/functional/client_side_encryption/driver.test.js +++ b/test/functional/client_side_encryption/driver.test.js @@ -26,7 +26,7 @@ describe('Client Side Encryption Functional', function() { describe('BSON Options', function() { beforeEach(function() { - this.client = this.configuration.newClient({}, { useUnifiedTopology: true }); + this.client = this.configuration.newClient(); const noop = () => {}; function encryptSchema(keyId, bsonType) { @@ -77,7 +77,6 @@ describe('Client Side Encryption Functional', function() { this.encryptedClient = this.configuration.newClient( {}, { - useUnifiedTopology: true, autoEncryption: { keyVaultNamespace, kmsProviders diff --git a/test/functional/client_side_encryption/prose.test.js b/test/functional/client_side_encryption/prose.test.js index 5c5abfd67f9..6907526fd75 100644 --- a/test/functional/client_side_encryption/prose.test.js +++ b/test/functional/client_side_encryption/prose.test.js @@ -38,10 +38,7 @@ describe('Client Side Encryption Prose Tests', function() { const mongodbClientEncryption = this.configuration.mongodbClientEncryption; // #. Create a MongoClient without encryption enabled (referred to as ``client``). Enable command monitoring to listen for command_started events. - this.client = this.configuration.newClient( - {}, - { useNewUrlParser: true, useUnifiedTopology: true, monitorCommands: true } - ); + this.client = this.configuration.newClient({}, { monitorCommands: true }); this.commandStartedEvents = new EventCollector(this.client, 'commandStarted', { exclude: ['ismaster'] @@ -105,8 +102,6 @@ describe('Client Side Encryption Prose Tests', function() { this.clientEncrypted = this.configuration.newClient( {}, { - useNewUrlParser: true, - useUnifiedTopology: true, autoEncryption: { keyVaultNamespace, kmsProviders: this.configuration.kmsProviders(null, localKey), @@ -316,10 +311,7 @@ describe('Client Side Encryption Prose Tests', function() { // "aws": { } // } // Configure with ``keyVaultNamespace`` set to ``admin.datakeys``, and a default MongoClient as the ``keyVaultClient``. - this.client = this.configuration.newClient( - {}, - { useNewUrlParser: true, useUnifiedTopology: true } - ); + this.client = this.configuration.newClient(); return this.client.connect().then(() => { const mongodbClientEncryption = this.configuration.mongodbClientEncryption; @@ -466,10 +458,7 @@ describe('Client Side Encryption Prose Tests', function() { // First, perform the setup. // #. Create a MongoClient without encryption enabled (referred to as ``client``). 
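Across the client-side-encryption tests in this region, useNewUrlParser and useUnifiedTopology are removed from every newClient(...) call, leaving only options that still carry meaning (such as monitorCommands or autoEncryption). The apparent rationale, consistent with the rest of this diff, is that the new URL parser and the unified topology have become the defaults, making the opt-in flags redundant; a before/after sketch (hedged, since the default change itself is inferred from these hunks):

// before: explicit opt-in flags
// const client = this.configuration.newClient({}, { useNewUrlParser: true, useUnifiedTopology: true, monitorCommands: true });
// after: defaults assumed, only meaningful options remain
const client = this.configuration.newClient({}, { monitorCommands: true });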
- this.client = this.configuration.newClient( - {}, - { useNewUrlParser: true, useUnifiedTopology: true } - ); + this.client = this.configuration.newClient(); return ( this.client @@ -501,8 +490,6 @@ describe('Client Side Encryption Prose Tests', function() { this.clientEncrypted = this.configuration.newClient( {}, { - useNewUrlParser: true, - useUnifiedTopology: true, monitorCommands: true, autoEncryption: { keyVaultNamespace, @@ -659,10 +646,7 @@ describe('Client Side Encryption Prose Tests', function() { // First, perform the setup. // #. Create a MongoClient without encryption enabled (referred to as ``client``). - this.client = this.configuration.newClient( - {}, - { useNewUrlParser: true, useUnifiedTopology: true } - ); + this.client = this.configuration.newClient(); return this.client .connect() @@ -685,8 +669,6 @@ describe('Client Side Encryption Prose Tests', function() { this.clientEncrypted = this.configuration.newClient( {}, { - useNewUrlParser: true, - useUnifiedTopology: true, autoEncryption: { keyVaultNamespace, kmsProviders: this.configuration.kmsProviders(null, localKey) @@ -737,10 +719,7 @@ describe('Client Side Encryption Prose Tests', function() { const externalSchema = loadExternal('external-schema.json'); beforeEach(function() { - this.client = this.configuration.newClient( - {}, - { useNewUrlParser: true, useUnifiedTopology: true } - ); + this.client = this.configuration.newClient(); // #. Create a MongoClient without encryption enabled (referred to as ``client``). return ( @@ -785,7 +764,7 @@ describe('Client Side Encryption Prose Tests', function() { // this.configuration.url('fake-user', 'fake-pwd'), // TODO: Do this properly {}, - { useNewUrlParser: true, useUnifiedTopology: true, monitorCommands: true } + { monitorCommands: true } ); this.commandStartedEvents = new EventCollector( @@ -822,8 +801,6 @@ describe('Client Side Encryption Prose Tests', function() { this.clientEncrypted = this.configuration.newClient( {}, { - useNewUrlParser: true, - useUnifiedTopology: true, autoEncryption: Object.assign({}, options, { schemaMap: { 'db.coll': externalSchema diff --git a/test/functional/cmap/connection.test.js b/test/functional/cmap/connection.test.js index 121fa94ec5a..fb7f7390e0e 100644 --- a/test/functional/cmap/connection.test.js +++ b/test/functional/cmap/connection.test.js @@ -1,7 +1,7 @@ 'use strict'; const Connection = require('../../../lib/cmap/connection').Connection; -const connect = require('../../../lib/core/connection/connect'); +const connect = require('../../../lib/cmap/connect'); const expect = require('chai').expect; const BSON = require('bson'); const setupDatabase = require('../../functional/shared').setupDatabase; diff --git a/test/functional/collations.test.js b/test/functional/collations.test.js index 3c34cab8698..06e7f1c5a6f 100644 --- a/test/functional/collations.test.js +++ b/test/functional/collations.test.js @@ -2,6 +2,7 @@ const setupDatabase = require('./shared').setupDatabase; const mock = require('mongodb-mock-server'); const expect = require('chai').expect; +const { Long, Code } = require('../..'); const testContext = {}; describe('Collation', function() { @@ -209,12 +210,11 @@ describe('Collation', function() { } }); - it('Successfully pass through collation to mapreduce command', { + it('Successfully pass through collation to mapReduce command', { metadata: { requires: { generators: true, topology: 'single' } }, test: function() { const configuration = this.configuration; - const Code = configuration.require.Code; const client = 
configuration.newClient(`mongodb://${testContext.server.uri()}/test`); const primary = [Object.assign({}, mock.DEFAULT_ISMASTER)]; @@ -223,7 +223,7 @@ describe('Collation', function() { var doc = request.document; if (doc.ismaster) { request.reply(primary[0]); - } else if (doc.mapreduce) { + } else if (doc.mapReduce) { commandResult = doc; request.reply({ ok: 1, result: 'tempCollection' }); } else if (doc.endSessions) { @@ -440,7 +440,6 @@ describe('Collation', function() { metadata: { requires: { generators: true, topology: 'single' } }, test: function() { const configuration = this.configuration; - const Long = configuration.require.Long; const client = configuration.newClient(`mongodb://${testContext.server.uri()}/test`); const primary = [Object.assign({}, mock.DEFAULT_ISMASTER)]; diff --git a/test/functional/collection.test.js b/test/functional/collection.test.js index 0a4349fc3d2..81bb7d94078 100644 --- a/test/functional/collection.test.js +++ b/test/functional/collection.test.js @@ -4,6 +4,7 @@ const chai = require('chai'); const expect = chai.expect; const sinonChai = require('sinon-chai'); const mock = require('mongodb-mock-server'); +const { Long, ObjectID } = require('../..'); chai.use(sinonChai); describe('Collection', function() { @@ -419,7 +420,6 @@ describe('Collection', function() { * @ignore */ it('should correctly save document with Long value', function(done) { - const Long = configuration.require.Long; db.createCollection('test_save_long', (err, collection) => { collection.insertOne( { x: Long.fromNumber(9223372036854775807) }, @@ -550,7 +550,6 @@ describe('Collection', function() { * @ignore */ it('should correctly save document with nested array', function(done) { - const ObjectID = configuration.require.ObjectID; db.createCollection('save_error_on_save_test', (err, collection) => { // Create unique index for username collection.createIndex([['username', 1]], configuration.writeConcernMax(), err => { diff --git a/test/functional/command_write_concern.test.js b/test/functional/command_write_concern.test.js index 7d2f6e238b9..ced094c2bcb 100644 --- a/test/functional/command_write_concern.test.js +++ b/test/functional/command_write_concern.test.js @@ -2,6 +2,7 @@ var test = require('./shared').assert; var co = require('co'); var mock = require('mongodb-mock-server'); +const { ObjectId, Long, Code } = require('../..'); // Extend the object var extend = function(template, fields) { @@ -29,9 +30,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -144,10 +143,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId, - Long = configuration.require.Long; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -267,9 +263,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -388,9 
+382,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -506,9 +498,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -624,9 +614,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -733,7 +721,7 @@ describe('Command Write Concern', function() { } }); - it('successfully pass through writeConcern to dropIndexes command', { + it('successfully pass through writeConcern to mapReduce command', { metadata: { requires: { generators: true, @@ -742,10 +730,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId, - Code = configuration.require.Code; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -798,7 +783,7 @@ describe('Command Write Concern', function() { var doc = request.document; if (doc.ismaster) { request.reply(primary[0]); - } else if (doc.mapreduce) { + } else if (doc.mapReduce) { commandResult = doc; request.reply({ ok: 1, result: 'tempCollection' }); } else if (doc.endSessions) { @@ -869,9 +854,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -981,9 +964,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', @@ -1093,9 +1074,7 @@ describe('Command Write Concern', function() { }, test: function(done) { - var configuration = this.configuration, - ObjectId = configuration.require.ObjectId; - + var configuration = this.configuration; var electionIds = [new ObjectId(), new ObjectId()]; var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, { setName: 'rs', diff --git a/test/functional/connection.test.js b/test/functional/connection.test.js index 3b6084275c3..ccc73efc684 100644 --- a/test/functional/connection.test.js +++ b/test/functional/connection.test.js @@ -32,34 +32,6 @@ describe('Connection', function() { } }); - /** - * @ignore - */ - it('should correctly disable monitoring for single server connection', { - metadata: { requires: { topology: 'single' } }, - - // The actual test we wish to run - test: function(done) { - const 
configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // skipped for direct legacy variable inspection - return this.skip(); - } - - var client = configuration.newClient( - { w: 1 }, - { poolSize: 1, host: '/tmp/mongodb-27017.sock', monitoring: false } - ); - - client.connect(function(err, client) { - test.equal(null, err); - test.equal(false, client.topology.s.coreTopology.s.monitoring); - - client.close(done); - }); - } - }); - /** * @ignore */ @@ -191,32 +163,6 @@ describe('Connection', function() { } }); - /** - * @ignore - */ - it('should fail to connect using non-domain socket with undefined port', { - metadata: { requires: { topology: 'single' } }, - - // The actual test we wish to run - test: function(done) { - var configuration = this.configuration, - Server = configuration.require.Server, - MongoClient = configuration.require.MongoClient; - - var error; - try { - var client = new MongoClient(new Server('localhost', undefined), { w: 0 }); - client.connect(function() {}); - } catch (err) { - error = err; - } - - test.ok(error instanceof Error); - test.ok(/port must be specified/.test(error)); - done(); - } - }); - /** * @ignore */ @@ -260,67 +206,6 @@ describe('Connection', function() { } }); - /** - * @ignore - */ - it('test connect server options', { - metadata: { requires: { topology: 'single' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // skipped for direct legacy variable inspection - return this.skip(); - } - - const client = configuration.newClient(configuration.url(), { - auto_reconnect: true, - poolSize: 4 - }); - - client.connect( - connectionTester(configuration, 'testConnectServerOptions', function(client) { - test.ok(client.topology.poolSize >= 1); - test.equal(4, client.topology.s.coreTopology.s.pool.size); - test.equal(true, client.topology.autoReconnect); - client.close(done); - }) - ); - } - }); - - /** - * @ignore - */ - it('testConnectAllOptions', { - metadata: { requires: { topology: 'single' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // skipped for direct legacy variable inspection - return this.skip(); - } - - const client = configuration.newClient(configuration.url(), { - auto_reconnect: true, - poolSize: 4, - native_parser: process.env['TEST_NATIVE'] != null - }); - - client.connect( - connectionTester(configuration, 'testConnectAllOptions', function(client) { - test.ok(client.topology.poolSize >= 1); - test.equal(4, client.topology.s.coreTopology.s.pool.size); - test.equal(true, client.topology.autoReconnect); - client.close(done); - }) - ); - } - }); - /** * @ignore */ @@ -454,67 +339,4 @@ describe('Connection', function() { done(); } }); - - /** - * @ignore - */ - it('should correctly reconnect and finish query operation', { - metadata: { requires: { topology: 'single', unifiedTopology: false } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // The unified topology deprecates autoReconnect, this test depends on the `reconnect` event - return this.skip(); - } - - var client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: true }); - client.connect(function(err, client) { - var db = client.db(configuration.db); - test.equal(null, err); - - 
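The removed monitoring test above reached the server over a UNIX domain socket by passing `host: '/tmp/mongodb-27017.sock'` as a legacy option. For reference, a sketch of the connection-string equivalent, which does not depend on legacy topology internals; the percent-encoded socket path follows the MongoDB URI format, and the bare `mongodb` require is illustrative.

'use strict';

const { MongoClient } = require('mongodb');

// A socket path used as the URI host must be percent-encoded.
const socketPath = '/tmp/mongodb-27017.sock';
const uri = `mongodb://${encodeURIComponent(socketPath)}`;

const client = new MongoClient(uri, { poolSize: 1 });
client.connect(function(err) {
  if (err) throw err;
  client.close();
});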
db.collection('test_reconnect').insert({ a: 1 }, function(err) { - test.equal(null, err); - // Signal db reconnect - var dbReconnect = 0; - var dbClose = 0; - - db.on('reconnect', function() { - ++dbReconnect; - }); - - db.on('close', function() { - ++dbClose; - }); - - client.topology.once('reconnect', function() { - // Await reconnect and re-authentication - db.collection('test_reconnect').findOne(function(err, doc) { - test.equal(null, err); - test.equal(1, doc.a); - test.equal(1, dbReconnect); - test.equal(1, dbClose); - - // Attempt disconnect again - client.topology.connections()[0].destroy(); - - // Await reconnect and re-authentication - db.collection('test_reconnect').findOne(function(err, doc) { - test.equal(null, err); - test.equal(1, doc.a); - test.equal(2, dbReconnect); - test.equal(2, dbClose); - - client.close(done); - }); - }); - }); - - // Force close - client.topology.connections()[0].destroy(); - }); - }); - } - }); }); diff --git a/test/functional/connection_string_spec.test.js b/test/functional/connection_string_spec.test.js deleted file mode 100644 index 2a8fb961aa3..00000000000 --- a/test/functional/connection_string_spec.test.js +++ /dev/null @@ -1,32 +0,0 @@ -'use strict'; - -const parse = require('../../lib/url_parser'); -const expect = require('chai').expect; -const loadSpecTests = require('../spec').loadSpecTests; - -describe('Connection String (spec)', function() { - loadSpecTests('connection-string').forEach(suite => { - describe(suite.name, function() { - suite.tests.forEach(test => { - it(test.description, { - metadata: { requires: { topology: 'single' } }, - test: function(done) { - const valid = test.valid; - - parse(test.uri, {}, function(err, result) { - if (valid === false) { - expect(err).to.exist; - expect(result).to.not.exist; - } else { - expect(err).to.not.exist; - expect(result).to.exist; - } - - done(); - }); - } - }); - }); - }); - }); -}); diff --git a/test/functional/connections_stepdown.test.js b/test/functional/connections_stepdown.test.js index d63ecada687..ddd65bc8def 100644 --- a/test/functional/connections_stepdown.test.js +++ b/test/functional/connections_stepdown.test.js @@ -30,10 +30,7 @@ describe('Connections survive primary step down', function() { let collection; beforeEach(function() { - client = this.configuration.newClient( - { w: 1 }, - { poolSize: 1, retryWrites: false, useUnifiedTopology: true } - ); + client = this.configuration.newClient({ w: 1 }, { poolSize: 1, retryWrites: false }); return client .connect() diff --git a/test/functional/core/basic_replset_server_auth.test.js b/test/functional/core/basic_replset_server_auth.test.js deleted file mode 100644 index 17846ced651..00000000000 --- a/test/functional/core/basic_replset_server_auth.test.js +++ /dev/null @@ -1,491 +0,0 @@ -'use strict'; - -var expect = require('chai').expect, - f = require('util').format, - locateAuthMethod = require('./shared').locateAuthMethod, - executeCommand = require('./shared').executeCommand, - ReplSet = require('../../../lib/core/topologies/replset'), - Connection = require('../../../lib/core/connection/connection'); - -const MongoCredentials = require('../../../lib/core/auth/mongo_credentials').MongoCredentials; - -var setUp = function(configuration, options, callback) { - var ReplSetManager = require('mongodb-topology-manager').ReplSet; - - // Check if we have any options - if (typeof options === 'function') { - callback = options; - options = null; - } - - // Override options - var rsOptions; - if (options) { - rsOptions = options; - } 
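The connection-string spec runner deleted above drove the shared spec corpus through the callback-based parse in lib/url_parser. A compact sketch of the same assertion, with parse wrapped in a promise; the signature is exactly the one the deleted file used, and the sample URI is illustrative.

'use strict';

const parse = require('../../lib/url_parser');
const { expect } = require('chai');

// One spec case: a URI expected to be valid parses without error; an invalid
// one must produce an error and no result.
function assertUriValidity(uri, valid) {
  return new Promise(resolve => {
    parse(uri, {}, (err, result) => {
      if (valid === false) {
        expect(err).to.exist;
        expect(result).to.not.exist;
      } else {
        expect(err).to.not.exist;
        expect(result).to.exist;
      }
      resolve();
    });
  });
}

// assertUriValidity('mongodb://localhost:27017', true);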
else { - rsOptions = { - server: { - keyFile: __dirname + '/key/keyfile.key', - auth: null, - replSet: 'rs' - }, - client: { replSet: 'rs' } - }; - } - - // Set up the nodes - var nodes = [ - { - options: { - bind_ip: 'localhost', - port: 31000, - dbpath: f('%s/../db/31000', __dirname) - } - }, - { - options: { - bind_ip: 'localhost', - port: 31001, - dbpath: f('%s/../db/31001', __dirname) - } - }, - { - options: { - bind_ip: 'localhost', - port: 31002, - dbpath: f('%s/../db/31002', __dirname) - } - }, - { - options: { - bind_ip: 'localhost', - port: 31003, - dbpath: f('%s/../db/31003', __dirname) - } - }, - { - options: { - bind_ip: 'localhost', - port: 31004, - dbpath: f('%s/../db/31004', __dirname) - } - } - ]; - - // Merge in any node start up options - for (var i = 0; i < nodes.length; i++) { - for (var name in rsOptions.server) { - nodes[i].options[name] = rsOptions.server[name]; - } - } - - // Create a manager - var replicasetManager = new ReplSetManager('mongod', nodes, rsOptions.client); - // Purge the set - replicasetManager.purge().then(function() { - // Start the server - replicasetManager - .start() - .then(function() { - setTimeout(function() { - callback(null, replicasetManager); - }, 10000); - }) - .catch(function(e) { - console.dir(e); - }); - }); -}; - -describe.skip('Basic replica set server auth tests', function() { - it('should fail to authenticat emitting an error due to it being the initial connect', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - setUp(self.configuration, function(err, replicasetManager) { - // Enable connections accounting - Connection.enableConnectionAccounting(); - - // Get right auth method - locateAuthMethod(self.configuration, function(locateErr, method) { - expect(locateErr).to.not.exist; - - const credentials = new MongoCredentials({ - mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - { - host: 'localhost', - port: 31000 - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.not.exist; - - // Attempt to connect - var server = new ReplSet( - [ - { - host: 'localhost', - port: 31000 - }, - { - host: 'localhost', - port: 31001 - } - ], - { - setName: 'rs' - } - ); - - server.on('error', function() { - // console.log('=================== ' + Object.keys(Connection.connections()).length) - expect(Object.keys(Connection.connections()).length).to.equal(0); - Connection.disableConnectionAccounting(); - - executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { - credentials, - host: 'localhost', - port: 31000 - }, - function(dropUserErr, dropUserRes) { - expect(dropUserErr).to.not.exist; - expect(dropUserRes).to.exist; - replicasetManager.stop().then(function() { - done(); - }); - } - ); - }); - - server.connect({ credentials }); - } - ); - }); - }); - } - }); - - it('should correctly authenticate server using scram-sha-1 using connect auth', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - setUp(self.configuration, function(err, replicasetManager) { - // Enable connections accounting - Connection.enableConnectionAccounting(); - - locateAuthMethod(self.configuration, function(locateErr, method) { - expect(locateErr).to.not.exist; - - const credentials = new MongoCredentials({ 
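Every one of these deleted replica-set auth tests builds the same credentials object before connecting. A minimal sketch of that shape, using the pre-refactor core path the deleted file imported; the mechanism is whatever locateAuthMethod resolves for the server version, hard-coded here for illustration.

'use strict';

const { MongoCredentials } = require('../../../lib/core/auth/mongo_credentials');

const credentials = new MongoCredentials({
  mechanism: 'scram-sha-1', // illustrative; the tests resolve this via locateAuthMethod
  source: 'admin',
  username: 'root',
  password: 'root'
});

// Used either during the initial connect or explicitly afterwards:
//   server.connect({ credentials });
//   server.auth(credentials, callback);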
- mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - { - host: 'localhost', - port: 31000 - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.not.exist; - - // Attempt to connect - var server = new ReplSet( - [ - { - host: 'localhost', - port: 31000 - }, - { - host: 'localhost', - port: 31001 - } - ], - { - setName: 'rs' - } - ); - - server.on('connect', function(_server) { - _server.insert('test.test', [{ a: 1 }], function(insertErr, insertRes) { - expect(err).to.not.exist; - expect(insertRes.result.n).to.equal(1); - - executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { - credentials, - host: 'localhost', - port: 31000 - }, - function(dropUserErr, dropUserRes) { - expect(dropUserRes).to.exist; - expect(dropUserErr).to.not.exist; - - _server.destroy(); - expect(Object.keys(Connection.connections()).length).to.equal(0); - Connection.disableConnectionAccounting(); - - replicasetManager.stop().then(function() { - done(); - }); - } - ); - }); - }); - - server.connect({ credentials }); - } - ); - }); - }); - } - }); - - it('should correctly authenticate using auth method instead of connect', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - setUp(self.configuration, function(err, replicasetManager) { - // Enable connections accounting - Connection.enableConnectionAccounting(); - - locateAuthMethod(self.configuration, function(locateErr, method) { - expect(locateErr).to.not.exist; - - const credentials = new MongoCredentials({ - mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - { - host: 'localhost', - port: 31000 - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.not.exist; - - // Attempt to connect - var server = new ReplSet( - [ - { - host: 'localhost', - port: 31000 - } - ], - { - setName: 'rs' - } - ); - - server.on('connect', function(_server) { - //{auth: [method, 'admin', 'root', 'root']} - // Attempt authentication - _server.auth(credentials, function(authErr, authRes) { - expect(authRes).to.exist; - expect(authErr).to.not.exist; - - _server.insert('test.test', [{ a: 1 }], function(insertErr, insertRes) { - expect(insertErr).to.not.exist; - expect(insertRes.result.n).to.equal(1); - - executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { - credentials, - host: 'localhost', - port: 31000 - }, - function(dropUserErr, dropUserRes) { - expect(dropUserRes).to.exist; - expect(dropUserErr).to.not.exist; - - _server.destroy(); - // console.log('=================== ' + Object.keys(Connection.connections()).length) - expect(Object.keys(Connection.connections()).length).to.equal(0); - Connection.disableConnectionAccounting(); - - replicasetManager.stop().then(function() { - done(); - }); - } - ); - }); - }); - }); - - server.connect(); - } - ); - }); - }); - } - }); - - it('should correctly authenticate using auth method instead of connect and logout user', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - setUp(self.configuration, 
function(err, replicasetManager) { - // console.log('------------------------------ -2') - // Enable connections accounting - Connection.enableConnectionAccounting(); - - locateAuthMethod(self.configuration, function(locateErr, method) { - expect(locateErr).to.not.exist; - - const credentials = new MongoCredentials({ - mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - { - host: 'localhost', - port: 31000 - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.not.exist; - - // Attempt to connect - var server = new ReplSet( - [ - { - host: 'localhost', - port: 31000 - } - ], - { - setName: 'rs' - } - ); - - server.on('connect', function(_server) { - // Attempt authentication - _server.auth(credentials, function(authErr, authRes) { - expect(authErr).to.exist; - expect(authRes).to.not.exist; - - _server.insert('test.test', [{ a: 1 }], function(insertErr, insertRes) { - expect(insertErr).to.not.exist; - expect(insertRes.result.n).to.equal(1); - - _server.logout('admin', function(logoutErr, logoutRes) { - expect(logoutRes).to.exist; - expect(logoutErr).to.not.exist; - - _server.insert('test.test', [{ a: 1 }], function( - secondInsertErr, - secondInsertRes - ) { - if (secondInsertRes) console.dir(secondInsertRes.result); - - executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { - credentials, - host: 'localhost', - port: 31000 - }, - function(dropUserErr, dropUserRes) { - expect(dropUserRes).to.exist; - expect(dropUserErr).to.not.exist; - - _server.destroy(); - // console.log('=================== ' + Object.keys(Connection.connections()).length) - expect(Object.keys(Connection.connections()).length).to.equal(0); - Connection.disableConnectionAccounting(); - - replicasetManager.stop().then(function() { - done(); - }); - } - ); - }); - }); - }); - }); - }); - - server.connect(); - } - ); - }); - }); - } - }); -}); diff --git a/test/functional/core/client_metadata.test.js b/test/functional/core/client_metadata.test.js deleted file mode 100644 index 62089f36f50..00000000000 --- a/test/functional/core/client_metadata.test.js +++ /dev/null @@ -1,98 +0,0 @@ -'use strict'; - -const expect = require('chai').expect; - -const core = require('../../../lib/core'); -const BSON = core.BSON; -const Mongos = core.Mongos; -const ReplSet = core.ReplSet; - -describe('Client metadata tests', function() { - it('should correctly pass the configuration settings to server', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - // Attempt to connect - var server = this.configuration.newTopology( - this.configuration.host, - this.configuration.port, - { - bson: new BSON(), - appname: 'My application name' - } - ); - - expect(server.clientMetadata.application.name).to.equal('My application name'); - done(); - } - }); - - // Skipped due to use of topology manager - it.skip('should correctly pass the configuration settings to replset', { - metadata: { requires: { topology: 'replicaset' } }, - - test: function(done) { - const self = this; - const manager = this.configuration.manager; - - // Get the primary server - manager.primary().then(function(_manager) { - // Attempt to connect - var server = new ReplSet( - [ - { - host: _manager.host, - port: _manager.port - } - ], - { - setName: self.configuration.setName, - appname: 
'My application name' - } - ); - - server.on('connect', function(_server) { - _server.s.replicaSetState.allServers().forEach(function(x) { - expect(x.clientMetadata.application.name).to.equal('My application name'); - expect(x.clientMetadata.platform.split('mongodb-core').length).to.equal(2); - }); - - _server.destroy(done); - }); - - server.connect(); - }); - } - }); - - it('should correctly pass the configuration settings to mongos', { - metadata: { requires: { topology: 'sharded' } }, - - test: function(done) { - // Attempt to connect - var _server = new Mongos( - [ - { - host: 'localhost', - port: 51000 - } - ], - { - appname: 'My application name' - } - ); - - // Add event listeners - _server.once('connect', function(server) { - server.connectedProxies.forEach(function(x) { - expect(x.clientMetadata.application.name).to.equal('My application name'); - expect(x.clientMetadata.platform.split('mongodb-core').length).to.equal(2); - }); - - server.destroy(done); - }); - - _server.connect(); - } - }); -}); diff --git a/test/functional/core/cursor.test.js b/test/functional/core/cursor.test.js index 3104607431e..32307b09c45 100644 --- a/test/functional/core/cursor.test.js +++ b/test/functional/core/cursor.test.js @@ -336,132 +336,6 @@ describe('Cursor tests', function() { } }); - it('Should finish cursor correctly after all sockets to pool destroyed', { - metadata: { requires: { topology: ['single'] } }, - test: function(done) { - if (this.configuration.usingUnifiedTopology()) { - // This test tries to inspect the connection pool directly on the topology, which - // will no longer work with the new Topology type. The test should be reworked. - return this.skip(); - } - - const server = this.configuration.newTopology(); - var ns = f('%s.cursor6', this.configuration.db); - // Add event listeners - server.on('connect', function(_server) { - // Execute the write - _server.insert( - ns, - [{ a: 1 }, { a: 2 }, { a: 3 }], - { - writeConcern: { w: 1 }, - ordered: true - }, - function(err, results) { - expect(err).to.be.null; - expect(results.result.n).to.equal(3); - - // Execute find - var cursor = _server.cursor(ns, { find: ns, query: {}, batchSize: 2 }); - - // Execute next - cursor._next(function(nextCursorErr, nextCursorD) { - expect(nextCursorErr).to.be.null; - expect(nextCursorD.a).to.equal(1); - - // Get the next item - cursor._next(function(secondCursorErr, secondCursorD) { - expect(secondCursorErr).to.be.null; - expect(secondCursorD.a).to.equal(2); - - // Should be able to continue cursor after reconnect - _server.once('reconnect', function() { - cursor._next(function(thirdCursorErr, thirdCursorD) { - expect(thirdCursorErr).to.be.null; - expect(thirdCursorD.a).to.equal(3); - - // Destroy the server connection - _server.destroy(done); - }); - }); - - // Destroy all active connections in the pool - var connections = _server.s.pool.allConnections(); - for (var i = 0; i < connections.length; i++) { - connections[i].write('!@#!@#SADASDSA!@#!@#!@#!@#!@'); - } - }); - }); - } - ); - }); - - // Start connection - server.connect(); - } - }); - - it('Should not leak connnection workItem elements when using killCursor', { - metadata: { - requires: { topology: ['single'] } - }, - - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // This test tries to inspect the connection pool directly on the topology, which - // will no longer work with the new Topology type. The test should be reworked. 
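The deleted client-metadata tests above asserted on clientMetadata after connecting. The same property can also be verified from the wire side: per the handshake, a driver attaches a `client` metadata document (including application.name when an app name is configured) to its initial ismaster. A sketch using the same mock-server API as the other tests; the `appname` client option and bare `mongodb` require are assumptions for illustration.

'use strict';

const mock = require('mongodb-mock-server');
const { expect } = require('chai');
const { MongoClient } = require('mongodb');

mock.createServer().then(server => {
  server.setMessageHandler(request => {
    const doc = request.document;
    if (doc.ismaster) {
      if (doc.client) {
        // Metadata rides along with the first ismaster of each connection.
        expect(doc.client.application.name).to.equal('My application name');
      }
      request.reply(mock.DEFAULT_ISMASTER);
    } else if (doc.endSessions) {
      request.reply({ ok: 1 });
    }
  });

  const client = new MongoClient(`mongodb://${server.uri()}/test`, {
    appname: 'My application name'
  });

  client.connect(function(err, client) {
    if (err) throw err;
    client.close();
  });
});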
- return this.skip(); - } - - const server = this.configuration.newTopology(); - var ns = f('%s.cursor4', this.configuration.db); - // Add event listeners - server.on('connect', function(_server) { - // Execute the write - _server.insert( - ns, - [{ a: 1 }, { a: 2 }, { a: 3 }], - { - writeConcern: { w: 1 }, - ordered: true - }, - function(err, results) { - expect(err).to.be.null; - expect(results.result.n).to.equal(3); - - // Execute find - var cursor = _server.cursor(ns, { find: ns, query: {}, batchSize: 2 }); - - // Execute next - cursor._next(function(nextCursorErr, nextCursorD) { - expect(nextCursorErr).to.be.null; - expect(nextCursorD.a).to.equal(1); - - // Kill cursor - cursor.kill(function() { - // Add a small delay so that the work can be queued after the kill - // callback has executed - setImmediate(function() { - var connections = _server.s.pool.allConnections(); - for (var i = 0; i < connections.length; i++) { - expect(connections[i].workItems.length).to.equal(0); - } - - // Destroy the server connection - _server.destroy(done); - }, 100); - }); - }); - } - ); - }); - - // Start connection - server.connect(); - } - }); - // NOTE: a notoriously flakey test, needs rewriting // Commented out to stop before task from running and breaking auth tests // it.skip('should not hang if autoReconnect=false and pools sockets all timed out', { diff --git a/test/functional/core/error.test.js b/test/functional/core/error.test.js index b5409568f84..af6d03f2d65 100644 --- a/test/functional/core/error.test.js +++ b/test/functional/core/error.test.js @@ -1,7 +1,8 @@ 'use strict'; -var expect = require('chai').expect, - f = require('util').format; +const { expect } = require('chai'); +const { format: f } = require('util'); +const { MongoError, MongoNetworkError } = require('../../../lib/error'); describe('Error tests', function() { it('should return helpful error when geoHaystack fails', { @@ -38,8 +39,6 @@ describe('Error tests', function() { }, test: function(done) { - var MongoError = require('../../../lib/core/error.js').MongoError; - var errorMessage = 'A test error'; var err = new MongoError(errorMessage); expect(err).to.be.an.instanceof(Error); @@ -56,8 +55,6 @@ describe('Error tests', function() { }, test: function(done) { - var MongoError = require('../../../lib/core/error.js').MongoError; - var errorMessage = 'A test error'; var err = new MongoError(new Error(errorMessage)); expect(err).to.be.an.instanceof(Error); @@ -74,8 +71,6 @@ describe('Error tests', function() { }, test: function(done) { - var MongoError = require('../../../lib/core/error.js').MongoError; - var errorMessage = 'A test error'; var err = new MongoError({ message: errorMessage, someData: 12345 }); expect(err).to.be.an.instanceof(Error); @@ -93,12 +88,10 @@ describe('Error tests', function() { }, test: function(done) { - var errors = require('../../../lib/core/error'); - var errorMessage = 'A test error'; - var err = new errors.MongoNetworkError(errorMessage); + var err = new MongoNetworkError(errorMessage); expect(err).to.be.an.instanceof(Error); - expect(err).to.be.an.instanceof(errors.MongoError); + expect(err).to.be.an.instanceof(MongoError); expect(err.name).to.equal('MongoNetworkError'); expect(err.message).to.equal(errorMessage); diff --git a/test/functional/core/extend_cursor.test.js b/test/functional/core/extend_cursor.test.js index 5966dc0f096..5c63e0ab100 100644 --- a/test/functional/core/extend_cursor.test.js +++ b/test/functional/core/extend_cursor.test.js @@ -1,8 +1,8 @@ 'use strict'; -const expect = 
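The error.test.js changes above consolidate MongoError and MongoNetworkError into a single top-level lib/error import. For reference, a sketch of the three constructor shapes those tests exercise and of the inheritance the network-error test asserts:

'use strict';

const assert = require('assert');
const { MongoError, MongoNetworkError } = require('../../../lib/error');

// MongoError accepts a message string, an existing Error, or a plain object.
const fromString = new MongoError('A test error');
const fromError = new MongoError(new Error('A test error'));
const fromObject = new MongoError({ message: 'A test error', someData: 12345 });

[fromString, fromError, fromObject].forEach(err => {
  assert(err instanceof Error);
  assert.strictEqual(err.message, 'A test error');
});

// Network errors specialize MongoError, so generic MongoError handlers
// still catch them.
const netErr = new MongoNetworkError('A test error');
assert(netErr instanceof MongoError);
assert.strictEqual(netErr.name, 'MongoNetworkError');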
require('chai').expect; -const f = require('util').format; -const CoreCursor = require('../../../lib/core/cursor').CoreCursor; +const { expect } = require('chai'); +const { format: f } = require('util'); +const { CoreCursor } = require('../../../lib/cursor'); describe('Extend cursor tests', function() { it('should correctly extend the cursor with custom implementation', { diff --git a/test/functional/core/max_staleness.test.js b/test/functional/core/max_staleness.test.js deleted file mode 100644 index 2dbccd32533..00000000000 --- a/test/functional/core/max_staleness.test.js +++ /dev/null @@ -1,145 +0,0 @@ -'use strict'; - -const expect = require('chai').expect, - p = require('path'), - fs = require('fs'), - Server = require('../../../lib/core/topologies/server'), - ReplSetState = require('../../../lib/core/topologies/replset_state'), - MongoError = require('../../../lib/core/error').MongoError, - ReadPreference = require('../../../lib/core/topologies/read_preference'); - -const rsWithPrimaryPath = p.resolve(__dirname, '../../spec/max-staleness/ReplicaSetWithPrimary'); -const rsWithoutPrimaryPath = p.resolve(__dirname, '../../spec/max-staleness/ReplicaSetNoPrimary'); - -describe('Max Staleness', function() { - describe('ReplicaSet without primary', function() { - fs.readdirSync(rsWithoutPrimaryPath) - .filter(x => x.indexOf('.json') !== -1) - .forEach(x => { - it(p.basename(x, '.json'), function(done) { - executeEntry(`${rsWithoutPrimaryPath}/${x}`, done); - }); - }); - }); - - describe('ReplicaSet with primary', function() { - fs.readdirSync(rsWithPrimaryPath) - .filter(x => x.indexOf('.json') !== -1) - .filter(x => x.indexOf('LongHeartbeat2.jwson') === -1) - .forEach(x => { - it(p.basename(x, '.json'), function(done) { - executeEntry(`${rsWithPrimaryPath}/${x}`, done); - }); - }); - }); -}); - -function convert(mode) { - if (mode === undefined) return 'primary'; - if (mode.toLowerCase() === 'primarypreferred') return 'primaryPreferred'; - if (mode.toLowerCase() === 'secondarypreferred') return 'secondaryPreferred'; - return mode.toLowerCase(); -} - -function executeEntry(path, callback) { - // Read and parse the json file - var file = require(path); - - // Let's pick out the parts of the selection specification - var error = file.error; - var heartbeatFrequencyMS = file.heartbeatFrequencyMS || 10000; - var inLatencyWindow = file.in_latency_window; - var readPreference = file.read_preference; - var topologyDescription = file.topology_description; - - try { - // Create a Replset and populate it with dummy topology servers - var replset = new ReplSetState({ - heartbeatFrequencyMS: heartbeatFrequencyMS - }); - - replset.topologyType = topologyDescription.type; - // For each server add them to the state - topologyDescription.servers.forEach(function(s) { - var server = new Server({ - host: s.address.split(':')[0], - port: parseInt(s.address.split(':')[1], 10) - }); - - // Add additional information - if (s.avg_rtt_ms) server.lastIsMasterMS = s.avg_rtt_ms; - if (s.lastUpdateTime) server.lastUpdateTime = s.lastUpdateTime; - // Set the last write - if (s.lastWrite) { - server.lastWriteDate = s.lastWrite.lastWriteDate.$numberLong; - } - - server.ismaster = {}; - if (s.tags) server.ismaster.tags = s.tags; - if (s.maxWireVersion) server.ismaster.maxWireVersion = s.maxWireVersion; - // Ensure the server looks connected - server.isConnected = function() { - return true; - }; - - if (s.type === 'RSSecondary') { - server.ismaster.secondary = true; - replset.secondaries.push(server); - } else if (s.type 
=== 'RSPrimary') { - server.ismaster.ismaster = true; - replset.primary = server; - } else if (s.type === 'RSArbiter') { - server.ismaster.arbiterOnly = true; - replset.arbiters.push(server); - } - }); - - // Calculate staleness - replset.updateSecondariesMaxStaleness(heartbeatFrequencyMS); - - // Create read preference - var rp = new ReadPreference(convert(readPreference.mode), readPreference.tag_sets, { - maxStalenessSeconds: readPreference.maxStalenessSeconds - }); - - // Perform a pickServer - var server = replset.pickServer(rp); - var foundWindow = null; - - // We expect an error - if (error) { - expect(server).to.be.an.instanceof(MongoError); - return callback(null, null); - } - - // server should be in the latency window - for (var i = 0; i < inLatencyWindow.length; i++) { - var w = inLatencyWindow[i]; - - if (server.name === w.address) { - foundWindow = w; - break; - } - } - - if ( - ['ReplicaSetNoPrimary', 'Primary', 'ReplicaSetWithPrimary'].indexOf( - topologyDescription.type - ) !== -1 && - inLatencyWindow.length === 0 - ) { - if (server instanceof MongoError) { - expect(server.message).to.equal('maxStalenessSeconds must be set to at least 90 seconds'); - } else { - expect(server).to.be.null; - } - } else { - expect(foundWindow).to.not.be.null; - } - } catch (err) { - if (file.error) return callback(null, null); - return callback(err, null); - } - - callback(null, null); -} diff --git a/test/functional/core/mongos_server_selection.test.js b/test/functional/core/mongos_server_selection.test.js deleted file mode 100644 index db9ff6cbd4a..00000000000 --- a/test/functional/core/mongos_server_selection.test.js +++ /dev/null @@ -1,85 +0,0 @@ -'use strict'; - -const expect = require('chai').expect; -const path = require('path'); -const fs = require('fs'); -const Mongos = require('../../../lib/core/topologies/mongos'); -const ReadPreference = require('../../../lib/core/topologies/read_preference'); -const Server = require('../../../lib/core/topologies/server'); - -describe('Mongos server selection tests', function() { - var specPath = `${__dirname}/../../spec/server-selection/server_selection/Sharded/read`; - var entries = fs.readdirSync(specPath).filter(function(x) { - return x.indexOf('.json') !== -1; - }); - - entries.forEach(entry => { - it(path.basename(entry, '.json'), function(done) { - executeEntry(entry, `${specPath}/${entry}`, done); - }); - }); -}); - -function convert(mode) { - if (mode.toLowerCase() === 'primarypreferred') return 'primaryPreferred'; - if (mode.toLowerCase() === 'secondarypreferred') return 'secondaryPreferred'; - return mode.toLowerCase(); -} - -function executeEntry(file, path, done) { - // Read and parse the json file - file = require(path); - // Let's pick out the parts of the selection specification - var topologyDescription = file.topology_description; - var inLatencyWindow = file.in_latency_window; - var readPreferenceSpec = file.read_preference; - - try { - // Create a Replset and populate it with dummy topology servers - var topology = new Mongos(); - // For each server add them to the state - topologyDescription.servers.forEach(function(s) { - var server = new Server({ - host: s.address.split(':')[0], - port: parseInt(s.address.split(':')[1], 10) - }); - - // Add additional information - if (s.avg_rtt_ms) server.lastIsMasterMS = s.avg_rtt_ms; - if (s.tags) server.ismaster = { tags: s.tags }; - // Ensure the server looks connected - server.isConnected = function() { - return true; - }; - // Add server to topology - 
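The deleted max-staleness runner above builds a ReadPreference for every spec case and relies on server selection to reject values under the 90-second floor (the error message it matches on). A minimal sketch of that read-preference shape; the require path matches the one this patch adds to operations.test.js, and the tag set is illustrative.

'use strict';

const ReadPreference = require('../../../lib/read_preference');

// maxStalenessSeconds below 90 is rejected at selection time with
// 'maxStalenessSeconds must be set to at least 90 seconds'.
const rp = new ReadPreference('secondaryPreferred', [{ nodeType: 'ANALYTICS' }], {
  maxStalenessSeconds: 120
});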
topology.connectedProxies.push(server); - }); - - // Create read preference - var readPreference = new ReadPreference( - convert(readPreferenceSpec.mode), - readPreferenceSpec.tag_sets - ); - - // Perform a pickServer - topology.selectServer({ readPreference }, (err, server) => { - if (err) return done(err); - var foundWindow = null; - - // server should be in the latency window - for (var i = 0; i < inLatencyWindow.length; i++) { - var w = inLatencyWindow[i]; - - if (server.name === w.address) { - foundWindow = w; - break; - } - } - - expect(foundWindow).to.not.be.null; - done(); - }); - } catch (err) { - done(err); - } -} diff --git a/test/functional/core/operation_example.test.js b/test/functional/core/operation_example.test.js index e614e39fa85..d8abe90a2ec 100644 --- a/test/functional/core/operation_example.test.js +++ b/test/functional/core/operation_example.test.js @@ -1,9 +1,7 @@ 'use strict'; const expect = require('chai').expect; -const core = require('../../../lib/core'); -const ReplSet = core.ReplSet; -const Mongos = core.Mongos; +const { Topology } = require('../../../lib/sdam/topology'); /************************************************************************** * @@ -329,15 +327,15 @@ describe('Server operation example tests', function() { * *************************************************************************/ -describe('Replset operation example tests', function() { +describe('Topology operation example tests', function() { /** - * Correctly insert a document using the ReplSet insert method + * Correctly insert a document using the Topology insert method * - * @example-class ReplSet + * @example-class Topology * @example-method insert * @ignore */ - it('simple insert into db using ReplSet', { + it('simple insert into db using Topology', { metadata: { requires: { topology: 'replicaset' @@ -357,11 +355,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -393,11 +391,11 @@ describe('Replset operation example tests', function() { /** * Correctly update a document using the Server update method * - * @example-class ReplSet + * @example-class Topology * @example-method update * @ignore */ - it('update using ReplSet instance', { + it('update using Topology instance', { metadata: { requires: { topology: 'replicaset' @@ -417,11 +415,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -470,13 +468,13 @@ describe('Replset operation example tests', function() { }); /** - * Correctly remove a document using the ReplSet remove method + * Correctly remove a document using the Topology remove method * - * @example-class ReplSet + * @example-class Topology * @example-method remove * @ignore */ - it('remove 
using ReplSet instance', { + it('remove using Topology instance', { metadata: { requires: { topology: 'replicaset' @@ -496,11 +494,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -549,13 +547,13 @@ describe('Replset operation example tests', function() { }); /** - * Correctly find a document on the ReplSet using the cursor + * Correctly find a document on the Topology using the cursor * - * @example-class ReplSet + * @example-class Topology * @example-method cursor * @ignore */ - it('cursor using ReplSet instance', { + it('cursor using Topology instance', { metadata: { requires: { topology: 'replicaset' @@ -575,11 +573,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -621,13 +619,13 @@ describe('Replset operation example tests', function() { }); /** - * Correctly execute ismaster command on the ReplSet using the cursor + * Correctly execute ismaster command on the Topology using the cursor * - * @example-class ReplSet + * @example-class Topology * @example-method command * @ignore */ - it('command using ReplSet instance', { + it('command using Topology instance', { metadata: { requires: { topology: 'replicaset' @@ -647,11 +645,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -691,7 +689,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port @@ -730,7 +728,7 @@ describe.skip('Mongos operation example tests', function() { * @example-method update * @ignore */ - it('update using ReplSet instance', { + it('update using Topology instance', { metadata: { requires: { topology: 'mongos' @@ -739,7 +737,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port @@ -806,7 +804,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port @@ -873,7 +871,7 @@ 
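The operation-example changes above swap ReplSet and Mongos for the single unified Topology while keeping the same event-driven call sites. A condensed sketch of that pattern under the new import; hosts, namespace, and options are illustrative.

'use strict';

const { Topology } = require('../../../lib/sdam/topology');

const server = new Topology([{ host: 'localhost', port: 31000 }], { setName: 'rs' });

server.on('connect', _server => {
  // Same insert/command surface the examples above drive.
  _server.insert('test.inserts', [{ a: 1 }], { writeConcern: { w: 1 } }, (err, results) => {
    if (err) throw err;
    console.log('inserted:', results.result.n);
    _server.destroy();
  });
});

server.connect();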
describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port @@ -933,7 +931,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port diff --git a/test/functional/core/operations.test.js b/test/functional/core/operations.test.js index 40dce776fec..778c3092976 100644 --- a/test/functional/core/operations.test.js +++ b/test/functional/core/operations.test.js @@ -3,26 +3,16 @@ const expect = require('chai').expect; const f = require('util').format; const mock = require('mongodb-mock-server'); -const ConnectionSpy = require('./shared').ConnectionSpy; -const setupDatabase = require('./shared').setupDatabase; +const { setupDatabase } = require('./shared'); +const ReadPreference = require('../../../lib/read_preference'); -const core = require('../../../lib/core'); -const ReadPreference = core.ReadPreference; -const Connection = core.Connection; - -const test = {}; describe('Operation tests', function() { beforeEach(function() { - test.spy = new ConnectionSpy(); - Connection.enableConnectionAccounting(test.spy); return setupDatabase(this.configuration); }); afterEach(() => { - return mock.cleanup(test.spy).then(() => { - test.spy = undefined; - Connection.disableConnectionAccounting(); - }); + return mock.cleanup(); }); it('should correctly connect using server object', { @@ -113,7 +103,6 @@ describe('Operation tests', function() { var self = this; const config = this.configuration; const server = config.newTopology(); - var ReadPreference = self.configuration.mongo.ReadPreference; // Add event listeners server.on('connect', function(_server) { @@ -174,7 +163,6 @@ describe('Operation tests', function() { const self = this; const config = this.configuration; const server = config.newTopology(); - var ReadPreference = self.configuration.mongo.ReadPreference; // Add event listeners server.on('connect', function(_server) { @@ -237,7 +225,6 @@ describe('Operation tests', function() { var self = this; const config = this.configuration; const server = config.newTopology(); - var ReadPreference = self.configuration.mongo.ReadPreference; // Add event listeners server.on('connect', function(_server) { diff --git a/test/functional/core/pool.test.js b/test/functional/core/pool.test.js deleted file mode 100644 index 77658999183..00000000000 --- a/test/functional/core/pool.test.js +++ /dev/null @@ -1,1204 +0,0 @@ -'use strict'; - -var expect = require('chai').expect, - locateAuthMethod = require('./shared').locateAuthMethod, - executeCommand = require('./shared').executeCommand, - Pool = require('../../../lib/core/connection/pool'), - Connection = require('../../../lib/core/connection/connection'), - Query = require('../../../lib/core/connection/commands').Query, - Bson = require('bson'), - mock = require('mongodb-mock-server'), - ConnectionSpy = require('./shared').ConnectionSpy; - -const MongoCredentials = require('../../../lib/core/auth/mongo_credentials').MongoCredentials; - -const test = {}; -describe('Pool tests', function() { - beforeEach(() => { - test.spy = new ConnectionSpy(); - Connection.enableConnectionAccounting(test.spy); - }); - - afterEach(() => { - return mock.cleanup(test.spy).then(() => { - test.spy = undefined; - Connection.disableConnectionAccounting(); - 
}); - }); - - it('should correctly connect pool to single server', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - // Attempt to connect - var pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - bson: new Bson(), - messageHandler: function() {} - }); - - // Add event listeners - pool.on('connect', function() { - pool.destroy(); - done(); - }); - - // Start connection - pool.connect(); - } - }); - - it('Should only listen on connect once', { - metadata: { requires: { topology: 'single' } }, - test: function(done) { - const pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - bson: new Bson(), - messageHandler: function() {} - }); - - pool.on('connect', function() { - process.nextTick(() => { - const connections = pool.allConnections(); - expect(connections).to.have.lengthOf(1); - expect(connections[0].socket.listenerCount('connect')).to.equal(0); - - pool.destroy(); - done(); - }); - }); - - pool.connect(); - } - }); - - it('should correctly write ismaster operation to the server', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - // Attempt to connect - var pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - bson: new Bson() - }); - - // Add event listeners - pool.on('connect', function() { - var query = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write(query, function(err, result) { - expect(err).to.be.null; - expect(result.result.ismaster).to.be.true; - pool.destroy(); - done(); - }); - }); - - // Start connection - pool.connect(); - } - }); - - it('should correctly grow server pool on concurrent operations', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - // Index - var index = 0; - - // Attempt to connect - var pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - bson: new Bson() - }); - - var messageHandler = function(err, result) { - index = index + 1; - - expect(err).to.be.null; - expect(result.result.ismaster).to.be.true; - - // Did we receive an answer for all the messages - if (index === 100) { - expect(pool.allConnections().length).to.equal(5); - pool.destroy(); - done(); - } - }; - - // Add event listeners - pool.on('connect', function() { - for (var i = 0; i < 10; ++i) { - for (var j = 0; j < 10; ++j) { - const query = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write(query, messageHandler); - } - } - }); - - // Start connection - pool.connect(); - } - }); - - // Skipped due to use of topology manager - it('should correctly write ismaster operation to the server and handle timeout', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - this.timeout(0); - - // Attempt to connect - var pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - socketTimeout: 500, - bson: new Bson(), - reconnect: false - }); - - // Add event listeners - pool.on('connect', function() { - var query = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write(query, function() {}); - }); - - pool.on('timeout', function() { - pool.destroy(); - done(); - }); - - // Start connection - pool.connect(); - } - }); - - it('should correctly error out operations if pool is closed in the 
middle of a set', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - // Attempt to connect - var pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - socketTimeout: 3000, - bson: new Bson() - }); - - var index = 0; - var errorCount = 0; - - var messageHandler = function(err) { - if (err) errorCount = errorCount + 1; - index = index + 1; - if (index === 500) { - expect(errorCount).to.be.at.least(250); - pool.destroy(); - done(); - } - }; - - function execute(i) { - setTimeout(function() { - var query = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - pool.write(query, messageHandler); - if (i === 249) { - pool.destroy(); - } - }, i); - } - - // Add event listeners - pool.on('connect', function() { - for (var i = 0; i < 500; i++) { - execute(i); - } - }); - - // Start connection - pool.connect(); - } - }); - - it.skip('should correctly recover from a server outage', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - var self = this; - - // Attempt to connect - var pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - socketTimeout: 3000, - connectionTimeout: 1000, - reconnectTries: 120, - bson: new Bson() - }); - - var index = 0; - var errorCount = 0; - var executed = false; - var restarted = false; - - function waitForRestart(callback) { - setTimeout(function() { - if (!restarted) return waitForRestart(callback); - callback(); - }, 10); - } - - var messageHandler = function(err) { - if (err) errorCount = errorCount + 1; - index = index + 1; - - if (index === 500 && !executed) { - waitForRestart(function() { - executed = true; - expect(errorCount).to.be.at.least(0); - pool.destroy(); - done(); - }); - } - }; - - function execute(i) { - setTimeout(function() { - var query = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - pool.write(query, messageHandler); - - if (i === 250) { - self.configuration.manager.restart(true).then(function() { - // console.log('!!!!!!!!!!! 
execute 1') - restarted = true; - }); - } - }, i); - } - - // Add event listeners - pool.on('connect', function() { - for (var i = 0; i < 500; i++) { - execute(i); - } - }); - - // Start connection - pool.connect(); - } - }); - - // Skipped due to use of topology manager - it.skip('should correctly recover from a longer server outage', { - metadata: { - requires: { topology: 'single' }, - ignore: { travis: true } - }, - - test: function(done) { - var self = this; - - // Attempt to connect - var pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - socketTimeout: 3000, - bson: new Bson(), - reconnectTries: 120 - }); - - var index = 0; - var errorCount = 0; - var reconnect = false; - var stopped = false; - var started = false; - - var messageHandler = function(err) { - if (err) errorCount = errorCount + 1; - index = index + 1; - - if (index === 500) { - expect(errorCount).to.be.at.least(0); - pool.destroy(); - expect(stopped).to.be.true; - expect(started).to.be.true; - expect(reconnect).to.be.true; - done(); - } - }; - - pool.on('reconnect', function() { - reconnect = true; - }); - - function execute(i) { - setTimeout(function() { - var query = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - pool.write(query, messageHandler); - - if (i === 250) { - self.configuration.manager.stop().then(function() { - stopped = true; - - setTimeout(function() { - self.configuration.manager.start().then(function() { - started = true; - }); - }, 5000); - }); - } - }, i); - } - - // Add event listeners - pool.on('connect', function() { - for (var i = 0; i < 500; i++) { - execute(i); - } - }); - - // Start connection - pool.connect(); - } - }); - - it('should correctly reclaim immediateRelease socket', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - var pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - socketTimeout: 1000, - bson: new Bson(), - reconnect: false - }); - - var index = 0; - - // Add event listeners - pool.on('connect', function() { - var query = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write(query, { immediateRelease: true }, function(err) { - expect(err).to.not.exist; - index = index + 1; - }); - }); - - pool.on('timeout', function() { - expect(index).to.equal(0); - pool.destroy(); - done(); - }); - - // Start connection - pool.connect(); - } - }); - - // Skipped due to use of topology manager - it('should correctly authenticate using scram-sha-1 using connect auth', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - // Restart instance - self.configuration.manager.restart(true).then(function() { - locateAuthMethod(self.configuration, function(err, method) { - expect(err).to.be.null; - - const credentials = new MongoCredentials({ - mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.be.null; - // Attempt to connect - var pool = new Pool(null, { - host: self.configuration.host, - port: self.configuration.port, - bson: new Bson() - }); - - // Add event listeners - pool.on('connect', function() { - 
executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { credentials }, - function(dropUserErr, dropUserRes) { - expect(dropUserRes).to.exist; - expect(dropUserErr).to.be.null; - - pool.destroy(true); - done(); - } - ); - }); - - // Start connection - pool.connect(credentials); - } - ); - }); - }); - } - }); - - // Skipped due to use of topology manager - it.skip( - 'should correctly authenticate using scram-sha-1 using connect auth and maintain auth on new connections', - { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - // Restart instance - self.configuration.manager.restart(true).then(function() { - locateAuthMethod(self.configuration, function(err, method) { - expect(err).to.be.null; - - const credentials = new MongoCredentials({ - mechansim: method, - source: 'admin', - username: 'root', - passsword: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - function(createRootUserErr, createRootUserRes) { - expect(createRootUserRes).to.exist; - expect(createRootUserErr).to.be.null; - - executeCommand( - self.configuration, - 'test', - { - createUser: 'admin', - pwd: 'admin', - roles: ['readWrite', 'dbAdmin'], - digestPassword: true - }, - { credentials }, - function(createAdminUserErr, createAdminUserRes) { - expect(createAdminUserRes).to.exist; - expect(createAdminUserErr).to.be.null; - - // Attempt to connect - var pool = new Pool(null, { - host: self.configuration.host, - port: self.configuration.port, - bson: new Bson() - }); - - var index = 0; - - var messageHandler = function(handlerErr, handlerResult) { - index = index + 1; - - // Tests - expect(handlerErr).to.be.null; - expect(handlerResult.result.n).to.equal(1); - // Did we receive an answer for all the messages - if (index === 100) { - expect(pool.socketCount()).to.equal(5); - pool.destroy(true); - done(); - } - }; - - // Add event listeners - pool.on('connect', function() { - for (var i = 0; i < 10; i++) { - process.nextTick(function() { - var query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - 
messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - - query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - }); - } - }); - - // Start connection - pool.connect(credentials); - } - ); - } - ); - }); - }); - } - } - ); - - // Skipped due to use of topology manager - it.skip('should correctly authenticate using scram-sha-1 using auth method', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - // Enable connections accounting - Connection.enableConnectionAccounting(); - - // Restart instance - self.configuration.manager.restart(true).then(function() { - locateAuthMethod(self.configuration, function(err, method) { - expect(err).to.be.null; - - const credentials = new MongoCredentials({ - mechansim: method, - source: 'admin', - username: 'root', - passsword: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - function(createRootUserErr, createRootUserRes) { - expect(createRootUserRes).to.exist; - expect(createRootUserErr).to.be.null; - - executeCommand( - self.configuration, - 'test', - { - createUser: 'admin', - pwd: 'admin', - roles: ['readWrite', 'dbAdmin'], - digestPassword: true - }, - { credentials }, - function(createAdminUserErr, createAdminUserRes) { - expect(createAdminUserRes).to.exist; - expect(createAdminUserErr).to.be.null; - - // Attempt to connect - var pool = new Pool(null, { - host: self.configuration.host, - port: self.configuration.port, - bson: new Bson() - }); - - var index = 0; - var error = false; - - var messageHandler = function(handlerErr, handlerResult) { - index = index + 1; - - // Tests - expect(handlerErr).to.be.null; - expect(handlerResult.result.n).to.equal(1); - // Did we receive an answer for all the messages - if (index === 100) { - expect(pool.socketCount()).to.equal(5); - expect(error).to.be.false; - - pool.destroy(true); - done(); - } - }; - - // Add event listeners - pool.on('connect', function() { - pool.auth(credentials, function(authErr, authRes) { - expect(authRes).to.exist; - expect(authErr).to.not.exist; - - var testCmd = function() { - var query = new Query( - new Bson(), - 'test.$cmd', - { insert: 'test', documents: [{ a: 1 }] }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.write( - query, - { command: true, requestId: query.requestId }, - messageHandler - ); - }; - - for (var i = 0; i < 100; i++) { - process.nextTick(testCmd); - } - }); - - var systemCmd = function() { - var query = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - 
-
-  // Skipped due to use of topology manager
-  it.skip('should correctly authenticate using scram-sha-1 using connect auth then logout', {
-    metadata: { requires: { topology: 'auth' } },
-
-    test: function(done) {
-      var self = this;
-
-      // Restart instance
-      self.configuration.manager.restart(true).then(function() {
-        locateAuthMethod(self.configuration, function(err, method) {
-          expect(err).to.be.null;
-
-          const credentials = new MongoCredentials({
-            mechanism: method,
-            source: 'admin',
-            username: 'root',
-            password: 'root'
-          });
-
-          executeCommand(
-            self.configuration,
-            'admin',
-            {
-              createUser: 'root',
-              pwd: 'root',
-              roles: [{ role: 'root', db: 'admin' }],
-              digestPassword: true
-            },
-            function(createRootUserErr, createRootUserRes) {
-              expect(createRootUserRes).to.exist;
-              expect(createRootUserErr).to.be.null;
-
-              executeCommand(
-                self.configuration,
-                'test',
-                {
-                  createUser: 'admin',
-                  pwd: 'admin',
-                  roles: ['readWrite', 'dbAdmin'],
-                  digestPassword: true
-                },
-                { credentials },
-                function(createAdminUserErr, createAdminUserRes) {
-                  expect(createAdminUserRes).to.exist;
-                  expect(createAdminUserErr).to.be.null;
-                  // Attempt to connect
-                  var pool = new Pool(null, {
-                    host: self.configuration.host,
-                    port: self.configuration.port,
-                    bson: new Bson()
-                  });
-
-                  // Add event listeners
-                  pool.on('connect', function() {
-                    var query = new Query(
-                      new Bson(),
-                      'test.$cmd',
-                      { insert: 'test', documents: [{ a: 1 }] },
-                      { numberToSkip: 0, numberToReturn: 1 }
-                    );
-
-                    pool.write(query, { command: true, requestId: query.requestId }, function(
-                      loginErr,
-                      loginRes
-                    ) {
-                      expect(loginErr).to.be.null;
-                      expect(loginRes).to.exist;
-
-                      // Logout pool
-                      pool.logout('test', function(logoutErr) {
-                        expect(logoutErr).to.be.null;
-
-                        pool.write(query, { command: true, requestId: query.requestId }, function(
-                          postLogoutWriteErr,
-                          postLogoutWriteRes
-                        ) {
-                          expect(postLogoutWriteErr).to.not.be.null;
-                          expect(postLogoutWriteRes).to.not.exist;
-
-                          pool.destroy(true);
-                          done();
-                        });
-                      });
-                    });
-                  });
-
-                  // Start connection
-                  pool.connect(credentials);
-                }
-              );
-            }
-          );
-        });
-      });
-    }
-  });
-
-  // Skipped due to use of topology manager
-  it.skip('should correctly have auth wait for logout to finish', {
-    metadata: { requires: { topology: 'auth' } },
-
-    test: function(done) {
-      var self = this;
-
-      // Restart instance
-      self.configuration.manager.restart(true).then(function() {
-        locateAuthMethod(self.configuration, function(err, method) {
-          expect(err).to.be.null;
-
-          const credentials = new MongoCredentials({
-            mechanism: method,
-            source: 'admin',
-            username: 'root',
-            password: 'root'
-          });
-
-          executeCommand(
-            self.configuration,
-            'admin',
-            {
-              createUser: 'root',
-              pwd: 'root',
-              roles: [{ role: 'root', db: 'admin' }],
-              digestPassword: true
-            },
-            function(createRootUserErr, createRootUserRes) {
-              expect(createRootUserErr).to.be.null;
-              expect(createRootUserRes).to.exist;
-
-              executeCommand(
-                self.configuration,
-                'test',
-                {
-                  createUser: 'admin',
-                  pwd: 'admin',
-                  roles: ['readWrite', 'dbAdmin'],
-                  digestPassword: true
-                },
-                { credentials },
-                function(createAdminUserErr, createAdminUserRes) {
-                  expect(createAdminUserErr).to.be.null;
-                  expect(createAdminUserRes).to.exist;
-
-                  // Attempt to connect
-                  var pool = new Pool(null, {
-                    host: self.configuration.host,
-                    port: self.configuration.port,
-                    bson: new Bson()
-                  });
-
-                  // Add event listeners
-                  pool.on('connect', function() {
-                    var query = new Query(
-                      new Bson(),
-                      'test.$cmd',
-                      { insert: 'test', documents: [{ a: 1 }] },
-                      { numberToSkip: 0, numberToReturn: 1 }
-                    );
-
-                    pool.write(query, { requestId: query.requestId }, function(loginErr, loginRes) {
-                      expect(loginRes).to.exist;
-                      expect(loginErr).to.be.null;
-
-                      // Logout pool
-                      pool.logout('test', function(logoutErr) {
-                        expect(logoutErr).to.be.null;
-                      });
-
-                      pool.auth(credentials, function(testMethodErr, testMethodRes) {
-                        expect(testMethodRes).to.exist;
-                        expect(testMethodErr).to.be.null;
-
-                        pool.write(query, { requestId: query.requestId }, function(
-                          postLogoutWriteErr,
-                          postLogoutWriteRes
-                        ) {
-                          expect(postLogoutWriteRes).to.exist;
-                          expect(postLogoutWriteErr).to.be.null;
-
-                          pool.destroy(true);
-                          done();
-                        });
-                      });
-                    });
-                  });
-
-                  // Start connection
-                  pool.connect(credentials);
-                }
-              );
-            }
-          );
-        });
-      });
-    }
-  });
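  // The ordering guarantee exercised by the test above, in brief: auth is
  // expected to queue behind an in-flight logout rather than race it (sketch,
  // using the same pool and credentials as above):
  //
  //   pool.logout('test', logoutErr => expect(logoutErr).to.be.null);
  //   pool.auth(credentials, authErr => {
  //     // runs only once the logout has completed; writes issued here succeed
  //   });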
-
-  it('should correctly exit _execute loop when single available connection is destroyed', {
-    metadata: { requires: { topology: 'single' } },
-
-    test: function(done) {
-      // Enable connections accounting
-      Connection.enableConnectionAccounting();
-
-      // Attempt to connect
-      var pool = new Pool(null, {
-        host: this.configuration.host,
-        port: this.configuration.port,
-        bson: new Bson(),
-        size: 1,
-        socketTimeout: 500,
-        messageHandler: function() {}
-      });
-
-      // Add event listeners
-      pool.on('connect', function() {
-        // Execute ismaster should not cause cpu to start spinning
-        var query = new Query(
-          new Bson(),
-          'system.$cmd',
-          { ismaster: true },
-          { numberToSkip: 0, numberToReturn: 1 }
-        );
-
-        pool.write(query, function(initialQueryErr, initialQueryRes) {
-          expect(initialQueryRes).to.exist;
-          expect(initialQueryErr).to.be.null;
-
-          // Mark available connection as broken
-          var con = pool.availableConnections[0];
-          con.destroyed = true;
-
-          // Execute ismaster should not cause cpu to start spinning
-          query = new Query(
-            new Bson(),
-            'system.$cmd',
-            { ismaster: true },
-            { numberToSkip: 0, numberToReturn: 1 }
-          );
-
-          pool.write(query, function(secondQueryErr, secondQueryRes) {
-            expect(secondQueryRes).to.exist;
-            expect(secondQueryErr).to.be.null;
-
-            con.destroy();
-            pool.destroy();
-            done();
-          });
-        });
-      });
-
-      // Start connection
-      pool.connect();
-    }
-  });
-
-  it('should properly emit errors on forced destroy', {
-    metadata: { requires: { topology: 'single' } },
-
-    test: function(done) {
-      const pool = new Pool(null, {
-        host: this.configuration.host,
-        port: this.configuration.port,
-        bson: new Bson()
-      });
-
-      pool.on('connect', () => {
-        var query = new Query(
-          new Bson(),
-          'system.$cmd',
-          { ismaster: true },
-          { numberToSkip: 0, numberToReturn: 1 }
-        );
-
-        pool.write(query, function(err, result) {
-          expect(err).to.exist;
-          expect(err).to.match(/Pool was force destroyed/);
-          expect(result).to.not.exist;
-          done();
-        });
-
-        pool.destroy({ force: true });
-      });
-
-      pool.connect();
-    }
-  });
-
-  it('should support callback mode for connect', {
-    metadata: { requires: { topology: 'single' } },
-    test: function(done) {
-      const pool = new Pool(null, {
-        host: this.configuration.host,
-        port: this.configuration.port,
-        bson: new Bson()
-      });
-
-      pool.on('connect', () => done(new Error('connect was emitted')));
-      pool.connect(err => {
-        expect(err).to.not.exist;
-        setTimeout(() => {
-          pool.destroy(true, done);
-        }, 100); // wait to ensure event is not emitted
- }); - } - }); - - it('should support resetting', function(done) { - const pool = new Pool(null, { - host: this.configuration.host, - port: this.configuration.port, - bson: new Bson() - }); - - const isMasterQuery = new Query( - new Bson(), - 'system.$cmd', - { ismaster: true }, - { numberToSkip: 0, numberToReturn: 1 } - ); - - pool.once('connect', () => { - const connections = pool.allConnections().map(conn => conn.id); - expect(connections).to.have.length(1); - - pool.write(isMasterQuery, err => { - expect(err).to.not.exist; - - pool.reset(err => { - expect(err).to.not.exist; - - pool.write(isMasterQuery, err => { - expect(err).to.not.exist; - - // verify the previous connection was dropped, and a new connection was created - const newConnections = pool.allConnections().map(conn => conn.id); - expect(newConnections).to.have.length(1); - expect(newConnections[0]).to.not.equal(connections[0]); - - pool.destroy(done); - }); - }); - }); - }); - - pool.connect(); - }); -}); diff --git a/test/functional/core/replset_server_selection.test.js b/test/functional/core/replset_server_selection.test.js deleted file mode 100644 index 1fbd2fb2bdb..00000000000 --- a/test/functional/core/replset_server_selection.test.js +++ /dev/null @@ -1,152 +0,0 @@ -'use strict'; - -var expect = require('chai').expect, - fs = require('fs'), - p = require('path'), - ReplSetState = require('../../../lib/core/topologies/replset_state'), - MongoError = require('../../../lib/core/error').MongoError, - ReadPreference = require('../../../lib/core/topologies/read_preference'); - -describe('A replicaset with no primary', function() { - before(function() { - // These tests are not relevant to the new topology layer - if (this.configuration.usingUnifiedTopology()) this.skip(); - }); - - it('should correctly execute server selection tests', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - const config = this.configuration; - var path = p.resolve( - __dirname, - '../../spec/server-selection/server_selection/ReplicaSetNoPrimary/read' - ); - var entries = fs.readdirSync(path).filter(function(x) { - return x.indexOf('.json') !== -1; - }); - - // Execute each of the entries - entries.forEach(function(x) { - executeEntry(config, x, `${path}/${x}`); - }); - - done(); - } - }); -}); - -describe('A replicaset with a primary', function() { - before(function() { - // These tests are not relevant to the new topology layer - if (this.configuration.usingUnifiedTopology()) this.skip(); - }); - - it('should correctly execute server selection tests', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - const config = this.configuration; - var path = p.resolve( - __dirname, - '../../spec/server-selection/server_selection/ReplicaSetWithPrimary/read' - ); - - var entries = fs.readdirSync(path).filter(function(x) { - return x.indexOf('.json') !== -1; - }); - - // Execute each of the entries - entries.forEach(function(x) { - executeEntry(config, x, `${path}/${x}`); - }); - - done(); - } - }); -}); - -function convert(mode) { - if (mode.toLowerCase() === 'primarypreferred') return 'primaryPreferred'; - if (mode.toLowerCase() === 'secondarypreferred') return 'secondaryPreferred'; - return mode.toLowerCase(); -} - -function executeEntry(config, file, path) { - // Read and parse the json file - file = require(path); - - // Let's pick out the parts of the selection specification - var topologyDescription = file.topology_description; - var inLatencyWindow = file.in_latency_window; - var 
readPreference = file.read_preference; - - try { - // Create a Replset and populate it with dummy topology servers - var replset = new ReplSetState(); - replset.topologyType = topologyDescription.type; - // For each server add them to the state - topologyDescription.servers.forEach(function(s) { - var server = config.newTopology( - s.address.split(':')[0], - parseInt(s.address.split(':')[1], 10) - ); - - // Add additional information - if (s.avg_rtt_ms) server.lastIsMasterMS = s.avg_rtt_ms; - if (s.tags) server.ismaster = { tags: s.tags }; - // Ensure the server looks connected - server.isConnected = function() { - return true; - }; - - if (s.type === 'RSSecondary') { - replset.secondaries.push(server); - } else if (s.type === 'RSPrimary') { - replset.primary = server; - } else if (s.type === 'RSArbiter') { - replset.arbiters.push(server); - } - }); - - // Create read preference - let rp; - if (convert(readPreference.mode) !== 'primary' && readPreference.tag_sets) { - rp = new ReadPreference(convert(readPreference.mode), readPreference.tag_sets); - } else { - rp = new ReadPreference(convert(readPreference.mode)); - } - - // Perform a pickServer - var server = replset.pickServer(rp); - var foundWindow = null; - - // server should be in the latency window - for (var i = 0; i < inLatencyWindow.length; i++) { - var w = inLatencyWindow[i]; - - if (server.name === w.address) { - foundWindow = w; - break; - } - } - - if ( - ['ReplicaSetNoPrimary', 'Primary', 'ReplicaSetWithPrimary'].indexOf( - topologyDescription.type - ) !== -1 && - inLatencyWindow.length === 0 - ) { - if (server instanceof MongoError) { - expect(server.message).to.equal('no primary server available'); - } else { - expect(server).to.be.null; - } - } else { - expect(foundWindow).to.not.be.null; - } - } catch (err) { - console.log(err.stack); - process.exit(0); - } -} diff --git a/test/functional/core/replset_state.test.js b/test/functional/core/replset_state.test.js deleted file mode 100644 index 1c9e7575409..00000000000 --- a/test/functional/core/replset_state.test.js +++ /dev/null @@ -1,131 +0,0 @@ -'use strict'; - -const expect = require('chai').expect, - f = require('util').format, - p = require('path'), - fs = require('fs'), - ObjectId = require('bson').ObjectId, - ReplSetState = require('../../../lib/core/topologies/replset_state'); - -describe('ReplicaSet state', function() { - const path = p.resolve(__dirname, '../../spec/server-discovery-and-monitoring/rs'); - - fs.readdirSync(path) - .filter(x => x.indexOf('.json') !== -1) - .forEach(x => { - var testData = require(f('%s/%s', path, x)); - - it(testData.description, function(done) { - executeEntry(testData, done); - }); - }); -}); - -function executeEntry(testData, callback) { - var uri = testData.uri; - var phases = testData.phases; - - // Get replicaset name if any - var match = uri.match(/replicaSet=[a-z|A-Z|0-9]*/); - var replicaSet = match ? match.toString().split(/=/)[1] : null; - - // Replicaset - // Create a replset state - var state = new ReplSetState({ setName: replicaSet }); - - // Get all the server instances - var parts = uri - .split('mongodb://')[1] - .split('/')[0] - .split(','); - - // For each of the servers - parts.forEach(function(x) { - var params = x.split(':'); - state.update({ - name: f('%s:%s', params[0], params[1] ? 
parseInt(params[1], 10) : 27017), - lastIsMaster: function() { - return null; - }, - equals: function(s) { - if (typeof s === 'string') return s === this.name; - return s.name === this.name; - }, - destroy: function() {} - }); - }); - - // Run each phase - executePhases(phases, state, callback); -} - -function executePhases(phases, state, callback) { - if (phases.length === 0) { - return callback(null, null); - } - - executePhase(phases.shift(), state, err => { - if (err) return callback(err, null); - return executePhases(phases, state, callback); - }); -} - -function executePhase(phase, state, callback) { - var responses = phase.responses; - var outcome = phase.outcome; - - // Apply all the responses - responses.forEach(function(x) { - if (Object.keys(x[1]).length === 0) { - state.remove({ - name: x[0], - lastIsMaster: function() { - return null; - }, - equals: function(s) { - if (typeof s === 'string') return s === this.name; - return s.name === this.name; - }, - destroy: function() {} - }); - } else { - var ismaster = x[1]; - if (ismaster.electionId) ismaster.electionId = new ObjectId(ismaster.electionId.$oid); - - state.update({ - name: x[0], - lastIsMaster: function() { - return ismaster; - }, - equals: function(s) { - if (typeof s === 'string') return s === this.name; - return s.name === this.name; - }, - destroy: function() {} - }); - } - }); - - // Validate the state of the final outcome - for (var name in outcome.servers) { - try { - if (outcome.servers[name].electionId) { - outcome.servers[name].electionId = new ObjectId(outcome.servers[name].electionId.$oid); - } - - expect(state.set[name]).to.exist; - for (var n in outcome.servers[name]) { - if (outcome.servers[name][n]) { - expect(state.set[name][n]).to.eql(outcome.servers[name][n]); - } - } - } catch (e) { - return callback(e); - } - } - - // // Check the topology type - expect(state.topologyType).to.equal(outcome.topologyType); - expect(state.setName).to.equal(outcome.setName); - callback(null, null); -} diff --git a/test/functional/core/shared.js b/test/functional/core/shared.js index 915d9b8f2d7..4240c7a888a 100644 --- a/test/functional/core/shared.js +++ b/test/functional/core/shared.js @@ -1,10 +1,10 @@ 'use strict'; const EventEmitter = require('events'); -const Pool = require('../../../lib/core/connection/pool'); -const f = require('util').format; +const { ConnectionPool } = require('../../../lib/cmap/connection_pool'); +const { format: f } = require('util'); const bson = require('bson'); -const Query = require('../../../lib/core/connection/commands').Query; -const ReadPreference = require('../../../lib/core/topologies/read_preference'); +const { Query } = require('../../../lib/cmap/commands'); +const ReadPreference = require('../../../lib/read_preference'); function executeCommand(configuration, db, cmd, options, cb) { // Optional options @@ -17,7 +17,7 @@ function executeCommand(configuration, db, cmd, options, cb) { var port = options.port || configuration.port; // Attempt to connect - var pool = new Pool(null, { + var pool = new ConnectionPool(null, { host: host, port: port, bson: new bson() @@ -51,17 +51,17 @@ function executeCommand(configuration, db, cmd, options, cb) { } function locateAuthMethod(configuration, cb) { - var Pool = require('../../../lib/core/connection/pool'), + var ConnectionPool = require('../../../lib/cmap/connection_pool'), bson = require('bson'), f = require('util').format, - Query = require('../../../lib/core/connection/commands').Query; + { Query } = require('../../../lib/cmap/commands'); 
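+  // locateAuthMethod opens a short-lived pool and runs the { ismaster: true }
+  // command set up below; the callback then derives the SCRAM mechanism from
+  // the reply. The exact rule is assumed here, likely something like:
+  //
+  //   var method = res.result.maxWireVersion >= 3 ? 'scram-sha-1' : 'mongocr';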
// Set up operations var db = 'admin'; var cmd = { ismaster: true }; // Attempt to connect - var pool = new Pool(null, { + var pool = new ConnectionPool(null, { host: configuration.host, port: configuration.port, bson: new bson() diff --git a/test/functional/core/single_mocks/timeout.test.js b/test/functional/core/single_mocks/timeout.test.js deleted file mode 100644 index 17572723285..00000000000 --- a/test/functional/core/single_mocks/timeout.test.js +++ /dev/null @@ -1,336 +0,0 @@ -'use strict'; -var expect = require('chai').expect, - co = require('co'), - mock = require('mongodb-mock-server'); - -describe('Single Timeout (mocks)', function() { - before(function() { - if (this.configuration.usingUnifiedTopology()) { - // The new SDAM layer always reconnects, so these tests are no longer relevant. - return this.skip(); - } - }); - - afterEach(() => mock.cleanup()); - - it('Should correctly timeout socket operation and then correctly re-execute', { - metadata: { - requires: { - generators: true, - topology: 'single' - } - }, - - test: function(done) { - const config = this.configuration; - - // Current index for the ismaster - var currentStep = 0; - // Primary stop responding - var stopRespondingPrimary = false; - - // Default message fields - var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER); - - // Primary server states - var serverIsMaster = [Object.assign({}, defaultFields)]; - - // Boot the mock - co(function*() { - const server = yield mock.createServer(); - - server.setMessageHandler(request => { - var doc = request.document; - if (doc.ismaster && currentStep === 0) { - request.reply(serverIsMaster[0]); - currentStep += 1; - } else if (doc.insert && currentStep === 1) { - // Stop responding to any calls (emulate dropping packets on the floor) - if (stopRespondingPrimary) { - // yield timeoutPromise(3000); - // continue; - return; - } - - currentStep += 1; - } else if (doc.ismaster && currentStep === 2) { - request.reply(serverIsMaster[0]); - } else if (doc.insert && currentStep === 2) { - request.reply({ ok: 1, n: doc.documents, lastOp: new Date() }); - } - }); - - // Start dropping the packets - setTimeout(function() { - stopRespondingPrimary = true; - }, 5000); - - var replset = config.newTopology(server.address().host, server.address().port, { - connectionTimeout: 5000, - socketTimeout: 1000, - size: 1 - }); - - // Not done - var finished = false; - - // Add event listeners - replset.once('connect', function(_server) { - _server.insert('test.test', [{ created: new Date() }], function(err, r) { - expect(r).to.not.exist; - expect(err).to.exist; - - function wait() { - setTimeout(function() { - _server.insert('test.test', [{ created: new Date() }], function(_err, _r) { - if (_r && !finished) { - finished = true; - expect(_r.connection.port).to.equal(server.address().port); - replset.destroy({ force: true }); - done(); - } else { - wait(); - } - }); - }, 500); - } - - wait(); - }); - }); - - replset.on('error', done); - replset.connect(); - }); - } - }); - - it('Should correctly recover from an immediate shutdown mid insert', { - metadata: { - requires: { - generators: true, - topology: 'single' - } - }, - - test: function(done) { - const config = this.configuration; - - // Current index for the ismaster - var currentStep = 0; - // Should fail due to broken pipe - var brokenPipe = false; - - // Default message fields - var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER); - - // Primary server states - var serverIsMaster = [Object.assign({}, defaultFields)]; - - 
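      // As in the previous test, the mock is scripted with mongodb-mock-server:
      // co() drives a generator that yields mock.createServer(), and
      // setMessageHandler supplies each reply, e.g.:
      //
      //   const server = yield mock.createServer();
      //   server.setMessageHandler(request => {
      //     if (request.document.ismaster) request.reply(serverIsMaster[0]);
      //   });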
co(function*() { - const mockServer = yield mock.createServer(0, 'localhost', { - onRead: function(_server, connection) { - // Force EPIPE error - if (currentStep === 1) { - // Destroy connection mid write - connection.destroy(); - // Reset the mock to accept ismasters - setTimeout(function() { - currentStep += 1; - }, 10); - // Return connection was destroyed - return true; - } - } - }); - - mockServer.setMessageHandler(request => { - var doc = request.document; - if (doc.ismaster && currentStep === 0) { - currentStep += 1; - request.reply(serverIsMaster[0]); - } else if (doc.insert && currentStep === 2) { - currentStep += 1; - request.reply({ ok: 1, n: doc.documents, lastOp: new Date() }); - } else if (doc.ismaster) { - request.reply(serverIsMaster[0]); - } - }); - - var server = config.newTopology(mockServer.address().host, mockServer.address().port, { - connectionTimeout: 3000, - socketTimeout: 2000, - size: 1 - }); - - var docs = []; - // Create big insert message - for (var i = 0; i < 1000; i++) { - docs.push({ - a: i, - string: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string1: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string2: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string3: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string4: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string5: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string6: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string7: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string8: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string9: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string10: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string11: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string12: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string13: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string14: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string15: - 'hello world hello world hello world hello world hello world hello world hello world hello 
world hello world hello world hello world hello world', - string16: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string17: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string18: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string19: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string20: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string21: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string22: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string23: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string24: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string25: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string26: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string27: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world', - string28: - 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world' - }); - } - - // Add event listeners - server.once('connect', function(_server) { - _server.insert('test.test', docs, function(err, r) { - expect(r).to.not.exist; - expect(err).to.exist; - brokenPipe = true; - }); - }); - - server.once('reconnect', function(_server) { - _server.insert('test.test', [{ created: new Date() }], function(err, r) { - expect(r).to.exist; - expect(brokenPipe).to.equal(true); - server.destroy(); - done(); - }); - }); - - server.on('error', done); - setTimeout(function() { - server.connect(); - }, 100); - }); - } - }); - - it.skip( - 'Should not start double reconnect timeouts due to socket timeout during attemptReconnect', - { - metadata: { - requires: { - generators: true, - topology: 'single' - } - }, - - test: function(done) { - const config = this.configuration; - - // Current index for the ismaster - var currentStep = 0; - - // Default message fields - var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER); - - // Primary server states - var serverIsMaster = [Object.assign({}, defaultFields)]; - - // Boot the mock - co(function*() { - const mockServer = yield mock.createServer(); - - mockServer.setMessageHandler(request => { - if (currentStep === 1) { - // yield timeoutPromise(5000); - // continue; - return; - } - - var doc = request.document; - if (doc.ismaster && currentStep === 0) { - request.reply(serverIsMaster[0]); - currentStep += 1; - } - 
}); - - var server = config.newTopology(mockServer.address().host, mockServer.address().port, { - connectionTimeout: 2000, - socketTimeout: 1000, - size: 1 - }); - - // Add event listeners - server.once('connect', function() { - // _server.insert('test.test', [{created:new Date()}], function(err, r) { - // test.ok(err != null); - // // console.dir(err) - // - // function wait() { - // setTimeout(function() { - // _server.insert('test.test', [{created:new Date()}], function(err, r) { - // if (r && !done) { - // done = true; - // test.equal(37019, r.connection.port); - // replset.destroy(); - // running = false; - // test.done(); - // } else { - // wait(); - // } - // }); - // }, 500); - // } - // - // wait(); - // }); - }); - - server.on('error', done); - server.connect(); - }); - } - } - ); -}); diff --git a/test/functional/core/topology.test.js b/test/functional/core/topology.test.js index 06bb36c6951..017f9a746e0 100644 --- a/test/functional/core/topology.test.js +++ b/test/functional/core/topology.test.js @@ -1,7 +1,7 @@ 'use strict'; const expect = require('chai').expect; -describe('Topology', { requires: { unifiedTopology: true } }, function() { +describe('Topology', function() { it('should correctly track states of a topology', function(done) { const topology = this.configuration.newTopology(); diff --git a/test/functional/crud_spec.test.js b/test/functional/crud_spec.test.js index 5983b497ba2..f71162eb7e7 100644 --- a/test/functional/crud_spec.test.js +++ b/test/functional/crud_spec.test.js @@ -23,7 +23,6 @@ function findScenarios() { const readScenarios = findScenarios('v1', 'read'); const writeScenarios = findScenarios('v1', 'write'); -const dbScenarios = findScenarios('db'); const testContext = {}; describe('CRUD spec', function() { @@ -103,37 +102,6 @@ describe('CRUD spec', function() { }); }); - describe('db', function() { - dbScenarios.forEach(scenarioData => { - const scenarioName = scenarioData[0]; - const scenario = scenarioData[1]; - scenario.name = scenarioName; - const databaseName = scenarioData[1].database_name; - - const metadata = { - requires: { - topology: ['single', 'replicaset', 'sharded'] - } - }; - - if (scenario.minServerVersion) { - metadata.requires.mongodb = `>=${scenario.minServerVersion}`; - } - - describe(scenarioName, function() { - scenario.tests.forEach(scenarioTest => { - it(scenarioTest.description, { - metadata, - test: function() { - const db = testContext.client.db(databaseName); - return executeDbAggregateTest(scenarioTest, db); - } - }); - }); - }); - }); - }); - function transformBulkWriteResult(result) { const r = {}; r.insertedCount = result.nInserted; @@ -399,19 +367,6 @@ describe('CRUD spec', function() { return findPromise.then(assertReadExpectations(db, collection, scenarioTest.outcome)); } - function executeDbAggregateTest(scenarioTest, db) { - const options = {}; - if (scenarioTest.operation.arguments.allowDiskUse) { - options.allowDiskUse = scenarioTest.operation.arguments.allowDiskUse; - } - - const pipeline = scenarioTest.operation.arguments.pipeline; - return db - .aggregate(pipeline, options) - .toArray() - .then(assertReadExpectations(db, null, scenarioTest.outcome)); - } - function executeScenario(scenario, scenarioTest, configuration, context) { const collection = context.db.collection( 'crud_spec_tests_' + scenario.name + '_' + scenarioTest.operation.name diff --git a/test/functional/cursor.test.js b/test/functional/cursor.test.js index f039e6f82e7..7ee4d9df2cf 100644 --- a/test/functional/cursor.test.js +++ 
b/test/functional/cursor.test.js @@ -1,15 +1,12 @@ 'use strict'; -const test = require('./shared').assert; -const setupDatabase = require('./shared').setupDatabase; +const { assert: test } = require('./shared'); +const { setupDatabase } = require('./shared'); const fs = require('fs'); -const expect = require('chai').expect; -const Long = require('bson').Long; +const { expect } = require('chai'); +const { Long } = require('bson'); const sinon = require('sinon'); -const Buffer = require('safe-buffer').Buffer; -const Writable = require('stream').Writable; - -const core = require('../../lib/core'); -const ReadPreference = core.ReadPreference; +const { Writable } = require('stream'); +const ReadPreference = require('../../lib/read_preference'); describe('Cursor', function() { before(function() { @@ -296,15 +293,8 @@ describe('Cursor', function() { expect(err).to.not.exist; const db = client.db(configuration.db); - let internalClientCursor; - if (configuration.usingUnifiedTopology()) { - internalClientCursor = sinon.spy(client.topology, 'cursor'); - } else { - internalClientCursor = sinon.spy(client.topology.s.coreTopology, 'cursor'); - } - + const internalClientCursor = sinon.spy(client.topology, 'cursor'); const expectedReadPreference = new ReadPreference(ReadPreference.SECONDARY); - const cursor = db.collection('countTEST').find({ qty: { $gt: 4 } }); cursor.count(true, { readPreference: ReadPreference.SECONDARY }, err => { expect(err).to.be.null; @@ -2139,75 +2129,6 @@ describe('Cursor', function() { } }); - /** - * @ignore - * @api private - */ - it('cursor stream errors connection force closed', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { - mongodb: '<=3.5.0', // NOTE: remove this when SERVER-30576 is resolved - topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], - unifiedTopology: false - } - }, - - // The actual test we wish to run - test: function(done) { - var configuration = this.configuration; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); - client.connect(function(err, client) { - var db = client.db(configuration.db); - test.equal(null, err); - - db.createCollection('cursor_stream_errors', function(err, collection) { - test.equal(null, err); - - var docs = []; - for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), function(err) { - test.equal(null, err); - - var finished = 0, - i = 0; - - var stream = collection.find({}, { batchSize: 5 }).stream(); - - stream.on('data', function() { - if (++i === 5) { - client.topology - .connections()[0] - .write(Buffer.from('312312321321askdjljsaNCKnablibh')); - } - }); - - stream.once('close', testDone('close')); - stream.once('error', testDone('error')); - - function testDone() { - return function() { - ++finished; - - if (finished === 2) { - setTimeout(function() { - test.equal(5, i); - test.equal(2, finished); - test.equal(true, stream.isClosed()); - client.close(true, done); - }, 150); - } - }; - } - }); - }); - }); - } - }); - /** * @ignore * @api private @@ -4030,7 +3951,6 @@ describe('Cursor', function() { test: function(done) { var docs = []; var configuration = this.configuration; - var Long = configuration.require.Long; for (var i = 0; i < 50; i++) { var d = new Date().getTime() + i * 1000; diff --git a/test/functional/cursorstream.test.js 
b/test/functional/cursorstream.test.js index 58c24296351..ac67bdaa83f 100644 --- a/test/functional/cursorstream.test.js +++ b/test/functional/cursorstream.test.js @@ -2,6 +2,7 @@ var expect = require('chai').expect; var Buffer = require('safe-buffer').Buffer; const setupDatabase = require('./shared').setupDatabase; +const { Binary } = require('../..'); describe('Cursor Streams', function() { before(function() { @@ -91,7 +92,6 @@ describe('Cursor Streams', function() { // The actual test we wish to run test: function(done) { var self = this; - var Binary = self.configuration.require.Binary; var docs = []; for (var i = 0; i < 10000; i++) { @@ -167,7 +167,6 @@ describe('Cursor Streams', function() { // The actual test we wish to run test: function(done) { var self = this; - var Binary = self.configuration.require.Binary; var docs = []; var counter = 0; var counter2 = 0; @@ -219,9 +218,7 @@ describe('Cursor Streams', function() { // The actual test we wish to run test: function(done) { - var self = this, - Binary = self.configuration.require.Binary; - + var self = this; var client = self.configuration.newClient(self.configuration.writeConcernMax(), { poolSize: 1 }); diff --git a/test/functional/custom_pk.test.js b/test/functional/custom_pk.test.js index feac1b4a9fb..9d1b6c5e519 100644 --- a/test/functional/custom_pk.test.js +++ b/test/functional/custom_pk.test.js @@ -1,6 +1,7 @@ 'use strict'; var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; +const { ObjectID } = require('../..'); describe('Custom PK', function() { before(function() { @@ -18,7 +19,6 @@ describe('Custom PK', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // Custom factory (need to provide a 12 byte array); var CustomPKFactory = function() {}; diff --git a/test/functional/db.test.js b/test/functional/db.test.js index b9503f35034..75be793c26a 100644 --- a/test/functional/db.test.js +++ b/test/functional/db.test.js @@ -2,6 +2,7 @@ var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; const expect = require('chai').expect; +const { Db, DBRef } = require('../..'); describe('Db', function() { before(function() { @@ -18,9 +19,6 @@ describe('Db', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var Db = configuration.require.Db; - // Assert rename try { new Db(5); @@ -145,89 +143,6 @@ describe('Db', function() { } }); - /** - * @ignore - */ - it('shouldCorrectlyPerformAutomaticConnect', { - metadata: { requires: { topology: 'single' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // The unified topology deprecates autoReconnect - return this.skip(); - } - - var client = configuration.newClient(configuration.writeConcernMax(), { - poolSize: 1, - auto_reconnect: true - }); - - client.connect(function(err, client) { - var automatic_connect_client = client.db(configuration.db); - - var closeListener = function() { - var collection = automatic_connect_client.collection('test_object_id_generation_data2'); - collection.insert({ name: 'Patty', age: 34 }, configuration.writeConcernMax(), function( - err, - r - ) { - test.equal(1, r.ops.length); - test.ok(r.ops[0]._id.toHexString().length === 24); - - collection.findOne({ name: 'Patty' }, function(err, document) { - 
test.equal(r.ops[0]._id.toHexString(), document._id.toHexString()); - client.close(done); - }); - }); - }; - - automatic_connect_client.once('close', closeListener); - automatic_connect_client.serverConfig.connections()[0].destroy(); - }); - } - }); - - /** - * @ignore - */ - it.skip('shouldCorrectlyPerformAutomaticConnectWithMaxBufferSize0', { - metadata: { requires: { topology: 'single' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // The unified topology does not use a store - return this.skip(); - } - - var client = configuration.newClient( - { w: 1 }, - { poolSize: 1, auto_reconnect: true, bufferMaxEntries: 0 } - ); - - client.connect(function(err, client) { - var automatic_connect_client = client.db(configuration.db); - - var closeListener = function() { - var collection = automatic_connect_client.collection('test_object_id_generation_data2'); - collection.insert({ name: 'Patty', age: 34 }, configuration.writeConcernMax(), function( - err - ) { - test.ok(err != null); - test.ok(err.message.indexOf('0') !== -1); - client.close(done); - }); - }; - - automatic_connect_client.once('close', closeListener); - automatic_connect_client.serverConfig.connections()[0].destroy(); - }); - } - }); - /** * @ignore */ @@ -262,8 +177,6 @@ describe('Db', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var DBRef = configuration.require.DBRef; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -659,41 +572,4 @@ describe('Db', function() { }); } }); - - /** - * @ignore - */ - it('should correctly execute close function in order', { - metadata: { - requires: { - topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], - mongodb: '>= 2.8.0', - unifiedTopology: false - } - }, - - // The actual test we wish to run - test: function(done) { - var configuration = this.configuration; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); - - client.connect(function(err, client) { - expect(err).to.not.exist; - - // run one command to ensure connections exist, otherwise `close` is near immediate - client.db('admin').command({ ping: 1 }, err => { - expect(err).to.not.exist; - - var items = []; - items.push(1); - client.close(function() { - expect(items).to.have.length(2); - done(); - }); - - items.push(2); - }); - }); - } - }); }); diff --git a/test/functional/decimal128.test.js b/test/functional/decimal128.test.js index 59ac5ccf7ab..bf3b241fa5c 100644 --- a/test/functional/decimal128.test.js +++ b/test/functional/decimal128.test.js @@ -1,6 +1,7 @@ 'use strict'; var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; +const { Decimal128 } = require('../..'); describe('Decimal128', function() { before(function() { @@ -23,8 +24,6 @@ describe('Decimal128', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Decimal128 = configuration.require.Decimal128; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); diff --git a/test/functional/domain.test.js b/test/functional/domain.test.js index 21330c66ce7..0f766a4fd5b 100644 --- a/test/functional/domain.test.js +++ 
b/test/functional/domain.test.js @@ -105,89 +105,4 @@ describe('Domains', function() { }); } }); - - /** - * @ignore - */ - it('shouldStayInCorrectDomainForQueuedReadCommand', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - // The actual test we wish to run - test: function(done) { - var Domain = require('domain'); - var domainInstance = Domain.create(); - var configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // The unified topology does not use a store - return this.skip(); - } - - var client = configuration.newClient( - { w: 0 }, - { poolSize: 1, auto_reconnect: true, domainsEnabled: true, bufferMaxEntries: 0 } - ); - - client.connect(function(err, client) { - var db = client.db(configuration.db); - var connection = client.topology.connections()[0]; - var collection = db.collection('test'); - connection.destroy(); - - domainInstance.run(function() { - collection.count({}, function(err) { - test.ok(err != null); - test.ok(process.domain === domainInstance); - domainInstance.exit(); - client.close(true, done); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('shouldStayInCorrectDomainForQueuedWriteCommand', { - metadata: { - requires: { - topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] - } - }, - - // The actual test we wish to run - test: function(done) { - var Domain = require('domain'); - var domainInstance = Domain.create(); - var configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // The unified topology does not use a store - return this.skip(); - } - - var client = configuration.newClient( - { w: 1 }, - { poolSize: 1, auto_reconnect: true, domainsEnabled: true, bufferMaxEntries: 0 } - ); - - client.connect(function(err, client) { - test.ok(!err); - var db = client.db(configuration.db); - var connection = client.topology.connections()[0]; - var collection = db.collection('test'); - connection.destroy(); - - domainInstance.run(function() { - collection.insert({ field: 123 }, function(err) { - test.ok(err != null); - test.ok(process.domain === domainInstance); - domainInstance.exit(); - client.close(true, done); - }); - }); - }); - } - }); }); diff --git a/test/functional/find.test.js b/test/functional/find.test.js index 86ef96d9bb4..fcd64fc05ed 100644 --- a/test/functional/find.test.js +++ b/test/functional/find.test.js @@ -4,6 +4,7 @@ const setupDatabase = require('./shared').setupDatabase; const expect = require('chai').expect; const Buffer = require('safe-buffer').Buffer; const sinon = require('sinon'); +const { Code, ObjectID, Long, Binary } = require('../..'); describe('Find', function() { before(function() { @@ -534,8 +535,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Code = configuration.require.Code; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -652,8 +651,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -692,7 +689,6 @@ describe('Find', function() { // The actual test we wish to run test: 
function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -770,8 +766,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -1325,8 +1319,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -1374,8 +1366,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -1409,9 +1399,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID, - Long = configuration.require.Long; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -1462,7 +1449,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; var p_client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1, auto_reconnect: false @@ -1510,8 +1496,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -1578,8 +1562,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -1712,8 +1694,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -1750,7 +1730,6 @@ describe('Find', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; var p_client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1, auto_reconnect: false @@ -2184,7 +2163,6 @@ describe('Find', function() { // The actual test we wish 
to run test: function(done) { var configuration = this.configuration; - var Binary = configuration.require.Binary; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -3095,11 +3073,6 @@ describe('Find', function() { const config = this.configuration; const client = config.newClient({}, { monitorCommands: true, readPreference: 'secondary' }); - if (!config.usingUnifiedTopology()) { - this.skip(); - return; - } - client.connect((err, client) => { expect(err).to.not.exist; diff --git a/test/functional/gridfs.test.js b/test/functional/gridfs.test.js index 69ce6561470..d47d0587110 100644 --- a/test/functional/gridfs.test.js +++ b/test/functional/gridfs.test.js @@ -7,6 +7,7 @@ const test = require('./shared').assert, child_process = require('child_process'), expect = require('chai').expect, Buffer = require('safe-buffer').Buffer; +const { GridStore, ObjectID, Long, Chunk } = require('../..'); describe('GridFS', function() { before(function() { @@ -24,8 +25,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -58,7 +57,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -93,7 +91,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -127,8 +124,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -164,8 +159,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -210,8 +203,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -248,8 +239,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, 
client) { expect(err).to.not.exist; @@ -291,8 +280,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -369,8 +356,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -449,8 +434,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -521,8 +504,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -565,8 +546,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -609,8 +588,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -651,8 +628,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -706,8 +681,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -775,8 +748,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { expect(err).to.not.exist; @@ -863,8 +834,6 @@ describe('GridFS', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, 
client) {
        expect(err).to.not.exist;
@@ -903,8 +872,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -953,8 +920,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1002,8 +967,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1052,8 +1015,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1097,9 +1058,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1141,8 +1099,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1183,8 +1139,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
       var asset = { source: new ObjectID() };

       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
@@ -1216,9 +1170,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1260,9 +1211,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1319,9 +1267,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1380,9 +1325,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1443,9 +1385,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1502,9 +1441,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1560,9 +1496,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1621,8 +1554,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1682,8 +1613,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1740,8 +1669,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1791,9 +1718,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1858,8 +1782,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1902,8 +1824,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1944,8 +1864,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -1985,9 +1903,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2054,9 +1969,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2122,8 +2034,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2205,8 +2115,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2257,9 +2165,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2318,8 +2223,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2377,8 +2280,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2426,8 +2327,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2479,8 +2378,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2629,8 +2526,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2686,7 +2581,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });

       client.connect(function(err, client) {
@@ -2739,9 +2633,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        Chunk = configuration.require.Chunk;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2779,9 +2670,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        Chunk = configuration.require.Chunk;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2818,8 +2706,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2864,8 +2750,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2904,8 +2788,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -2942,8 +2824,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3000,8 +2880,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3067,8 +2945,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3123,8 +2999,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3162,8 +3036,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3191,8 +3063,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3244,8 +3114,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3285,9 +3153,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
    test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3329,8 +3194,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3394,9 +3257,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        Long = configuration.require.Long;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3447,8 +3307,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3505,8 +3363,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3531,8 +3387,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3559,9 +3413,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -3600,8 +3451,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        fs = require('fs');

       // Use connect method to connect to the Server
       const client = configuration.newClient({}, { sslValidate: false });
@@ -3644,7 +3493,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore;

       // Use connect method to connect to the Server
       const client = configuration.newClient({}, { sslValidate: false });
@@ -3713,8 +3561,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;
       var id = new ObjectID();

       const client = configuration.newClient({}, { sslValidate: false });
@@ -3795,8 +3641,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;

       // Create a test buffer
       var buffer = Buffer.alloc(200033);
@@ -3836,8 +3680,6 @@ describe('GridFS', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridStore = configuration.require.GridStore,
-        ObjectID = configuration.require.ObjectID;

       var started = [];
       var succeeded = [];
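Every hunk above makes the same mechanical change to the GridFS tests: the per-test `var GridStore = configuration.require.GridStore, ...` lookups (plus the blank line after them) are deleted in favor of a single import from the package root. A minimal sketch of the pattern, assuming the root export re-exports these constructors as the other files touched by this diff show:

    // once, near the top of the test file (path assumes test/functional/*):
    const { GridStore, ObjectID, Chunk, Long } = require('../..');

    // each test body then uses the constructors directly, e.g.:
    var id = new ObjectID();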
diff --git a/test/functional/gridfs_stream.test.js b/test/functional/gridfs_stream.test.js
index 02caf0a4bcb..0aa083e9fd7 100644
--- a/test/functional/gridfs_stream.test.js
+++ b/test/functional/gridfs_stream.test.js
@@ -8,6 +8,7 @@ const crypto = require('crypto'),
   setupDatabase = require('./shared').setupDatabase,
   expect = require('chai').expect,
   Buffer = require('safe-buffer').Buffer;
+const { GridFSBucket, ObjectId } = require('../..');

 describe('GridFS Stream', function() {
   before(function() {
@@ -27,8 +28,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
@@ -111,8 +110,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -196,8 +193,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -262,9 +257,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket,
-        ObjectId = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -304,8 +296,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -357,8 +347,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -417,8 +405,6 @@ describe('GridFS Stream', function() {

     test: function(done) {
       const configuration = this.configuration;
-      const GridFSBucket = configuration.require.GridFSBucket;
-
       const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect((err, client) => {
         expect(err).to.not.exist;
@@ -460,8 +446,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -520,8 +504,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -582,8 +564,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -646,8 +626,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -718,8 +696,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -770,8 +746,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -815,8 +789,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -876,8 +848,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -935,8 +905,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -981,8 +949,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -1024,8 +990,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -1066,8 +1030,6 @@ describe('GridFS Stream', function() {
       }

       const configuration = this.configuration;
-      const GridFSBucket = configuration.require.GridFSBucket;
-
       const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         const db = client.db(configuration.db);
@@ -1105,8 +1067,6 @@ describe('GridFS Stream', function() {

     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -1162,8 +1122,6 @@ describe('GridFS Stream', function() {

     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -1347,7 +1305,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
@@ -1414,8 +1371,6 @@ describe('GridFS Stream', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var GridFSBucket = configuration.require.GridFSBucket;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       // LINE var MongoClient = require('mongodb').MongoClient,
       // LINE   test = require('assert');
diff --git a/test/functional/ignore_undefined.test.js b/test/functional/ignore_undefined.test.js
index 824b6b6694c..01d285a7aac 100644
--- a/test/functional/ignore_undefined.test.js
+++ b/test/functional/ignore_undefined.test.js
@@ -1,6 +1,7 @@
 'use strict';
 var test = require('./shared').assert;
 var setupDatabase = require('./shared').setupDatabase;
+const { ObjectId } = require('../..');

 describe('Ignore Undefined', function() {
   before(function() {
@@ -101,8 +102,6 @@ describe('Ignore Undefined', function() {
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectId = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), {
         poolSize: 1,
         ignoreUndefined: true
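For context on the `ignoreUndefined: true` option this surviving test exercises: when enabled, the BSON serializer drops fields whose value is `undefined` instead of coercing them to `null`. A small sketch (connection string illustrative):

    const { MongoClient } = require('mongodb');
    const client = new MongoClient('mongodb://localhost:27017/test', { ignoreUndefined: true });
    // insertOne({ a: 1, b: undefined }) is persisted as { a: 1 },
    // rather than { a: 1, b: null } under the default serializer settings.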
diff --git a/test/functional/index.test.js b/test/functional/index.test.js
index 25f0d061404..c7aadacc7dc 100644
--- a/test/functional/index.test.js
+++ b/test/functional/index.test.js
@@ -1034,48 +1034,6 @@ describe('Indexes', function() {
     }
   });

-  /**
-   * @ignore
-   */
-  it('should correctly error out due to driver close', {
-    metadata: {
-      requires: { topology: ['single'] },
-      sessions: { skipLeakTests: true }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      var configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // The new topology type has loose concepts of 'closing' and 'opening' a client. It will
-        // simply attempt here to retry the connection and reconnect, so this is a bad test for
-        // the driver in that configuration.
-        return this.skip();
-      }
-
-      var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
-      client.connect(function(err, client) {
-        var db = client.db(configuration.db);
-        client.close(function() {
-          setTimeout(() => {
-            db.createCollection('nonexisting', { w: 1 }, function(err) {
-              test.ok(err != null);
-              db.collection('nonexisting', { strict: true }, function(err) {
-                test.ok(err != null);
-                db.collection('nonexisting', { strict: false }, function(err) {
-                  // When set to false (default) it should not create an error
-                  test.ok(err === null);
-                  setTimeout(() => done());
-                });
-              });
-            });
-          });
-        });
-      });
-    }
-  });
-
   /**
    * @ignore
    */
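The deleted test relied on `strict` collection access together with legacy close/reconnect behavior that the unified topology no longer reproduces. The `strict` pattern itself, for reference:

    // strict: true passes an error to the callback when the collection does
    // not already exist; strict: false (the default) does not.
    db.collection('nonexisting', { strict: true }, function(err, collection) {
      // err != null for a missing collection (or a closed client, as the
      // deleted test asserted)
    });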
diff --git a/test/functional/insert.test.js b/test/functional/insert.test.js
index d560ea77f34..64c7ca69485 100644
--- a/test/functional/insert.test.js
+++ b/test/functional/insert.test.js
@@ -6,6 +6,19 @@ const test = require('./shared').assert,
   normalizedFunctionString = require('bson/lib/bson/parser/utils').normalizedFunctionString,
   Buffer = require('safe-buffer').Buffer;

+const {
+  Long,
+  Timestamp,
+  ObjectID,
+  DBRef,
+  Symbol,
+  Double,
+  Binary,
+  MinKey,
+  MaxKey,
+  Code
+} = require('../..');
+
 /**
  * Module for parsing an ISO 8601 formatted string into a Date object.
  * @ignore
@@ -233,11 +246,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID,
-        Binary = configuration.require.Binary,
-        Code = configuration.require.Code,
-        DBRef = configuration.require.DBRef;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -383,11 +391,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID,
-        Binary = configuration.require.Binary,
-        Code = configuration.require.Code,
-        DBRef = configuration.require.DBRef;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -477,8 +480,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Long = configuration.require.Long;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -547,9 +548,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Timestamp = configuration.require.Timestamp,
-        Long = configuration.require.Long;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -620,10 +618,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
-      var configuration = this.configuration;
-      var DBRef = configuration.require.DBRef,
-        ObjectID = configuration.require.ObjectID;
-
       var dbref = new DBRef('foo', ObjectID.createFromHexString('fc24a04d4560531f00000000'), null);
       JSON.stringify(dbref);
       done();
@@ -716,8 +710,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Binary = configuration.require.Binary;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -818,9 +810,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var DBRef = configuration.require.DBRef,
-        ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -960,8 +949,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -993,8 +980,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID;
-
       var doc = {
         _id: new ObjectID('4e886e687ff7ef5e00000162'),
         str: 'foreign',
@@ -1039,9 +1024,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID,
-        Timestamp = configuration.require.Timestamp;
-
       var doc = {
         _id: new ObjectID('4e886e687ff7ef5e00000162'),
         str: 'foreign',
@@ -1085,8 +1067,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID;
-
       var doc = {
         _id: new ObjectID('4e886e687ff7ef5e00000162'),
         $key: 'foreign'
@@ -1279,8 +1259,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID;
-
       var doc = {
         _id: new ObjectID(),
         Prop1: 'p1',
@@ -1524,8 +1502,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Binary = configuration.require.Binary;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -1721,14 +1697,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID,
-        Symbol = configuration.require.Symbol,
-        Double = configuration.require.Double,
-        Binary = configuration.require.Binary,
-        MinKey = configuration.require.MinKey,
-        MaxKey = configuration.require.MaxKey,
-        Code = configuration.require.Code;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -1810,14 +1778,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var ObjectID = configuration.require.ObjectID,
-        Symbol = configuration.require.Symbol,
-        Double = configuration.require.Double,
-        Binary = configuration.require.Binary,
-        MinKey = configuration.require.MinKey,
-        MaxKey = configuration.require.MaxKey,
-        Code = configuration.require.Code;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -1896,8 +1856,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Timestamp = configuration.require.Timestamp;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -2072,8 +2030,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Long = configuration.require.Long;
-
       var o = configuration.writeConcernMax();
       o.promoteLongs = false;
       var client = configuration.newClient(configuration.writeConcernMax(), {
@@ -2113,7 +2069,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Long = configuration.require.Long;
       var o = configuration.writeConcernMax();
       o.promoteLongs = false;

@@ -2180,8 +2135,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Long = configuration.require.Long;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -2220,8 +2173,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Long = configuration.require.Long;
-
       var client = configuration.newClient(configuration.writeConcernMax(), {
         poolSize: 1,
         promoteLongs: false
@@ -2260,8 +2211,6 @@ describe('Insert', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Long = configuration.require.Long;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
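All of the insert.test.js hunks swap per-test `configuration.require.*` lookups for the destructured import added at the head of the file. A small sketch of those re-exported BSON constructors in use (values illustrative):

    const { Long, Timestamp, Binary, ObjectID } = require('../..');

    var doc = {
      _id: new ObjectID(),
      big: Long.fromString('9223372036854775807'),
      ts: new Timestamp(0, 1), // (low, high) in this bson version
      raw: new Binary(Buffer.from('abc'))
    };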
diff --git a/test/functional/logger.test.js b/test/functional/logger.test.js
index 0fca9bff443..01b0cf21968 100644
--- a/test/functional/logger.test.js
+++ b/test/functional/logger.test.js
@@ -1,6 +1,7 @@
 'use strict';
 var expect = require('chai').expect;
 var connectToDb = require('./shared').connectToDb;
+const Logger = require('../../lib/logger');

 describe('Logger', function() {
   /**
@@ -13,7 +14,6 @@ describe('Logger', function() {
     // The actual test we wish to run
     test: function(done) {
       var self = this;
-      var Logger = self.configuration.require.Logger;
       var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
         poolSize: 1
       });
@@ -59,8 +59,7 @@ describe('Logger', function() {

     // The actual test we wish to run
     test: function(done) {
-      var self = this,
-        Logger = self.configuration.require.Logger;
+      var self = this;

       // set a custom logger per http://mongodb.github.io/node-mongodb-native/2.0/tutorials/logging/
       Logger.setCurrentLogger(function() {});
@@ -94,8 +93,7 @@ describe('Logger', function() {

     // The actual test we wish to run
     test: function(done) {
-      var self = this,
-        Logger = self.configuration.require.Logger;
+      var self = this;

       connectToDb('mongodb://localhost:27017/test', self.configuration.db, function(
         err,
@@ -143,8 +141,7 @@ describe('Logger', function() {

     // The actual test we wish to run
     test: function(done) {
-      var self = this,
-        Logger = self.configuration.require.Logger;
+      var self = this;

       Logger.filter('class', ['Cursor']);
       var logged = false;
diff --git a/test/functional/mapreduce.test.js b/test/functional/mapreduce.test.js
index 402dc088aff..662c39a6739 100644
--- a/test/functional/mapreduce.test.js
+++ b/test/functional/mapreduce.test.js
@@ -1,6 +1,7 @@
 'use strict';
 var test = require('./shared').assert;
 var setupDatabase = require('./shared').setupDatabase;
+const { Code } = require('../..');

 describe('MapReduce', function() {
   before(function() {
@@ -218,8 +219,6 @@ describe('MapReduce', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Code = configuration.require.Code;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -264,8 +263,6 @@ describe('MapReduce', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Code = configuration.require.Code;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
@@ -316,8 +313,6 @@ describe('MapReduce', function() {
     // The actual test we wish to run
     test: function(done) {
       var configuration = this.configuration;
-      var Code = configuration.require.Code;
-
       var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
       client.connect(function(err, client) {
         var db = client.db(configuration.db);
diff --git a/test/functional/max_staleness.test.js b/test/functional/max_staleness.test.js
index 310ddd99c33..6e52e214c65 100644
--- a/test/functional/max_staleness.test.js
+++ b/test/functional/max_staleness.test.js
@@ -2,6 +2,7 @@
 const Long = require('bson').Long,
   expect = require('chai').expect,
   mock = require('mongodb-mock-server');
+const { ReadPreference } = require('../..');

 const test = {};
 describe('Max Staleness', function() {
@@ -84,10 +85,7 @@ describe('Max Staleness', function() {
     },

     test: function(done) {
-      var self = this;
       const configuration = this.configuration;
-      const ReadPreference = self.configuration.require.ReadPreference;
-
       const client = configuration.newClient(`mongodb://${test.server.uri()}/test`);
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -126,8 +124,6 @@ describe('Max Staleness', function() {
     test: function(done) {
       var self = this;
       const configuration = this.configuration;
-      const ReadPreference = self.configuration.require.ReadPreference;
-
       const client = configuration.newClient(`mongodb://${test.server.uri()}/test`);
       client.connect(function(err, client) {
         expect(err).to.not.exist;
@@ -163,8 +159,6 @@ describe('Max Staleness', function() {
     test: function(done) {
       var self = this;
       const configuration = this.configuration;
-      const ReadPreference = self.configuration.require.ReadPreference;
-
       const client = configuration.newClient(`mongodb://${test.server.uri()}/test`);
       client.connect(function(err, client) {
         expect(err).to.not.exist;
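`ReadPreference` likewise now comes from the package root. The max-staleness tests construct instances the same way they did before; roughly:

    const { ReadPreference } = require('../..');
    // mode, tag sets, then options such as maxStalenessSeconds:
    const pref = new ReadPreference('secondary', null, { maxStalenessSeconds: 120 });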
diff --git a/test/functional/mongo_client.test.js b/test/functional/mongo_client.test.js
index c46c81aea9d..c8fc32e7984 100644
--- a/test/functional/mongo_client.test.js
+++ b/test/functional/mongo_client.test.js
@@ -11,99 +11,6 @@ describe('MongoClient', function() {
     return setupDatabase(this.configuration);
   });

-  it('Should Correctly Do MongoClient with bufferMaxEntries:0 and ordered execution', {
-    metadata: {
-      requires: {
-        topology: ['single', 'ssl', 'wiredtiger']
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      const configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // the new topology is far more resilient in these scenarios, making very difficult
-        // to reproduce the issues tested here.
-        return this.skip();
-      }
-
-      const client = configuration.newClient({}, { bufferMaxEntries: 0, sslValidate: false });
-
-      client.connect(function(err, client) {
-        var db = client.db(configuration.db);
-        // Listener for closing event
-        var closeListener = function() {
-          // Let's insert a document
-          var collection = db.collection('test_object_id_generation.data2');
-          // Insert another test document and collect using ObjectId
-          var docs = [];
-          for (var i = 0; i < 1500; i++) docs.push({ a: i });
-
-          collection.insert(docs, configuration.writeConcern(), function(err) {
-            test.ok(err != null);
-            test.ok(err.message.indexOf('0') !== -1);
-
-            // Let's close the db
-            client.close(done);
-          });
-        };
-
-        // Add listener to close event
-        db.once('close', closeListener);
-        // Ensure death of server instance
-        client.topology.connections()[0].destroy();
-      });
-    }
-  });
-
-  it('Should Correctly Do MongoClient with bufferMaxEntries:0 and unordered execution', {
-    metadata: {
-      requires: {
-        topology: ['single', 'ssl', 'wiredtiger']
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      const configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // the new topology is far more resilient in these scenarios, making very difficult
-        // to reproduce the issues tested here.
-        return this.skip();
-      }
-
-      const client = configuration.newClient({}, { bufferMaxEntries: 0, sslValidate: false });
-
-      client.connect(function(err, client) {
-        var db = client.db(configuration.db);
-        // Listener for closing event
-        var closeListener = function() {
-          // Let's insert a document
-          var collection = db.collection('test_object_id_generation.data_3');
-          // Insert another test document and collect using ObjectId
-          var docs = [];
-          for (var i = 0; i < 1500; i++) docs.push({ a: i });
-
-          var opts = configuration.writeConcern();
-          opts.keepGoing = true;
-          // Execute insert
-          collection.insert(docs, opts, function(err) {
-            test.ok(err != null);
-            test.ok(err.message.indexOf('0') !== -1);
-
-            // Let's close the db
-            client.close(done);
-          });
-        };
-
-        // Add listener to close event
-        db.once('close', closeListener);
-        // Ensure death of server instance
-        client.topology.connections()[0].destroy();
-      });
-    }
-  });
-
   it('Should correctly pass through extra db options', {
     metadata: {
       requires: {
@@ -159,267 +66,6 @@ describe('MongoClient', function() {
     }
   });

-  it('Should correctly pass through extra server options', {
-    metadata: {
-      requires: {
-        topology: ['single']
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      const configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // skipped for direct legacy variable inspection
-        return this.skip();
-      }
-
-      const client = configuration.newClient(
-        {},
-        {
-          poolSize: 10,
-          autoReconnect: false,
-          noDelay: false,
-          keepAlive: true,
-          keepAliveInitialDelay: 100,
-          connectTimeoutMS: 444444,
-          socketTimeoutMS: 555555
-        }
-      );
-
-      client.connect(function(err, client) {
-        var db = client.db(configuration.db);
-        test.equal(10, db.s.topology.s.poolSize);
-        test.equal(false, db.s.topology.autoReconnect);
-        test.equal(444444, db.s.topology.s.clonedOptions.connectionTimeout);
-        test.equal(555555, db.s.topology.s.clonedOptions.socketTimeout);
-        test.equal(true, db.s.topology.s.clonedOptions.keepAlive);
-        test.equal(100, db.s.topology.s.clonedOptions.keepAliveInitialDelay);
-
-        client.close(done);
-      });
-    }
-  });
-
-  it.skip('Should correctly pass through extra replicaset options', {
-    metadata: {
-      requires: {
-        topology: ['replicaset']
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      // NOTE: skipped because this test is using explicit variable names not used by
-      // mongo-orchestration. This behavior should be unit tested without depending
-      // on the test harness used.
-
-      const configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // skipped for direct legacy variable inspection
-        return this.skip();
-      }
-
-      var url = configuration.url().replace('rs_name=rs', 'rs_name=rs1');
-      const client = configuration.newClient(url, {
-        replSet: {
-          ha: false,
-          haInterval: 10000,
-          replicaSet: 'rs',
-          secondaryAcceptableLatencyMS: 100,
-          connectWithNoPrimary: true,
-          poolSize: 1,
-          socketOptions: {
-            noDelay: false,
-            keepAlive: true,
-            keepAliveInitialDelay: 100,
-            connectTimeoutMS: 444444,
-            socketTimeoutMS: 555555
-          }
-        }
-      });
-
-      client.connect(function(err, client) {
-        expect(err).to.not.exist;
-        var db = client.db(configuration.db);
-
-        test.equal(false, db.s.topology.s.clonedOptions.ha);
-        test.equal(10000, db.s.topology.s.clonedOptions.haInterval);
-        test.equal('rs', db.s.topology.s.clonedOptions.setName);
-        test.equal(100, db.s.topology.s.clonedOptions.acceptableLatency);
-        test.equal(true, db.s.topology.s.clonedOptions.secondaryOnlyConnectionAllowed);
-        test.equal(1, db.s.topology.s.clonedOptions.size);
-
-        test.equal(444444, db.s.topology.s.clonedOptions.connectionTimeout);
-        test.equal(555555, db.s.topology.s.clonedOptions.socketTimeout);
-        test.equal(true, db.s.topology.s.clonedOptions.keepAlive);
-        test.equal(100, db.s.topology.s.clonedOptions.keepAliveInitialDelay);
-
-        client.close(done);
-      });
-    }
-  });
-
-  it('Should correctly pass through extra sharded options', {
-    metadata: {
-      requires: {
-        topology: ['sharded']
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      const configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // skipped for direct legacy variable inspection
-        return this.skip();
-      }
-
-      const client = configuration.newClient(
-        {},
-        {
-          ha: false,
-          haInterval: 10000,
-          acceptableLatencyMS: 100,
-          poolSize: 1,
-          socketOptions: {
-            noDelay: false,
-            keepAlive: true,
-            keepAliveInitialDelay: 100,
-            connectTimeoutMS: 444444,
-            socketTimeoutMS: 555555
-          }
-        }
-      );
-
-      client.connect(function(err, client) {
-        expect(err).to.not.exist;
-        var db = client.db(configuration.db);
-
-        test.equal(false, db.s.topology.s.clonedOptions.ha);
-        test.equal(10000, db.s.topology.s.clonedOptions.haInterval);
-        test.equal(100, db.s.topology.s.clonedOptions.localThresholdMS);
-        test.equal(1, db.s.topology.s.clonedOptions.poolSize);
-
-        test.equal(444444, db.s.topology.s.clonedOptions.connectionTimeout);
-        test.equal(555555, db.s.topology.s.clonedOptions.socketTimeout);
-        test.equal(true, db.s.topology.s.clonedOptions.keepAlive);
-        test.equal(100, db.s.topology.s.clonedOptions.keepAliveInitialDelay);
-
-        client.close(done);
-      });
-    }
-  });
-
-  it('Should correctly set MaxPoolSize on single server', {
-    metadata: {
-      requires: {
-        topology: ['single']
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      const configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // skipped for direct legacy variable inspection
-        return this.skip();
-      }
-
-      var url = configuration.url();
-      url =
-        url.indexOf('?') !== -1
-          ? f('%s&%s', url, 'maxPoolSize=100')
-          : f('%s?%s', url, 'maxPoolSize=100');
-
-      const client = configuration.newClient(url);
-      client.connect(function(err, client) {
-        test.equal(1, client.topology.connections().length);
-        test.equal(100, client.topology.s.coreTopology.s.pool.size);
-
-        client.close(done);
-      });
-    }
-  });
-
-  it('Should correctly set MaxPoolSize on replicaset server', {
-    metadata: {
-      requires: {
-        topology: ['replicaset'],
-        unifiedTopology: false
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      var configuration = this.configuration;
-      var url = configuration.url();
-      url =
-        url.indexOf('?') !== -1
-          ? f('%s&%s', url, 'maxPoolSize=100')
-          : f('%s?%s', url, 'maxPoolSize=100');
-
-      const client = configuration.newClient(url);
-      client.connect(function(err, client) {
-        test.ok(client.topology.connections().length >= 1);
-
-        var connections = client.topology.connections();
-
-        for (var i = 0; i < connections.length; i++) {
-          test.equal(10000, connections[i].connectionTimeout);
-          test.equal(360000, connections[i].socketTimeout);
-        }
-
-        client.close();
-
-        const secondClient = configuration.newClient(url, {
-          connectTimeoutMS: 15000,
-          socketTimeoutMS: 30000
-        });
-
-        secondClient.connect(function(err) {
-          test.equal(null, err);
-          test.ok(secondClient.topology.connections().length >= 1);
-
-          var connections = secondClient.topology.connections();
-
-          for (var i = 0; i < connections.length; i++) {
-            test.equal(15000, connections[i].connectionTimeout);
-            test.equal(30000, connections[i].socketTimeout);
-          }
-
-          secondClient.close(done);
-        });
-      });
-    }
-  });
-
-  it('Should correctly set MaxPoolSize on sharded server', {
-    metadata: {
-      requires: {
-        topology: ['sharded'],
-        unifiedTopology: false
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      var configuration = this.configuration;
-      var url = configuration.url();
-      url =
-        url.indexOf('?') !== -1
-          ? f('%s&%s', url, 'maxPoolSize=100')
-          : f('%s?%s', url, 'maxPoolSize=100');
-
-      const client = configuration.newClient(url);
-      client.connect(function(err, client) {
-        test.ok(client.topology.connections().length >= 1);
-
-        client.close(done);
-      });
-    }
-  });
-
   /**
    * @ignore
    */
@@ -499,76 +145,6 @@ describe('MongoClient', function() {
     }
   });

-  /**
-   * @ignore
-   */
-  it('correctly connect setting keepAlive to 100', {
-    metadata: {
-      requires: {
-        topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'],
-        unifiedTopology: false
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      var configuration = this.configuration;
-      const client = configuration.newClient(
-        {},
-        {
-          keepAlive: true,
-          keepAliveInitialDelay: 100
-        }
-      );
-
-      client.connect(function(err, client) {
-        test.equal(null, err);
-        var connection = client.topology.connections()[0];
-        test.equal(true, connection.keepAlive);
-        test.equal(100, connection.keepAliveInitialDelay);
-
-        client.close();
-
-        const secondClient = configuration.newClient({}, { keepAlive: false });
-        secondClient.connect(function(err) {
-          test.equal(null, err);
-
-          secondClient.topology.connections().forEach(function(x) {
-            test.equal(false, x.keepAlive);
-          });
-
-          secondClient.close(done);
-        });
-      });
-    }
-  });
-
-  /**
-   * @ignore
-   */
-  it('default keepAlive behavior', {
-    metadata: {
-      requires: {
-        topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'],
-        unifiedTopology: false
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      var configuration = this.configuration;
-      const client = configuration.newClient();
-      client.connect(function(err, client) {
-        test.equal(null, err);
-        client.topology.connections().forEach(function(x) {
-          test.equal(true, x.keepAlive);
-        });
-
-        client.close(done);
-      });
-    }
-  });
-
   it('should fail dure to garbage connection string', {
     metadata: {
       requires: {
@@ -590,38 +166,6 @@ describe('MongoClient', function() {
     }
   });

-  it.skip('Should fail to connect due to instances not being mongos proxies', {
-    metadata: {
-      requires: {
-        topology: ['replicaset']
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      // NOTE: skipped because this test is using explicit variable names not used by
-      // mongo-orchestration. This behavior should be unit tested without depending
-      // on the test harness used.
-
-      var configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // this is no longer relevant with the unified topology
-        return this.skip();
-      }
-
-      var url = configuration
-        .url()
-        .replace('replicaSet=rs', '')
-        .replace('localhost:31000', 'localhost:31000,localhost:31001');
-
-      const client = configuration.newClient(url);
-      client.connect(function(err) {
-        test.ok(err != null);
-        done();
-      });
-    }
-  });
-
   it('Should correctly pass through appname', {
     metadata: {
       requires: {
@@ -706,33 +250,6 @@ describe('MongoClient', function() {
-  it('Should correctly pass through socketTimeoutMS and connectTimeoutMS from uri', {
-    metadata: {
-      requires: {
-        topology: ['single']
-      }
-    },
-
-    // The actual test we wish to run
-    test: function(done) {
-      const configuration = this.configuration;
-      if (configuration.usingUnifiedTopology()) {
-        // skipped for direct legacy variable inspection
-        return this.skip();
-      }
-
-      var uri = f('%s?socketTimeoutMS=120000&connectTimeoutMS=15000', configuration.url());
-      const client = configuration.newClient(uri);
-      client.connect(function(err, client) {
-        test.equal(null, err);
-        test.equal(120000, client.topology.s.coreTopology.s.options.socketTimeout);
-        test.equal(15000, client.topology.s.coreTopology.s.options.connectionTimeout);
-
-        client.close(done);
-      });
-    }
-  });
-
   //////////////////////////////////////////////////////////////////////////////////////////
   //
   // new MongoClient connection tests
   //
@@ -790,28 +307,6 @@ describe('MongoClient', function() {
     }
   });

-  it('Should use compression from URI', {
-    metadata: { requires: { topology: ['single'], unifiedTopology: false } },
-
-    // The actual test we wish to run
-    test: function(done) {
-      const configuration = this.configuration;
-      const url = `mongodb://${configuration.host}:${configuration.port}/?compressors=zlib`;
-      const client = configuration.newClient(url, { useNewUrlParser: true });
-
-      client.connect(function(err, client) {
-        expect(err).to.not.exist;
-
-        const db = client.db('integration_tests');
-        db.collection('new_mongo_client_collection').insertOne({ a: 1 }, (err, r) => {
-          expect(err).to.not.exist;
-          expect(r.connection.options.compression).to.deep.equal({ compressors: ['zlib'] });
-          client.close(done);
-        });
-      });
-    }
-  });
-
   it('should be able to access a database named "constructor"', function() {
     const client = this.configuration.newClient();
     let err;
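The deletions above all inspect legacy-topology internals (`client.topology.connections()`, `db.s.topology.s.clonedOptions`, `topology.s.coreTopology.s.pool`) that the unified topology deliberately does not expose, which is why each test either carried a `usingUnifiedTopology()` skip or is removed outright here. Where such checks are still wanted, connection-pool monitoring events are the supported surface; a hedged sketch (event name per the driver's CMAP-style monitoring, availability depends on driver version):

    client.on('connectionPoolCreated', event => {
      // event.options carries the resolved pool settings
      console.log(event.options.maxPoolSize);
    });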
test.ok(client.topology.poolSize >= 1); - test.equal(4, client.topology.s.coreTopology.s.pool.size); - test.equal(true, client.topology.autoReconnect); - client.close(done); - }) - ); - } - }); - - /** - * @ignore - */ - it('pass in server and db top level options', { - metadata: { requires: { topology: 'single' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // skipped for direct legacy variable inspection - return this.skip(); - } - - const client = configuration.newClient(configuration.url(), { - autoReconnect: true, - poolSize: 4 - }); - - client.connect( - connectionTester(configuration, 'testConnectServerOptions', function(client) { - test.ok(client.topology.poolSize >= 1); - test.equal(4, client.topology.s.coreTopology.s.pool.size); - test.equal(true, client.topology.autoReconnect); - client.close(done); - }) - ); - } - }); - /** * @ignore */ @@ -77,8 +18,6 @@ describe('MongoClient Options', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var connect = configuration.require; - connect( configuration.url(), { @@ -95,27 +34,4 @@ describe('MongoClient Options', function() { ); } }); - - /** - * @ignore - */ - function connectionTester(configuration, testName, callback) { - return function(err, client) { - test.equal(err, null); - var db = client.db(configuration.db); - - db.collection(testName, function(err, collection) { - test.equal(err, null); - - collection.insert({ foo: 123 }, { w: 1 }, function(err) { - test.equal(err, null); - db.dropDatabase(function(err, dropped) { - test.equal(err, null); - test.ok(dropped); - if (callback) return callback(client); - }); - }); - }); - }; - } }); diff --git a/test/functional/mongodb_aws.test.js b/test/functional/mongodb_aws.test.js new file mode 100644 index 00000000000..ce0b025a865 --- /dev/null +++ b/test/functional/mongodb_aws.test.js @@ -0,0 +1,43 @@ +'use strict'; +const expect = require('chai').expect; + +describe('MONGODB-AWS', function() { + before(function() { + const MONGODB_URI = process.env.MONGODB_URI; + if (!MONGODB_URI || MONGODB_URI.indexOf('MONGODB-AWS') === -1) { + this.skip(); + } + }); + + it('should not authorize when not authenticated', function(done) { + const config = this.configuration; + const client = config.newClient(config.url()); // strip provided URI of credentials + client.connect(err => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + client.db('aws').command({ count: 'test' }, (err, count) => { + expect(err).to.exist; + expect(count).to.not.exist; + + done(); + }); + }); + }); + + it('should authorize when successfully authenticated', function(done) { + const config = this.configuration; + const client = config.newClient(); // use the URI built by the test environment + client.connect(err => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + client.db('aws').command({ count: 'test' }, (err, count) => { + expect(err).to.not.exist; + expect(count).to.exist; + + done(); + }); + }); + }); +}); diff --git a/test/functional/mongodb_srv.test.js b/test/functional/mongodb_srv.test.js deleted file mode 100644 index 2b9310818a7..00000000000 --- a/test/functional/mongodb_srv.test.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict'; - -var fs = require('fs'); -var path = require('path'); - -var parse = require('../../lib/url_parser'); -var expect = require('chai').expect; - -function getTests() { - return fs - 
.readdirSync(path.resolve(__dirname, '../spec/dns-txt-records')) - .filter(x => x.indexOf('json') !== -1) - .map(x => [x, fs.readFileSync(path.resolve(__dirname, '../spec/dns-txt-records', x), 'utf8')]) - .map(x => [path.basename(x[0], '.json'), JSON.parse(x[1])]); -} - -describe('mongodb+srv (spec)', function() { - it('should parse a default database', function(done) { - parse('mongodb+srv://test5.test.build.10gen.cc/somedb', (err, result) => { - expect(err).to.not.exist; - expect(result.dbName).to.eql('somedb'); - done(); - }); - }); - - getTests().forEach(function(test) { - if (!test[1].comment) test[1].comment = test[0]; - - it(test[1].comment, { - metadata: { - requires: { topology: ['single'] } - }, - test: function(done) { - parse(test[1].uri, function(err, object) { - if (test[1].error) { - expect(err).to.exist; - expect(object).to.not.exist; - } else { - expect(err).to.be.null; - expect(object).to.exist; - if (test[1].options && test[1].options.replicaSet) { - expect(object.rs_options.rs_name).to.equal(test[1].options.replicaSet); - } - if (test[1].options && test[1].options.ssl) { - expect(object.server_options.ssl).to.equal(test[1].options.ssl); - } - if ( - test[1].parsed_options && - test[1].parsed_options.user && - test[1].parsed_options.password - ) { - expect(object.auth.user).to.equal(test[1].parsed_options.user); - expect(object.auth.password).to.equal(test[1].parsed_options.password); - } - } - done(); - }); - } - }); - }); -}); diff --git a/test/functional/object_id.test.js b/test/functional/object_id.test.js index 93b6aaa9ecd..5d5557e8868 100644 --- a/test/functional/object_id.test.js +++ b/test/functional/object_id.test.js @@ -1,6 +1,7 @@ 'use strict'; var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; +const { ObjectID } = require('../..'); describe('ObjectID', function() { before(function() { @@ -18,7 +19,6 @@ describe('ObjectID', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -82,8 +82,6 @@ describe('ObjectID', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // Create a new ObjectID var objectId = new ObjectID(); // Verify that the hex string is 24 characters long @@ -102,8 +100,6 @@ describe('ObjectID', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // Create a new ObjectID var objectId = new ObjectID(); // Verify that the hex string is 24 characters long @@ -158,9 +154,6 @@ describe('ObjectID', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var timestamp = Math.floor(new Date().getTime() / 1000); var objectID = new ObjectID(timestamp); var time2 = objectID.generationTime; @@ -179,9 +172,6 @@ describe('ObjectID', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; - var timestamp = 1000; var objectID = new ObjectID(); var id1 = objectID.id; diff --git a/test/functional/operation_example.test.js 
b/test/functional/operation_example.test.js index 45b89d3b5b2..df1636f0544 100644 --- a/test/functional/operation_example.test.js +++ b/test/functional/operation_example.test.js @@ -1,8 +1,10 @@ 'use strict'; -const test = require('./shared').assert; -const setupDatabase = require('./shared').setupDatabase; -const f = require('util').format; -const Buffer = require('safe-buffer').Buffer; +const { assert: test } = require('./shared'); +const { setupDatabase } = require('./shared'); +const { format: f } = require('util'); +const { Buffer } = require('safe-buffer'); +const { Topology } = require('../../lib/sdam/topology'); +const { Code, ObjectID, GridStore } = require('../..'); const chai = require('chai'); const expect = chai.expect; @@ -1975,7 +1977,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Code = configuration.require.Code; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -2075,7 +2076,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Code = configuration.require.Code; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -3599,68 +3599,6 @@ describe('Operation Examples', function() { } }); - /** - * An example that shows how to force close a db connection so it cannot be reused. - * - * @example-class Db - * @example-method close - * @ignore - */ - it('shouldCorrectlyFailOnRetryDueToAppCloseOfDb', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // The new topology type has loose concepts of 'closing' and 'opening' a client. It will - // simply attempt here to retry the connection and reconnect, so this is a bad test for - // the driver in that configuration. 
- - return this.skip(); - } - - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); - client.connect(function(err, client) { - // LINE var MongoClient = require('mongodb').MongoClient, - // LINE test = require('assert'); - // LINE const client = new MongoClient('mongodb://localhost:27017/test'); - // LINE client.connect(function(err, client) { - // LINE var db = client.db('test); - // REPLACE configuration.writeConcernMax() WITH {w:1} - // REMOVE-LINE restartAndDone - // REMOVE-LINE done(); - // REMOVE-LINE var db = client.db(configuration.db); - // BEGIN - var db = client.db(configuration.db); - // Fetch a collection - var collection = db.collection('shouldCorrectlyFailOnRetryDueToAppCloseOfDb'); - - // Insert a document - collection.insertOne({ a: 1 }, configuration.writeConcernMax(), function(err, result) { - test.ok(result); - test.equal(null, err); - - // Force close the connection - client.close(true, function(err) { - test.equal(null, err); - - // Attemp to insert should fail now with correct message 'db closed by application' - collection.insertOne({ a: 2 }, configuration.writeConcernMax(), function(err, result) { - expect(err).to.exist; - expect(result).to.not.exist; - - client.close(done); - }); - }); - }); - }); - // END - } - }); - /** * An example of a simple single server db connection * @@ -4663,23 +4601,19 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ReplSet = configuration.require.ReplSet, - MongoClient = configuration.require.MongoClient, - Server = configuration.require.Server; // Replica configuration - var replSet = new ReplSet( + var client = new Topology( [ - new Server(configuration.host, configuration.port), - new Server(configuration.host, configuration.port + 1), - new Server(configuration.host, configuration.port + 2) + { host: configuration.host, port: configuration.port }, + { host: configuration.host, port: configuration.port + 1 }, + { host: configuration.host, port: configuration.port + 2 } ], - { rs_name: configuration.replicasetName } + { replicaSet: configuration.replicasetName } ); - var client = new MongoClient(replSet, { w: 0 }); client.connect(function(err, client) { - test.equal(null, err); + expect(err).to.not.exist; // LINE var MongoClient = require('mongodb').MongoClient, // LINE test = require('assert'); // LINE const client = new MongoClient('mongodb://localhost:27017/test'); @@ -6468,8 +6402,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // LINE var ObjectID = require('mongodb').ObjectID, // LINE test = require('assert'); // REPLACE configuration.writeConcernMax() WITH {w:1} @@ -6504,8 +6436,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // LINE var ObjectID = require('mongodb').ObjectID, // LINE test = require('assert'); // REPLACE configuration.writeConcernMax() WITH {w:1} @@ -6534,8 +6464,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // LINE var ObjectID = require('mongodb').ObjectID, // LINE test = require('assert'); // REPLACE configuration.writeConcernMax() 
WITH {w:1} @@ -6573,8 +6501,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // LINE var ObjectID = require('mongodb').ObjectID, // LINE test = require('assert'); // REPLACE configuration.writeConcernMax() WITH {w:1} @@ -6609,8 +6535,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // LINE var ObjectID = require('mongodb').ObjectID, // LINE test = require('assert'); // REPLACE configuration.writeConcernMax() WITH {w:1} @@ -6645,8 +6569,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var ObjectID = configuration.require.ObjectID; // LINE var ObjectID = require('mongodb').ObjectID, // LINE test = require('assert'); // REPLACE configuration.writeConcernMax() WITH {w:1} @@ -6680,9 +6602,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -6751,9 +6670,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -6888,8 +6804,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -6940,8 +6854,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7028,8 +6940,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7097,8 +7007,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = 
require('mongodb').MongoClient, @@ -7156,9 +7064,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7219,9 +7124,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7282,9 +7184,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7352,9 +7251,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7415,9 +7311,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7470,9 +7363,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7522,9 +7412,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7592,9 +7479,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var 
MongoClient = require('mongodb').MongoClient, @@ -7644,9 +7528,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7710,9 +7591,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7780,8 +7658,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7835,8 +7711,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7908,8 +7782,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -7961,7 +7833,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8032,7 +7903,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8154,9 +8024,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8232,8 +8099,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var 
GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8285,8 +8150,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8349,8 +8212,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8404,9 +8265,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8484,9 +8342,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8556,9 +8411,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, @@ -8644,9 +8496,6 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { // LINE var MongoClient = require('mongodb').MongoClient, diff --git a/test/functional/operation_generators_example.test.js b/test/functional/operation_generators_example.test.js index 2b4fbf8aae5..315a4ba5c35 100644 --- a/test/functional/operation_generators_example.test.js +++ b/test/functional/operation_generators_example.test.js @@ -2,6 +2,7 @@ var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; var Buffer = require('safe-buffer').Buffer; +const { Code, GridStore, ObjectID } = require('../..'); /************************************************************************** * @@ -1294,8 +1295,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var 
co = require('co'), - Code = configuration.require.Code; + var co = require('co'); return co(function*() { // Connect @@ -1388,8 +1388,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - Code = configuration.require.Code; + var co = require('co'); return co(function*() { // Connect @@ -2791,68 +2790,6 @@ describe('Operation (Generators)', function() { * *************************************************************************/ - /** - * An example that shows how to force close a db connection so it cannot be reused using a Generator and the co module.. - * - * @example-class Db - * @example-method close - * @ignore - */ - it('shouldCorrectlyFailOnRetryDueToAppCloseOfDbWithGenerators', { - metadata: { requires: { generators: true, topology: ['single'] } }, - - // The actual test we wish to run - test: function() { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // The new topology type has loose concepts of 'closing' and 'opening' a client. It will - // simply attempt here to retry the connection and reconnect, so this is a bad test for - // the driver in that configuration. - - return this.skip(); - } - - var co = require('co'); - return co(function*() { - // Connect - var client = yield configuration - .newClient(configuration.writeConcernMax(), { poolSize: 1 }) - .connect(); - var db = client.db(configuration.db); - // LINE var MongoClient = require('mongodb').MongoClient, - // LINE co = require('co'); - // LINE test = require('assert'); - // LINE - // LINE co(function*() { - // LINE const client = new MongoClient('mongodb://localhost:27017/test'); - // LINE yield client.connect(); - // LINE - // LINE var db = client.db('test'); - // REPLACE configuration.writeConcernMax() WITH {w:1} - // BEGIN - - // Fetch a collection - var collection = db.collection( - 'shouldCorrectlyFailOnRetryDueToAppCloseOfDb_with_generators' - ); - - // Insert a document - yield collection.insertOne({ a: 1 }, configuration.writeConcernMax()); - - // Force close the connection - yield client.close(true); - - try { - // Attemp to insert should fail now with correct message 'db closed by application' - yield collection.insertOne({ a: 2 }, configuration.writeConcernMax()); - } catch (err) { - yield client.close(); - } - }); - // END - } - }); - /** * An example of retrieving the collections list for a database using a Generator and the co module. 
* @@ -4468,9 +4405,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -4530,9 +4465,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -4648,8 +4581,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore; + var co = require('co'); return co(function*() { // Connect @@ -4701,8 +4633,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore; + var co = require('co'); return co(function*() { // Connect @@ -4776,8 +4707,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore; + var co = require('co'); return co(function*() { // Connect @@ -4837,8 +4767,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore; + var co = require('co'); return co(function*() { // Connect @@ -4892,9 +4821,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -4951,9 +4878,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -5013,9 +4938,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -5082,9 +5005,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -5144,9 +5065,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); 
return co(function*() { // Connect @@ -5199,9 +5118,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -5264,9 +5181,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -5329,9 +5244,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -5398,8 +5311,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore; + var co = require('co'); return co(function*() { // Connect @@ -5452,8 +5364,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore; + var co = require('co'); return co(function*() { // Connect @@ -5556,9 +5467,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect @@ -5627,8 +5536,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore; + var co = require('co'); return co(function*() { // Connect @@ -5687,8 +5595,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore; + var co = require('co'); return co(function*() { // Connect @@ -5741,9 +5648,7 @@ describe('Operation (Generators)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var co = require('co'), - GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; + var co = require('co'); return co(function*() { // Connect diff --git a/test/functional/operation_promises_example.test.js b/test/functional/operation_promises_example.test.js index a3b8a7625fe..5099ad3ed75 100644 --- a/test/functional/operation_promises_example.test.js +++ b/test/functional/operation_promises_example.test.js @@ -4,6 +4,7 @@ var f = require('util').format; var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; var Buffer = require('safe-buffer').Buffer; +const { Code, GridStore, ObjectID } = require('../..'); var delay = function(ms) { return new Promise(function(resolve) { @@ -1325,7 +1326,6 @@ describe('Operation 
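// [review note] Every hunk in this run is the same mechanical change: the per-test
// `configuration.require.X` lookups are dropped in favour of the single top-of-file
// import this PR adds to each file, i.e.
//
//   const { Code, GridStore, ObjectID } = require('../..');
//
// after which tests reference the constructors directly (e.g. new ObjectID()).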
(Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var Code = configuration.require.Code; var client = configuration.newClient({ w: 0 }, { poolSize: 1 }); return client.connect().then(function(client) { @@ -1421,7 +1421,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var Code = configuration.require.Code; var client = configuration.newClient({ w: 0 }, { poolSize: 1 }); return client.connect().then(function(client) { @@ -2871,67 +2870,6 @@ describe('Operation (Promises)', function() { * *************************************************************************/ - /** - * An example that shows how to force close a db connection so it cannot be reused using a Promise.. - * - * @example-class Db - * @example-method close - * @ignore - */ - it('shouldCorrectlyFailOnRetryDueToAppCloseOfDbWithPromises', { - metadata: { requires: { topology: ['single'] } }, - - // The actual test we wish to run - test: function() { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // The new topology type has loose concepts of 'closing' and 'opening' a client. It will - // simply attempt here to retry the connection and reconnect, so this is a bad test for - // the driver in that configuration. - - return this.skip(); - } - - var client = configuration.newClient(configuration.writeConcernMax(), { - poolSize: 1, - auto_reconnect: false - }); - - return client.connect().then(function(client) { - var db = client.db(configuration.db); - // LINE var MongoClient = require('mongodb').MongoClient, - // LINE test = require('assert'); - // LINE const client = new MongoClient('mongodb://localhost:27017/test'); - // LINE client.connect().then(() => { - // LINE var db = client.db('test); - // REPLACE configuration.writeConcernMax() WITH {w:1} - // REMOVE-LINE done(); - // BEGIN - - // Fetch a collection - var collection = db.collection('shouldCorrectlyFailOnRetryDueToAppCloseOfDb_with_promise'); - - // Insert a document - return collection - .insertOne({ a: 1 }, configuration.writeConcernMax()) - .then(function(result) { - test.ok(result); - // Force close the connection - return client.close(true); - }) - .then(function() { - // Attemp to insert should fail now with correct message 'db closed by application' - return collection.insertOne({ a: 2 }, configuration.writeConcernMax()); - }) - .catch(function(err) { - test.ok(err); - return client.close(); - }); - }); - // END - } - }); - /** * An example of a simple single server db connection and close function using a Promise. * @@ -3834,50 +3772,6 @@ describe('Operation (Promises)', function() { } }); - /** - * Simple replicaset connection setup, requires a running replicaset on the correct ports using a Promise. 
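// [review note] This promise-flavoured example is removed outright rather than ported.
// For reference, the callback variant earlier in this diff swaps the legacy
// ReplSet/Server pair for the SDAM Topology; a port along the same lines (a sketch
// only; `configuration` is the mocha test context, as in the surrounding tests)
// might look like:
const { Topology } = require('../../lib/sdam/topology');

const client = new Topology(
  [
    { host: configuration.host, port: configuration.port },
    { host: configuration.host, port: configuration.port + 1 },
    { host: configuration.host, port: configuration.port + 2 }
  ],
  { replicaSet: configuration.replicasetName }
);

client.connect(function(err) {
  if (err) throw err;
  client.close();
});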
- * - * @example-class Db - * @example-method open - * @ignore - */ - it('Should correctly connect with default replicasetNoOption With Promises', { - metadata: { requires: { topology: 'replicaset' } }, - - // The actual test we wish to run - test: function() { - var configuration = this.configuration; - var ReplSet = configuration.require.ReplSet, - MongoClient = configuration.require.MongoClient, - Server = configuration.require.Server; - - // Replica configuration - var replSet = new ReplSet( - [ - new Server(configuration.host, configuration.port), - new Server(configuration.host, configuration.port + 1), - new Server(configuration.host, configuration.port + 2) - ], - { rs_name: configuration.replicasetName } - ); - - var client = new MongoClient(replSet, { w: 0 }); - return client.connect().then(function() { - // LINE var MongoClient = require('mongodb').MongoClient, - // LINE test = require('assert'); - // LINE const client = new MongoClient('mongodb://localhost:27017/test'); - // LINE client.connect().then(() => { - // LINE var db = client.db('test); - // REPLACE configuration.writeConcernMax() WITH {w:1} - // REMOVE-LINE restartAndDone - // REMOVE-LINE done(); - // BEGIN - return client.close(); - }); - // END - } - }); - /************************************************************************** * * ADMIN TESTS @@ -4895,8 +4789,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { @@ -4962,8 +4854,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { @@ -5094,8 +4984,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5148,8 +5036,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5236,8 +5122,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5306,8 +5190,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = 
configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5362,8 +5244,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5421,8 +5301,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5482,8 +5360,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5550,8 +5426,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5613,8 +5487,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5667,8 +5539,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5737,8 +5607,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5804,8 +5672,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return 
client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5877,8 +5743,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -5934,8 +5798,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect().then(function(client) { var db = client.db(configuration.db); @@ -6059,9 +5921,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -6141,8 +6000,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -6209,7 +6066,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { var db = client.db(configuration.db); @@ -6268,8 +6124,6 @@ describe('Operation (Promises)', function() { // The actual test we wish to run test: function() { var configuration = this.configuration; - var GridStore = configuration.require.GridStore, - ObjectID = configuration.require.ObjectID; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); return client.connect().then(function(client) { @@ -7126,7 +6980,7 @@ describe('Operation (Promises)', function() { return client.withSession(session => { function commit() { return session.commitTransaction().catch(e => { - if (e.errorLabels && e.errorLabels.indexOf('UnknownTransactionCommitResult') < 0) { + if (e.hasErrorLabel('UnknownTransactionCommitResult')) { // LINE console.log('Transaction aborted. Caught exception during transaction.'); return commit(); } @@ -7184,7 +7038,7 @@ describe('Operation (Promises)', function() { // LINE console.log('Transaction aborted. 
Caught exception during transaction.'); // If transient error, retry the whole transaction - if (error.errorLabels && error.errorLabels.indexOf('TransientTransactionError') < 0) { + if (error.hasErrorLabel('TransientTransactionError')) { // LINE console.log('TransientTransactionError, retrying transaction ...'); return runTransactionWithRetry(txnFunc, client, session); } @@ -7246,10 +7100,7 @@ describe('Operation (Promises)', function() { .commitTransaction() // LINE .then(() => console.log('Transaction committed.')) .catch(error => { - if ( - error.errorLabels && - error.errorLabels.indexOf('UnknownTransactionCommitResult') < 0 - ) { + if (error.hasErrorLabel('UnknownTransactionCommitResult')) { // LINE console.log('UnknownTransactionCommitResult, retrying commit operation ...'); return commitWithRetry(session); } @@ -7310,10 +7161,7 @@ describe('Operation (Promises)', function() { .commitTransaction() // LINE .then(() => console.log('Transaction committed.')) .catch(error => { - if ( - error.errorLabels && - error.errorLabels.indexOf('UnknownTransactionCommitResult') < 0 - ) { + if (error.hasErrorLabel('UnknownTransactionCommitResult')) { // LINE console.log('UnknownTransactionCommitResult, retrying commit operation ...'); return commitWithRetry(session); } @@ -7328,7 +7176,7 @@ describe('Operation (Promises)', function() { // LINE console.log('Transaction aborted. Caught exception during transaction.'); // If transient error, retry the whole transaction - if (error.errorLabels && error.errorLabels.indexOf('TransientTransactionError') < 0) { + if (error.hasErrorLabel('TransientTransactionError')) { // LINE console.log('TransientTransactionError, retrying transaction ...'); return runTransactionWithRetry(txnFunc, client, session); } diff --git a/test/functional/promote_values.test.js b/test/functional/promote_values.test.js index e0780f685b1..3d626f186c4 100644 --- a/test/functional/promote_values.test.js +++ b/test/functional/promote_values.test.js @@ -1,6 +1,7 @@ 'use strict'; var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; +const { Long, Int32, Double } = require('../..'); describe('Promote Values', function() { before(function() { @@ -17,10 +18,6 @@ describe('Promote Values', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Long = configuration.require.Long, - Int32 = configuration.require.Int32, - Double = configuration.require.Double; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1, promoteValues: false @@ -64,10 +61,6 @@ describe('Promote Values', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Long = configuration.require.Long, - Int32 = configuration.require.Int32, - Double = configuration.require.Double; - const client = configuration.newClient({}, { promoteValues: false }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -106,10 +99,6 @@ describe('Promote Values', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Long = configuration.require.Long, - Int32 = configuration.require.Int32, - Double = configuration.require.Double; - const client = configuration.newClient({}, { promoteValues: false }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -150,10 +139,6 @@ describe('Promote Values', function() { // The actual test we wish to run 
test: function(done) { var configuration = this.configuration; - var Long = configuration.require.Long, - Int32 = configuration.require.Int32, - Double = configuration.require.Double; - const client = configuration.newClient(); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -194,10 +179,6 @@ describe('Promote Values', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Long = configuration.require.Long, - Int32 = configuration.require.Int32, - Double = configuration.require.Double; - const client = configuration.newClient(); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -238,8 +219,6 @@ describe('Promote Values', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var Long = configuration.require.Long; - const client = configuration.newClient(); client.connect(function(err, client) { var docs = new Array(150).fill(0).map(function(_, i) { diff --git a/test/functional/raw.test.js b/test/functional/raw.test.js index 2cc41040255..91db679f0f8 100644 --- a/test/functional/raw.test.js +++ b/test/functional/raw.test.js @@ -1,10 +1,9 @@ 'use strict'; -var test = require('./shared').assert; -var setupDatabase = require('./shared').setupDatabase; -var Buffer = require('buffer').Buffer; +const { assert: test, setupDatabase } = require('./shared'); +const { Buffer } = require('buffer'); -var BSON = require('../../lib/core').BSON; -var bson = new BSON([ +const BSON = require('../../lib/utils').retrieveBSON(); +const bson = new BSON([ BSON.Binary, BSON.Code, BSON.DBRef, diff --git a/test/functional/readconcern.test.js b/test/functional/readconcern.test.js index 75e38898447..65f202b75da 100644 --- a/test/functional/readconcern.test.js +++ b/test/functional/readconcern.test.js @@ -447,13 +447,13 @@ describe('ReadConcern', function() { const reduce = 'function(k,vals) { return 1; }'; // Listen to apm events - client.on('commandStarted', filterForCommands('mapreduce', started)); - client.on('commandSucceeded', filterForCommands('mapreduce', succeeded)); + client.on('commandStarted', filterForCommands('mapReduce', started)); + client.on('commandSucceeded', filterForCommands('mapReduce', succeeded)); // Execute mapReduce collection.mapReduce(map, reduce, { out: { replace: 'tempCollection' } }, err => { expect(err).to.not.exist; - validateTestResults(started, succeeded, 'mapreduce'); + validateTestResults(started, succeeded, 'mapReduce'); done(); }); } diff --git a/test/functional/readpreference.test.js b/test/functional/readpreference.test.js index 30a4b389ff2..024bc89a981 100644 --- a/test/functional/readpreference.test.js +++ b/test/functional/readpreference.test.js @@ -2,6 +2,7 @@ var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; const expect = require('chai').expect; +const { ReadPreference } = require('../..'); describe('ReadPreference', function() { before(function() { @@ -17,9 +18,6 @@ describe('ReadPreference', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -60,9 +58,6 @@ describe('ReadPreference', function() { // The actual test we wish to run test: function(done) { 
var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -106,9 +101,6 @@ describe('ReadPreference', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -153,9 +145,6 @@ describe('ReadPreference', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -209,9 +198,6 @@ describe('ReadPreference', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -256,15 +242,12 @@ describe('ReadPreference', function() { /** * @ignore */ - it('Should fail due to not using mapreduce inline with read preference', { + it('Should fail due to not using mapReduce inline with read preference', { metadata: { requires: { mongodb: '>=2.6.0', topology: ['single', 'ssl'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -301,9 +284,6 @@ describe('ReadPreference', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -364,9 +344,6 @@ describe('ReadPreference', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -406,9 +383,6 @@ describe('ReadPreference', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - ReadPreference = mongo.ReadPreference; - var client = configuration.newClient({ w: 1, readPreference: 'secondary' }, { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); @@ -521,14 +495,13 @@ describe('ReadPreference', function() { test: function(done) { var configuration = this.configuration; - var mongo = configuration.require, - SecondaryPreferred = 
mongo.ReadPreference.SECONDARY_PREFERRED; - var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); - var cursor = db.collection('test', { readPreference: SecondaryPreferred }).listIndexes(); + var cursor = db + .collection('test', { readPreference: ReadPreference.SECONDARY_PREFERRED }) + .listIndexes(); test.equal(cursor.options.readPreference.mode, 'secondaryPreferred'); client.close(done); }); diff --git a/test/functional/replicaset_mock.test.js b/test/functional/replicaset_mock.test.js index ca063a55cb0..65b3c95dc9c 100644 --- a/test/functional/replicaset_mock.test.js +++ b/test/functional/replicaset_mock.test.js @@ -2,6 +2,7 @@ var expect = require('chai').expect, mock = require('mongodb-mock-server'), ObjectId = require('bson').ObjectId; +const Logger = require('../../lib/logger'); const test = {}; describe('ReplSet (mocks)', function() { @@ -62,7 +63,6 @@ describe('ReplSet (mocks)', function() { test: function(done) { var configuration = this.configuration; - var Logger = configuration.require.Logger; var logger = Logger.currentLogger(); Logger.setLevel('warn'); Logger.setCurrentLogger(function(msg, state) { @@ -96,7 +96,6 @@ describe('ReplSet (mocks)', function() { test: function(done) { var configuration = this.configuration; - var Logger = configuration.require.Logger; var warnings = []; var logger = Logger.currentLogger(); Logger.setLevel('warn'); @@ -139,33 +138,4 @@ describe('ReplSet (mocks)', function() { }); } }); - - it('Should correctly set socketTimeoutMS and connectTimeoutMS for mongos', { - metadata: { - requires: { - generators: true, - topology: 'single' - } - }, - - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // skipped for direct legacy variable inspection - return this.skip(); - } - - const client = configuration.newClient( - `mongodb://${test.mongos1.uri()},${test.mongos2.uri()}/test?socketTimeoutMS=120000&connectTimeoutMS=15000` - ); - - client.connect(function(err, client) { - expect(err).to.not.exist; - expect(client.topology.s.coreTopology.s.options.connectionTimeout).to.equal(15000); - expect(client.topology.s.coreTopology.s.options.socketTimeout).to.equal(120000); - - client.close(done); - }); - } - }); }); diff --git a/test/functional/retryable_writes.test.js b/test/functional/retryable_writes.test.js index 8ef5f0fe20a..e7355ff5436 100644 --- a/test/functional/retryable_writes.test.js +++ b/test/functional/retryable_writes.test.js @@ -2,49 +2,53 @@ const expect = require('chai').expect; const loadSpecTests = require('../spec').loadSpecTests; +const parseRunOn = require('../functional/spec-runner').parseRunOn; describe('Retryable Writes', function() { let ctx = {}; loadSpecTests('retryable-writes').forEach(suite => { - const topology = ['replicaset']; - const mongodb = `>=${suite.minServerVersion}`; - - describe(suite.name, function() { - suite.tests.forEach(test => { - it(test.description, { - metadata: { requires: { topology, mongodb } }, - test: function() { - // Step 1: Test Setup. 
Includes a lot of boilerplate stuff - // like creating a client, dropping and refilling data collections, - // and enabling failpoints - return executeScenarioSetup(suite, test, this.configuration, ctx).then(() => - // Step 2: Run the test - executeScenarioTest(test, ctx) - ); - } - }); + const environmentRequirementList = parseRunOn(suite.runOn); + environmentRequirementList.forEach(requires => { + const suiteName = `${suite.name} - ${requires.topology.join()}`; + + describe(suiteName, { + metadata: { requires }, + test: function() { + // Step 3: Test Teardown. Turn off failpoints, and close client + afterEach(function() { + if (!ctx.db || !ctx.client) { + return; + } + + return Promise.resolve() + .then(() => (ctx.failPointName ? turnOffFailPoint(ctx.db, ctx.failPointName) : {})) + .then(() => ctx.client.close()) + .then(() => (ctx = {})); + }); + + suite.tests.forEach(test => { + it(test.description, function() { + // Step 1: Test Setup. Includes a lot of boilerplate stuff + // like creating a client, dropping and refilling data collections, + // and enabling failpoints + return executeScenarioSetup(suite, test, this.configuration, ctx).then(() => + // Step 2: Run the test + executeScenarioTest(test, ctx) + ); + }); + }); + } }); }); }); - - // Step 3: Test Teardown. Turn off failpoints, and close client - afterEach(function() { - if (!ctx.db || !ctx.client) { - return; - } - - return Promise.resolve() - .then(() => (ctx.failPointName ? turnOffFailPoint(ctx.db, ctx.failPointName) : {})) - .then(() => ctx.client.close()) - .then(() => (ctx = {})); - }); }); function executeScenarioSetup(scenario, test, config, ctx) { const url = config.url(); const options = Object.assign({}, test.clientOptions, { haInterval: 100, + minHeartbeatFrequencyMS: 100, monitorCommands: true, minSize: 10 }); @@ -68,7 +72,11 @@ function executeScenarioSetup(scenario, test, config, ctx) { throw err; } }) - .then(() => (scenario.data ? ctx.collection.insertMany(scenario.data) : {})) + .then(() => + Array.isArray(scenario.data) && scenario.data.length + ? ctx.collection.insertMany(scenario.data) + : {} + ) .then(() => (test.failPoint ? ctx.db.executeDbAdminCommand(test.failPoint) : {})); }
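The restructured retryable-writes runner above keys everything off `parseRunOn`: a suite's optional `runOn` array is expanded into one environment-requirement object per entry, and each object becomes the mocha `metadata: { requires }` of a derived describe block (named with `requires.topology.join()`). The real normalization lives in test/functional/spec-runner/index.js; the sketch below is a minimal, hypothetical reimplementation, not code from this patch, assuming each requirement object carries a `topology` array plus an optional semver-style `mongodb` range.

    'use strict';

    // Hypothetical sketch of the expansion `parseRunOn` performs.
    // An omitted or empty runOn means "no particular requirements".
    function parseRunOn(runOn) {
      const environments = Array.isArray(runOn) && runOn.length ? runOn : [{}];
      return environments.map(env => {
        // topology defaults to all supported topologies when unspecified
        const topology = env.topology || ['single', 'replicaset', 'sharded'];

        // translate the inclusive version bounds into a semver-style range
        const bounds = [];
        if (env.minServerVersion) bounds.push(`>=${env.minServerVersion}`);
        if (env.maxServerVersion) bounds.push(`<=${env.maxServerVersion}`);

        const requires = { topology };
        if (bounds.length) requires.mongodb = bounds.join(' ');
        return requires;
      });
    }

    // e.g. parseRunOn([{ minServerVersion: '3.6', topology: ['replicaset'] }])
    // yields [{ topology: ['replicaset'], mongodb: '>=3.6' }]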
diff --git a/test/functional/scram.test.js b/test/functional/scram.test.js index 5232ef53af1..0db42561daf 100644 --- a/test/functional/scram.test.js +++ b/test/functional/scram.test.js @@ -1,6 +1,7 @@ 'use strict'; var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; +const { MongoClient } = require('../..'); describe('SCRAM', function() { before(function() { @@ -16,9 +17,6 @@ describe('SCRAM', function() { // The actual test we wish to run test: function(done) { - var configuration = this.configuration; - var MongoClient = configuration.require.MongoClient; - // User and password var user = 'test'; var password = 'test'; diff --git a/test/functional/scram_sha_256.test.js b/test/functional/scram_sha_256.test.js index 31924228876..1e49dd8e582 100644 --- a/test/functional/scram_sha_256.test.js +++ b/test/functional/scram_sha_256.test.js @@ -1,10 +1,9 @@ 'use strict'; -const expect = require('chai').expect; const sinon = require('sinon'); -const ScramSHA256 = require('../../lib/core').ScramSHA256; -const setupDatabase = require('./shared').setupDatabase; -const withClient = require('./shared').withClient; +const { expect } = require('chai'); +const { ScramSHA256 } = require('../../lib/cmap/auth/scram'); +const { setupDatabase, withClient } = require('./shared'); describe('SCRAM-SHA-256 auth', function() { const test = {}; @@ -168,6 +167,32 @@ describe('SCRAM-SHA-256 auth', function() { } }); + it('should shorten SCRAM conversations if the server supports it', { + metadata: { requires: { mongodb: '>=4.3.x' } }, + test: function() { + const options = { + auth: { + user: userMap.both.username, + password: userMap.both.password + }, + authSource: this.configuration.db + }; + + let sendAuthCommandSpy; + test.sandbox + .stub(ScramSHA256.prototype, '_executeScram') + .callsFake(function(sendAuthCommand, connection, credentials, nonce, callback) { + const executeScram = ScramSHA256.prototype._executeScram.wrappedMethod; + sendAuthCommandSpy = test.sandbox.spy(sendAuthCommand); + executeScram.apply(this, [sendAuthCommandSpy, connection, credentials, nonce, callback]); + }); + + return withClient(this.configuration.newClient({}, options), () => { + expect(sendAuthCommandSpy.callCount).to.equal(2); + }); + } + }); + // Step 3 // For test users that support only one mechanism, verify that explicitly specifying the other mechanism fails.
it('should fail to connect if incorrect auth mechanism is explicitly specified', { diff --git a/test/functional/sessions.test.js b/test/functional/sessions.test.js index c79510001d2..45e708f4244 100644 --- a/test/functional/sessions.test.js +++ b/test/functional/sessions.test.js @@ -187,11 +187,6 @@ describe('Sessions', function() { after(() => testContext.teardown()); before(function() { - if (!this.configuration.usingUnifiedTopology()) { - this.test.parent.pending = true; // https://github.com/mochajs/mocha/issues/2683 - this.skip(); - return; - } return testContext.setup(this.configuration); }); diff --git a/test/functional/sharding_connection.test.js b/test/functional/sharding_connection.test.js deleted file mode 100644 index fc4acfe7c33..00000000000 --- a/test/functional/sharding_connection.test.js +++ /dev/null @@ -1,162 +0,0 @@ -'use strict'; - -const setupDatabase = require('./shared').setupDatabase; -const expect = require('chai').expect; - -describe('Sharding (Connection)', function() { - before(function() { - return setupDatabase(this.configuration); - }); - - /** - * @ignore - */ - it('Should connect to mongos proxies using connectiong string and options', { - metadata: { requires: { topology: 'sharded' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // disable for inspection of legacy properties - return this.skip(); - } - - const url = `${configuration.url()}?w=1&readPreference=secondaryPreferred&readPreferenceTags=sf%3A1`; - const client = configuration.newClient(url, { haInterval: 500, useNewUrlParser: true }); - - client.connect(err => { - expect(err).to.not.exist; - expect(client).to.have.nested.property('topology.haInterval', 500); - - const db = client.db(configuration.db); - - db.collection('replicaset_mongo_client_collection').update( - { a: 1 }, - { b: 1 }, - { upsert: true }, - (err, result) => { - expect(err).to.not.exist; - expect(result).to.have.nested.property('result.n', 1); - - // Perform fetch of document - db.collection('replicaset_mongo_client_collection').findOne(err => { - expect(err).to.not.exist; - - client.close(done); - }); - } - ); - }); - } - }); - - /** - * @ignore - */ - it('Should correctly connect with a missing mongos', { - metadata: { requires: { topology: 'sharded' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // the unified topology will attempt to connect to all provided servers if you - // want to explicitly call `connect`. Otherwise, it will satisfy the requirements - // of this test, and not explicitly fail on an unknown server. - return this.skip(); - } - - const host = configuration.host; - const port = configuration.port; - - // TODO: Better way to do this? 
- const url = `mongodb://${host}:${port},${host}:${port + - 1},localhost:50002/sharded_test_db?w=1`; - - const client = configuration.newClient(url, { useNewUrlParser: true }); - - client.connect(err => { - expect(err).to.not.exist; - client.close(done); - }); - } - }); - - /** - * @ignore - */ - it('Should exercise all options on mongos topology', { - metadata: { requires: { topology: 'sharded' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // disable for inspection of legacy properties - return this.skip(); - } - - const url = `${configuration.url()}?w=1&readPreference=secondaryPreferred&readPreferenceTags=sf%3A1`; - - const client = configuration.newClient(url, { useNewUrlParser: true, haInterval: 500 }); - client.connect(function(err) { - expect(err).to.not.exist; - expect(client) - .to.have.property('topology') - .that.is.an('object'); - - const topology = client.topology; - - expect(topology).to.have.property('haInterval', 500); - expect(topology).to.have.property('bson').that.does.exist; - - expect(topology) - .to.have.property('isConnected') - .that.is.a('function'); - expect(topology.isConnected()).to.equal(true); - - ['capabilities', 'lastIsMaster', 'connections'].forEach(member => { - expect(topology) - .to.have.property(member) - .that.is.a('function'); - expect(topology[member]()).to.exist; - }); - - client.close(done); - }); - } - }); - - /** - * @ignore - */ - it('Should correctly modify the server reconnectTries for all sharded proxy instances', { - metadata: { requires: { topology: 'sharded' } }, - - // The actual test we wish to run - test: function(done) { - const configuration = this.configuration; - if (configuration.usingUnifiedTopology()) { - // disable for inspection of legacy properties - return this.skip(); - } - - const url = `${configuration.url()}?w=1&readPreference=secondaryPreferred&readPreferenceTags=sf%3A1`; - - const client = configuration.newClient(url, { useNewUrlParser: true, reconnectTries: 10 }); - client.connect(function(err) { - expect(err).to.not.exist; - expect(client) - .to.have.nested.property('topology.s.coreTopology.connectedProxies') - .that.is.an('array'); - - client.topology.s.coreTopology.connectedProxies.forEach(server => { - expect(server).to.have.nested.property('s.pool.options.reconnectTries', 10); - }); - - client.close(done); - }); - } - }); -}); diff --git a/test/functional/spec-runner/context.js b/test/functional/spec-runner/context.js index 04a8faac0d8..b7111247960 100644 --- a/test/functional/spec-runner/context.js +++ b/test/functional/spec-runner/context.js @@ -24,13 +24,12 @@ class TestRunnerContext { setup(config) { this.sharedClient = config.newClient( - resolveConnectionString(config, { useMultipleMongoses: true }), - { useUnifiedTopology: true } + resolveConnectionString(config, { useMultipleMongoses: true }) ); if (config.topologyType === 'Sharded') { this.failPointClients = config.options.hosts.map(proxy => - config.newClient(`mongodb://${proxy.host}:${proxy.port}/`, { useUnifiedTopology: true }) + config.newClient(`mongodb://${proxy.host}:${proxy.port}/`) ); } diff --git a/test/functional/spec-runner/index.js b/test/functional/spec-runner/index.js index 71511980868..3c25ee2d9bb 100644 --- a/test/functional/spec-runner/index.js +++ b/test/functional/spec-runner/index.js @@ -152,7 +152,7 @@ function prepareDatabaseForSuite(suite, context) { .admin() .command({ killAllSessions: [] }) .catch(err => { - if 
(err.code === 11601) { + if (err.code === 11601 || err.message.match(/no such/)) { return; } @@ -236,9 +236,6 @@ function runTestSuiteTest(configuration, spec, context) { clientOptions.minHeartbeatFrequencyMS = 100; clientOptions.useRecoveryToken = true; - // TODO: this should be configured by `newClient` and env variables - clientOptions.useUnifiedTopology = true; - const url = resolveConnectionString(configuration, spec); const client = configuration.newClient(url, clientOptions); return client.connect().then(client => { @@ -260,17 +257,24 @@ function runTestSuiteTest(configuration, spec, context) { spec.sessionOptions = spec.sessionOptions || {}; const database = client.db(context.dbName); - const session0 = client.startSession( - Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session0)) - ); - const session1 = client.startSession( - Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session1)) - ); - const savedSessionData = { - session0: JSON.parse(EJSON.stringify(session0.id)), - session1: JSON.parse(EJSON.stringify(session1.id)) - }; + let session0, session1; + let savedSessionData; + try { + session0 = client.startSession( + Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session0)) + ); + session1 = client.startSession( + Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session1)) + ); + + savedSessionData = { + session0: JSON.parse(EJSON.stringify(session0.id)), + session1: JSON.parse(EJSON.stringify(session1.id)) + }; + } catch (err) { + // ignore + } // enable to see useful APM debug information at the time of actual test run // displayCommands = true; @@ -293,8 +297,8 @@ function runTestSuiteTest(configuration, spec, context) { throw err; }) .then(() => { - session0.endSession(); - session1.endSession(); + if (session0) session0.endSession(); + if (session1) session1.endSession(); return validateExpectations(context.commandEvents, spec, savedSessionData); }); @@ -469,6 +473,35 @@ function resolveOperationArgs(operationName, operationArgs, context) { const CURSOR_COMMANDS = new Set(['find', 'aggregate', 'listIndexes', 'listCollections']); const ADMIN_COMMANDS = new Set(['listDatabases']); +const kOperations = new Map([ + [ + 'createIndex', + (operation, collection /*, context, options */) => { + const fieldOrSpec = operation.arguments.keys; + return collection.createIndex(fieldOrSpec); + } + ], + [ + 'dropIndex', + (operation, collection /*, context, options */) => { + const indexName = operation.arguments.name; + return collection.dropIndex(indexName); + } + ], + [ + 'mapReduce', + (operation, collection /*, context, options */) => { + const args = operation.arguments; + const map = args.map; + const reduce = args.reduce; + const options = {}; + if (args.out) options.out = args.out; + + return collection.mapReduce(map, reduce, options); + } + ] +]); + /** * * @param {Object} operation the operation definition from the spec test @@ -483,95 +516,101 @@ function testOperation(operation, obj, context, options) { let args = []; const operationName = translateOperationName(operation.name); - if (operation.arguments) { - args = resolveOperationArgs(operationName, operation.arguments, context); - - if (args == null) { - args = []; - Object.keys(operation.arguments).forEach(key => { - if (key === 'callback') { - args.push(() => - testOperations(operation.arguments.callback, context, { swallowOperationErrors: false }) - ); - return; - } - - if (['filter', 'fieldName', 'document', 
'documents', 'pipeline'].indexOf(key) !== -1) { - return args.unshift(operation.arguments[key]); - } + let opPromise; + if (kOperations.has(operationName)) { + opPromise = kOperations.get(operationName)(operation, obj, context, options); + } else { + if (operation.arguments) { + args = resolveOperationArgs(operationName, operation.arguments, context); + + if (args == null) { + args = []; + Object.keys(operation.arguments).forEach(key => { + if (key === 'callback') { + args.push(() => + testOperations(operation.arguments.callback, context, { + swallowOperationErrors: false + }) + ); + return; + } - if ((key === 'map' || key === 'reduce') && operationName === 'mapReduce') { - return args.unshift(operation.arguments[key]); - } + if (['filter', 'fieldName', 'document', 'documents', 'pipeline'].indexOf(key) !== -1) { + return args.unshift(operation.arguments[key]); + } - if (key === 'command') return args.unshift(operation.arguments[key]); - if (key === 'requests') return args.unshift(extractBulkRequests(operation.arguments[key])); - if (key === 'update' || key === 'replacement') return args.push(operation.arguments[key]); - if (key === 'session') { - if (isTransactionCommand(operationName)) return; - opOptions.session = context[operation.arguments.session]; - return; - } + if ((key === 'map' || key === 'reduce') && operationName === 'mapReduce') { + return args.unshift(operation.arguments[key]); + } - if (key === 'returnDocument') { - opOptions.returnOriginal = operation.arguments[key] === 'Before' ? true : false; - return; - } + if (key === 'command') return args.unshift(operation.arguments[key]); + if (key === 'requests') + return args.unshift(extractBulkRequests(operation.arguments[key])); + if (key === 'update' || key === 'replacement') return args.push(operation.arguments[key]); + if (key === 'session') { + if (isTransactionCommand(operationName)) return; + opOptions.session = context[operation.arguments.session]; + return; + } - if (key === 'options') { - Object.assign(opOptions, operation.arguments[key]); - if (opOptions.readPreference) { - opOptions.readPreference = normalizeReadPreference(opOptions.readPreference.mode); + if (key === 'returnDocument') { + opOptions.returnOriginal = operation.arguments[key] === 'Before' ? true : false; + return; } - return; - } + if (key === 'options') { + Object.assign(opOptions, operation.arguments[key]); + if (opOptions.readPreference) { + opOptions.readPreference = normalizeReadPreference(opOptions.readPreference.mode); + } - if (key === 'readPreference') { - opOptions[key] = normalizeReadPreference(operation.arguments[key].mode); - return; - } + return; + } - opOptions[key] = operation.arguments[key]; - }); - } - } + if (key === 'readPreference') { + opOptions[key] = normalizeReadPreference(operation.arguments[key].mode); + return; + } - if ( - args.length === 0 && - !isTransactionCommand(operationName) && - !isTestRunnerCommand(context, operationName) - ) { - args.push({}); - } + opOptions[key] = operation.arguments[key]; + }); + } + } - if (Object.keys(opOptions).length > 0) { - // NOTE: this is awful, but in order to provide options for some methods we need to add empty - // query objects. 
- if (operationName === 'distinct') { + if ( + args.length === 0 && + !isTransactionCommand(operationName) && + !isTestRunnerCommand(context, operationName) + ) { args.push({}); } - args.push(opOptions); - } + if (Object.keys(opOptions).length > 0) { + // NOTE: this is awful, but in order to provide options for some methods we need to add empty + // query objects. + if (operationName === 'distinct') { + args.push({}); + } - if (ADMIN_COMMANDS.has(operationName)) { - obj = obj.db().admin(); - } + args.push(opOptions); + } - if (operation.name === 'listDatabaseNames' || operation.name === 'listCollectionNames') { - opOptions.nameOnly = true; - } + if (ADMIN_COMMANDS.has(operationName)) { + obj = obj.db().admin(); + } - let opPromise; + if (operation.name === 'listDatabaseNames' || operation.name === 'listCollectionNames') { + opOptions.nameOnly = true; + } - if (CURSOR_COMMANDS.has(operationName)) { - // `find` creates a cursor, so we need to call `toArray` on it - const cursor = obj[operationName].apply(obj, args); - opPromise = cursor.toArray(); - } else { - // wrap this in a `Promise.try` because some operations might throw - opPromise = Promise.try(() => obj[operationName].apply(obj, args)); + if (CURSOR_COMMANDS.has(operationName)) { + // `find` creates a cursor, so we need to call `toArray` on it + const cursor = obj[operationName].apply(obj, args); + opPromise = cursor.toArray(); + } else { + // wrap this in a `Promise.try` because some operations might throw + opPromise = Promise.try(() => obj[operationName].apply(obj, args)); + } } if (operation.error) { @@ -674,5 +713,6 @@ function testOperations(testData, operationContext, options) { module.exports = { TestRunnerContext, gatherTestSuites, - generateTopologyTests + generateTopologyTests, + parseRunOn }; diff --git a/test/functional/ssl_x509_connect.test.js b/test/functional/ssl_x509_connect.test.js index 0526771a766..30fba548e65 100644 --- a/test/functional/ssl_x509_connect.test.js +++ b/test/functional/ssl_x509_connect.test.js @@ -3,6 +3,7 @@ var fs = require('fs'); var f = require('util').format; var test = require('./shared').assert; var setupDatabase = require('./shared').setupDatabase; +const { MongoClient } = require('../..'); describe('SSL (x509)', function() { before(function() { @@ -18,8 +19,7 @@ describe('SSL (x509)', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ServerManager = require('mongodb-topology-manager').Server, - MongoClient = configuration.require.MongoClient; + var ServerManager = require('mongodb-topology-manager').Server; // Read the cert and key var cert = fs.readFileSync(__dirname + '/ssl/x509/client.pem'); @@ -136,8 +136,7 @@ describe('SSL (x509)', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ServerManager = require('mongodb-topology-manager').Server, - MongoClient = configuration.require.MongoClient; + var ServerManager = require('mongodb-topology-manager').Server; // Read the cert and key var cert = fs.readFileSync(__dirname + '/ssl/x509/client.pem'); @@ -254,8 +253,7 @@ describe('SSL (x509)', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ServerManager = require('mongodb-topology-manager').Server, - MongoClient = configuration.require.MongoClient; + var ServerManager = require('mongodb-topology-manager').Server; // Read the cert and key var cert = fs.readFileSync(__dirname + 
'/ssl/x509/client.pem'); @@ -371,8 +369,7 @@ describe('SSL (x509)', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ServerManager = require('mongodb-topology-manager').Server, - MongoClient = configuration.require.MongoClient; + var ServerManager = require('mongodb-topology-manager').Server; // Read the cert and key var cert = fs.readFileSync(__dirname + '/ssl/x509/client.pem'); @@ -482,8 +479,7 @@ describe('SSL (x509)', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ServerManager = require('mongodb-topology-manager').Server, - MongoClient = configuration.require.MongoClient; + var ServerManager = require('mongodb-topology-manager').Server; // Read the cert and key var cert = fs.readFileSync(__dirname + '/ssl/x509/client.pem'); diff --git a/test/functional/transactions.test.js b/test/functional/transactions.test.js index f71e3f641ea..db24f7bf533 100644 --- a/test/functional/transactions.test.js +++ b/test/functional/transactions.test.js @@ -1,13 +1,11 @@ 'use strict'; -const chai = require('chai'); -const expect = chai.expect; -const core = require('../../lib/core'); -const sessions = core.Sessions; -const TestRunnerContext = require('./spec-runner').TestRunnerContext; -const loadSpecTests = require('../spec').loadSpecTests; -const generateTopologyTests = require('./spec-runner').generateTopologyTests; -const MongoNetworkError = require('../../lib/core').MongoNetworkError; +const { expect } = require('chai'); +const { Topology } = require('../../lib/sdam/topology'); +const { ClientSession } = require('../../lib/sessions'); +const { TestRunnerContext, generateTopologyTests } = require('./spec-runner'); +const { loadSpecTests } = require('../spec'); +const { MongoNetworkError } = require('../../lib/error'); describe('Transactions', function() { const testContext = new TestRunnerContext(); @@ -35,9 +33,6 @@ describe('Transactions', function() { 'count', // This test needs there to be multiple mongoses 'increment txnNumber', - // There is something wrong with the distinct command in the runner: - // it is not failing properly - 'add transient label to connection errors', // Skipping this until SPEC-1320 is resolved 'remain pinned after non-transient error on commit' ]; @@ -52,9 +47,9 @@ describe('Transactions', function() { describe('withTransaction', function() { let session, sessionPool; beforeEach(() => { - const topology = new core.Server(); - sessionPool = new sessions.ServerSessionPool(topology); - session = new sessions.ClientSession(topology, sessionPool); + const topology = new Topology('localhost:27017'); + sessionPool = topology.s.sessionPool; + session = new ClientSession(topology, sessionPool); }); afterEach(() => { @@ -106,7 +101,7 @@ describe('Transactions', function() { metadata: { requires: { topology: ['sharded'], mongodb: '>=4.1.0' } }, test: function(done) { const configuration = this.configuration; - const client = configuration.newClient(configuration.url(), { useUnifiedTopology: true }); + const client = configuration.newClient(configuration.url()); client.connect(err => { expect(err).to.not.exist; @@ -130,7 +125,7 @@ describe('Transactions', function() { metadata: { requires: { topology: 'replicaset', mongodb: '>=4.0.0' } }, test: function(done) { const configuration = this.configuration; - const client = configuration.newClient({ w: 1 }, { useUnifiedTopology: true }); + const client = configuration.newClient({ w: 1 }); 
client.connect(err => { expect(err).to.not.exist; @@ -156,10 +151,8 @@ describe('Transactions', function() { expect(session.inTransaction()).to.be.true; coll.insertOne({ b: 2 }, { session }, err => { - expect(err) - .to.exist.and.to.be.an.instanceof(MongoNetworkError) - .and.to.have.a.property('errorLabels') - .that.includes('TransientTransactionError'); + expect(err).to.exist.and.to.be.an.instanceof(MongoNetworkError); + expect(err.hasErrorLabel('TransientTransactionError')).to.be.true; session.abortTransaction(() => session.endSession(() => client.close(done))); }); @@ -175,7 +168,7 @@ describe('Transactions', function() { metadata: { requires: { topology: 'replicaset', mongodb: '>=4.0.0' } }, test: function(done) { const configuration = this.configuration; - const client = configuration.newClient({ w: 1 }, { useUnifiedTopology: true }); + const client = configuration.newClient({ w: 1 }); client.connect(err => { expect(err).to.not.exist; @@ -191,9 +184,7 @@ describe('Transactions', function() { err => { expect(err).to.not.exist; coll.insertOne({ a: 1 }, err => { - expect(err) - .to.exist.and.to.be.an.instanceOf(MongoNetworkError) - .and.to.not.have.a.property('errorLabels'); + expect(err).to.exist.and.to.be.an.instanceOf(MongoNetworkError); client.close(done); }); } diff --git a/test/functional/unicode.test.js b/test/functional/unicode.test.js index 5c6edfeec33..7bdfb7217e3 100644 --- a/test/functional/unicode.test.js +++ b/test/functional/unicode.test.js @@ -1,6 +1,5 @@ 'use strict'; -var test = require('./shared').assert; -var setupDatabase = require('./shared').setupDatabase; +const { assert: test, setupDatabase } = require('./shared'); describe('Unicode', function() { before(function() { diff --git a/test/functional/uri.test.js b/test/functional/uri.test.js index a98b77e0375..88c278216b9 100644 --- a/test/functional/uri.test.js +++ b/test/functional/uri.test.js @@ -2,51 +2,9 @@ const expect = require('chai').expect; const sinon = require('sinon'); -const ReplSet = require('../../lib/topologies/replset'); const NativeTopology = require('../../lib/topologies/native_topology'); describe('URI', function() { - /** - * @ignore - */ - it( - 'Should correctly connect using MongoClient to a single server using connect with optional server setting', - { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { requires: { topology: 'single', unifiedTopology: false } }, - - // The actual test we wish to run - test: function(done) { - var self = this; - - // Connect using the connection string - const client = this.configuration.newClient('mongodb://localhost:27017/integration_tests', { - native_parser: false, - socketOptions: { - connectTimeoutMS: 500 - } - }); - - client.connect(function(err, client) { - var db = client.db(self.configuration.db); - expect(err).to.not.exist; - expect(client.topology.connections()[0].connectionTimeout).to.equal(500); - - db.collection('mongoclient_test').update({ a: 1 }, { b: 1 }, { upsert: true }, function( - err, - result - ) { - expect(err).to.not.exist; - expect(result.result.n).to.equal(1); - - client.close(done); - }); - }); - } - } - ); - /** * @ignore */ @@ -191,10 +149,7 @@ describe('URI', function() { done(); } - const topologyPrototype = this.configuration.usingUnifiedTopology() - ? 
NativeTopology.prototype - : ReplSet.prototype; - + const topologyPrototype = NativeTopology.prototype; const connectStub = sinon.stub(topologyPrototype, 'connect').callsFake(validateConnect); const uri = 'mongodb://some-hostname/test?ssl=true&authMechanism=MONGODB-X509&replicaSet=rs0'; const client = this.configuration.newClient(uri, { useNewUrlParser: true }); diff --git a/test/functional/uri_options_spec.test.js b/test/functional/uri_options_spec.test.js index 19804decd00..cd4b466e726 100644 --- a/test/functional/uri_options_spec.test.js +++ b/test/functional/uri_options_spec.test.js @@ -4,10 +4,9 @@ const chai = require('chai'); const expect = chai.expect; chai.use(require('chai-subset')); -const core = require('../../lib/core'); -const parse = core.parseConnectionString; -const MongoParseError = core.MongoParseError; -const loadSpecTests = require('../spec').loadSpecTests; +const { parseConnectionString: parse } = require('../../lib/connection_string'); +const { MongoParseError } = require('../../lib/error'); +const { loadSpecTests } = require('../spec'); describe('URI Options (spec)', function() { loadSpecTests('uri-options').forEach(suite => { diff --git a/test/functional/url_parser.test.js b/test/functional/url_parser.test.js deleted file mode 100644 index df25d45a90a..00000000000 --- a/test/functional/url_parser.test.js +++ /dev/null @@ -1,1030 +0,0 @@ -'use strict'; - -/*! - * Module dependencies. - */ -var parse = require('../../lib/url_parser'); -var expect = require('chai').expect; - -describe('Url Parser', function() { - /** - * @ignore - */ - it('should correctly parse mongodb://localhost', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost:27017', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost:27017/', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost:27017test?appname=hello%20world', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost:27017/test?appname=hello%20world', {}, function(err, object) { - expect(err).to.be.null; - expect(object.appname).to.equal('hello world'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost/?safe=true&readPreference=secondary', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?safe=true&readPreference=secondary', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - 
expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost:28101/', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost:28101/', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(28101); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foobar@localhost/baz', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - // The actual test we wish to run - test: function(done) { - parse('mongodb://fred:foobar@localhost/baz', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('baz'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foobar'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foo%20bar@localhost/baz', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - // The actual test we wish to run - test: function(done) { - parse('mongodb://fred:foo%20bar@localhost/baz', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('baz'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foo bar'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://%2Ftmp%2Fmongodb-27017.sock', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://%2Ftmp%2Fmongodb-27017.sock', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].domain_socket).to.equal('/tmp/mongodb-27017.sock'); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].domain_socket).to.equal('/tmp/mongodb-27017.sock'); - expect(object.dbName).to.equal('admin'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foo'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock/somedb', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock/somedb', {}, function(err, object) { - expect(err).to.be.null; 
- expect(object.servers).to.have.length(1); - expect(object.servers[0].domain_socket).to.equal('/tmp/mongodb-27017.sock'); - expect(object.dbName).to.equal('somedb'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foo'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].domain_socket).to.equal('/tmp/mongodb-27017.sock'); - expect(object.dbName).to.equal('somedb'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foo'); - expect(object.db_options.safe).to.be.true; - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://example1.com:27017,example2.com:27018', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://example1.com:27017,example2.com:27018', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(2); - expect(object.servers[0].host).to.equal('example1.com'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('example2.com'); - expect(object.servers[1].port).to.equal(27018); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost,localhost:27018,localhost:27019', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost,localhost:27018,localhost:27019', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('localhost'); - expect(object.servers[1].port).to.equal(27018); - expect(object.servers[2].host).to.equal('localhost'); - expect(object.servers[2].port).to.equal(27019); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://host1,host2,host3/?slaveOk=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://host1,host2,host3/?slaveOk=true', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('host1'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('host2'); - expect(object.servers[1].port).to.equal(27017); - expect(object.servers[2].host).to.equal('host3'); - expect(object.servers[2].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.slave_ok).to.be.true; - done(); - }); - } - }); - - /** - * @ignore - */ - it( - 'should correctly parse mongodb://host1,host2,host3,host1/?slaveOk=true and de-duplicate names', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, 
- - test: function(done) { - parse('mongodb://host1,host2,host3,host1/?slaveOk=true', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('host1'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('host2'); - expect(object.servers[1].port).to.equal(27017); - expect(object.servers[2].host).to.equal('host3'); - expect(object.servers[2].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.slave_ok).to.be.true; - done(); - }); - } - } - ); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost/?safe=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?safe=true', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.db_options.safe).to.be.true; - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://host1,host2,host3/?safe=true;w=2;wtimeoutMS=2000', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://host1,host2,host3/?safe=true;w=2;wtimeoutMS=2000', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('host1'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('host2'); - expect(object.servers[1].port).to.equal(27017); - expect(object.servers[2].host).to.equal('host3'); - expect(object.servers[2].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.db_options.safe).to.be.true; - expect(object.db_options.w).to.equal(2); - expect(object.db_options.wtimeout).to.equal(2000); - done(); - }); - } - }); - - /** - * @ignore - */ - it( - 'should parse mongodb://localhost/db?replicaSet=hello&ssl=prefer&connectTimeoutMS=1000&socketTimeoutMS=2000', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse( - 'mongodb://localhost/db?replicaSet=hello&ssl=prefer&connectTimeoutMS=1000&socketTimeoutMS=2000', - {}, - function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('db'); - expect(object.rs_options.rs_name).to.equal('hello'); - expect(object.server_options.socketOptions.connectTimeoutMS).to.equal(1000); - expect(object.server_options.socketOptions.socketTimeoutMS).to.equal(2000); - expect(object.rs_options.socketOptions.connectTimeoutMS).to.equal(1000); - expect(object.rs_options.socketOptions.socketTimeoutMS).to.equal(2000); - expect(object.rs_options.ssl).to.equal('prefer'); - expect(object.server_options.ssl).to.equal('prefer'); - done(); - } - ); - } - } - ); - - /** - * @ignore - */ - it('should parse mongodb://localhost/db?ssl=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?ssl=true', {}, 
function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('db'); - expect(object.rs_options.ssl).to.be.true; - expect(object.server_options.ssl).to.be.true; - done(); - }); - } - }); - - /** - * @ignore - */ - it('should parse mongodb://localhost/db?maxPoolSize=100', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?maxPoolSize=100', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('db'); - expect(object.rs_options.poolSize).to.equal(100); - expect(object.server_options.poolSize).to.equal(100); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should parse mongodb://localhost/db?w=-1', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?w=-1', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('db'); - expect(object.db_options.w).to.equal(-1); - done(); - }); - } - }); - - /** - * @ignore - */ - it( - 'should be able to parse mongodb://localhost/?compressors=snappy, with one compressor specified', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?compressors=snappy', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.compression.compressors[0]).to.equal('snappy'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should be able to parse mongodb://localhost/?zlibCompressionLevel=-1 without issuing a warning', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?zlibCompressionLevel=-1 ', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.compression.zlibCompressionLevel).to.equal(-1); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should be able to parse mongodb://localhost/?compressors=snappy&zlibCompressionLevel=3 without issuing a warning', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?compressors=snappy&zlibCompressionLevel=3', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - 
expect(object.dbName).to.equal('admin'); - expect(object.server_options.compression.compressors[0]).to.equal('snappy'); - expect(object.server_options.compression.zlibCompressionLevel).to.equal(3); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should be able to parse mongodb://localhost/?compressors=snappy,zlib&zlibCompressionLevel=-1', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?compressors=snappy,zlib&zlibCompressionLevel=-1', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.compression.compressors[0]).to.equal('snappy'); - expect(object.server_options.compression.compressors[1]).to.equal('zlib'); - expect(object.server_options.compression.zlibCompressionLevel).to.equal(-1); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should throw an error when parsing mongodb://localhost/?compressors=foo, where foo is an unsupported compressor', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - // Should throw due to unsupported compressor - parse('mongodb://localhost/?compressors=foo', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('Compressors must be at least one of snappy or zlib'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should throw an error when parsing mongodb://localhost/?zlibCompressionLevel=10, where the integer is out of the specified bounds', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - // Should throw due to unsupported compressor - parse('mongodb://localhost/?zlibCompressionLevel=10', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('zlibCompressionLevel must be an integer between -1 and 9'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it('should log when unsupported options are used in url', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - var self = this, - Logger = self.configuration.require.Logger, - logged = false; - - const logger = Logger.currentLogger(); - Logger.setCurrentLogger(function(msg, context) { - expect(msg).to.exist; - expect(msg).to.contain('not supported'); - expect(context.type).to.equal('warn'); - expect(context.className).to.equal('URL Parser'); - logged = true; - }); - - Logger.setLevel('warn'); - - parse('mongodb://localhost/db?minPoolSize=100', {}, function() { - expect(logged).to.be.true; - parse('mongodb://localhost/db?maxIdleTimeMS=100', {}, function() { - expect(logged).to.be.true; - parse('mongodb://localhost/db?waitQueueMultiple=100', {}, function() { - expect(logged).to.be.true; - parse('mongodb://localhost/db?waitQueueTimeoutMS=100', {}, function() { - expect(logged).to.be.true; - parse('mongodb://localhost/db?uuidRepresentation=1', {}, function() { - expect(logged).to.be.true; - - Logger.setCurrentLogger(logger); - done(); - }); - }); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('should write concerns parsing', { - metadata: { - requires: { topology: ['single', 'replicaset', 
'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?safe=true&w=1', {}, function(err, object) { - expect(err).to.be.null; - expect(object.db_options.safe).to.be.true; - parse('mongodb://localhost/db?safe=false&w=1', {}, function(err, object) { - expect(err).to.be.null; - expect(object.db_options.safe).to.be.false; - // should throw as fireAndForget is set aswell as safe or any other - // write concerns - parse('mongodb://localhost/db?safe=true&w=0', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal( - 'w set to -1 or 0 cannot be combined with safe/w/journal/fsync' - ); - parse('mongodb://localhost/db?fsync=true&w=-1', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal( - 'w set to -1 or 0 cannot be combined with safe/w/journal/fsync' - ); - done(); - }); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('should parse GSSAPI', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://dev1%4010GEN.ME@kdc.10gen.com/test?authMechanism=GSSAPI', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.auth).to.eql({ user: 'dev1@10GEN.ME', password: null }); - expect(object.db_options.authMechanism).to.equal('GSSAPI'); - // Should throw due to missing principal - parse('mongodb://kdc.10gen.com/test?authMechanism=GSSAPI', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('GSSAPI requires a provided principal'); - // Should throw due to unsupported mechanism - parse('mongodb://kdc.10gen.com/test?authMechanism=NONE', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal( - 'Only DEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1 is supported by authMechanism' - ); - parse( - 'mongodb://dev1%4010GEN.ME:test@kdc.10gen.com/test?authMechanism=GSSAPI', - {}, - function(err, object) { - expect(err).to.be.null; - expect(object.auth).to.eql({ user: 'dev1@10GEN.ME', password: 'test' }); - expect(object.db_options.authMechanism).to.equal('GSSAPI'); - done(); - } - ); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('Read preferences parsing', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?slaveOk=true', {}, function(err, object) { - expect(object.server_options.slave_ok).to.be.true; - parse('mongodb://localhost/db?readPreference=primary', {}, function(err, object) { - expect(object.db_options.readPreference).to.equal('primary'); - parse('mongodb://localhost/db?readPreference=primaryPreferred', {}, function( - err, - object - ) { - expect(object.db_options.readPreference).to.equal('primaryPreferred'); - parse('mongodb://localhost/db?readPreference=secondary', {}, function(err, object) { - expect(object.db_options.readPreference).to.equal('secondary'); - parse('mongodb://localhost/db?readPreference=secondaryPreferred', {}, function( - err, - object - ) { - expect(object.db_options.readPreference).to.equal('secondaryPreferred'); - parse('mongodb://localhost/db?readPreference=nearest', {}, function(err, object) { - expect(object.db_options.readPreference).to.equal('nearest'); - parse('mongodb://localhost/db', {}, function(err, object) { - expect(object.db_options.readPreference).to.equal('primary'); - parse('mongodb://localhost/db?readPreference=blah', {}, function(err) { - expect(err).to.exist; - 
expect(err.message).to.equal( - 'readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest' - ); - done(); - }); - }); - }); - }); - }); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('Read preferences tag parsing', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db', {}, function(err, object) { - expect(object.db_options.read_preference_tags).to.be.null; - parse('mongodb://localhost/db?readPreferenceTags=dc:ny', {}, function(err, object) { - expect(err).to.not.exist; - expect(object.db_options.read_preference_tags).to.eql([{ dc: 'ny' }]); - parse('mongodb://localhost/db?readPreferenceTags=dc:ny,rack:1', {}, function( - err, - object - ) { - expect(err).to.not.exist; - expect(object.db_options.read_preference_tags).to.eql([{ dc: 'ny', rack: '1' }]); - parse( - 'mongodb://localhost/db?readPreferenceTags=dc:ny,rack:1&readPreferenceTags=dc:sf,rack:2', - {}, - function(err, object) { - expect(err).to.not.exist; - expect(object.db_options.read_preference_tags).to.eql([ - { dc: 'ny', rack: '1' }, - { dc: 'sf', rack: '2' } - ]); - parse( - 'mongodb://localhost/db?readPreferenceTags=dc:ny,rack:1&readPreferenceTags=dc:sf,rack:2&readPreferenceTags=', - {}, - function(err, object) { - expect(err).to.not.exist; - expect(object.db_options.read_preference_tags).to.eql([ - { dc: 'ny', rack: '1' }, - { dc: 'sf', rack: '2' }, - {} - ]); - done(); - } - ); - } - ); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://[::1]:1234', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://[::1]:1234', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('::1'); - expect(object.servers[0].port).to.equal(1234); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://[::1]', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://[::1]', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('::1'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost,[::1]:27018,[2607:f0d0:1002:51::41]', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost,[::1]:27018,[2607:f0d0:1002:51::41]', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('::1'); - expect(object.servers[1].port).to.equal(27018); - expect(object.servers[2].host).to.equal('2607:f0d0:1002:51::41'); - expect(object.servers[2].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://k?y:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', { - metadata: { - requires: { topology: 
['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://k%3Fy:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.auth.user).to.equal('k?y'); - done(); - }); - } - }); - - /** - * @ignore - */ - it( - 'should correctly parse uriencoded k?y mongodb://k%3Fy:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://k%3Fy:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.auth.user).to.equal('k?y'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should correctly parse username kay:kay mongodb://kay%3Akay:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://kay%3Akay:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.auth.user).to.equal('kay:kay'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it('should use options passed into url parsing', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - test: function(done) { - parse('mongodb://localhost/', { readPreference: 'secondary' }, function(err, object) { - expect(err).to.be.null; - expect(object.db_options.readPreference).to.equal('secondary'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should raise exceptions on invalid hostnames with double colon in host identifier', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - test: function(done) { - parse('mongodb://invalid::host:27017/db', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('Double colon in host identifier'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should raise exceptions on invalid hostnames with slash in host identifier', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - test: function(done) { - parse('mongodb://invalid/host:27017/db', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('Slash in host identifier'); - done(); - }); - } - }); -}); diff --git a/test/functional/view.test.js b/test/functional/view.test.js index b5b060dceb4..275f87c1df9 100644 --- a/test/functional/view.test.js +++ b/test/functional/view.test.js @@ -2,6 +2,7 @@ var expect = require('chai').expect, mock = require('mongodb-mock-server'), co = require('co'); +const { Long } = require('../..'); describe('Views', function() { it('should successfully pass through collation to findAndModify command', { @@ -10,7 +11,6 @@ describe('Views', function() { test: function(done) { var self = this; const configuration = this.configuration; - const Long = configuration.mongo.Long; // Default message fields var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER); diff --git a/test/functional/write_concern.test.js b/test/functional/write_concern.test.js new file mode 100644 index 00000000000..5e0ac4e0a8d --- /dev/null +++ b/test/functional/write_concern.test.js @@ -0,0 +1,19 @@ +'use strict'; + +const TestRunnerContext = 
require('./spec-runner').TestRunnerContext; +const generateTopologyTests = require('./spec-runner').generateTopologyTests; +const loadSpecTests = require('../spec').loadSpecTests; + +describe('Write Concern', function() { + describe('spec tests', function() { + const testContext = new TestRunnerContext(); + const testSuites = loadSpecTests('read-write-concern/operation'); + + after(() => testContext.teardown()); + before(function() { + return testContext.setup(this.configuration); + }); + + generateTopologyTests(testSuites, testContext); + }); +}); diff --git a/test/spec/crud/README.rst b/test/spec/crud/README.rst index 8ddd8faddb1..f0f91d9a796 100644 --- a/test/spec/crud/README.rst +++ b/test/spec/crud/README.rst @@ -14,7 +14,7 @@ that drivers can use to prove their conformance to the CRUD spec. Running these integration tests will require a running MongoDB server or cluster with server versions 2.6.0 or later. Some tests have specific server -version requirements as noted by ``minServerVersion`` and ``maxServerVersion``. +version requirements as noted by the ``runOn`` section, if provided. Subdirectories for Test Formats ------------------------------- @@ -39,6 +39,27 @@ Test Format Each YAML file has the following keys: +- ``runOn`` (optional): An array of server version and/or topology requirements + for which the tests can be run. If the test environment satisfies one or more + of these requirements, the tests may be executed; otherwise, this file should + be skipped. If this field is omitted, the tests can be assumed to have no + particular requirements and should be executed. Each element will have some or + all of the following fields: + + - ``minServerVersion`` (optional): The minimum server version (inclusive) + required to successfully run the tests. If this field is omitted, it should + be assumed that there is no lower bound on the required server version. + + - ``maxServerVersion`` (optional): The maximum server version (inclusive) + against which the tests can be run successfully. If this field is omitted, + it should be assumed that there is no upper bound on the required server + version. + + - ``topology`` (optional): An array of server topologies against which the + tests can be run successfully. Valid topologies are "single", "replicaset", + and "sharded". If this field is omitted, the default is all topologies (i.e. + ``["single", "replicaset", "sharded"]``). + - ``collection_name`` (optional): The collection to use for testing. - ``database_name`` (optional): The database to use for testing. @@ -46,15 +67,6 @@ Each YAML file has the following keys: - ``data`` (optional): The data that should exist in the collection under test before each test run. -- ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the test. If this field is not present, it should - be assumed that there is no lower bound on the required server version. - -- ``maxServerVersion`` (optional): The maximum server version (exclusive) - against which this test can run successfully. If this field is not present, - it should be assumed that there is no upper bound on the required server - version. - - ``tests``: An array of tests that are to be run independently of each other. Each test will have some or all of the following fields: @@ -95,7 +107,7 @@ Each YAML file has the following keys: result object if their BulkWriteException (or equivalent) provides access to a write result object. 
- - ``expectations`` (optional): Array of documents, each describing a + - ``expectations`` (optional): Array of documents, each describing a `CommandStartedEvent <../../command-monitoring/command-monitoring.rst#api>`_ from the `Command Monitoring <../../command-monitoring/command-monitoring.rst>`_ @@ -137,10 +149,26 @@ single operation. Notable differences from the current format are as follows: defined under the ``tests[i].outcome.error`` and ``tests[i].outcome.result`` fields. +- Instead of a top-level ``runOn`` field, server requirements are denoted by + separate top-level ``minServerVersion`` and ``maxServerVersion`` fields. The + minimum server version is an inclusive lower bound for running the test. The + maximum server version is an exclusive upper bound for running the test. If a + field is not present, it should be assumed that there is no corresponding bound + on the required server version. + The legacy format should not conflict with the newer, multi-operation format used by other specs (e.g. Transactions). It is possible to create a unified test runner capable of executing both formats (as some drivers do). +Error Assertions for Bulk Write Operations +========================================== + +When asserting errors (e.g. ``errorContains``, ``errorCodeName``) for bulk write +operations, the test harness should inspect the ``writeConcernError`` and/or +``writeErrors`` properties of the bulk write exception. This may not be needed for +``errorContains`` if a driver concatenates all write and write concern error +messages into the bulk write exception's top-level message. + Test Runner Implementation ========================== @@ -155,7 +183,7 @@ Before running the tests: For each test file: - Using ``globalMongoClient``, check that the current server version satisfies - the ``minServerVersion`` and ``maxServerVersion`` top-level fields in the test + one of the configurations provided in the top-level ``runOn`` field in the test file (if applicable). If the requirements are not satisfied, the test file should be skipped. @@ -258,6 +286,14 @@ Note that in the case of result objects for some CRUD operations, ``expected`` may condition additional, optional fields (see: `Optional Fields in Expected Result Objects`_). +Fields that must NOT be present in Actual Documents +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Some command-started events in ``expectations`` include ``null`` values for +optional fields such as ``allowDiskUse``. +Tests MUST assert that the actual command **omits** any field that has a +``null`` value in the expected command.
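
For illustration, a test runner might implement this omission check roughly as
follows (a minimal JavaScript sketch using Chai's ``expect``; the names
``expected`` and ``actual`` are placeholders for the expected command document
from the test file and the command actually captured by command monitoring)::

    // For every field the expectation sets to null, assert that the
    // actual command omits that field entirely.
    for (const key of Object.keys(expected)) {
      if (expected[key] === null) {
        expect(actual).to.not.have.property(key);
      }
    }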
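
Similarly, the ``runOn`` matching described under `Test Runner Implementation`_
might look like the sketch below. ``compareVersions`` is a hypothetical helper
that assumes dotted numeric version strings such as "4.1.11", and ``topology``
is assumed to be one of "single", "replicaset", or "sharded"::

    // Naive dotted-numeric comparison: negative if a < b, zero if equal,
    // positive if a > b; missing components are treated as zero.
    function compareVersions(a, b) {
      const pa = a.split('.').map(Number);
      const pb = b.split('.').map(Number);
      for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
        const diff = (pa[i] || 0) - (pb[i] || 0);
        if (diff !== 0) return diff;
      }
      return 0;
    }

    // A file without a runOn field has no particular requirements and always
    // runs; otherwise at least one requirement element must be satisfied.
    // Both minServerVersion and maxServerVersion are inclusive bounds, and an
    // omitted topology field means all topologies are acceptable.
    function satisfiesRunOn(runOn, serverVersion, topology) {
      if (!runOn) return true;
      return runOn.some(req => {
        if (req.minServerVersion && compareVersions(serverVersion, req.minServerVersion) < 0) return false;
        if (req.maxServerVersion && compareVersions(serverVersion, req.maxServerVersion) > 0) return false;
        if (req.topology && !req.topology.includes(topology)) return false;
        return true;
      });
    }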
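
Likewise, the assertions described in `Error Assertions for Bulk Write
Operations`_ might be sketched as follows, assuming the bulk write exception
exposes the ``writeErrors`` and ``writeConcernError`` properties named in that
section (the ``errmsg`` field name is illustrative)::

    // Collect all write and write concern error messages, then check whether
    // at least one of them contains the errorContains fragment.
    function bulkErrorContains(err, fragment) {
      const messages = (err.writeErrors || []).map(we => String(we.errmsg || ''));
      if (err.writeConcernError) {
        messages.push(String(err.writeConcernError.errmsg || ''));
      }
      return messages.some(msg => msg.includes(fragment));
    }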
+ Optional Fields in Expected Result Objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/test/spec/crud/db/db-aggregate.json b/test/spec/crud/db/db-aggregate.json deleted file mode 100644 index bff33cb198e..00000000000 --- a/test/spec/crud/db/db-aggregate.json +++ /dev/null @@ -1,151 +0,0 @@ -{ - "database_name": "admin", - "data": [], - "minServerVersion": "4.0", - "tests": [ - { - "description": "Aggregate with $currentOp", - "operation": { - "name": "aggregate", - "object": "database", - "arguments": { - "pipeline": [ - { - "$currentOp": { - "allUsers": false, - "idleConnections": false, - "localOps": true - } - }, - { - "$match": { - "command.aggregate": { - "$eq": 1 - } - } - }, - { - "$project": { - "command": 1 - } - }, - { - "$project": { - "command.lsid": 0 - } - } - ] - } - }, - "outcome": { - "result": [ - { - "command": { - "aggregate": 1, - "pipeline": [ - { - "$currentOp": { - "allUsers": false, - "idleConnections": false - } - }, - { - "$match": { - "command.aggregate": { - "$eq": 1 - } - } - }, - { - "$project": { - "command": 1 - } - }, - { - "$project": { - "command.lsid": 0 - } - } - ], - "cursor": {}, - "$db": "admin" - } - } - ] - } - }, - { - "description": "Aggregate with $currentOp and allowDiskUse", - "operation": { - "name": "aggregate", - "object": "database", - "arguments": { - "pipeline": [ - { - "$currentOp": { - "allUsers": true, - "idleConnections": true, - "localOps": true - } - }, - { - "$match": { - "command.aggregate": { - "$eq": 1 - } - } - }, - { - "$project": { - "command": 1 - } - }, - { - "$project": { - "command.lsid": 0 - } - } - ], - "allowDiskUse": true - } - }, - "outcome": { - "result": [ - { - "command": { - "aggregate": 1, - "pipeline": [ - { - "$currentOp": { - "allUsers": true, - "idleConnections": true - } - }, - { - "$match": { - "command.aggregate": { - "$eq": 1 - } - } - }, - { - "$project": { - "command": 1 - } - }, - { - "$project": { - "command.lsid": 0 - } - } - ], - "allowDiskUse": true, - "cursor": {}, - "$db": "admin" - } - } - ] - } - } - ] -} diff --git a/test/spec/crud/db/db-aggregate.yml b/test/spec/crud/db/db-aggregate.yml deleted file mode 100644 index 6ef3904607a..00000000000 --- a/test/spec/crud/db/db-aggregate.yml +++ /dev/null @@ -1,60 +0,0 @@ -database_name: &database_name "admin" - -data: [] -minServerVersion: '4.0' -tests: - - - description: "Aggregate with $currentOp" - operation: - name: aggregate - object: database - arguments: - pipeline: - - $currentOp: {allUsers: false, idleConnections: false} - - $match: - command.aggregate: {$eq: 1} - - $project: {command: 1} - - $project: {command.lsid: 0} - - outcome: - result: - - - command: - aggregate: 1 - pipeline: - - $currentOp: {allUsers: false, idleConnections: false} - - $match: - command.aggregate: {$eq: 1} - - $project: {command: 1} - - $project: {command.lsid: 0} - cursor: {} - $db: "admin" - - - - description: "Aggregate with $currentOp and allowDiskUse" - operation: - name: aggregate - object: database - arguments: - pipeline: - - $currentOp: {allUsers: true, idleConnections: true} - - $match: - command.aggregate: {$eq: 1} - - $project: {command: 1} - - $project: {command.lsid: 0} - allowDiskUse: true - - outcome: - result: - - - command: - aggregate: 1 - pipeline: - - $currentOp: {allUsers: true, idleConnections: true} - - $match: - command.aggregate: {$eq: 1} - - $project: {command: 1} - - $project: {command.lsid: 0} - allowDiskUse: true - cursor: {} - $db: "admin" diff --git a/test/spec/crud/v2/aggregate-merge.json 
b/test/spec/crud/v2/aggregate-merge.json index c73cf0a0194..c61736a0bbf 100644 --- a/test/spec/crud/v2/aggregate-merge.json +++ b/test/spec/crud/v2/aggregate-merge.json @@ -1,4 +1,9 @@ { + "runOn": [ + { + "minServerVersion": "4.1.11" + } + ], "data": [ { "_id": 1, @@ -13,7 +18,6 @@ "x": 33 } ], - "minServerVersion": "4.2.0", "collection_name": "test_aggregate_merge", "tests": [ { diff --git a/test/spec/crud/v2/aggregate-merge.yml b/test/spec/crud/v2/aggregate-merge.yml index 244674b866a..5bc5c68acdb 100644 --- a/test/spec/crud/v2/aggregate-merge.yml +++ b/test/spec/crud/v2/aggregate-merge.yml @@ -1,10 +1,12 @@ +runOn: + - + minServerVersion: "4.1.11" + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } -minServerVersion: '4.2.0' - collection_name: &collection_name 'test_aggregate_merge' tests: diff --git a/test/spec/crud/v2/aggregate-out-readConcern.json b/test/spec/crud/v2/aggregate-out-readConcern.json index 02917c8a170..c39ee0e2815 100644 --- a/test/spec/crud/v2/aggregate-out-readConcern.json +++ b/test/spec/crud/v2/aggregate-out-readConcern.json @@ -1,4 +1,13 @@ { + "runOn": [ + { + "minServerVersion": "4.1.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -13,7 +22,6 @@ "x": 33 } ], - "minServerVersion": "4.1", "collection_name": "test_aggregate_out_readconcern", "tests": [ { diff --git a/test/spec/crud/v2/aggregate-out-readConcern.yml b/test/spec/crud/v2/aggregate-out-readConcern.yml index 97d2dce86ef..0a864f05e77 100644 --- a/test/spec/crud/v2/aggregate-out-readConcern.yml +++ b/test/spec/crud/v2/aggregate-out-readConcern.yml @@ -1,10 +1,13 @@ +runOn: + - + minServerVersion: "4.1.0" + topology: ["replicaset", "sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } -minServerVersion: '4.1' - collection_name: &collection_name 'test_aggregate_out_readconcern' tests: diff --git a/test/spec/crud/v2/bulkWrite-arrayFilters.json b/test/spec/crud/v2/bulkWrite-arrayFilters.json index 97bf3e789ad..be26a337a5a 100644 --- a/test/spec/crud/v2/bulkWrite-arrayFilters.json +++ b/test/spec/crud/v2/bulkWrite-arrayFilters.json @@ -1,4 +1,9 @@ { + "runOn": [ + { + "minServerVersion": "3.5.6" + } + ], "data": [ { "_id": 1, @@ -23,7 +28,6 @@ ] } ], - "minServerVersion": "3.5.6", "collection_name": "test", "database_name": "crud-tests", "tests": [ diff --git a/test/spec/crud/v2/bulkWrite-arrayFilters.yml b/test/spec/crud/v2/bulkWrite-arrayFilters.yml index c1de926aefa..f200f8836d8 100644 --- a/test/spec/crud/v2/bulkWrite-arrayFilters.yml +++ b/test/spec/crud/v2/bulkWrite-arrayFilters.yml @@ -1,9 +1,11 @@ +runOn: + - + minServerVersion: "3.5.6" + data: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} -minServerVersion: '3.5.6' - collection_name: &collection_name "test" database_name: &database_name "crud-tests" diff --git a/test/spec/crud/v2/db-aggregate.json b/test/spec/crud/v2/db-aggregate.json index 940eb8f0298..d88b9e18197 100644 --- a/test/spec/crud/v2/db-aggregate.json +++ b/test/spec/crud/v2/db-aggregate.json @@ -1,6 +1,10 @@ { + "runOn": [ + { + "minServerVersion": "3.6.0" + } + ], "database_name": "admin", - "minServerVersion": "3.6", "tests": [ { "description": "Aggregate with $listLocalSessions", diff --git a/test/spec/crud/v2/db-aggregate.yml b/test/spec/crud/v2/db-aggregate.yml index ec707a86798..e9a814858db 100644 --- a/test/spec/crud/v2/db-aggregate.yml +++ b/test/spec/crud/v2/db-aggregate.yml @@ -1,6 +1,8 @@ -database_name: &database_name "admin" +runOn: + - + minServerVersion: "3.6.0" 
-minServerVersion: '3.6' +database_name: &database_name "admin" tests: - diff --git a/test/spec/crud/v2/find-allowdiskuse.json b/test/spec/crud/v2/find-allowdiskuse.json new file mode 100644 index 00000000000..2df4dbc98ea --- /dev/null +++ b/test/spec/crud/v2/find-allowdiskuse.json @@ -0,0 +1,78 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1" + } + ], + "collection_name": "test_find_allowdiskuse", + "tests": [ + { + "description": "Find does not send allowDiskuse when value is not specified", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": null + } + } + } + ] + }, + { + "description": "Find sends allowDiskuse false when false is specified", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": false + } + } + } + ] + }, + { + "description": "Find sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": true + } + } + } + ] + } + ] +} \ No newline at end of file diff --git a/test/spec/crud/v2/find-allowdiskuse.yml b/test/spec/crud/v2/find-allowdiskuse.yml new file mode 100644 index 00000000000..adfc0000eac --- /dev/null +++ b/test/spec/crud/v2/find-allowdiskuse.yml @@ -0,0 +1,50 @@ +runOn: + - { minServerVersion: "4.3.1" } + +collection_name: &collection_name 'test_find_allowdiskuse' + +tests: + - + description: "Find does not send allowDiskuse when value is not specified" + operations: + - + object: collection + name: find + arguments: + filter: { } + expectations: + - + command_started_event: + command: + find: *collection_name + allowDiskUse: + - + description: "Find sends allowDiskuse false when false is specified" + operations: + - + object: collection + name: find + arguments: + filter: { } + allowDiskUse: false + expectations: + - + command_started_event: + command: + find: *collection_name + allowDiskUse: false + - + description: "Find sends allowDiskUse true when true is specified" + operations: + - + object: collection + name: find + arguments: + filter: { } + allowDiskUse: true + expectations: + - + command_started_event: + command: + find: *collection_name + allowDiskUse: true diff --git a/test/spec/crud/v2/findOneAndReplace-hint-clientError.json b/test/spec/crud/v2/findOneAndReplace-hint-clientError.json new file mode 100644 index 00000000000..08fd4b3ecc0 --- /dev/null +++ b/test/spec/crud/v2/findOneAndReplace-hint-clientError.json @@ -0,0 +1,90 @@ +{ + "runOn": [ + { + "maxServerVersion": "4.0.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndReplace_hint", + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, 
+ "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/crud/v2/findOneAndReplace-hint-clientError.yml b/test/spec/crud/v2/findOneAndReplace-hint-clientError.yml new file mode 100644 index 00000000000..f50782d3384 --- /dev/null +++ b/test/spec/crud/v2/findOneAndReplace-hint-clientError.yml @@ -0,0 +1,40 @@ +runOn: + - { maxServerVersion: "4.0.99" } + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +collection_name: &collection_name 'findOneAndReplace_hint' + +tests: + - + description: "FindOneAndReplace with hint string unsupported (client-side error)" + operations: + - + object: collection + name: findOneAndReplace + arguments: + filter: &filter { _id: 1 } + replacement: &replacement { x: 33 } + hint: "_id_" + error: true + expectations: [] + outcome: &outcome + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - + description: "FindOneAndReplace with hint document unsupported (client-side error)" + operations: + - + object: collection + name: findOneAndReplace + arguments: + filter: *filter + replacement: *replacement + hint: { _id: 1 } + error: true + expectations: [] + outcome: *outcome diff --git a/test/spec/crud/v2/findOneAndReplace-hint-serverError.json b/test/spec/crud/v2/findOneAndReplace-hint-serverError.json new file mode 100644 index 00000000000..6710e6a70e8 --- /dev/null +++ b/test/spec/crud/v2/findOneAndReplace-hint-serverError.json @@ -0,0 +1,123 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndReplace_hint", + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/crud/v2/findOneAndReplace-hint-serverError.yml b/test/spec/crud/v2/findOneAndReplace-hint-serverError.yml new file mode 100644 index 
00000000000..41812658a1b --- /dev/null +++ b/test/spec/crud/v2/findOneAndReplace-hint-serverError.yml @@ -0,0 +1,59 @@ +runOn: + # These tests assert that the driver does not raise client-side errors and + # instead relies on the server to raise an error. Server versions >= 4.1.10 + # raise errors for unknown findAndModify options (SERVER-40005), but the spec + # requires client-side errors for < 4.2. Support for findAndModify hint was + # added in 4.3.1 (SERVER-42099), so we'll allow up to 4.3.0 (inclusive). + - { minServerVersion: "4.2.0", maxServerVersion: "4.3.0" } + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +collection_name: &collection_name 'findOneAndReplace_hint' + +tests: + - + description: "FindOneAndReplace with hint string unsupported (server-side error)" + operations: + - + object: collection + name: findOneAndReplace + arguments: + filter: &filter { _id: 1 } + replacement: &replacement { x: 33 } + hint: "_id_" + error: true + expectations: + - + command_started_event: + command: + findAndModify: *collection_name + query: *filter + update: *replacement + hint: "_id_" + outcome: &outcome + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - + description: "FindOneAndReplace with hint document unsupported (server-side error)" + operations: + - + object: collection + name: findOneAndReplace + arguments: + filter: *filter + replacement: *replacement + hint: { _id: 1 } + error: true + expectations: + - + command_started_event: + command: + findAndModify: *collection_name + query: *filter + update: *replacement + hint: { _id: 1 } + outcome: *outcome diff --git a/test/spec/crud/v2/findOneAndReplace-hint.json b/test/spec/crud/v2/findOneAndReplace-hint.json new file mode 100644 index 00000000000..263fdf96239 --- /dev/null +++ b/test/spec/crud/v2/findOneAndReplace-hint.json @@ -0,0 +1,128 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndReplace_hint", + "tests": [ + { + "description": "FindOneAndReplace with hint string", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace with hint document", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/crud/v2/findOneAndReplace-hint.yml b/test/spec/crud/v2/findOneAndReplace-hint.yml new file mode 100644 index 00000000000..b585dd867b3 --- /dev/null +++ b/test/spec/crud/v2/findOneAndReplace-hint.yml @@ -0,0 +1,55 @@ 
+runOn: + - { minServerVersion: "4.3.1" } + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +collection_name: &collection_name 'findOneAndReplace_hint' + +tests: + - + description: "FindOneAndReplace with hint string" + operations: + - + object: collection + name: findOneAndReplace + arguments: + filter: &filter { _id: 1 } + replacement: &replacement { x: 33 } + hint: "_id_" + # original document is returned by default + result: &result { _id: 1, x: 11 } + expectations: + - + command_started_event: + command: + findAndModify: *collection_name + query: *filter + update: *replacement + hint: "_id_" + outcome: &outcome + collection: + data: + - { _id: 1, x: 33 } + - { _id: 2, x: 22 } + - + description: "FindOneAndReplace with hint document" + operations: + - + object: collection + name: findOneAndReplace + arguments: + filter: *filter + replacement: *replacement + hint: { _id: 1 } + result: *result + expectations: + - + command_started_event: + command: + findAndModify: *collection_name + query: *filter + update: *replacement + hint: { _id: 1 } + outcome: *outcome diff --git a/test/spec/crud/v2/findOneAndUpdate-hint-clientError.json b/test/spec/crud/v2/findOneAndUpdate-hint-clientError.json new file mode 100644 index 00000000000..8cd5cddb51e --- /dev/null +++ b/test/spec/crud/v2/findOneAndUpdate-hint-clientError.json @@ -0,0 +1,94 @@ +{ + "runOn": [ + { + "maxServerVersion": "4.0.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndUpdate_hint", + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/crud/v2/findOneAndUpdate-hint-clientError.yml b/test/spec/crud/v2/findOneAndUpdate-hint-clientError.yml new file mode 100644 index 00000000000..aec3e244d25 --- /dev/null +++ b/test/spec/crud/v2/findOneAndUpdate-hint-clientError.yml @@ -0,0 +1,40 @@ +runOn: + - { maxServerVersion: "4.0.99" } + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +collection_name: &collection_name 'findOneAndUpdate_hint' + +tests: + - + description: "FindOneAndUpdate with hint string unsupported (client-side error)" + operations: + - + object: collection + name: findOneAndUpdate + arguments: + filter: &filter { _id: 1 } + update: &update { $inc: { x: 1 }} + hint: "_id_" + error: true + expectations: [] + outcome: &outcome + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - + description: "FindOneAndUpdate with hint document unsupported (client-side error)" + operations: + - + object: collection + name: findOneAndUpdate + arguments: + filter: *filter + update: *update + hint: { _id: 1 } + error: true + expectations: [] + outcome: *outcome 
diff --git a/test/spec/crud/v2/findOneAndUpdate-hint-serverError.json b/test/spec/crud/v2/findOneAndUpdate-hint-serverError.json new file mode 100644 index 00000000000..1f4b2bda8b6 --- /dev/null +++ b/test/spec/crud/v2/findOneAndUpdate-hint-serverError.json @@ -0,0 +1,131 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndUpdate_hint", + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/crud/v2/findOneAndUpdate-hint-serverError.yml b/test/spec/crud/v2/findOneAndUpdate-hint-serverError.yml new file mode 100644 index 00000000000..06f788a62b1 --- /dev/null +++ b/test/spec/crud/v2/findOneAndUpdate-hint-serverError.yml @@ -0,0 +1,58 @@ +runOn: + # These tests assert that the driver does not raise client-side errors and + # instead relies on the server to raise an error. Support for findAndModify + # hint was added in 4.3.1 (SERVER-42099), so we'll allow up to 4.3.0 + # (inclusive). 
+ - { minServerVersion: "4.2.0", maxServerVersion: "4.3.0" } + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +collection_name: &collection_name 'findOneAndUpdate_hint' + +tests: + - + description: "FindOneAndUpdate with hint string unsupported (server-side error)" + operations: + - + object: collection + name: findOneAndUpdate + arguments: + filter: &filter { _id: 1 } + update: &update { $inc: { x: 1 }} + hint: "_id_" + error: true + expectations: + - + command_started_event: + command: + findAndModify: *collection_name + query: *filter + update: *update + hint: "_id_" + outcome: &outcome + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - + description: "FindOneAndUpdate with hint document unsupported (server-side error)" + operations: + - + object: collection + name: findOneAndUpdate + arguments: + filter: *filter + update: *update + hint: { _id: 1 } + error: true + expectations: + - + command_started_event: + command: + findAndModify: *collection_name + query: *filter + update: *update + hint: { _id: 1 } + outcome: *outcome diff --git a/test/spec/crud/v2/findOneAndUpdate-hint.json b/test/spec/crud/v2/findOneAndUpdate-hint.json new file mode 100644 index 00000000000..451eecc0138 --- /dev/null +++ b/test/spec/crud/v2/findOneAndUpdate-hint.json @@ -0,0 +1,136 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndUpdate_hint", + "tests": [ + { + "description": "FindOneAndUpdate with hint string", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate with hint document", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/crud/v2/findOneAndUpdate-hint.yml b/test/spec/crud/v2/findOneAndUpdate-hint.yml new file mode 100644 index 00000000000..36b169c2a1b --- /dev/null +++ b/test/spec/crud/v2/findOneAndUpdate-hint.yml @@ -0,0 +1,55 @@ +runOn: + - { minServerVersion: "4.3.1" } + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +collection_name: &collection_name 'findOneAndUpdate_hint' + +tests: + - + description: "FindOneAndUpdate with hint string" + operations: + - + object: collection + name: findOneAndUpdate + arguments: + filter: &filter { _id: 1 } + update: &update { $inc: { x: 1 }} + hint: "_id_" + # original document is returned by default + result: &result { _id: 1, x: 11 } + expectations: + - + command_started_event: + 
command: + findAndModify: *collection_name + query: *filter + update: *update + hint: "_id_" + outcome: &outcome + collection: + data: + - { _id: 1, x: 12 } + - { _id: 2, x: 22 } + - + description: "FindOneAndUpdate with hint document" + operations: + - + object: collection + name: findOneAndUpdate + arguments: + filter: *filter + update: *update + hint: { _id: 1 } + result: *result + expectations: + - + command_started_event: + command: + findAndModify: *collection_name + query: *filter + update: *update + hint: { _id: 1 } + outcome: *outcome diff --git a/test/spec/crud/v2/updateWithPipelines.json b/test/spec/crud/v2/updateWithPipelines.json index 02286b1a288..a310f2825f2 100644 --- a/test/spec/crud/v2/updateWithPipelines.json +++ b/test/spec/crud/v2/updateWithPipelines.json @@ -1,4 +1,9 @@ { + "runOn": [ + { + "minServerVersion": "4.1.11" + } + ], "data": [ { "_id": 1, @@ -16,7 +21,6 @@ "y": 1 } ], - "minServerVersion": "4.1.11", "collection_name": "test", "database_name": "crud-tests", "tests": [ @@ -234,6 +238,171 @@ ] } } + }, + { + "description": "UpdateOne in bulk write using pipelines", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + ] + }, + "command_name": "update", + "database_name": "crud-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + } + }, + { + "description": "UpdateMany in bulk write using pipelines", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateMany", + "arguments": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "result": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ] + }, + "command_name": "update", + "database_name": "crud-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + } } ] } diff --git a/test/spec/crud/v2/updateWithPipelines.yml b/test/spec/crud/v2/updateWithPipelines.yml index 534a869d6d0..08eef7b0b33 100644 --- a/test/spec/crud/v2/updateWithPipelines.yml +++ b/test/spec/crud/v2/updateWithPipelines.yml @@ -1,9 +1,11 @@ +runOn: + - + minServerVersion: "4.1.11" + data: - { _id: 1, x: 1, y: 1, t: {u: {v: 1}} } - { _id: 2, x: 2, y: 1 } -minServerVersion: '4.1.11' - collection_name: &collection_name "test" database_name: &database_name "crud-tests" @@ -88,3 +90,68 @@ tests: data: - { _id: 1, x: 1, foo: 1 } - { _id: 2, x: 2, y: 1 } + - + description: "UpdateOne in bulk write using pipelines" + operations: + - + name: "bulkWrite" + arguments: + requests: + - + name: "updateOne" + arguments: + 
filter: { _id: 1 } + update: [ { $replaceRoot: { newRoot: "$t" } }, { $addFields: { foo: 1 } } ] + result: + matchedCount: 1 + modifiedCount: 1 + upsertedCount: 0 + expectations: + - + command_started_event: + command: + update: *collection_name + updates: + - + q: { _id: 1 } + u: [ { $replaceRoot: { newRoot: "$t" } }, { $addFields: { foo: 1 } } ] + command_name: update + database_name: *database_name + outcome: + collection: + data: + - { _id: 1, u: {v: 1}, foo: 1 } + - { _id: 2, x: 2, y: 1 } + - + description: "UpdateMany in bulk write using pipelines" + operations: + - + name: "bulkWrite" + arguments: + requests: + - + name: "updateMany" + arguments: + filter: {} + update: [ { $project: { x: 1 } }, { $addFields: { foo: 1 } } ] + result: + matchedCount: 2 + modifiedCount: 2 + upsertedCount: 0 + expectations: + - + command_started_event: + command: + update: *collection_name + updates: + - + q: { } + u: [ { $project: { x: 1 } }, { $addFields: { foo: 1 } } ] + multi: true + command_name: update + database_name: *database_name + outcome: + collection: + data: + - { _id: 1, x: 1, foo: 1 } + - { _id: 2, x: 2, foo: 1 } diff --git a/test/spec/dns-txt-records/README.rst b/test/spec/dns-txt-records/README.rst deleted file mode 100644 index 5999557948e..00000000000 --- a/test/spec/dns-txt-records/README.rst +++ /dev/null @@ -1,92 +0,0 @@ -==================================== -Initial DNS Seedlist Discovery tests -==================================== - -This directory contains platform-independent tests that drivers can use -to prove their conformance to the Initial DNS Seedlist Discovery spec. - -Test Setup ----------- - -Start a three-node replica set on localhost, on ports 27017, 27018, and 27019, -with replica set name "repl0". The replica set MUST be started with SSL -enabled. - -To run the tests that accompany this spec, you need to configure the SRV and -TXT records with a real name server. The following records are required for -these tests:: - - Record TTL Class Address - localhost.test.build.10gen.cc. 86400 IN A 127.0.0.1 - localhost.sub.test.build.10gen.cc. 86400 IN A 127.0.0.1 - - Record TTL Class Port Target - _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. - _mongodb._tcp.test2.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. - _mongodb._tcp.test2.test.build.10gen.cc. 86400 IN SRV 27019 localhost.test.build.10gen.cc. - _mongodb._tcp.test3.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test5.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test6.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test7.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test8.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test10.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test11.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test12.test.build.10gen.cc. 86400 IN SRV 27017 localhost.build.10gen.cc. - _mongodb._tcp.test13.test.build.10gen.cc. 86400 IN SRV 27017 test.build.10gen.cc. - _mongodb._tcp.test14.test.build.10gen.cc. 86400 IN SRV 27017 localhost.not-test.build.10gen.cc. - _mongodb._tcp.test15.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.not-build.10gen.cc. 
- _mongodb._tcp.test16.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.not-10gen.cc. - _mongodb._tcp.test17.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.not-cc. - _mongodb._tcp.test18.test.build.10gen.cc. 86400 IN SRV 27017 localhost.sub.test.build.10gen.cc. - _mongodb._tcp.test19.test.build.10gen.cc. 86400 IN SRV 27017 localhost.evil.build.10gen.cc. - _mongodb._tcp.test19.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - - Record TTL Class Text - test5.test.build.10gen.cc. 86400 IN TXT "replicaSet=repl0&authSource=thisDB" - test6.test.build.10gen.cc. 86400 IN TXT "replicaSet=repl0" - test6.test.build.10gen.cc. 86400 IN TXT "authSource=otherDB" - test7.test.build.10gen.cc. 86400 IN TXT "ssl=false" - test8.test.build.10gen.cc. 86400 IN TXT "authSource" - test10.test.build.10gen.cc. 86400 IN TXT "socketTimeoutMS=500" - test11.test.build.10gen.cc. 86400 IN TXT "replicaS" "et=rep" "l0" - -Note that ``test4`` is omitted deliberately to test what happens with no SRV -record. ``test9`` is missing because it was deleted during the development of -the tests. The missing ``test.`` sub-domain in the SRV record target for -``test12`` is deliberate. - -In our tests we have used ``localhost.test.build.10gen.cc`` as the domain, and -then configured ``localhost.test.build.10gen.cc`` to resolve to 127.0.0.1. - -You need to adapt the records shown above to replace ``test.build.10gen.cc`` -with your own domain name, and update the "uri" field in the YAML or JSON files -in this directory with the actual domain. - -Test Format and Use -------------------- - -These YAML and JSON files contain the following fields: - -- ``uri``: a mongodb+srv connection string -- ``seeds``: the expected set of initial seeds discovered from the SRV record -- ``hosts``: the discovered topology's list of hosts once SDAM completes a scan -- ``options``: the parsed connection string options as discovered from URI and - TXT records -- ``parsed_options``: additional options present in the URI such as user/password -credentials -- ``error``: indicates that the parsing of the URI, or the resolving or - contents of the SRV or TXT records included errors. -- ``comment``: a comment to indicate why a test would fail. - -For each file, create MongoClient initialized with the mongodb+srv connection -string. You SHOULD verify that the client's initial seed list matches the list of -seeds. You MUST verify that the set of ServerDescriptions in the client's -TopologyDescription eventually matches the list of hosts. You MUST verify that -each of the values of the Connection String Options under ``options`` match the -Client's parsed value for that option. There may be other options parsed by -the Client as well, which a test does not verify. In ``uri-with-auth`` the URI -contains a user/password set and additional options are provided in -``parsed_options`` so that tests can verify authentication is maintained when -evaluating URIs. You MUST verify that an error has been thrown if ``error`` is -present. 
diff --git a/test/spec/dns-txt-records/longer-parent-in-return.json b/test/spec/dns-txt-records/longer-parent-in-return.json deleted file mode 100644 index 9a8267eaeb1..00000000000 --- a/test/spec/dns-txt-records/longer-parent-in-return.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test18.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.sub.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - }, - "comment": "Is correct, as returned host name shared the URI root \"test.build.10gen.cc\"." -} diff --git a/test/spec/dns-txt-records/longer-parent-in-return.yml b/test/spec/dns-txt-records/longer-parent-in-return.yml deleted file mode 100644 index e77c4570d3c..00000000000 --- a/test/spec/dns-txt-records/longer-parent-in-return.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test18.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.sub.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true -comment: Is correct, as returned host name shared the URI root "test.build.10gen.cc". diff --git a/test/spec/dns-txt-records/misformatted-option.json b/test/spec/dns-txt-records/misformatted-option.json deleted file mode 100644 index 3c8c29ace68..00000000000 --- a/test/spec/dns-txt-records/misformatted-option.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test8.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because the options in the TXT record are incorrectly formatted (misses value)." -} diff --git a/test/spec/dns-txt-records/misformatted-option.yml b/test/spec/dns-txt-records/misformatted-option.yml deleted file mode 100644 index 9669772cb3d..00000000000 --- a/test/spec/dns-txt-records/misformatted-option.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test8.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because the options in the TXT record are incorrectly formatted (misses value). diff --git a/test/spec/dns-txt-records/no-results.json b/test/spec/dns-txt-records/no-results.json deleted file mode 100644 index c1dc02d281d..00000000000 --- a/test/spec/dns-txt-records/no-results.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test4.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because no SRV records are present for this URI." -} diff --git a/test/spec/dns-txt-records/no-results.yml b/test/spec/dns-txt-records/no-results.yml deleted file mode 100644 index e09bd060c2a..00000000000 --- a/test/spec/dns-txt-records/no-results.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test4.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because no SRV records are present for this URI. diff --git a/test/spec/dns-txt-records/not-enough-parts.json b/test/spec/dns-txt-records/not-enough-parts.json deleted file mode 100644 index 7cfce2ec57e..00000000000 --- a/test/spec/dns-txt-records/not-enough-parts.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because host in URI does not have {hostname}, {domainname} and {tld}." 
-} diff --git a/test/spec/dns-txt-records/not-enough-parts.yml b/test/spec/dns-txt-records/not-enough-parts.yml deleted file mode 100644 index b36fa4a5de4..00000000000 --- a/test/spec/dns-txt-records/not-enough-parts.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because host in URI does not have {hostname}, {domainname} and {tld}. diff --git a/test/spec/dns-txt-records/one-result-default-port.json b/test/spec/dns-txt-records/one-result-default-port.json deleted file mode 100644 index cebb3b1ec32..00000000000 --- a/test/spec/dns-txt-records/one-result-default-port.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "uri": "mongodb+srv://test3.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/one-result-default-port.yml b/test/spec/dns-txt-records/one-result-default-port.yml deleted file mode 100644 index 395bcdc9683..00000000000 --- a/test/spec/dns-txt-records/one-result-default-port.yml +++ /dev/null @@ -1,10 +0,0 @@ -uri: "mongodb+srv://test3.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true diff --git a/test/spec/dns-txt-records/one-txt-record-multiple-strings.json b/test/spec/dns-txt-records/one-txt-record-multiple-strings.json deleted file mode 100644 index 622668c351f..00000000000 --- a/test/spec/dns-txt-records/one-txt-record-multiple-strings.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "uri": "mongodb+srv://test11.test.build.10gen.cc/", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/one-txt-record-multiple-strings.yml b/test/spec/dns-txt-records/one-txt-record-multiple-strings.yml deleted file mode 100644 index 90a702cdbeb..00000000000 --- a/test/spec/dns-txt-records/one-txt-record-multiple-strings.yml +++ /dev/null @@ -1,10 +0,0 @@ -uri: "mongodb+srv://test11.test.build.10gen.cc/" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true diff --git a/test/spec/dns-txt-records/one-txt-record.json b/test/spec/dns-txt-records/one-txt-record.json deleted file mode 100644 index 2385021ad4b..00000000000 --- a/test/spec/dns-txt-records/one-txt-record.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc/", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "authSource": "thisDB", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/one-txt-record.yml b/test/spec/dns-txt-records/one-txt-record.yml deleted file mode 100644 index 9356eaa2c28..00000000000 --- a/test/spec/dns-txt-records/one-txt-record.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc/" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - authSource: thisDB - ssl: true diff --git 
a/test/spec/dns-txt-records/parent-part-mismatch1.json b/test/spec/dns-txt-records/parent-part-mismatch1.json deleted file mode 100644 index 8d0147a48b8..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch1.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test14.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's part \"not-test\" mismatches URI parent part \"test\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch1.yml b/test/spec/dns-txt-records/parent-part-mismatch1.yml deleted file mode 100644 index e35dfdf6d54..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch1.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test14.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's part "not-test" mismatches URI parent part "test". diff --git a/test/spec/dns-txt-records/parent-part-mismatch2.json b/test/spec/dns-txt-records/parent-part-mismatch2.json deleted file mode 100644 index 996249eb99d..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch2.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test15.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's part \"not-build\" mismatches URI parent part \"build\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch2.yml b/test/spec/dns-txt-records/parent-part-mismatch2.yml deleted file mode 100644 index 595e5493c41..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch2.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test15.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's part "not-build" mismatches URI parent part "build". diff --git a/test/spec/dns-txt-records/parent-part-mismatch3.json b/test/spec/dns-txt-records/parent-part-mismatch3.json deleted file mode 100644 index 69e724af6c7..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch3.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test16.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's part \"not-10gen\" mismatches URI parent part \"10gen\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch3.yml b/test/spec/dns-txt-records/parent-part-mismatch3.yml deleted file mode 100644 index 64ca2e708d6..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch3.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test16.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's part "not-10gen" mismatches URI parent part "10gen". diff --git a/test/spec/dns-txt-records/parent-part-mismatch4.json b/test/spec/dns-txt-records/parent-part-mismatch4.json deleted file mode 100644 index 254168e34ca..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch4.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test17.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's TLD \"not-cc\" mismatches URI TLD \"cc\"." 
-} diff --git a/test/spec/dns-txt-records/parent-part-mismatch4.yml b/test/spec/dns-txt-records/parent-part-mismatch4.yml deleted file mode 100644 index 226d6fa3bc1..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch4.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test17.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's TLD "not-cc" mismatches URI TLD "cc". diff --git a/test/spec/dns-txt-records/parent-part-mismatch5.json b/test/spec/dns-txt-records/parent-part-mismatch5.json deleted file mode 100644 index 92c024b4f34..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch5.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test19.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because one of the returned host names' domain name parts \"evil\" mismatches \"test\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch5.yml b/test/spec/dns-txt-records/parent-part-mismatch5.yml deleted file mode 100644 index 1ed2bda4ebe..00000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch5.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test19.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because one of the returned host names' domain name parts "evil" mismatches "test". diff --git a/test/spec/dns-txt-records/returned-parent-too-short.json b/test/spec/dns-txt-records/returned-parent-too-short.json deleted file mode 100644 index 676eb0c0d09..00000000000 --- a/test/spec/dns-txt-records/returned-parent-too-short.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test13.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's parent (build.10gen.cc) misses \"test.\"" -} diff --git a/test/spec/dns-txt-records/returned-parent-too-short.yml b/test/spec/dns-txt-records/returned-parent-too-short.yml deleted file mode 100644 index 397aec89539..00000000000 --- a/test/spec/dns-txt-records/returned-parent-too-short.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test13.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's parent (build.10gen.cc) misses "test." diff --git a/test/spec/dns-txt-records/returned-parent-wrong.json b/test/spec/dns-txt-records/returned-parent-wrong.json deleted file mode 100644 index 3aabfd81962..00000000000 --- a/test/spec/dns-txt-records/returned-parent-wrong.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test12.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name is too short and mismatches a parent." -} diff --git a/test/spec/dns-txt-records/returned-parent-wrong.yml b/test/spec/dns-txt-records/returned-parent-wrong.yml deleted file mode 100644 index 1fc3867a0eb..00000000000 --- a/test/spec/dns-txt-records/returned-parent-wrong.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test12.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name is too short and mismatches a parent. 
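Taken together, the not-enough-parts and parent-part-mismatch/returned-parent cases removed above all encode one rule from the initial DNS seedlist discovery spec: the ``mongodb+srv`` hostname must contain at least three domain parts, and every host returned by the SRV lookup must end with the parent domain of that hostname. A minimal sketch of the check follows; the helper name and shape are illustrative, not the driver's actual API:

.. code:: javascript

   // Illustrative only: sketches the validation the deleted tests exercise.
   // The mongodb+srv hostname itself must have {hostname}, {domainname} and
   // {tld}, i.e. at least three dot-separated parts, before this check runs.
   function matchesParentDomain(srvHost, queryHost) {
     // "test12.test.build.10gen.cc" has the parent domain "test.build.10gen.cc"
     const parentDomain = queryHost.split('.').slice(1).join('.');
     const normalized = srvHost.endsWith('.') ? srvHost.slice(0, -1) : srvHost;
     return normalized.endsWith(`.${parentDomain}`);
   }

   // "localhost.test.build.10gen.cc" is accepted for test12.test.build.10gen.cc;
   // "localhost.build.10gen.cc" (parent too short) and
   // "localhost.not-test.build.10gen.cc" (part mismatch) are rejected.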
diff --git a/test/spec/dns-txt-records/two-results-default-port.json b/test/spec/dns-txt-records/two-results-default-port.json deleted file mode 100644 index 66028310a6d..00000000000 --- a/test/spec/dns-txt-records/two-results-default-port.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.test.build.10gen.cc:27017", - "localhost.test.build.10gen.cc:27018" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/two-results-default-port.yml b/test/spec/dns-txt-records/two-results-default-port.yml deleted file mode 100644 index 61d38b5e829..00000000000 --- a/test/spec/dns-txt-records/two-results-default-port.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.test.build.10gen.cc:27017 - - localhost.test.build.10gen.cc:27018 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true diff --git a/test/spec/dns-txt-records/two-results-nonstandard-port.json b/test/spec/dns-txt-records/two-results-nonstandard-port.json deleted file mode 100644 index 4900f7cff1c..00000000000 --- a/test/spec/dns-txt-records/two-results-nonstandard-port.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test2.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.test.build.10gen.cc:27018", - "localhost.test.build.10gen.cc:27019" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/two-results-nonstandard-port.yml b/test/spec/dns-txt-records/two-results-nonstandard-port.yml deleted file mode 100644 index 7185f52cd68..00000000000 --- a/test/spec/dns-txt-records/two-results-nonstandard-port.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test2.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.test.build.10gen.cc:27018 - - localhost.test.build.10gen.cc:27019 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true diff --git a/test/spec/dns-txt-records/two-txt-records.json b/test/spec/dns-txt-records/two-txt-records.json deleted file mode 100644 index f0654ef6cb2..00000000000 --- a/test/spec/dns-txt-records/two-txt-records.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test6.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because there are two TXT records." -} diff --git a/test/spec/dns-txt-records/two-txt-records.yml b/test/spec/dns-txt-records/two-txt-records.yml deleted file mode 100644 index c6093613d4c..00000000000 --- a/test/spec/dns-txt-records/two-txt-records.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test6.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because there are two TXT records. 
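The two-txt-records case above and the txt-record cases that follow pin down the TXT half of the same discovery spec: at most one TXT record may exist for the hostname, a record's character strings are concatenated before parsing, and only ``authSource`` and ``replicaSet`` may be specified there, with URI options overriding TXT options. A rough sketch under those rules (the helper and its callback shape are hypothetical):

.. code:: javascript

   const dns = require('dns');
   const { URLSearchParams } = require('url');

   // Rough sketch of the TXT record handling these tests encode.
   function resolveTxtOptions(hostname, callback) {
     dns.resolveTxt(hostname, (err, records) => {
       if (err) return callback(err);
       if (records.length > 1) {
         return callback(new Error('multiple TXT records are not allowed'));
       }
       // A single record's character strings are concatenated before parsing.
       const options = new URLSearchParams(records.length ? records[0].join('') : '');
       for (const key of options.keys()) {
         if (key !== 'authSource' && key !== 'replicaSet') {
           return callback(new Error(`${key} is not an allowed TXT record option`));
         }
       }
       callback(null, options);
     });
   }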
diff --git a/test/spec/dns-txt-records/txt-record-not-allowed-option.json b/test/spec/dns-txt-records/txt-record-not-allowed-option.json deleted file mode 100644 index 2a5cf2f0070..00000000000 --- a/test/spec/dns-txt-records/txt-record-not-allowed-option.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test10.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because socketTimeoutMS is not an allowed option." -} diff --git a/test/spec/dns-txt-records/txt-record-not-allowed-option.yml b/test/spec/dns-txt-records/txt-record-not-allowed-option.yml deleted file mode 100644 index f4ff1cfd156..00000000000 --- a/test/spec/dns-txt-records/txt-record-not-allowed-option.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test10.test.build.10gen.cc/?replicaSet=repl0" -seeds: [] -hosts: [] -error: true -comment: Should fail because socketTimeoutMS is not an allowed option. diff --git a/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.json b/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.json deleted file mode 100644 index 0ebc737bd5f..00000000000 --- a/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc/?ssl=false", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "authSource": "thisDB", - "ssl": false - } -} diff --git a/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.yml b/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.yml deleted file mode 100644 index 2a922aa2342..00000000000 --- a/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc/?ssl=false" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - authSource: thisDB - ssl: false diff --git a/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.json b/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.json deleted file mode 100644 index 2626ba60839..00000000000 --- a/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc/?authSource=otherDB", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "authSource": "otherDB", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.yml b/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.yml deleted file mode 100644 index a9015599e7c..00000000000 --- a/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc/?authSource=otherDB" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - authSource: otherDB - ssl: true diff --git a/test/spec/dns-txt-records/txt-record-with-unallowed-option.json b/test/spec/dns-txt-records/txt-record-with-unallowed-option.json deleted file mode 100644 index 0d333a459dd..00000000000 --- a/test/spec/dns-txt-records/txt-record-with-unallowed-option.json 
+++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test7.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because \"ssl\" is not an allowed option." -} diff --git a/test/spec/dns-txt-records/txt-record-with-unallowed-option.yml b/test/spec/dns-txt-records/txt-record-with-unallowed-option.yml deleted file mode 100644 index ba3877ee9f9..00000000000 --- a/test/spec/dns-txt-records/txt-record-with-unallowed-option.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test7.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because "ssl" is not an allowed option. diff --git a/test/spec/dns-txt-records/uri-with-auth.json b/test/spec/dns-txt-records/uri-with-auth.json deleted file mode 100644 index cc7257d85ba..00000000000 --- a/test/spec/dns-txt-records/uri-with-auth.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "uri": "mongodb+srv://auser:apass@test1.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.test.build.10gen.cc:27017", - "localhost.test.build.10gen.cc:27018" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "parsed_options": { - "user": "auser", - "password": "apass" - }, - "comment": "Should preserve auth credentials" -} diff --git a/test/spec/dns-txt-records/uri-with-auth.yml b/test/spec/dns-txt-records/uri-with-auth.yml deleted file mode 100644 index 9ecfca73ea9..00000000000 --- a/test/spec/dns-txt-records/uri-with-auth.yml +++ /dev/null @@ -1,12 +0,0 @@ -uri: "mongodb+srv://auser:apass@test1.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.test.build.10gen.cc:27017 - - localhost.test.build.10gen.cc:27018 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -parsed_options: - user: auser - password: apass -comment: Should preserve auth credentials diff --git a/test/spec/dns-txt-records/uri-with-port.json b/test/spec/dns-txt-records/uri-with-port.json deleted file mode 100644 index b981e2a1bfc..00000000000 --- a/test/spec/dns-txt-records/uri-with-port.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc:8123/?replicaSet=repl0", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because the mongodb+srv URI includes a port." -} diff --git a/test/spec/dns-txt-records/uri-with-port.yml b/test/spec/dns-txt-records/uri-with-port.yml deleted file mode 100644 index f1944dcdd9a..00000000000 --- a/test/spec/dns-txt-records/uri-with-port.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc:8123/?replicaSet=repl0" -seeds: [] -hosts: [] -error: true -comment: Should fail because the mongodb+srv URI includes a port. diff --git a/test/spec/dns-txt-records/uri-with-two-hosts.json b/test/spec/dns-txt-records/uri-with-two-hosts.json deleted file mode 100644 index 5261a39cfa7..00000000000 --- a/test/spec/dns-txt-records/uri-with-two-hosts.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc,test6.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because the mongodb+srv URI includes two host names." 
-} diff --git a/test/spec/dns-txt-records/uri-with-two-hosts.yml b/test/spec/dns-txt-records/uri-with-two-hosts.yml deleted file mode 100644 index 3b2189d48b1..00000000000 --- a/test/spec/dns-txt-records/uri-with-two-hosts.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc,test6.test.build.10gen.cc/?replicaSet=repl0" -seeds: [] -hosts: [] -error: true -comment: Should fail because the mongodb+srv URI includes two host names. diff --git a/test/spec/read-write-concern/README.rst b/test/spec/read-write-concern/README.rst new file mode 100644 index 00000000000..5995590136a --- /dev/null +++ b/test/spec/read-write-concern/README.rst @@ -0,0 +1,80 @@ +============================ +Read and Write Concern Tests +============================ + +The YAML and JSON files in this directory tree are platform-independent tests +that drivers can use to prove their conformance to the Read and Write Concern +specification. + +Version +------- + +Files in the "specifications" repository have no version scheme. They are not +tied to a MongoDB server version. + +Format +------ + +Connection String +~~~~~~~~~~~~~~~~~ + +These tests are designed to exercise the connection string parsing related +to read concern and write concern. + +Each YAML file contains an object with a single ``tests`` key. This key is an +array of test case objects, each of which has the following keys: + +- ``description``: A string describing the test. +- ``uri``: A string containing the URI to be parsed. +- ``valid:``: a boolean indicating whether the URI should parse without error. +- ``writeConcern:`` A document indicating the expected write concern. +- ``readConcern:`` A document indicating the expected read concern. + +If a test case includes a null value for one of these keys, or if the key is missing, +no assertion is necessary. This both simplifies parsing of the test files and allows flexibility +for drivers that might substitute default values *during* parsing. + +Document +~~~~~~~~ + +These tests are designed to ensure compliance with the spec in relation to what should be +sent to the server. + +Each YAML file contains an object with a single ``tests`` key. This key is an +array of test case objects, each of which has the following keys: + +- ``description``: A string describing the test. +- ``valid:``: a boolean indicating if the write concern created from the document is valid. +- ``writeConcern:`` A document indicating the write concern to use. +- ``writeConcernDocument:`` A document indicating the write concern to be sent to the server. +- ``readConcern:`` A document indicating the read concern to use. +- ``readConcernDocument:`` A document indicating the read concern to be sent to the server. +- ``isServerDefault:`` Indicates whether the read or write concern is considered the server's default. +- ``isAcknowledged:`` Indicates if the write concern should be considered acknowledged. + +Operation +~~~~~~~~~ + +These tests check that the default write concern is omitted in operations. + +The spec test format is an extension of `transactions spec tests <../../transactions/tests#test-format>`_ with the following additions: + +- ``writeConcern`` in the ``databaseOptions`` or ``collectionOptions`` may be an empty document to indicate a `server default write concern `_. For example, in libmongoc: + + .. code:: c + + /* Create a default write concern, and set on a collection object.
*/ + mongoc_write_concern_t *wc = mongoc_write_concern_new (); + mongoc_collection_set_write_concern (collection, wc); + + If the driver has no way to explicitly set a default write concern on a database or collection, ignore the empty ``writeConcern`` document and continue with the test. +- The operations ``createIndex``, ``dropIndex`` are introduced. + + +Use as unit tests +================= + +Testing whether a URI is valid or not should simply be a matter of checking +whether URI parsing raises an error or exception. +Testing for emitted warnings may require more legwork (e.g. configuring a log +handler and watching for output). diff --git a/test/spec/read-write-concern/connection-string/read-concern.json b/test/spec/read-write-concern/connection-string/read-concern.json new file mode 100644 index 00000000000..1ecad8c268d --- /dev/null +++ b/test/spec/read-write-concern/connection-string/read-concern.json @@ -0,0 +1,47 @@ +{ + "tests": [ + { + "description": "Default", + "uri": "mongodb://localhost/", + "valid": true, + "warning": false, + "readConcern": {} + }, + { + "description": "local specified", + "uri": "mongodb://localhost/?readConcernLevel=local", + "valid": true, + "warning": false, + "readConcern": { + "level": "local" + } + }, + { + "description": "majority specified", + "uri": "mongodb://localhost/?readConcernLevel=majority", + "valid": true, + "warning": false, + "readConcern": { + "level": "majority" + } + }, + { + "description": "linearizable specified", + "uri": "mongodb://localhost/?readConcernLevel=linearizable", + "valid": true, + "warning": false, + "readConcern": { + "level": "linearizable" + } + }, + { + "description": "available specified", + "uri": "mongodb://localhost/?readConcernLevel=available", + "valid": true, + "warning": false, + "readConcern": { + "level": "available" + } + } + ] +} diff --git a/test/spec/read-write-concern/connection-string/read-concern.yml b/test/spec/read-write-concern/connection-string/read-concern.yml new file mode 100644 index 00000000000..499405fbae3 --- /dev/null +++ b/test/spec/read-write-concern/connection-string/read-concern.yml @@ -0,0 +1,32 @@ +tests: + - + description: "Default" + uri: "mongodb://localhost/" + valid: true + warning: false + readConcern: { } + - + description: "local specified" + uri: "mongodb://localhost/?readConcernLevel=local" + valid: true + warning: false + readConcern: { level: "local" } + - + description: "majority specified" + uri: "mongodb://localhost/?readConcernLevel=majority" + valid: true + warning: false + readConcern: { level: "majority" } + - + description: "linearizable specified" + uri: "mongodb://localhost/?readConcernLevel=linearizable" + valid: true + warning: false + readConcern: { level: "linearizable" } + - + description: "available specified" + uri: "mongodb://localhost/?readConcernLevel=available" + valid: true + warning: false + readConcern: { level: "available" } + diff --git a/test/spec/read-write-concern/connection-string/write-concern.json b/test/spec/read-write-concern/connection-string/write-concern.json new file mode 100644 index 00000000000..51bdf821c34 --- /dev/null +++ b/test/spec/read-write-concern/connection-string/write-concern.json @@ -0,0 +1,118 @@ +{ + "tests": [ + { + "description": "Default", + "uri": "mongodb://localhost/", + "valid": true, + "warning": false, + "writeConcern": {} + }, + { + "description": "w as a valid number", + "uri": "mongodb://localhost/?w=1", + "valid": true, + "warning": false, + "writeConcern": { + "w": 1 + } + }, + { + "description": "w 
as an invalid number", + "uri": "mongodb://localhost/?w=-2", + "valid": false, + "warning": null + }, + { + "description": "w as a string", + "uri": "mongodb://localhost/?w=majority", + "valid": true, + "warning": false, + "writeConcern": { + "w": "majority" + } + }, + { + "description": "wtimeoutMS as a valid number", + "uri": "mongodb://localhost/?wtimeoutMS=500", + "valid": true, + "warning": false, + "writeConcern": { + "wtimeoutMS": 500 + } + }, + { + "description": "wtimeoutMS as an invalid number", + "uri": "mongodb://localhost/?wtimeoutMS=-500", + "valid": false, + "warning": null + }, + { + "description": "journal as false", + "uri": "mongodb://localhost/?journal=false", + "valid": true, + "warning": false, + "writeConcern": { + "journal": false + } + }, + { + "description": "journal as true", + "uri": "mongodb://localhost/?journal=true", + "valid": true, + "warning": false, + "writeConcern": { + "journal": true + } + }, + { + "description": "All options combined", + "uri": "mongodb://localhost/?w=3&wtimeoutMS=500&journal=true", + "valid": true, + "warning": false, + "writeConcern": { + "w": 3, + "wtimeoutMS": 500, + "journal": true + } + }, + { + "description": "Unacknowledged with w", + "uri": "mongodb://localhost/?w=0", + "valid": true, + "warning": false, + "writeConcern": { + "w": 0 + } + }, + { + "description": "Unacknowledged with w and journal", + "uri": "mongodb://localhost/?w=0&journal=false", + "valid": true, + "warning": false, + "writeConcern": { + "w": 0, + "journal": false + } + }, + { + "description": "Unacknowledged with w and wtimeoutMS", + "uri": "mongodb://localhost/?w=0&wtimeoutMS=500", + "valid": true, + "warning": false, + "writeConcern": { + "w": 0, + "wtimeoutMS": 500 + } + }, + { + "description": "Acknowledged with w as 0 and journal true", + "uri": "mongodb://localhost/?w=0&journal=true", + "valid": false, + "warning": false, + "writeConcern": { + "w": 0, + "journal": true + } + } + ] +} diff --git a/test/spec/read-write-concern/connection-string/write-concern.yml b/test/spec/read-write-concern/connection-string/write-concern.yml new file mode 100644 index 00000000000..ca610858651 --- /dev/null +++ b/test/spec/read-write-concern/connection-string/write-concern.yml @@ -0,0 +1,77 @@ +tests: + - + description: "Default" + uri: "mongodb://localhost/" + valid: true + warning: false + writeConcern: { } + - + description: "w as a valid number" + uri: "mongodb://localhost/?w=1" + valid: true + warning: false + writeConcern: { w: 1 } + - + description: "w as an invalid number" + uri: "mongodb://localhost/?w=-2" + valid: false + warning: ~ + - + description: "w as a string" + uri: "mongodb://localhost/?w=majority" + valid: true + warning: false + writeConcern: { w: "majority" } + - + description: "wtimeoutMS as a valid number" + uri: "mongodb://localhost/?wtimeoutMS=500" + valid: true + warning: false + writeConcern: { wtimeoutMS: 500 } + - + description: "wtimeoutMS as an invalid number" + uri: "mongodb://localhost/?wtimeoutMS=-500" + valid: false + warning: ~ + - + description: "journal as false" + uri: "mongodb://localhost/?journal=false" + valid: true + warning: false + writeConcern: { journal: false } + - + description: "journal as true" + uri: "mongodb://localhost/?journal=true" + valid: true + warning: false + writeConcern: { journal: true } + - + description: "All options combined" + uri: "mongodb://localhost/?w=3&wtimeoutMS=500&journal=true" + valid: true + warning: false + writeConcern: { w: 3, wtimeoutMS: 500, journal: true } + - + description: 
"Unacknowledged with w" + uri: "mongodb://localhost/?w=0" + valid: true + warning: false + writeConcern: { w: 0 } + - + description: "Unacknowledged with w and journal" + uri: "mongodb://localhost/?w=0&journal=false" + valid: true + warning: false + writeConcern: { w: 0, journal: false } + - + description: "Unacknowledged with w and wtimeoutMS" + uri: "mongodb://localhost/?w=0&wtimeoutMS=500" + valid: true + warning: false + writeConcern: { w: 0, wtimeoutMS: 500 } + - + description: "Acknowledged with w as 0 and journal true" + uri: "mongodb://localhost/?w=0&journal=true" + valid: false + warning: false + writeConcern: { w: 0, journal: true } diff --git a/test/spec/read-write-concern/document/read-concern.json b/test/spec/read-write-concern/document/read-concern.json new file mode 100644 index 00000000000..187397dae57 --- /dev/null +++ b/test/spec/read-write-concern/document/read-concern.json @@ -0,0 +1,66 @@ +{ + "tests": [ + { + "description": "Default", + "valid": true, + "readConcern": {}, + "readConcernDocument": {}, + "isServerDefault": true + }, + { + "description": "Majority", + "valid": true, + "readConcern": { + "level": "majority" + }, + "readConcernDocument": { + "level": "majority" + }, + "isServerDefault": false + }, + { + "description": "Local", + "valid": true, + "readConcern": { + "level": "local" + }, + "readConcernDocument": { + "level": "local" + }, + "isServerDefault": false + }, + { + "description": "Linearizable", + "valid": true, + "readConcern": { + "level": "linearizable" + }, + "readConcernDocument": { + "level": "linearizable" + }, + "isServerDefault": false + }, + { + "description": "Snapshot", + "valid": true, + "readConcern": { + "level": "snapshot" + }, + "readConcernDocument": { + "level": "snapshot" + }, + "isServerDefault": false + }, + { + "description": "Available", + "valid": true, + "readConcern": { + "level": "available" + }, + "readConcernDocument": { + "level": "available" + }, + "isServerDefault": false + } + ] +} diff --git a/test/spec/read-write-concern/document/read-concern.yml b/test/spec/read-write-concern/document/read-concern.yml new file mode 100644 index 00000000000..7d774752d97 --- /dev/null +++ b/test/spec/read-write-concern/document/read-concern.yml @@ -0,0 +1,37 @@ +tests: + - + description: "Default" + valid: true + readConcern: {} + readConcernDocument: {} + isServerDefault: true + - + description: "Majority" + valid: true + readConcern: { level: "majority" } + readConcernDocument: { level: "majority" } + isServerDefault: false + - + description: "Local" + valid: true + readConcern: { level: "local" } + readConcernDocument: { level: "local" } + isServerDefault: false + - + description: "Linearizable" + valid: true + readConcern: { level: "linearizable" } + readConcernDocument: { level: "linearizable" } + isServerDefault: false + - + description: "Snapshot" + valid: true + readConcern: { level: "snapshot" } + readConcernDocument: {level: "snapshot" } + isServerDefault: false + - + description: "Available" + valid: true + readConcern: { level: "available" } + readConcernDocument: { level: "available" } + isServerDefault: false diff --git a/test/spec/read-write-concern/document/write-concern.json b/test/spec/read-write-concern/document/write-concern.json new file mode 100644 index 00000000000..64cd5d0eae2 --- /dev/null +++ b/test/spec/read-write-concern/document/write-concern.json @@ -0,0 +1,174 @@ +{ + "tests": [ + { + "description": "Default", + "valid": true, + "writeConcern": {}, + "writeConcernDocument": {}, + "isServerDefault": 
true, + "isAcknowledged": true + }, + { + "description": "W as a number", + "valid": true, + "writeConcern": { + "w": 3 + }, + "writeConcernDocument": { + "w": 3 + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "W as an invalid number", + "valid": false, + "writeConcern": { + "w": -3 + }, + "writeConcernDocument": null, + "isServerDefault": null, + "isAcknowledged": null + }, + { + "description": "W as majority", + "valid": true, + "writeConcern": { + "w": "majority" + }, + "writeConcernDocument": { + "w": "majority" + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "W as a custom string", + "valid": true, + "writeConcern": { + "w": "my_mode" + }, + "writeConcernDocument": { + "w": "my_mode" + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "WTimeoutMS", + "valid": true, + "writeConcern": { + "wtimeoutMS": 1000 + }, + "writeConcernDocument": { + "wtimeout": 1000 + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "WTimeoutMS as an invalid number", + "valid": false, + "writeConcern": { + "wtimeoutMS": -1000 + }, + "writeConcernDocument": null, + "isServerDefault": null, + "isAcknowledged": null + }, + { + "description": "Journal as true", + "valid": true, + "writeConcern": { + "journal": true + }, + "writeConcernDocument": { + "j": true + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "Journal as false", + "valid": true, + "writeConcern": { + "journal": false + }, + "writeConcernDocument": { + "j": false + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "Unacknowledged with only w", + "valid": true, + "writeConcern": { + "w": 0 + }, + "writeConcernDocument": { + "w": 0 + }, + "isServerDefault": false, + "isAcknowledged": false + }, + { + "description": "Unacknowledged with wtimeoutMS", + "valid": true, + "writeConcern": { + "w": 0, + "wtimeoutMS": 500 + }, + "writeConcernDocument": { + "w": 0, + "wtimeout": 500 + }, + "isServerDefault": false, + "isAcknowledged": false + }, + { + "description": "Unacknowledged with journal", + "valid": true, + "writeConcern": { + "w": 0, + "journal": false + }, + "writeConcernDocument": { + "w": 0, + "j": false + }, + "isServerDefault": false, + "isAcknowledged": false + }, + { + "description": "W is 0 with journal true", + "valid": false, + "writeConcern": { + "w": 0, + "journal": true + }, + "writeConcernDocument": { + "w": 0, + "j": true + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "Everything", + "valid": true, + "writeConcern": { + "w": 3, + "wtimeoutMS": 1000, + "journal": true + }, + "writeConcernDocument": { + "w": 3, + "wtimeout": 1000, + "j": true + }, + "isServerDefault": false, + "isAcknowledged": true + } + ] +} diff --git a/test/spec/read-write-concern/document/write-concern.yml b/test/spec/read-write-concern/document/write-concern.yml new file mode 100644 index 00000000000..bd82fdd59d7 --- /dev/null +++ b/test/spec/read-write-concern/document/write-concern.yml @@ -0,0 +1,99 @@ +tests: + - + description: "Default" + valid: true + writeConcern: {} + writeConcernDocument: {} + isServerDefault: true + isAcknowledged: true + - + description: "W as a number" + valid: true + writeConcern: { w: 3 } + writeConcernDocument: { w: 3 } + isServerDefault: false + isAcknowledged: true + - + description: "W as an invalid number" + valid: false + writeConcern: { w: -3 } + writeConcernDocument: ~ + isServerDefault: ~ + 
isAcknowledged: ~ + - + description: "W as majority" + valid: true + writeConcern: { w: "majority" } + writeConcernDocument: { w: "majority" } + isServerDefault: false + isAcknowledged: true + - + description: "W as a custom string" + valid: true + writeConcern: { w: "my_mode" } + writeConcernDocument: { w: "my_mode" } + isServerDefault: false + isAcknowledged: true + - + description: "WTimeoutMS" + valid: true + writeConcern: { wtimeoutMS: 1000 } + writeConcernDocument: { wtimeout: 1000 } + isServerDefault: false + isAcknowledged: true + - + description: "WTimeoutMS as an invalid number" + valid: false + writeConcern: { wtimeoutMS: -1000 } + writeConcernDocument: ~ + isServerDefault: ~ + isAcknowledged: ~ + - + description: "Journal as true" + valid: true + writeConcern: { journal: true } + writeConcernDocument: { j: true } + isServerDefault: false + isAcknowledged: true + - + description: "Journal as false" + valid: true + writeConcern: { journal: false } + writeConcernDocument: { j: false } + isServerDefault: false + isAcknowledged: true + - + description: "Unacknowledged with only w" + valid: true + writeConcern: { w: 0 } + writeConcernDocument: { w: 0 } + isServerDefault: false + isAcknowledged: false + - + description: "Unacknowledged with wtimeoutMS" + valid: true + writeConcern: { w: 0, wtimeoutMS: 500 } + writeConcernDocument: { w: 0, wtimeout: 500 } + isServerDefault: false + isAcknowledged: false + - + description: "Unacknowledged with journal" + valid: true + writeConcern: { w: 0, journal: false } + writeConcernDocument: { w: 0, j: false } + isServerDefault: false + isAcknowledged: false + - + description: "W is 0 with journal true" + valid: false + writeConcern: { w: 0, journal: true } + writeConcernDocument: { w: 0, j: true } + isServerDefault: false + isAcknowledged: true + - + description: "Everything" + valid: true + writeConcern: { w: 3, wtimeoutMS: 1000, journal: true } + writeConcernDocument: { w: 3, wtimeout: 1000, j: true } + isServerDefault: false + isAcknowledged: true diff --git a/test/spec/read-write-concern/operation/default-write-concern-2.6.json b/test/spec/read-write-concern/operation/default-write-concern-2.6.json new file mode 100644 index 00000000000..c623298cd78 --- /dev/null +++ b/test/spec/read-write-concern/operation/default-write-concern-2.6.json @@ -0,0 +1,544 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "2.6" + } + ], + "tests": [ + { + "description": "DeleteOne omits default write concern", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": {} + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "DeleteMany omits default write concern", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": {} + }, + "result": { + "deletedCount": 2 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + 
"description": "BulkWrite with all models omits default write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "ordered": true, + "requests": [ + { + "name": "deleteMany", + "arguments": { + "filter": {} + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 3 + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 3 + } + } + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 3 + }, + { + "_id": 2 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 1 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 2 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 2 + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 3 + } + }, + "multi": true + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "InsertOne and InsertMany omit default write concern", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "insertMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 3 + } + ], + 
"writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "UpdateOne, UpdateMany, and ReplaceOne omit default write concern", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": 2 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 3 + } + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 3 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": 2 + } + }, + "multi": true + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "x": 3 + } + } + ], + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/spec/read-write-concern/operation/default-write-concern-2.6.yml b/test/spec/read-write-concern/operation/default-write-concern-2.6.yml new file mode 100644 index 00000000000..725bcfca124 --- /dev/null +++ b/test/spec/read-write-concern/operation/default-write-concern-2.6.yml @@ -0,0 +1,215 @@ +# Test that setting a default write concern does not add a write concern +# to the command sent over the wire. +# Test operations that require 2.6+ server. 
+ +data: + - {_id: 1, x: 11} + - {_id: 2, x: 22} +collection_name: &collection_name default_write_concern_coll +database_name: &database_name default_write_concern_db + +runOn: + - minServerVersion: "2.6" + +tests: + - description: DeleteOne omits default write concern + operations: + - name: deleteOne + object: collection + collectionOptions: {writeConcern: {}} + arguments: + filter: {} + result: + deletedCount: 1 + expectations: + - command_started_event: + command: + delete: *collection_name + deletes: + - {q: {}, limit: 1} + writeConcern: null + - description: DeleteMany omits default write concern + operations: + - name: deleteMany + object: collection + collectionOptions: {writeConcern: {}} + arguments: + filter: {} + result: + deletedCount: 2 + expectations: + - command_started_event: + command: + delete: *collection_name + deletes: [{q: {}, limit: 0}] + writeConcern: null + - description: BulkWrite with all models omits default write concern + operations: + - name: bulkWrite + object: collection + collectionOptions: {writeConcern: {}} + arguments: + ordered: true + requests: + - name: deleteMany + arguments: + filter: {} + - name: insertOne + arguments: + document: {_id: 1} + - name: updateOne + arguments: + filter: {_id: 1} + update: {$set: {x: 1}} + - name: insertOne + arguments: + document: {_id: 2} + - name: replaceOne + arguments: + filter: {_id: 1} + replacement: {x: 2} + - name: insertOne + arguments: + document: {_id: 3} + - name: updateMany + arguments: + filter: {_id: 1} + update: {$set: {x: 3}} + - name: deleteOne + arguments: + filter: {_id: 3} + outcome: + collection: + name: *collection_name + data: + - {_id: 1, x: 3} + - {_id: 2} + expectations: + - command_started_event: + command: + delete: *collection_name + deletes: [{q: {}, limit: 0}] + writeConcern: null + - command_started_event: + command: + insert: *collection_name + documents: + - {_id: 1} + writeConcern: null + - command_started_event: + command: + update: *collection_name + updates: + - {q: {_id: 1}, u: {$set: {x: 1}}} + writeConcern: null + - command_started_event: + command: + insert: *collection_name + documents: + - {_id: 2} + writeConcern: null + - command_started_event: + command: + update: *collection_name + updates: + - {q: {_id: 1}, u: {x: 2}} + writeConcern: null + - command_started_event: + command: + insert: *collection_name + documents: + - {_id: 3} + writeConcern: null + - command_started_event: + command: + update: *collection_name + updates: + - {q: {_id: 1}, u: {$set: {x: 3}}, multi: true} + writeConcern: null + - command_started_event: + command: + delete: *collection_name + deletes: [{q: {_id: 3}, limit: 1}] + writeConcern: null + - description: 'InsertOne and InsertMany omit default write concern' + operations: + - name: insertOne + object: collection + collectionOptions: {writeConcern: {}} + arguments: + document: {_id: 3} + - name: insertMany + object: collection + collectionOptions: {writeConcern: {}} + arguments: + documents: + - {_id: 4} + - {_id: 5} + outcome: + collection: + name: *collection_name + data: + - {_id: 1, x: 11} + - {_id: 2, x: 22} + - {_id: 3} + - {_id: 4} + - {_id: 5} + expectations: + - command_started_event: + command: + insert: *collection_name + documents: + - {_id: 3} + writeConcern: null + - command_started_event: + command: + insert: *collection_name + documents: + - {_id: 4} + - {_id: 5} + writeConcern: null + - description: 'UpdateOne, UpdateMany, and ReplaceOne omit default write concern' + operations: + - name: updateOne + object: collection + 
collectionOptions: {writeConcern: {}} + arguments: + filter: {_id: 1} + update: {$set: {x: 1}} + - name: updateMany + object: collection + collectionOptions: {writeConcern: {}} + arguments: + filter: {_id: 2} + update: {$set: {x: 2}} + - name: replaceOne + object: collection + collectionOptions: {writeConcern: {}} + arguments: + filter: {_id: 2} + replacement: {x: 3} + outcome: + collection: + name: *collection_name + data: + - {_id: 1, x: 1} + - {_id: 2, x: 3} + expectations: + - command_started_event: + command: + update: *collection_name + updates: + - {q: {_id: 1}, u: {$set: {x: 1}}} + writeConcern: null + - command_started_event: + command: + update: *collection_name + updates: + - {q: {_id: 2}, u: {$set: {x: 2}}, multi: true} + writeConcern: null + - command_started_event: + command: + update: *collection_name + updates: + - {q: {_id: 2}, u: {x: 3}} + writeConcern: null \ No newline at end of file diff --git a/test/spec/read-write-concern/operation/default-write-concern-3.2.json b/test/spec/read-write-concern/operation/default-write-concern-3.2.json new file mode 100644 index 00000000000..04dd231f040 --- /dev/null +++ b/test/spec/read-write-concern/operation/default-write-concern-3.2.json @@ -0,0 +1,125 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "3.2" + } + ], + "tests": [ + { + "description": "findAndModify operations omit default write concern", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 2 + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + } + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 1 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + }, + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 2 + }, + "update": { + "x": 2 + }, + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 2 + }, + "remove": true, + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/spec/read-write-concern/operation/default-write-concern-3.2.yml b/test/spec/read-write-concern/operation/default-write-concern-3.2.yml new file mode 100644 index 00000000000..dccb7e0d079 --- /dev/null +++ b/test/spec/read-write-concern/operation/default-write-concern-3.2.yml @@ -0,0 +1,58 @@ +# Test that setting a default write concern does not add a write concern +# to the command sent over the wire. +# Test operations that require 3.2+ server, where findAndModify started +# to accept a write concern. 
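+# For example, a findOneAndDelete with the default (empty) write concern must
+# be sent as
+#   { findAndModify: <collection>, query: {_id: 2}, remove: true }
+# with no writeConcern key, even though 3.2+ servers accept one on
+# findAndModify.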
+ +data: + - {_id: 1, x: 11} + - {_id: 2, x: 22} +collection_name: &collection_name default_write_concern_coll +database_name: &database_name default_write_concern_db + +runOn: + - minServerVersion: "3.2" + +tests: + - description: 'findAndModify operations omit default write concern' + operations: + - name: findOneAndUpdate + object: collection + collectionOptions: {writeConcern: {}} + arguments: + filter: {_id: 1} + update: {$set: {x: 1}} + - name: findOneAndReplace + object: collection + collectionOptions: {writeConcern: {}} + arguments: + filter: {_id: 2} + replacement: {x: 2} + - name: findOneAndDelete + object: collection + collectionOptions: {writeConcern: {}} + arguments: + filter: {_id: 2} + outcome: + collection: + name: *collection_name + data: + - {_id: 1, x: 1} + expectations: + - command_started_event: + command: + findAndModify: *collection_name + query: {_id: 1} + update: {$set: {x: 1}} + writeConcern: null + - command_started_event: + command: + findAndModify: *collection_name + query: {_id: 2} + update: {x: 2} + writeConcern: null + - command_started_event: + command: + findAndModify: *collection_name + query: {_id: 2} + remove: true + writeConcern: null \ No newline at end of file diff --git a/test/spec/read-write-concern/operation/default-write-concern-3.4.json b/test/spec/read-write-concern/operation/default-write-concern-3.4.json new file mode 100644 index 00000000000..6519f6f089e --- /dev/null +++ b/test/spec/read-write-concern/operation/default-write-concern-3.4.json @@ -0,0 +1,216 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "3.4" + } + ], + "tests": [ + { + "description": "Aggregate with $out omits default write concern", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "other_collection_name", + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "default_write_concern_coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "RunCommand with a write command omits default write concern (runCommand should never inherit write concern)", + "operations": [ + { + "object": "database", + "databaseOptions": { + "writeConcern": {} + }, + "name": "runCommand", + "command_name": "delete", + "arguments": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ] + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "CreateIndex and dropIndex omits default write concern", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "createIndex", + "arguments": { + "keys": { + "x": 1 + } + } + }, + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "dropIndex", + "arguments": { + "name": "x_1" + } + } + ], + 
"expectations": [ + { + "command_started_event": { + "command": { + "createIndexes": "default_write_concern_coll", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "dropIndexes": "default_write_concern_coll", + "index": "x_1", + "writeConcern": null + } + } + } + ] + }, + { + "description": "MapReduce omits default write concern", + "operations": [ + { + "name": "mapReduce", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "mapReduce": "default_write_concern_coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + }, + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/spec/read-write-concern/operation/default-write-concern-3.4.yml b/test/spec/read-write-concern/operation/default-write-concern-3.4.yml new file mode 100644 index 00000000000..c7b586cadce --- /dev/null +++ b/test/spec/read-write-concern/operation/default-write-concern-3.4.yml @@ -0,0 +1,95 @@ +# Test that setting a default write concern does not add a write concern +# to the command sent over the wire. +# Test operations that require 3.4+ server, where all commands started +# to accept a write concern. + +data: + - {_id: 1, x: 11} + - {_id: 2, x: 22} +collection_name: &collection_name default_write_concern_coll +database_name: &database_name default_write_concern_db + +runOn: + - minServerVersion: "3.4" + +tests: + - description: Aggregate with $out omits default write concern + operations: + - object: collection + collectionOptions: {writeConcern: {}} + name: aggregate + arguments: + pipeline: &out_pipeline + - $match: {_id: {$gt: 1}} + - $out: &other_collection_name "other_collection_name" + outcome: + collection: + name: *other_collection_name + data: + - {_id: 2, x: 22} + expectations: + - command_started_event: + command: + aggregate: *collection_name + pipeline: *out_pipeline + writeConcern: null + - description: RunCommand with a write command omits default write concern (runCommand should never inherit write concern) + operations: + - object: database + databaseOptions: {writeConcern: {}} + name: runCommand + command_name: delete + arguments: + command: + delete: *collection_name + deletes: + - {q: {}, limit: 1} + expectations: + - command_started_event: + command: + delete: *collection_name + deletes: + - {q: {}, limit: 1} + writeConcern: null + - description: CreateIndex and dropIndex omits default write concern + operations: + - object: collection + collectionOptions: {writeConcern: {}} + name: createIndex + arguments: + keys: {x: 1} + - object: collection + collectionOptions: {writeConcern: {}} + name: dropIndex + arguments: + name: x_1 + expectations: + - command_started_event: + command: + createIndexes: *collection_name + indexes: + - name: x_1 + key: {x: 1} + writeConcern: null + - command_started_event: + command: + dropIndexes: *collection_name + index: x_1 + writeConcern: null + - description: MapReduce omits default write concern + operations: + - name: mapReduce + object: collection + 
collectionOptions: {writeConcern: {}} + arguments: + map: { $code: 'function inc() { return emit(0, this.x + 1) }' } + reduce: { $code: 'function sum(key, values) { return values.reduce((acc, x) => acc + x); }' } + out: { inline: 1 } + expectations: + - command_started_event: + command: + mapReduce: *collection_name + map: { $code: 'function inc() { return emit(0, this.x + 1) }' } + reduce: { $code: 'function sum(key, values) { return values.reduce((acc, x) => acc + x); }' } + out: { inline: 1 } + writeConcern: null \ No newline at end of file diff --git a/test/spec/read-write-concern/operation/default-write-concern-4.2.json b/test/spec/read-write-concern/operation/default-write-concern-4.2.json new file mode 100644 index 00000000000..fef192d1a39 --- /dev/null +++ b/test/spec/read-write-concern/operation/default-write-concern-4.2.json @@ -0,0 +1,87 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "4.2" + } + ], + "tests": [ + { + "description": "Aggregate with $merge omits default write concern", + "operations": [ + { + "object": "collection", + "databaseOptions": { + "writeConcern": {} + }, + "collectionOptions": { + "writeConcern": {} + }, + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } + } + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "default_write_concern_coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } + } + ], + "writeConcern": null + } + } + } + ], + "outcome": { + "collection": { + "name": "other_collection_name", + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/read-write-concern/operation/default-write-concern-4.2.yml b/test/spec/read-write-concern/operation/default-write-concern-4.2.yml new file mode 100644 index 00000000000..6039b55814f --- /dev/null +++ b/test/spec/read-write-concern/operation/default-write-concern-4.2.yml @@ -0,0 +1,36 @@ +# Test that setting a default write concern does not add a write concern +# to the command sent over the wire. +# Test operations that require 4.2+ server. 
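+# An aggregate with a $merge stage writes to its target collection, so on
+# 4.2+ the command accepts a write concern; with the default (empty) write
+# concern configured, the driver must still send the aggregate command
+# without a writeConcern key.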
+ +data: + - {_id: 1, x: 11} + - {_id: 2, x: 22} +collection_name: &collection_name default_write_concern_coll +database_name: &database_name default_write_concern_db + +runOn: + - minServerVersion: "4.2" + +tests: + - description: Aggregate with $merge omits default write concern + operations: + - object: collection + databaseOptions: {writeConcern: {}} + collectionOptions: {writeConcern: {}} + name: aggregate + arguments: + pipeline: &merge_pipeline + - $match: {_id: {$gt: 1}} + - $merge: {into: &other_collection_name "other_collection_name" } + expectations: + - command_started_event: + command: + aggregate: *collection_name + pipeline: *merge_pipeline + # "null" fields will be checked for non-existence + writeConcern: null + outcome: + collection: + name: *other_collection_name + data: + - {_id: 2, x: 22} \ No newline at end of file diff --git a/test/spec/retryable-writes/README.rst index 60b9dfc5516..3504c7430cc 100644 --- a/test/spec/retryable-writes/README.rst +++ b/test/spec/retryable-writes/README.rst @@ -19,11 +19,15 @@ Tests will require a MongoClient created with options defined in the tests. Integration tests will require a running MongoDB cluster with server versions 3.6.0 or later. The ``{setFeatureCompatibilityVersion: 3.6}`` admin command will also need to have been executed to enable support for retryable writes on -the cluster. +the cluster. Some tests may have more stringent version requirements depending +on the fail points used. Server Fail Point ================= +onPrimaryTransactionalWrite +--------------------------- + Some tests depend on a server fail point, ``onPrimaryTransactionalWrite``, which allows us to force a network error before the server would return a write result to the client. The fail point also allows control whether the server will @@ -64,24 +68,36 @@ may be combined if desired: If set, the specified exception code will be thrown and the write will not be committed. If unset, the write will be allowed to commit. -Disabling Fail Point after Test Execution ------------------------------------------ +failCommand +----------- + +Some tests depend on a server fail point, ``failCommand``, which allows the +client to force the server to return an error. Unlike +``onPrimaryTransactionalWrite``, ``failCommand`` does not allow the client to +directly control whether the server will commit the operation (execution of the +write depends on whether the ``closeConnection`` and/or ``errorCode`` options +are specified). See: `failCommand <../../transactions/tests#failcommand>`_ in +the Transactions spec test suite for more information. + +Disabling Fail Points after Test Execution +------------------------------------------ -After each test that configures a fail point, drivers should disable the -``onPrimaryTransactionalWrite`` fail point to avoid spurious failures in -subsequent tests. The fail point may be disabled like so:: +After each test that configures a fail point, drivers should disable the fail +point to avoid spurious failures in subsequent tests. The fail point may be +disabled like so:: db.runCommand({ - configureFailPoint: "onPrimaryTransactionalWrite", + configureFailPoint: <fail point name>, mode: "off" }); -Network Error Tests -=================== +Use as Integration Tests +======================== -Network error tests are expressed in YAML and should be run against a replica -set. These tests cannot be run against a shard cluster because mongos does not -support the necessary fail point.
+Integration tests are expressed in YAML and can be run against a replica set or +sharded cluster as denoted by the top-level ``runOn`` field. Tests that rely on +the ``onPrimaryTransactionalWrite`` fail point cannot be run against a sharded +cluster because the fail point is not supported by mongos. The tests exercise the following scenarios: @@ -121,17 +137,29 @@ Test Format Each YAML file has the following keys: -- ``data``: The data that should exist in the collection under test before each - test run. +- ``runOn`` (optional): An array of server version and/or topology requirements + for which the tests can be run. If the test environment satisfies one or more + of these requirements, the tests may be executed; otherwise, this file should + be skipped. If this field is omitted, the tests can be assumed to have no + particular requirements and should be executed. Each element will have some or + all of the following fields: -- ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the test. If this field is not present, it should - be assumed that there is no lower bound on the required server version. + - ``minServerVersion`` (optional): The minimum server version (inclusive) + required to successfully run the tests. If this field is omitted, it should + be assumed that there is no lower bound on the required server version. -- ``maxServerVersion`` (optional): The maximum server version (exclusive) - against which this test can run successfully. If this field is not present, - it should be assumed that there is no upper bound on the required server - version. + - ``maxServerVersion`` (optional): The maximum server version (inclusive) + against which the tests can be run successfully. If this field is omitted, + it should be assumed that there is no upper bound on the required server + version. + + - ``topology`` (optional): An array of server topologies against which the + tests can be run successfully. Valid topologies are "single", "replicaset", + and "sharded". If this field is omitted, the default is all topologies (i.e. + ``["single", "replicaset", "sharded"]``). + +- ``data``: The data that should exist in the collection under test before each + test run. - ``tests``: An array of tests that are to be run independently of each other. Each test will have some or all of the following fields: @@ -140,9 +168,15 @@ Each YAML file has the following keys: - ``clientOptions``: Parameters to pass to MongoClient(). + - ``useMultipleMongoses`` (optional): If ``true``, the MongoClient for this + test should be initialized with multiple mongos seed addresses. If ``false`` + or omitted, only a single mongos address should be specified. This field has + no effect for non-sharded topologies. + - ``failPoint`` (optional): The ``configureFailPoint`` command document to run to configure a fail point on the primary server. Drivers must ensure that - ``configureFailPoint`` is the first field in the command. + ``configureFailPoint`` is the first field in the command. This option and + ``useMultipleMongoses: true`` are mutually exclusive. - ``operation``: Document describing the operation to be executed. The operation should be executed through a collection object derived from a @@ -171,6 +205,12 @@ Each YAML file has the following keys: result object if their BulkWriteException (or equivalent) provides access to a write result object. + - ``errorLabelsContain``: A list of error label strings that the + error is expected to have. 
+ + - ``errorLabelsOmit``: A list of error label strings that the + error is expected not to have. + - ``collection``: - ``name`` (optional): The name of the collection to verify. If this isn't @@ -187,9 +227,9 @@ The YAML tests specify bulk write operations that are split by command type operations may also be split due to ``maxWriteBatchSize``, ``maxBsonObjectSize``, or ``maxMessageSizeBytes``. -For instance, an insertMany operation with five 10 MB documents executed using +For instance, an insertMany operation with five 10 MiB documents executed using OP_MSG payload type 0 (i.e. entire command in one document) would be split into -five insert commands in order to respect the 16 MB ``maxBsonObjectSize`` limit. +five insert commands in order to respect the 16 MiB ``maxBsonObjectSize`` limit. The same insertMany operation executed using OP_MSG payload type 1 (i.e. command arguments pulled out into a separate payload vector) would be split into two insert commands in order to respect the 48 MB ``maxMessageSizeBytes`` limit. @@ -210,59 +250,6 @@ testing an update or delete that is split into two commands, the ``skip`` should be set to the number of statements in the first command to allow the fail point to trigger on the second command. -Replica Set Failover Test -========================= - -In addition to network errors, writes should also be retried in the event of a -primary failover, which results in a "not master" command error (or similar). -The ``stepdownHangBeforePerformingPostMemberStateUpdateActions`` fail point -implemented in `d4eb562`_ for `SERVER-31355`_ may be used for this test, as it -allows a primary to keep its client connections open after a step down. This -fail point operates by hanging the step down procedure (i.e. ``replSetStepDown`` -command) until the fail point is later deactivated. - -.. _d4eb562: https://github.com/mongodb/mongo/commit/d4eb562ac63717904f24de4a22e395070687bc62 -.. _SERVER-31355: https://jira.mongodb.org/browse/SERVER-31355 - -The following test requires three MongoClient instances and will generally -require two execution contexts (async drivers may get by with a single thread). - -- The client under test will connect to the replica set and be used to execute - write operations. -- The fail point client will connect directly to the initial primary and be used - to toggle the fail point. -- The step down client will connect to the replica set and be used to step down - the primary. This client will generally require its own execution context, - since the step down will hang. - -In order to guarantee that the client under test does not detect the stepped -down primary's state change via SDAM, it must be configured with a large -`heartbeatFrequencyMS`_ value (e.g. 60 seconds). Single-threaded drivers may -also need to set `serverSelectionTryOnce`_ to ``false`` to ensure that server -selection for the retry attempt waits until a new primary is elected. - -.. _heartbeatFrequencyMS: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#heartbeatfrequencyms -.. _serverSelectionTryOnce: https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst#serverselectiontryonce - -The test proceeds as follows: - -- Using the client under test, insert a document and observe a successful write - result. This will ensure that initial discovery takes place. -- Using the fail point client, activate the fail point by setting ``mode`` - to ``"alwaysOn"``. 
-- Using the step down client, step down the primary by executing the command - ``{ replSetStepDown: 60, force: true}``. This operation will hang so long as - the fail point is activated. When the fail point is later deactivated, the - step down will complete and the primary's client connections will be dropped. - At that point, any ensuing network error should be ignored. -- Using the client under test, insert a document and observe a successful write - result. The test MUST assert that the insert command fails once against the - stepped down node and is successfully retried on the newly elected primary - (after SDAM discovers the topology change). The test MAY use APM or another - means to observe both attempts. -- Using the fail point client, deactivate the fail point by setting ``mode`` - to ``"off"``. - Command Construction Tests ========================== @@ -293,7 +280,7 @@ unsupported write operations: * Unsupported write commands - - ``aggregate`` with ``$out`` pipeline operator + - ``aggregate`` with write stage (e.g. ``$out``, ``$merge``) Drivers should test that transaction IDs are always included in commands for supported write operations: @@ -314,3 +301,35 @@ supported write operations: - ``insertMany()`` with ``ordered=false`` - ``bulkWrite()`` with ``ordered=true`` (no ``UpdateMany`` or ``DeleteMany``) - ``bulkWrite()`` with ``ordered=false`` (no ``UpdateMany`` or ``DeleteMany``) + +Prose Tests +=========== + +The following tests ensure that retryable writes work properly with replica sets +and sharded clusters. + +#. Test that retryable writes raise an exception when using the MMAPv1 storage + engine. For this test, execute a write operation, such as ``insertOne``, + which should generate an exception. Assert that the error message is the + replacement error message:: + + This MongoDB deployment does not support retryable writes. Please add + retryWrites=false to your connection string. + + and the error code is 20. + +Changelog +========= + +:2019-10-21: Add ``errorLabelsContain`` and ``errorLabelsOmit`` fields to ``result`` + +:2019-08-07: Add Prose Tests section + +:2019-06-07: Mention $merge stage for aggregate alongside $out + +:2019-03-01: Add top-level ``runOn`` field to denote server version and/or + topology requirements for the test file. Remove the + ``minServerVersion`` and ``maxServerVersion`` top-level fields, + which are now expressed within ``runOn`` elements. + + Add test-level ``useMultipleMongoses`` field.
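For reference, the ``failCommand`` document exercised by the ``*-errorLabels`` test files below can be reproduced directly from the mongo shell. This is a minimal sketch, not part of the spec or this patch: it assumes a 4.3.1+ server (per the tests' ``runOn`` requirements), uses ``db.adminCommand`` to target the admin database, and copies the error code and label from the test files::

    // Fail the next update command once with WriteConflict (112), a
    // non-retryable code, but attach the RetryableWriteError label so
    // the driver retries anyway (as in the errorLabels tests).
    db.adminCommand({
        configureFailPoint: "failCommand",
        mode: { times: 1 },
        data: {
            failCommands: ["update"],
            errorCode: 112,
            errorLabels: ["RetryableWriteError"]
        }
    });

    // ... execute the operation under test; the driver should retry ...

    // Disable the fail point afterwards, per "Disabling Fail Points
    // after Test Execution" above.
    db.adminCommand({ configureFailPoint: "failCommand", mode: "off" });

A driver's test runner would issue the equivalent commands through its own command API rather than the shell.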
diff --git a/test/spec/retryable-writes/bulkWrite-errorLabels.json b/test/spec/retryable-writes/bulkWrite-errorLabels.json new file mode 100644 index 00000000000..94ea3ea989f --- /dev/null +++ b/test/spec/retryable-writes/bulkWrite-errorLabels.json @@ -0,0 +1,182 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "BulkWrite succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "1": 3 + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "BulkWrite fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/bulkWrite-errorLabels.yml b/test/spec/retryable-writes/bulkWrite-errorLabels.yml new file mode 100644 index 00000000000..60fc18d73f7 --- /dev/null +++ b/test/spec/retryable-writes/bulkWrite-errorLabels.yml @@ -0,0 +1,77 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +tests: + - description: "BulkWrite succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + errorCode: 112 # WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "bulkWrite" + arguments: + requests: + - name: "deleteOne" + arguments: + filter: { _id: 1 } + - name: "insertOne" + arguments: + document: { _id: 3, x: 33 } + - name: "updateOne" + arguments: + filter: { _id: 2 } + update: { $inc: { x: 1 } } + options: { ordered: true } + outcome: # Driver retries operation and it succeeds + result: + deletedCount: 1 + 
insertedCount: 1 + insertedIds: { 1: 3 } + matchedCount: 1 + modifiedCount: 1 + upsertedCount: 0 + upsertedIds: {} + collection: + data: + - { _id: 2, x: 23 } + - { _id: 3, x: 33 } + + - description: "BulkWrite fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "bulkWrite" + arguments: + requests: + - name: "deleteOne" + arguments: + filter: { _id: 1 } + - name: "insertOne" + arguments: + document: { _id: 3, x: 33 } + - name: "updateOne" + arguments: + filter: { _id: 2 } + update: { $inc: { x: 1 } } + options: { ordered: true } + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: ["RetryableWriteError"] + collection: + data: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } diff --git a/test/spec/retryable-writes/bulkWrite-serverErrors.json b/test/spec/retryable-writes/bulkWrite-serverErrors.json index a208019f779..d9561d568c0 100644 --- a/test/spec/retryable-writes/bulkWrite-serverErrors.json +++ b/test/spec/retryable-writes/bulkWrite-serverErrors.json @@ -1,4 +1,18 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +23,9 @@ "x": 22 } ], - "minServerVersion": "3.99", "tests": [ { "description": "BulkWrite succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -25,7 +35,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -96,9 +109,6 @@ }, { "description": "BulkWrite succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -110,7 +120,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -179,6 +192,81 @@ ] } } + }, + { + "description": "BulkWrite fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/bulkWrite-serverErrors.yml b/test/spec/retryable-writes/bulkWrite-serverErrors.yml index 387f680b84f..2444923e7a0 100644 --- a/test/spec/retryable-writes/bulkWrite-serverErrors.yml +++ 
b/test/spec/retryable-writes/bulkWrite-serverErrors.yml @@ -1,21 +1,25 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -# TODO: this should change to 4.0 once 4.0.0 is released. -minServerVersion: '3.99' - tests: - description: "BulkWrite succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["update"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "bulkWrite" arguments: @@ -49,8 +53,6 @@ tests: - { _id: 3, x: 33 } - description: "BulkWrite succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -59,6 +61,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "bulkWrite" arguments: @@ -90,3 +93,38 @@ tests: data: - { _id: 2, x: 23 } - { _id: 3, x: 33 } + + - + description: "BulkWrite fails with a RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + closeConnection: true + operation: + name: "bulkWrite" + arguments: + requests: + - + name: "deleteOne" + arguments: + filter: { _id: 1 } + - + name: "insertOne" + arguments: + document: { _id: 3, x: 33 } + - + name: "updateOne" + arguments: + filter: { _id: 2 } + update: { $inc: { x : 1 }} + options: { ordered: true } + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } diff --git a/test/spec/retryable-writes/bulkWrite.json b/test/spec/retryable-writes/bulkWrite.json index d94dc6152e1..72a8d018939 100644 --- a/test/spec/retryable-writes/bulkWrite.json +++ b/test/spec/retryable-writes/bulkWrite.json @@ -1,17 +1,21 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, "x": 11 } ], - "minServerVersion": "3.6", "tests": [ { "description": "First command is retried", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -82,9 +86,6 @@ }, { "description": "All commands are retried", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -216,9 +217,6 @@ }, { "description": "Both commands are retried after their first statement fails", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -298,9 +296,6 @@ }, { "description": "Second command is retried after its second statement fails", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -380,9 +375,6 @@ }, { "description": "BulkWrite with unordered execution", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -450,9 +442,6 @@ }, { "description": "First insertOne is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -525,9 +514,6 @@ }, { "description": "Second updateOne is never committed", - "clientOptions": { - "retryWrites": true 
- }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -606,9 +592,6 @@ }, { "description": "Third updateOne is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -692,9 +675,6 @@ }, { "description": "Single-document write following deleteMany is retried", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -755,9 +735,6 @@ }, { "description": "Single-document write following updateMany is retried", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { diff --git a/test/spec/retryable-writes/bulkWrite.yml b/test/spec/retryable-writes/bulkWrite.yml index 4d561159f8d..939dacf7728 100644 --- a/test/spec/retryable-writes/bulkWrite.yml +++ b/test/spec/retryable-writes/bulkWrite.yml @@ -1,13 +1,14 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } -minServerVersion: '3.6' - tests: - description: "First command is retried" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -46,8 +47,6 @@ tests: # that each write command consists of a single statement, which will # fail on the first attempt and succeed on the second, retry attempt. description: "All commands are retried" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 7 } @@ -105,8 +104,6 @@ tests: - { _id: 5, x: 55 } - description: "Both commands are retried after their first statement fails" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } @@ -144,8 +141,6 @@ tests: - { _id: 2, x: 23 } - description: "Second command is retried after its second statement fails" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { skip: 2 } @@ -183,8 +178,6 @@ tests: - { _id: 2, x: 23 } - description: "BulkWrite with unordered execution" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -217,8 +210,6 @@ tests: - { _id: 3, x: 33 } - description: "First insertOne is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } @@ -256,8 +247,6 @@ tests: - { _id: 1, x: 11 } - description: "Second updateOne is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { skip: 1 } @@ -296,8 +285,6 @@ tests: - { _id: 2, x: 22 } - description: "Third updateOne is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { skip: 2 } @@ -341,8 +328,6 @@ tests: # affect the initial deleteMany and will trigger once (and only once) # for the first insertOne attempt. description: "Single-document write following deleteMany is retried" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -378,8 +363,6 @@ tests: # affect the initial updateMany and will trigger once (and only once) # for the first insertOne attempt. 
description: "Single-document write following updateMany is retried" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } diff --git a/test/spec/retryable-writes/deleteMany.json b/test/spec/retryable-writes/deleteMany.json index 0a2b88a520e..642ad11fb47 100644 --- a/test/spec/retryable-writes/deleteMany.json +++ b/test/spec/retryable-writes/deleteMany.json @@ -1,4 +1,13 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset", + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +18,10 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "DeleteMany ignores retryWrites", - "clientOptions": { - "retryWrites": true - }, + "useMultipleMongoses": true, "operation": { "name": "deleteMany", "arguments": { diff --git a/test/spec/retryable-writes/deleteMany.yml b/test/spec/retryable-writes/deleteMany.yml index 2574f99e039..c206fa56bb5 100644 --- a/test/spec/retryable-writes/deleteMany.yml +++ b/test/spec/retryable-writes/deleteMany.yml @@ -1,14 +1,16 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset", "sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "DeleteMany ignores retryWrites" - clientOptions: - retryWrites: true + useMultipleMongoses: true operation: name: "deleteMany" arguments: diff --git a/test/spec/retryable-writes/deleteOne-errorLabels.json b/test/spec/retryable-writes/deleteOne-errorLabels.json new file mode 100644 index 00000000000..bff02e1f94d --- /dev/null +++ b/test/spec/retryable-writes/deleteOne-errorLabels.json @@ -0,0 +1,106 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "DeleteOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "result": { + "deletedCount": 1 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "DeleteOne fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/deleteOne-errorLabels.yml b/test/spec/retryable-writes/deleteOne-errorLabels.yml new file mode 100644 index 00000000000..195f0c7912f --- /dev/null +++ b/test/spec/retryable-writes/deleteOne-errorLabels.yml @@ -0,0 +1,48 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +tests: + - description: "DeleteOne succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + errorCode: 112 # 
WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "deleteOne" + arguments: + filter: { _id: 1 } + outcome: # Driver retries operation and it succeeds + result: + deletedCount: 1 + collection: + data: + - { _id: 2, x: 22 } + + - description: "DeleteOne fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "deleteOne" + arguments: + filter: { _id: 1 } + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/deleteOne-serverErrors.json b/test/spec/retryable-writes/deleteOne-serverErrors.json index 606fc66f5cc..69d225759c4 100644 --- a/test/spec/retryable-writes/deleteOne-serverErrors.json +++ b/test/spec/retryable-writes/deleteOne-serverErrors.json @@ -1,4 +1,18 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +23,9 @@ "x": 22 } ], - "minServerVersion": "3.99", "tests": [ { "description": "DeleteOne succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -25,7 +35,10 @@ "failCommands": [ "delete" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -52,9 +65,6 @@ }, { "description": "DeleteOne succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -66,7 +76,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -91,6 +104,49 @@ ] } } + }, + { + "description": "DeleteOne fails with RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/deleteOne-serverErrors.yml b/test/spec/retryable-writes/deleteOne-serverErrors.yml index 006f151383a..1f433d283e6 100644 --- a/test/spec/retryable-writes/deleteOne-serverErrors.yml +++ b/test/spec/retryable-writes/deleteOne-serverErrors.yml @@ -1,21 +1,25 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -# TODO: this should change to 4.0 once 4.0.0 is released. 
-minServerVersion: '3.99' - tests: - description: "DeleteOne succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["delete"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "deleteOne" arguments: @@ -28,8 +32,6 @@ tests: - { _id: 2, x: 22 } - description: "DeleteOne succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -38,6 +40,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "deleteOne" arguments: @@ -48,3 +51,23 @@ tests: collection: data: - { _id: 2, x: 22 } + - + description: "DeleteOne fails with RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + closeConnection: true + operation: + name: "deleteOne" + arguments: + filter: { _id: 1 } + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/deleteOne.json b/test/spec/retryable-writes/deleteOne.json index 8e42e9c4f05..592937acedd 100644 --- a/test/spec/retryable-writes/deleteOne.json +++ b/test/spec/retryable-writes/deleteOne.json @@ -1,4 +1,12 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +17,9 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "DeleteOne is committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -46,9 +50,6 @@ }, { "description": "DeleteOne is not committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -82,9 +83,6 @@ }, { "description": "DeleteOne is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { diff --git a/test/spec/retryable-writes/deleteOne.yml b/test/spec/retryable-writes/deleteOne.yml index d82bfc054fb..b15c991cda7 100644 --- a/test/spec/retryable-writes/deleteOne.yml +++ b/test/spec/retryable-writes/deleteOne.yml @@ -1,14 +1,15 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "DeleteOne is committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -24,8 +25,6 @@ tests: - { _id: 2, x: 22 } - description: "DeleteOne is not committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -42,8 +41,6 @@ tests: - { _id: 2, x: 22 } - description: "DeleteOne is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } diff --git a/test/spec/retryable-writes/findOneAndDelete-errorLabels.json b/test/spec/retryable-writes/findOneAndDelete-errorLabels.json new file mode 100644 index 00000000000..efa62dba2e6 --- /dev/null +++ b/test/spec/retryable-writes/findOneAndDelete-errorLabels.json @@ -0,0 +1,117 @@ +{ + 
"runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndDelete succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndDelete fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/findOneAndDelete-errorLabels.yml b/test/spec/retryable-writes/findOneAndDelete-errorLabels.yml new file mode 100644 index 00000000000..af86ee2ecb5 --- /dev/null +++ b/test/spec/retryable-writes/findOneAndDelete-errorLabels.yml @@ -0,0 +1,49 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +tests: + - description: "FindOneAndDelete succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + errorCode: 112 # WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "findOneAndDelete" + arguments: + filter: { x: { $gte: 11 } } + sort: { x: 1 } + outcome: # Driver retries operation and it succeeds + result: { _id: 1, x: 11 } + collection: + data: + - { _id: 2, x: 22 } + + - description: "FindOneAndDelete fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "findOneAndDelete" + arguments: + filter: { x: { $gte: 11 } } + sort: { x: 1 } + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/findOneAndDelete-serverErrors.json b/test/spec/retryable-writes/findOneAndDelete-serverErrors.json index 306b1423f47..0785e5d035c 100644 --- a/test/spec/retryable-writes/findOneAndDelete-serverErrors.json +++ b/test/spec/retryable-writes/findOneAndDelete-serverErrors.json @@ -1,4 +1,18 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + 
"topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +23,9 @@ "x": 22 } ], - "minServerVersion": "3.99", "tests": [ { "description": "FindOneAndDelete succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -25,7 +35,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -58,9 +71,6 @@ }, { "description": "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -72,7 +82,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -103,6 +116,54 @@ ] } } + }, + { + "description": "FindOneAndDelete fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/findOneAndDelete-serverErrors.yml b/test/spec/retryable-writes/findOneAndDelete-serverErrors.yml index 75071a36f29..fff52e94ca2 100644 --- a/test/spec/retryable-writes/findOneAndDelete-serverErrors.yml +++ b/test/spec/retryable-writes/findOneAndDelete-serverErrors.yml @@ -1,21 +1,25 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -# TODO: this should change to 4.0 once 4.0.0 is released. 
-minServerVersion: '3.99' - tests: - description: "FindOneAndDelete succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["findAndModify"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "findOneAndDelete" arguments: @@ -28,8 +32,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -38,6 +40,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "findOneAndDelete" arguments: @@ -48,3 +51,24 @@ tests: collection: data: - { _id: 2, x: 22 } + - + description: "FindOneAndDelete fails with a RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + closeConnection: true + operation: + name: "findOneAndDelete" + arguments: + filter: { x: { $gte: 11 } } + sort: { x: 1 } + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/findOneAndDelete.json b/test/spec/retryable-writes/findOneAndDelete.json index 5963c4329d9..0cbe18108bd 100644 --- a/test/spec/retryable-writes/findOneAndDelete.json +++ b/test/spec/retryable-writes/findOneAndDelete.json @@ -1,4 +1,12 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +17,9 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "FindOneAndDelete is committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -52,9 +56,6 @@ }, { "description": "FindOneAndDelete is not committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -94,9 +95,6 @@ }, { "description": "FindOneAndDelete is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { diff --git a/test/spec/retryable-writes/findOneAndDelete.yml b/test/spec/retryable-writes/findOneAndDelete.yml index 4f5d71caa39..1456ad71626 100644 --- a/test/spec/retryable-writes/findOneAndDelete.yml +++ b/test/spec/retryable-writes/findOneAndDelete.yml @@ -1,14 +1,15 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "FindOneAndDelete is committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -24,8 +25,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndDelete is not committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -42,8 +41,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndDelete is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } diff --git a/test/spec/retryable-writes/findOneAndReplace-errorLabels.json 
b/test/spec/retryable-writes/findOneAndReplace-errorLabels.json new file mode 100644 index 00000000000..d9473d139a0 --- /dev/null +++ b/test/spec/retryable-writes/findOneAndReplace-errorLabels.json @@ -0,0 +1,121 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndReplace succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/findOneAndReplace-errorLabels.yml b/test/spec/retryable-writes/findOneAndReplace-errorLabels.yml new file mode 100644 index 00000000000..afc0494e5b6 --- /dev/null +++ b/test/spec/retryable-writes/findOneAndReplace-errorLabels.yml @@ -0,0 +1,52 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +tests: + - description: "FindOneAndReplace succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + errorCode: 112 # WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "findOneAndReplace" + arguments: + filter: { _id: 1 } + replacement: { _id: 1, x: 111 } + returnDocument: "Before" + outcome: # Driver retries operation and it succeeds + result: { _id: 1, x: 11 } + collection: + data: + - { _id: 1, x: 111 } + - { _id: 2, x: 22 } + + - description: "FindOneAndReplace fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "findOneAndReplace" + arguments: + filter: { _id: 1 } + replacement: { _id: 1, x: 111 } + returnDocument: "Before" + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: ["RetryableWriteError"] + collection: + data: + - 
{ _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/findOneAndReplace-serverErrors.json b/test/spec/retryable-writes/findOneAndReplace-serverErrors.json index ab209b8d1c4..6ebe057cfd4 100644 --- a/test/spec/retryable-writes/findOneAndReplace-serverErrors.json +++ b/test/spec/retryable-writes/findOneAndReplace-serverErrors.json @@ -1,4 +1,18 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +23,9 @@ "x": 22 } ], - "minServerVersion": "3.99", "tests": [ { "description": "FindOneAndReplace succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -25,7 +35,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -62,9 +75,6 @@ }, { "description": "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -76,7 +86,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -111,6 +124,54 @@ ] } } + }, + { + "description": "FindOneAndReplace fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/findOneAndReplace-serverErrors.yml b/test/spec/retryable-writes/findOneAndReplace-serverErrors.yml index 8b6a2012939..492bbf874e4 100644 --- a/test/spec/retryable-writes/findOneAndReplace-serverErrors.yml +++ b/test/spec/retryable-writes/findOneAndReplace-serverErrors.yml @@ -1,21 +1,25 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -# TODO: this should change to 4.0 once 4.0.0 is released. 
-minServerVersion: '3.99' - tests: - description: "FindOneAndReplace succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["findAndModify"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "findOneAndReplace" arguments: @@ -30,8 +34,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -40,6 +42,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "findOneAndReplace" arguments: @@ -52,3 +55,26 @@ tests: data: - { _id: 1, x: 111 } - { _id: 2, x: 22 } + + - + description: "FindOneAndReplace fails with a RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + closeConnection: true + operation: + name: "findOneAndReplace" + arguments: + filter: { _id: 1 } + replacement: { _id: 1, x: 111 } + returnDocument: "Before" + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/findOneAndReplace.json b/test/spec/retryable-writes/findOneAndReplace.json index 20b9e0bc312..e1f9ab7f8c3 100644 --- a/test/spec/retryable-writes/findOneAndReplace.json +++ b/test/spec/retryable-writes/findOneAndReplace.json @@ -1,4 +1,12 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +17,9 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "FindOneAndReplace is committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -56,9 +60,6 @@ }, { "description": "FindOneAndReplace is not committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -102,9 +103,6 @@ }, { "description": "FindOneAndReplace is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { diff --git a/test/spec/retryable-writes/findOneAndReplace.yml b/test/spec/retryable-writes/findOneAndReplace.yml index 6e26d7cfe06..36d81d461ed 100644 --- a/test/spec/retryable-writes/findOneAndReplace.yml +++ b/test/spec/retryable-writes/findOneAndReplace.yml @@ -1,14 +1,15 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "FindOneAndReplace is committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -26,8 +27,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndReplace is not committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -46,8 +45,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndReplace is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } diff --git 
a/test/spec/retryable-writes/findOneAndUpdate-errorLabels.json b/test/spec/retryable-writes/findOneAndUpdate-errorLabels.json new file mode 100644 index 00000000000..1926d7fa5c3 --- /dev/null +++ b/test/spec/retryable-writes/findOneAndUpdate-errorLabels.json @@ -0,0 +1,123 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndUpdate succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/findOneAndUpdate-errorLabels.yml b/test/spec/retryable-writes/findOneAndUpdate-errorLabels.yml new file mode 100644 index 00000000000..f0bff611567 --- /dev/null +++ b/test/spec/retryable-writes/findOneAndUpdate-errorLabels.yml @@ -0,0 +1,52 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +tests: + - description: "FindOneAndUpdate succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + errorCode: 112 # WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "findOneAndUpdate" + arguments: + filter: { _id: 1 } + update: { $inc: { x: 1 } } + returnDocument: "Before" + outcome: # Driver retries operation and it succeeds + result: { _id: 1, x: 11 } + collection: + data: + - { _id: 1, x: 12 } + - { _id: 2, x: 22 } + + - description: "FindOneAndUpdate fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "findOneAndUpdate" + arguments: + filter: { _id: 1 } + update: { $inc: { x: 1 } } + returnDocument: "Before" + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: 
["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/findOneAndUpdate-serverErrors.json b/test/spec/retryable-writes/findOneAndUpdate-serverErrors.json index 92f09ce9a17..e6e369c1393 100644 --- a/test/spec/retryable-writes/findOneAndUpdate-serverErrors.json +++ b/test/spec/retryable-writes/findOneAndUpdate-serverErrors.json @@ -1,4 +1,18 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +23,9 @@ "x": 22 } ], - "minServerVersion": "3.99", "tests": [ { "description": "FindOneAndUpdate succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -25,7 +35,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -63,9 +76,6 @@ }, { "description": "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -77,7 +87,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -113,6 +126,55 @@ ] } } + }, + { + "description": "FindOneAndUpdate fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/findOneAndUpdate-serverErrors.yml b/test/spec/retryable-writes/findOneAndUpdate-serverErrors.yml index 7eddcdc09b0..ae24542b1ba 100644 --- a/test/spec/retryable-writes/findOneAndUpdate-serverErrors.yml +++ b/test/spec/retryable-writes/findOneAndUpdate-serverErrors.yml @@ -1,21 +1,25 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -# TODO: this should change to 4.0 once 4.0.0 is released. 
-minServerVersion: '3.99' - tests: - description: "FindOneAndUpdate succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["findAndModify"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "findOneAndUpdate" arguments: @@ -30,8 +34,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -40,6 +42,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "findOneAndUpdate" arguments: @@ -52,3 +55,25 @@ tests: data: - { _id: 1, x: 12 } - { _id: 2, x: 22 } + - + description: "FindOneAndUpdate fails with a RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + closeConnection: true + operation: + name: "findOneAndUpdate" + arguments: + filter: { _id: 1 } + update: { $inc: { x: 1 } } + returnDocument: "Before" + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/findOneAndUpdate.json b/test/spec/retryable-writes/findOneAndUpdate.json index 92d4f54bda8..9ae2d87d821 100644 --- a/test/spec/retryable-writes/findOneAndUpdate.json +++ b/test/spec/retryable-writes/findOneAndUpdate.json @@ -1,4 +1,12 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +17,9 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "FindOneAndUpdate is committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -57,9 +61,6 @@ }, { "description": "FindOneAndUpdate is not committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -104,9 +105,6 @@ }, { "description": "FindOneAndUpdate is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { diff --git a/test/spec/retryable-writes/findOneAndUpdate.yml b/test/spec/retryable-writes/findOneAndUpdate.yml index a82bd034456..9235526be75 100644 --- a/test/spec/retryable-writes/findOneAndUpdate.yml +++ b/test/spec/retryable-writes/findOneAndUpdate.yml @@ -1,14 +1,15 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "FindOneAndUpdate is committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -26,8 +27,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndUpdate is not committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -46,8 +45,6 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndUpdate is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } diff --git a/test/spec/retryable-writes/insertMany-errorLabels.json 
b/test/spec/retryable-writes/insertMany-errorLabels.json new file mode 100644 index 00000000000..c78946e90a3 --- /dev/null +++ b/test/spec/retryable-writes/insertMany-errorLabels.json @@ -0,0 +1,129 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + } + ], + "tests": [ + { + "description": "InsertMany succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "insertedIds": { + "0": 2, + "1": 3 + } + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "InsertMany fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/insertMany-errorLabels.yml b/test/spec/retryable-writes/insertMany-errorLabels.yml new file mode 100644 index 00000000000..70551bad754 --- /dev/null +++ b/test/spec/retryable-writes/insertMany-errorLabels.yml @@ -0,0 +1,54 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: + - { _id: 1, x: 11 } + +tests: + - description: "InsertMany succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + errorCode: 112 # WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "insertMany" + arguments: + documents: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + options: { ordered: true } + outcome: # Driver retries operation and it succeeds + result: + insertedIds: { 0: 2, 1: 3 } + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + + - description: "InsertMany fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "insertMany" + arguments: + documents: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + options: { ordered: true } + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } diff --git 
a/test/spec/retryable-writes/insertMany-serverErrors.json b/test/spec/retryable-writes/insertMany-serverErrors.json index 17ba9b14089..1c6ebafc28b 100644 --- a/test/spec/retryable-writes/insertMany-serverErrors.json +++ b/test/spec/retryable-writes/insertMany-serverErrors.json @@ -1,17 +1,27 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 1, "x": 11 } ], - "minServerVersion": "3.99", "tests": [ { "description": "InsertMany succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -21,7 +31,10 @@ "failCommands": [ "insert" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -69,9 +82,6 @@ }, { "description": "InsertMany succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -83,7 +93,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -129,6 +142,55 @@ ] } } + }, + { + "description": "InsertMany fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/insertMany-serverErrors.yml b/test/spec/retryable-writes/insertMany-serverErrors.yml index aafaee8197f..f1933bb1538 100644 --- a/test/spec/retryable-writes/insertMany-serverErrors.yml +++ b/test/spec/retryable-writes/insertMany-serverErrors.yml @@ -1,20 +1,24 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } -# TODO: this should change to 4.0 once 4.0.0 is released. 
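Every serverErrors test in these fixtures is driven by the server's failCommand failpoint (a real test-only command, available when the server runs with test commands enabled). A harness can arm it directly; a sketch using the Node driver, with the URI and values illustrative only:

const { MongoClient } = require('mongodb');

async function failNextInsertWithRetryableLabel(uri) {
  const client = new MongoClient(uri);
  await client.connect();
  try {
    // Fail the next "insert" once with a non-retryable code but a
    // RetryableWriteError label (the errorLabels field needs MongoDB 4.3.1+).
    await client.db('admin').command({
      configureFailPoint: 'failCommand',
      mode: { times: 1 },
      data: {
        failCommands: ['insert'],
        errorCode: 112, // WriteConflict
        errorLabels: ['RetryableWriteError']
      }
    });
  } finally {
    await client.close();
  }
}

Disarming afterwards is the same command with mode: 'off'.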
-minServerVersion: '3.99' - tests: - description: "InsertMany succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "insertMany" arguments: @@ -32,8 +36,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertMany succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -42,6 +44,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "insertMany" arguments: @@ -57,3 +60,25 @@ tests: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } + - + description: "InsertMany fails with a RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + closeConnection: true + operation: + name: "insertMany" + arguments: + documents: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + options: { ordered: true } + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } diff --git a/test/spec/retryable-writes/insertMany.json b/test/spec/retryable-writes/insertMany.json index 74dd4a7a690..0ad326e2dc9 100644 --- a/test/spec/retryable-writes/insertMany.json +++ b/test/spec/retryable-writes/insertMany.json @@ -1,17 +1,21 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, "x": 11 } ], - "minServerVersion": "3.6", "tests": [ { "description": "InsertMany succeeds after one network error", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -63,9 +67,6 @@ }, { "description": "InsertMany with unordered execution", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -117,9 +118,6 @@ }, { "description": "InsertMany fails after multiple network errors", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": "alwaysOn", diff --git a/test/spec/retryable-writes/insertMany.yml b/test/spec/retryable-writes/insertMany.yml index 7559e340147..eed450e0a3e 100644 --- a/test/spec/retryable-writes/insertMany.yml +++ b/test/spec/retryable-writes/insertMany.yml @@ -1,13 +1,14 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } -minServerVersion: '3.6' - tests: - description: "InsertMany succeeds after one network error" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -28,8 +29,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertMany with unordered execution" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -50,8 +49,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertMany fails after multiple network errors" - clientOptions: - retryWrites: true failPoint: # Normally, a mongod will insert the documents as a batch with a # single commit. 
If this fails, mongod may try to insert each diff --git a/test/spec/retryable-writes/insertOne-errorLabels.json b/test/spec/retryable-writes/insertOne-errorLabels.json new file mode 100644 index 00000000000..9b8d13d5240 --- /dev/null +++ b/test/spec/retryable-writes/insertOne-errorLabels.json @@ -0,0 +1,90 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [], + "tests": [ + { + "description": "InsertOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/insertOne-errorLabels.yml b/test/spec/retryable-writes/insertOne-errorLabels.yml new file mode 100644 index 00000000000..0aa7498a04b --- /dev/null +++ b/test/spec/retryable-writes/insertOne-errorLabels.yml @@ -0,0 +1,44 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: [] + +tests: + - description: "InsertOne succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + errorCode: 112 # WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "insertOne" + arguments: + document: { _id: 1, x: 11 } + outcome: # Driver retries operation and it succeeds + result: + insertedId: 1 + collection: + data: + - { _id: 1, x: 11 } + + - description: "InsertOne fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "insertOne" + arguments: + document: { _id: 1, x: 11 } + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: ["RetryableWriteError"] + collection: + data: [] diff --git a/test/spec/retryable-writes/insertOne-serverErrors.json b/test/spec/retryable-writes/insertOne-serverErrors.json index 9ef8437062a..77fe29c00eb 100644 --- a/test/spec/retryable-writes/insertOne-serverErrors.json +++ b/test/spec/retryable-writes/insertOne-serverErrors.json @@ -1,4 +1,18 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 
1, @@ -9,13 +23,9 @@ "x": 22 } ], - "minServerVersion": "3.99", "tests": [ { "description": "InsertOne succeeds after connection failure", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -61,9 +71,6 @@ }, { "description": "InsertOne succeeds after NotMaster", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -74,6 +81,9 @@ "insert" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -110,9 +120,6 @@ }, { "description": "InsertOne succeeds after NotMasterOrSecondary", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -123,6 +130,9 @@ "insert" ], "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -159,9 +169,6 @@ }, { "description": "InsertOne succeeds after NotMasterNoSlaveOk", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -172,6 +179,9 @@ "insert" ], "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -208,9 +218,6 @@ }, { "description": "InsertOne succeeds after InterruptedDueToReplStateChange", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -221,6 +228,9 @@ "insert" ], "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -257,9 +267,6 @@ }, { "description": "InsertOne succeeds after InterruptedAtShutdown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -270,6 +277,9 @@ "insert" ], "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -306,9 +316,6 @@ }, { "description": "InsertOne succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -319,6 +326,9 @@ "insert" ], "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -355,9 +365,6 @@ }, { "description": "InsertOne succeeds after ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -368,6 +375,9 @@ "insert" ], "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -404,9 +414,6 @@ }, { "description": "InsertOne succeeds after HostNotFound", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -417,6 +424,9 @@ "insert" ], "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -453,9 +463,6 @@ }, { "description": "InsertOne succeeds after HostUnreachable", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -466,6 +473,9 @@ "insert" ], "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -502,9 +512,6 @@ }, { "description": "InsertOne succeeds after SocketException", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -515,6 +522,9 @@ "insert" ], "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -551,9 +561,6 @@ }, { "description": "InsertOne 
succeeds after NetworkTimeout", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -564,6 +571,9 @@ "insert" ], "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -599,10 +609,56 @@ } }, { - "description": "InsertOne fails after Interrupted", - "clientOptions": { - "retryWrites": true + "description": "InsertOne succeeds after ExceededTimeLimit", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 262, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + "outcome": { + "result": { + "insertedId": 3 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "InsertOne fails after Interrupted", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -627,6 +683,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -643,9 +704,6 @@ }, { "description": "InsertOne succeeds after WriteConcernError InterruptedAtShutdown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -657,7 +715,10 @@ ], "writeConcernError": { "code": 11600, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -694,9 +755,6 @@ }, { "description": "InsertOne succeeds after WriteConcernError InterruptedDueToReplStateChange", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -708,7 +766,10 @@ ], "writeConcernError": { "code": 11602, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -745,9 +806,6 @@ }, { "description": "InsertOne succeeds after WriteConcernError PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -759,7 +817,10 @@ ], "writeConcernError": { "code": 189, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -796,9 +857,6 @@ }, { "description": "InsertOne succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -810,7 +868,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -847,9 +908,6 @@ }, { "description": "InsertOne fails after multiple retryable writeConcernErrors", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -876,6 +934,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -896,9 +959,6 @@ }, { "description": "InsertOne fails after WriteConcernError Interrupted", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -925,6 +985,11 @@ 
}, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -945,9 +1010,6 @@ }, { "description": "InsertOne fails after WriteConcernError WriteConcernFailed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -978,6 +1040,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -995,6 +1062,50 @@ ] } } + }, + { + "description": "InsertOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/insertOne-serverErrors.yml b/test/spec/retryable-writes/insertOne-serverErrors.yml index 69173b9c18e..dd1be21f953 100644 --- a/test/spec/retryable-writes/insertOne-serverErrors.yml +++ b/test/spec/retryable-writes/insertOne-serverErrors.yml @@ -1,15 +1,18 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -# TODO: this should change to 4.0 once 4.0.0 is released. -minServerVersion: '3.99' - tests: - description: "InsertOne succeeds after connection failure" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -30,14 +33,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after NotMaster" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 10107 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -53,14 +55,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after NotMasterOrSecondary" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 13436 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -76,14 +77,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after NotMasterNoSlaveOk" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 13435 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -99,14 +99,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after InterruptedDueToReplStateChange" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 11602 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -122,14 +121,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after InterruptedAtShutdown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 11600 + errorLabels: 
["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -145,14 +143,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -168,14 +165,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 91 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -191,14 +187,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after HostNotFound" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 7 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -214,14 +209,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after HostUnreachable" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 6 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -237,14 +231,13 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after SocketException" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 9001 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -260,14 +253,35 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after NetworkTimeout" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["insert"] errorCode: 89 + errorLabels: ["RetryableWriteError"] # SPEC-1565 + closeConnection: false + operation: + name: "insertOne" + arguments: + document: { _id: 3, x: 33 } + outcome: + result: + insertedId: 3 + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - + description: "InsertOne succeeds after ExceededTimeLimit" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + errorCode: 262 + errorLabels: ["RetryableWriteError"] # SPEC-1565 closeConnection: false operation: name: "insertOne" @@ -283,8 +297,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne fails after Interrupted" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -298,14 +310,14 @@ tests: document: { _id: 3, x: 33 } outcome: error: true + result: + errorLabelsOmit: ["RetryableWriteError"] collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - description: "InsertOne succeeds after WriteConcernError InterruptedAtShutdown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -314,6 +326,7 @@ tests: writeConcernError: code: 11600 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "insertOne" arguments: @@ -328,8 +341,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after WriteConcernError 
InterruptedDueToReplStateChange" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -338,6 +349,7 @@ tests: writeConcernError: code: 11602 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "insertOne" arguments: @@ -352,8 +364,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after WriteConcernError PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -362,6 +372,7 @@ tests: writeConcernError: code: 189 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "insertOne" arguments: @@ -376,8 +387,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -386,6 +395,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "insertOne" arguments: @@ -400,8 +410,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne fails after multiple retryable writeConcernErrors" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 2 } @@ -416,6 +424,8 @@ tests: document: { _id: 3, x: 33 } outcome: error: true + result: + errorLabelsContain: ["RetryableWriteError"] collection: data: - { _id: 1, x: 11 } @@ -423,8 +433,6 @@ tests: - { _id: 3, x: 33 } # The write was still applied. - description: "InsertOne fails after WriteConcernError Interrupted" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -439,6 +447,8 @@ tests: document: { _id: 3, x: 33 } outcome: error: true + result: + errorLabelsOmit: ["RetryableWriteError"] collection: data: - { _id: 1, x: 11 } @@ -446,8 +456,6 @@ tests: - { _id: 3, x: 33 } # The write was still applied. - description: "InsertOne fails after WriteConcernError WriteConcernFailed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -464,8 +472,31 @@ tests: document: { _id: 3, x: 33 } outcome: error: true + result: + errorLabelsOmit: ["RetryableWriteError"] collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } # The write was still applied. 
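The outcome.result assertions added throughout these files (errorLabelsContain / errorLabelsOmit) constrain only the label set on the surfaced error, not its code or message. A spec-runner check might look like the sketch below, assuming a chai-style expect; the field names come straight from the test format:

const { expect } = require('chai');

// err: the error the operation surfaced; result: the "result" document from
// the test's "outcome" (both label lists are optional).
function assertErrorLabels(err, result) {
  // Labels listed in errorLabelsContain must be present on the error...
  for (const label of result.errorLabelsContain || []) {
    expect(err.errorLabels).to.include(label);
  }
  // ...and labels listed in errorLabelsOmit must be absent.
  for (const label of result.errorLabelsOmit || []) {
    expect(err.errorLabels || []).to.not.include(label);
  }
}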
+ + - + description: "InsertOne fails with a RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + closeConnection: true + operation: + name: "insertOne" + arguments: + document: { _id: 1, x: 11 } + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/insertOne.json b/test/spec/retryable-writes/insertOne.json index 123d51edb60..04dee6dd68a 100644 --- a/test/spec/retryable-writes/insertOne.json +++ b/test/spec/retryable-writes/insertOne.json @@ -1,4 +1,12 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +17,9 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "InsertOne is committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -55,9 +59,6 @@ }, { "description": "InsertOne is not committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -100,9 +101,6 @@ }, { "description": "InsertOne is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { diff --git a/test/spec/retryable-writes/insertOne.yml b/test/spec/retryable-writes/insertOne.yml index 0e535ddbda0..ebfdf23e662 100644 --- a/test/spec/retryable-writes/insertOne.yml +++ b/test/spec/retryable-writes/insertOne.yml @@ -1,14 +1,15 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "InsertOne is committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -26,8 +27,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne is not committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -46,8 +45,6 @@ tests: - { _id: 3, x: 33 } - description: "InsertOne is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } diff --git a/test/spec/retryable-writes/replaceOne-errorLabels.json b/test/spec/retryable-writes/replaceOne-errorLabels.json new file mode 100644 index 00000000000..06867e5159c --- /dev/null +++ b/test/spec/retryable-writes/replaceOne-errorLabels.json @@ -0,0 +1,120 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "ReplaceOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, 
+ "x": 22 + } + ] + } + } + }, + { + "description": "ReplaceOne fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/replaceOne-errorLabels.yml b/test/spec/retryable-writes/replaceOne-errorLabels.yml new file mode 100644 index 00000000000..cb5d69ff377 --- /dev/null +++ b/test/spec/retryable-writes/replaceOne-errorLabels.yml @@ -0,0 +1,53 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +tests: + - description: "ReplaceOne succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + errorCode: 112 # WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "replaceOne" + arguments: + filter: { _id: 1 } + replacement: { _id: 1, x: 111 } + outcome: # Driver retries operation and it succeeds + result: + matchedCount: 1 + modifiedCount: 1 + upsertedCount: 0 + collection: + data: + - { _id: 1, x: 111 } + - { _id: 2, x: 22 } + + - description: "ReplaceOne fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "replaceOne" + arguments: + filter: { _id: 1 } + replacement: { _id: 1, x: 111 } + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/replaceOne-serverErrors.json b/test/spec/retryable-writes/replaceOne-serverErrors.json index b9c449a869a..af18bcf1a2e 100644 --- a/test/spec/retryable-writes/replaceOne-serverErrors.json +++ b/test/spec/retryable-writes/replaceOne-serverErrors.json @@ -1,4 +1,18 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +23,9 @@ "x": 22 } ], - "minServerVersion": "3.99", "tests": [ { "description": "ReplaceOne succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -25,7 +35,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -62,9 +75,6 @@ }, { "description": "ReplaceOne succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -76,7 +86,10 
@@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -111,6 +124,53 @@ ] } } + }, + { + "description": "ReplaceOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/replaceOne-serverErrors.yml b/test/spec/retryable-writes/replaceOne-serverErrors.yml index bd2bb0f30fe..bd67b97c27b 100644 --- a/test/spec/retryable-writes/replaceOne-serverErrors.yml +++ b/test/spec/retryable-writes/replaceOne-serverErrors.yml @@ -1,21 +1,25 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -# TODO: this should change to 4.0 once 4.0.0 is released. -minServerVersion: '3.99' - tests: - description: "ReplaceOne succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["update"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "replaceOne" arguments: @@ -32,8 +36,6 @@ tests: - { _id: 2, x: 22 } - description: "ReplaceOne succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -42,6 +44,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "replaceOne" arguments: @@ -56,3 +59,24 @@ tests: data: - { _id: 1, x: 111 } - { _id: 2, x: 22 } + - + description: "ReplaceOne fails with a RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + closeConnection: true + operation: + name: "replaceOne" + arguments: + filter: { _id: 1 } + replacement: { _id: 1, x: 111 } + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/replaceOne.json b/test/spec/retryable-writes/replaceOne.json index 6e9107b799c..e5b8cf8eabb 100644 --- a/test/spec/retryable-writes/replaceOne.json +++ b/test/spec/retryable-writes/replaceOne.json @@ -1,4 +1,12 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +17,9 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "ReplaceOne is committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -56,9 +60,6 @@ }, { "description": "ReplaceOne is not committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -102,9 +103,6 @@ }, { "description": 
"ReplaceOne is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { diff --git a/test/spec/retryable-writes/replaceOne.yml b/test/spec/retryable-writes/replaceOne.yml index 1eed95edff5..0000904a401 100644 --- a/test/spec/retryable-writes/replaceOne.yml +++ b/test/spec/retryable-writes/replaceOne.yml @@ -1,14 +1,15 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "ReplaceOne is committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -28,8 +29,6 @@ tests: - { _id: 2, x: 22 } - description: "ReplaceOne is not committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -50,8 +49,6 @@ tests: - { _id: 2, x: 22 } - description: "ReplaceOne is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } diff --git a/test/spec/retryable-writes/updateMany.json b/test/spec/retryable-writes/updateMany.json index a2e32cdbb4e..14288c2860d 100644 --- a/test/spec/retryable-writes/updateMany.json +++ b/test/spec/retryable-writes/updateMany.json @@ -1,4 +1,13 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset", + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +18,10 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "UpdateMany ignores retryWrites", - "clientOptions": { - "retryWrites": true - }, + "useMultipleMongoses": true, "operation": { "name": "updateMany", "arguments": { diff --git a/test/spec/retryable-writes/updateMany.yml b/test/spec/retryable-writes/updateMany.yml index 24f9ef54221..31faee4bca5 100644 --- a/test/spec/retryable-writes/updateMany.yml +++ b/test/spec/retryable-writes/updateMany.yml @@ -1,14 +1,16 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset", "sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "UpdateMany ignores retryWrites" - clientOptions: - retryWrites: true + useMultipleMongoses: true operation: name: "updateMany" arguments: diff --git a/test/spec/retryable-writes/updateOne-errorLabels.json b/test/spec/retryable-writes/updateOne-errorLabels.json new file mode 100644 index 00000000000..4a6be3ffbaa --- /dev/null +++ b/test/spec/retryable-writes/updateOne-errorLabels.json @@ -0,0 +1,122 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "UpdateOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "UpdateOne fails if server does not return RetryableWriteError", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/spec/retryable-writes/updateOne-errorLabels.yml b/test/spec/retryable-writes/updateOne-errorLabels.yml new file mode 100644 index 00000000000..810351e3164 --- /dev/null +++ b/test/spec/retryable-writes/updateOne-errorLabels.yml @@ -0,0 +1,53 @@ +runOn: + - minServerVersion: "4.3.1" + topology: ["replicaset", "sharded"] + +data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +tests: + - description: "UpdateOne succeeds with RetryableWriteError from server" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + errorCode: 112 # WriteConflict, not a retryable error code + errorLabels: ["RetryableWriteError"] # Override server behavior: send RetryableWriteError label with non-retryable error code + operation: + name: "updateOne" + arguments: + filter: { _id: 1 } + update: { $inc: { x: 1 } } + outcome: # Driver retries operation and it succeeds + result: + matchedCount: 1 + modifiedCount: 1 + upsertedCount: 0 + collection: + data: + - { _id: 1, x: 12 } + - { _id: 2, x: 22 } + + - description: "UpdateOne fails if server does not return RetryableWriteError" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + errorCode: 11600 # InterruptedAtShutdown, normally a retryable error code + errorLabels: [] # Override server behavior: do not send RetryableWriteError label with retryable code + operation: + name: "updateOne" + arguments: + filter: { _id: 1 } + update: { $inc: { x: 1 } } + outcome: + error: true # Driver does not retry operation because there was no RetryableWriteError label on response + result: + errorLabelsOmit: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/updateOne-serverErrors.json b/test/spec/retryable-writes/updateOne-serverErrors.json index 84ebf877ad9..bb442eb68a9 100644 --- a/test/spec/retryable-writes/updateOne-serverErrors.json +++ b/test/spec/retryable-writes/updateOne-serverErrors.json @@ -1,4 +1,18 @@ { + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +23,9 @@ "x": 22 } ], - "minServerVersion": "3.99", "tests": [ { "description": "UpdateOne succeeds after PrimarySteppedDown", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -25,7 +35,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -63,9 +76,6 @@ }, { "description": "UpdateOne succeeds after WriteConcernError ShutdownInProgress", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -77,7 +87,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": 
[ + "RetryableWriteError" + ] } } }, @@ -113,6 +126,54 @@ ] } } + }, + { + "description": "UpdateOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/spec/retryable-writes/updateOne-serverErrors.yml b/test/spec/retryable-writes/updateOne-serverErrors.yml index 1d468861d6f..02d8380d237 100644 --- a/test/spec/retryable-writes/updateOne-serverErrors.yml +++ b/test/spec/retryable-writes/updateOne-serverErrors.yml @@ -1,21 +1,25 @@ +runOn: + - + minServerVersion: "4.0" + topology: ["replicaset"] + - + minServerVersion: "4.1.7" + topology: ["sharded"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -# TODO: this should change to 4.0 once 4.0.0 is released. -minServerVersion: '3.99' - tests: - description: "UpdateOne succeeds after PrimarySteppedDown" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } data: failCommands: ["update"] errorCode: 189 + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "updateOne" arguments: @@ -32,8 +36,6 @@ tests: - { _id: 2, x: 22 } - description: "UpdateOne succeeds after WriteConcernError ShutdownInProgress" - clientOptions: - retryWrites: true failPoint: configureFailPoint: failCommand mode: { times: 1 } @@ -42,6 +44,7 @@ tests: writeConcernError: code: 91 errmsg: Replication is being shut down + errorLabels: ["RetryableWriteError"] # SPEC-1565 operation: name: "updateOne" arguments: @@ -56,3 +59,24 @@ tests: data: - { _id: 1, x: 12 } - { _id: 2, x: 22 } + - + description: "UpdateOne fails with a RetryableWriteError label after two connection failures" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + closeConnection: true + operation: + name: "updateOne" + arguments: + filter: { _id: 1 } + update: { $inc: { x: 1 } } + outcome: + error: true + result: + errorLabelsContain: ["RetryableWriteError"] + collection: + data: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/test/spec/retryable-writes/updateOne.json b/test/spec/retryable-writes/updateOne.json index c342c562788..0f806dc3d84 100644 --- a/test/spec/retryable-writes/updateOne.json +++ b/test/spec/retryable-writes/updateOne.json @@ -1,4 +1,12 @@ { + "runOn": [ + { + "minServerVersion": "3.6", + "topology": [ + "replicaset" + ] + } + ], "data": [ { "_id": 1, @@ -9,13 +17,9 @@ "x": 22 } ], - "minServerVersion": "3.6", "tests": [ { "description": "UpdateOne is committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -57,9 +61,6 @@ }, { "description": "UpdateOne is not committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -104,9 +105,6 @@ }, { "description": "UpdateOne is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -147,9 +145,6 @@ }, 
{ "description": "UpdateOne with upsert is committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -198,9 +193,6 @@ }, { "description": "UpdateOne with upsert is not committed on first attempt", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { @@ -252,9 +244,6 @@ }, { "description": "UpdateOne with upsert is never committed", - "clientOptions": { - "retryWrites": true - }, "failPoint": { "configureFailPoint": "onPrimaryTransactionalWrite", "mode": { diff --git a/test/spec/retryable-writes/updateOne.yml b/test/spec/retryable-writes/updateOne.yml index 900d4e7fb3f..56b7d822b4f 100644 --- a/test/spec/retryable-writes/updateOne.yml +++ b/test/spec/retryable-writes/updateOne.yml @@ -1,14 +1,15 @@ +runOn: + - + minServerVersion: "3.6" + topology: ["replicaset"] + data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } -minServerVersion: '3.6' - tests: - description: "UpdateOne is committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -28,8 +29,6 @@ tests: - { _id: 2, x: 22 } - description: "UpdateOne is not committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -50,8 +49,6 @@ tests: - { _id: 2, x: 22 } - description: "UpdateOne is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } @@ -69,8 +66,6 @@ tests: - { _id: 2, x: 22 } - description: "UpdateOne with upsert is committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -93,8 +88,6 @@ tests: - { _id: 3, x: 34 } - description: "UpdateOne with upsert is not committed on first attempt" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 1 } @@ -118,8 +111,6 @@ tests: - { _id: 3, x: 34 } - description: "UpdateOne with upsert is never committed" - clientOptions: - retryWrites: true failPoint: configureFailPoint: onPrimaryTransactionalWrite mode: { times: 2 } diff --git a/test/spec/server-discovery-and-monitoring/rs/repeated.json b/test/spec/server-discovery-and-monitoring/rs/repeated.json new file mode 100644 index 00000000000..392d4857947 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/rs/repeated.json @@ -0,0 +1,140 @@ +{ + "description": "Repeated ismaster response must be processed", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "hidden": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "hidden": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/rs/repeated.yml b/test/spec/server-discovery-and-monitoring/rs/repeated.yml new file mode 100644 index 00000000000..141e41c9e20 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/rs/repeated.yml @@ -0,0 +1,101 @@ +description: Repeated ismaster response must be processed + +uri: "mongodb://a,b/?replicaSet=rs" + +phases: + # Phase 1 - a says it's not primary and suggests c may be the primary + - responses: + - + - "a:27017" + - ok: 1 + ismaster: false + secondary: true + hidden: true + hosts: ["a:27017", "c:27017"] + setName: "rs" + minWireVersion: 0 + maxWireVersion: 6 + outcome: + servers: + "a:27017": + type: "RSOther" + setName: "rs" + + "b:27017": + type: Unknown + + "c:27017": + type: Unknown + topologyType: "ReplicaSetNoPrimary" + logicalSessionTimeoutMinutes: ~ + setName: "rs" + + # Phase 2 - c says it's a standalone, is removed + - responses: + - + - "c:27017" + - ok: 1 + ismaster: true + minWireVersion: 0 + maxWireVersion: 6 + outcome: + servers: + "a:27017": + type: "RSOther" + setName: "rs" + + "b:27017": + type: Unknown + topologyType: "ReplicaSetNoPrimary" + logicalSessionTimeoutMinutes: ~ + setName: "rs" + + # Phase 3 - response from a is repeated, and must be processed; c added again + - responses: + - + - "a:27017" + - ok: 1 + ismaster: false + secondary: true + hidden: true + hosts: ["a:27017", "c:27017"] + setName: "rs" + minWireVersion: 0 + maxWireVersion: 6 + outcome: + servers: + "a:27017": + type: "RSOther" + setName: "rs" + + "b:27017": + type: Unknown + + "c:27017": + type: Unknown + topologyType: "ReplicaSetNoPrimary" + logicalSessionTimeoutMinutes: ~ + setName: "rs" + + # Phase 4 - c is now a primary + - responses: + - + - "c:27017" + - ok: 1 + ismaster: true + hosts: ["a:27017", "c:27017"] + setName: "rs" + minWireVersion: 0 + maxWireVersion: 6 + outcome: + servers: + "a:27017": + type: "RSOther" + setName: "rs" + + "c:27017": + type: RSPrimary + setName: rs + topologyType: "ReplicaSetWithPrimary" + logicalSessionTimeoutMinutes: ~ + setName: "rs" diff --git a/test/tools/atlas_connectivity_tests.js b/test/tools/atlas_connectivity_tests.js index e6947bbdbc6..156346145c9 100644 --- a/test/tools/atlas_connectivity_tests.js +++ b/test/tools/atlas_connectivity_tests.js @@ -12,11 +12,7 @@ const CONFIGS = ['ATLAS_REPL', 'ATLAS_SHRD', 'ATLAS_FREE', 'ATLAS_TLS11', 'ATLAS ); function runConnectionTest(config) { - const client = new MongoClient(config.url, { - useNewUrlParser: true, - // 
diff --git a/test/tools/atlas_connectivity_tests.js b/test/tools/atlas_connectivity_tests.js
index e6947bbdbc6..156346145c9 100644
--- a/test/tools/atlas_connectivity_tests.js
+++ b/test/tools/atlas_connectivity_tests.js
@@ -12,11 +12,7 @@ const CONFIGS = ['ATLAS_REPL', 'ATLAS_SHRD', 'ATLAS_FREE', 'ATLAS_TLS11', 'ATLAS
 );
 
 function runConnectionTest(config) {
-  const client = new MongoClient(config.url, {
-    useNewUrlParser: true,
-    // TODO: We should test both the unified and not-unified cases
-    useUnifiedTopology: false
-  });
+  const client = new MongoClient(config.url);
   return Promise.resolve()
     .then(() => console.log(`testing ${config.name}`))
     .then(() => client.connect())
diff --git a/test/tools/runner/config.js b/test/tools/runner/config.js
index faca173b6fb..365f1488097 100644
--- a/test/tools/runner/config.js
+++ b/test/tools/runner/config.js
@@ -4,8 +4,17 @@
 const qs = require('querystring');
 const util = require('util');
 const MongoClient = require('../../../lib/mongo_client');
-const TopologyType = require('../../../lib/core/sdam/common').TopologyType;
-const core = require('../../../lib/core');
+const { Topology } = require('../../../lib/sdam/topology');
+const { TopologyType } = require('../../../lib/sdam/common');
+
+function convertToConnStringMap(obj) {
+  let result = [];
+  Object.keys(obj).forEach(key => {
+    result.push(`${key}:${obj[key]}`);
+  });
+
+  return result.join(',');
+}
 
 class NativeConfiguration {
   constructor(parsedURI, context) {
@@ -13,6 +22,7 @@ class NativeConfiguration {
     this.clientSideEncryption = context.clientSideEncryption;
     this.options = Object.assign(
       {
+        auth: parsedURI.auth,
         hosts: parsedURI.hosts,
         host: parsedURI.hosts[0] ? parsedURI.hosts[0].host : 'localhost',
         port: parsedURI.hosts[0] ? parsedURI.hosts[0].port : 27017,
@@ -21,7 +31,6 @@ class NativeConfiguration {
       parsedURI.options
     );
 
-    this.mongo = this.require = require('../../..');
     this.writeConcern = function() {
       return { w: 1 };
     };
@@ -52,8 +61,12 @@ class NativeConfiguration {
     return this.options.replicaSet;
   }
 
-  usingUnifiedTopology() {
-    return !!process.env.MONGODB_UNIFIED_TOPOLOGY;
+  get mongo() {
+    throw new TypeError('fix this!');
+  }
+
+  get require() {
+    throw new TypeError('fix this!');
   }
 
   newClient(dbOptions, serverOptions) {
@@ -61,18 +74,16 @@ class NativeConfiguration {
     if (typeof dbOptions === 'string') {
       return new MongoClient(
         dbOptions,
-        this.usingUnifiedTopology()
-          ? Object.assign({ useUnifiedTopology: true, minHeartbeatFrequencyMS: 100 }, serverOptions)
-          : serverOptions
+        Object.assign({ minHeartbeatFrequencyMS: 100 }, serverOptions)
       );
     }
 
     dbOptions = dbOptions || {};
-    serverOptions = Object.assign({}, { haInterval: 100 }, serverOptions);
-    if (this.usingUnifiedTopology()) {
-      serverOptions.useUnifiedTopology = true;
-      serverOptions.minHeartbeatFrequencyMS = 100;
-    }
+    serverOptions = Object.assign(
+      {},
+      { haInterval: 100, minHeartbeatFrequencyMS: 100 },
+      serverOptions
+    );
 
     // Fall back
     let dbHost = (serverOptions && serverOptions.host) || this.options.host;
@@ -81,19 +92,41 @@ class NativeConfiguration {
       dbHost = qs.escape(dbHost);
    }
 
+    if (this.options.authMechanism) {
+      Object.assign(dbOptions, {
+        authMechanism: this.options.authMechanism
+      });
+    }
+
+    if (this.options.authMechanismProperties) {
+      Object.assign(dbOptions, {
+        authMechanismProperties: convertToConnStringMap(this.options.authMechanismProperties)
+      });
+    }
+
     if (this.options.replicaSet) {
       Object.assign(dbOptions, { replicaSet: this.options.replicaSet, auto_reconnect: false });
     }
 
-    const connectionString = url.format({
+    const urlOptions = {
       protocol: 'mongodb',
       slashes: true,
       hostname: dbHost,
       port: dbPort,
       query: dbOptions,
       pathname: '/'
-    });
+    };
+
+    if (this.options.auth) {
+      let auth = this.options.auth.username;
+      if (this.options.auth.password) {
+        auth = `${auth}:${this.options.auth.password}`;
+      }
+
+      urlOptions.auth = auth;
+    }
+
+    const connectionString = url.format(urlOptions);
 
     return new MongoClient(connectionString, serverOptions);
   }
@@ -106,24 +139,12 @@ class NativeConfiguration {
     options = Object.assign({}, options);
 
     const hosts = host == null ? [].concat(this.options.hosts) : [{ host, port }];
-    if (this.usingUnifiedTopology()) {
-      return new core.Topology(hosts, options);
-    }
-
-    if (this.topologyType === TopologyType.ReplicaSetWithPrimary) {
-      options.poolSize = 1;
-      options.autoReconnect = false;
-      return new core.ReplSet(hosts, options);
-    }
-
-    if (this.topologyType === TopologyType.Sharded) {
-      return new core.Mongos(hosts, options);
-    }
-
-    return new core.Server(Object.assign({ host, port }, options));
+    return new Topology(hosts, options);
   }
 
-  url(username, password) {
+  url(username, password, options) {
+    options = options || {};
+
     const query = {};
     if (this.options.replicaSet) {
       Object.assign(query, { replicaSet: this.options.replicaSet, auto_reconnect: false });
@@ -131,12 +152,19 @@ class NativeConfiguration {
 
     let multipleHosts;
     if (this.options.hosts.length > 1) {
-      multipleHosts = this.options.hosts
-        .reduce((built, host) => {
-          built.push(`${host.host}:${host.port}`);
-          return built;
-        }, [])
-        .join(',');
+      // NOTE: The only way to force a sharded topology with the driver is to duplicate
+      // the host entry. This will eventually be solved by autodetection.
+      if (this.topologyType === TopologyType.Sharded) {
+        const firstHost = this.options.hosts[0];
+        multipleHosts = `${firstHost.host}:${firstHost.port},${firstHost.host}:${firstHost.port}`;
+      } else {
+        multipleHosts = this.options.hosts
+          .reduce((built, host) => {
+            built.push(`${host.host}:${host.port}`);
+            return built;
+          }, [])
+          .join(',');
+      }
     }
 
     const urlObject = {
@@ -157,6 +185,20 @@ class NativeConfiguration {
 
     if (username || password) {
       urlObject.auth = password == null ? username : `${username}:${password}`;
+
+      if (options.authMechanism || this.options.authMechanism) {
+        Object.assign(query, {
+          authMechanism: options.authMechanism || this.options.authMechanism
+        });
+      }
+
+      if (options.authMechanismProperties || this.options.authMechanismProperties) {
+        Object.assign(query, {
+          authMechanismProperties: convertToConnStringMap(
+            options.authMechanismProperties || this.options.authMechanismProperties
+          )
+        });
+      }
     }
 
     if (multipleHosts) {
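To make the new auth-aware URL building above concrete, here is a small sketch of what `convertToConnStringMap` (copied from the hunk itself) produces; the property values are illustrative only.

```js
function convertToConnStringMap(obj) {
  let result = [];
  Object.keys(obj).forEach(key => {
    result.push(`${key}:${obj[key]}`);
  });
  return result.join(',');
}

// e.g. for Kerberos-style auth mechanism properties:
console.log(convertToConnStringMap({ SERVICE_NAME: 'alternate' }));
// -> 'SERVICE_NAME:alternate'

// so config.url('user', 'pass', { authMechanism: 'GSSAPI' }) can now emit a
// connection string roughly of the shape (modulo URL encoding):
// mongodb://user:pass@localhost:27017/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:alternate
```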
diff --git a/test/tools/runner/filters/mongodb_topology_filter.js b/test/tools/runner/filters/mongodb_topology_filter.js
index 2725499f92d..b9323786b0b 100755
--- a/test/tools/runner/filters/mongodb_topology_filter.js
+++ b/test/tools/runner/filters/mongodb_topology_filter.js
@@ -1,7 +1,5 @@
 'use strict';
-
-const topologyType = require('../../../../lib/core/topologies/shared').topologyType;
-const TopologyType = require('../../../../lib/core/sdam/common').TopologyType;
+const { TopologyType } = require('../../../../lib/sdam/common');
 
 /**
  * Filter for the MongoDB topology required for the test
@@ -8,14 +6,14 @@
  *
  * example:
 * metadata: {
 *   requires: {
 *     topology:
 *   }
 * }
 */
 class MongoDBTopologyFilter {
   initializeFilter(client, context, callback) {
-    let type = topologyType(client.topology);
+    let type = client.topology.description.type;
     context.topologyType = type;
     this.runtimeTopology = topologyTypeToString(type);
     console.log(`[ topology type: ${this.runtimeTopology} ]`);
diff --git a/test/tools/runner/filters/unified_filter.js b/test/tools/runner/filters/unified_filter.js
deleted file mode 100644
index 67b155c9098..00000000000
--- a/test/tools/runner/filters/unified_filter.js
+++ /dev/null
@@ -1,25 +0,0 @@
-'use strict';
-
-/**
- * Filter for tests that require the unified topology
- *
- * example:
- * metadata: {
- *   requires: {
- *     unifiedTopology:
- *   }
- * }
- */
-class UnifiedTopologyFilter {
-  filter(test) {
-    const unifiedTopology =
-      test.metadata && test.metadata.requires && test.metadata.requires.unifiedTopology;
-
-    return (
-      typeof unifiedTopology !== 'boolean' ||
-      unifiedTopology === !!process.env.MONGODB_UNIFIED_TOPOLOGY
-    );
-  }
-}
-
-module.exports = UnifiedTopologyFilter;
diff --git a/test/tools/runner/index.js b/test/tools/runner/index.js
index d70967837b3..d8029de044b 100644
--- a/test/tools/runner/index.js
+++ b/test/tools/runner/index.js
@@ -2,12 +2,11 @@
 
 const path = require('path');
 const fs = require('fs');
-const MongoClient = require('../../..').MongoClient;
+const { MongoClient } = require('../../..');
 const TestConfiguration = require('./config');
-const parseConnectionString = require('../../../lib/core/uri_parser');
-const eachAsync = require('../../../lib/core/utils').eachAsync;
+const { parseConnectionString } = require('../../../lib/connection_string');
+const { eachAsync } = require('../../../lib/utils');
 const mock = require('mongodb-mock-server');
-const chalk = require('chalk');
 
 const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017';
 const filters = [];
@@ -53,14 +52,15 @@ function filterOutTests(suite) {
 }
 
 before(function(_done) {
-  const usingUnifiedTopology = !!process.env.MONGODB_UNIFIED_TOPOLOGY;
-  console.log(
-    `connecting to: ${chalk.bold(MONGODB_URI)} using ${chalk.bold(
-      usingUnifiedTopology ? 'unified' : 'legacy'
-    )} topology`
-  );
-
-  const client = new MongoClient(MONGODB_URI, { useNewUrlParser: true, useUnifiedTopology: true });
+  // NOTE: if we first parse the connection string and redact auth, then we can reenable this
+  // const usingUnifiedTopology = !!process.env.MONGODB_UNIFIED_TOPOLOGY;
+  // console.log(
+  //   `connecting to: ${chalk.bold(MONGODB_URI)} using ${chalk.bold(
+  //     usingUnifiedTopology ? 'unified' : 'legacy'
+  //   )} topology`
+  // );
+
+  const client = new MongoClient(MONGODB_URI);
 
   const done = err => client.close(err2 => _done(err || err2));
   client.connect(err => {
diff --git a/test/tools/runner/plugins/client_leak_checker.js b/test/tools/runner/plugins/client_leak_checker.js
index ac2863df507..6ee18fa0622 100644
--- a/test/tools/runner/plugins/client_leak_checker.js
+++ b/test/tools/runner/plugins/client_leak_checker.js
@@ -33,14 +33,9 @@ function unifiedTopologyIsConnected(client) {
   after(function() {
     wtfnode.dump();
 
-    const isUnifiedTopology = this.configuration.usingUnifiedTopology;
     const traces = [];
     const openClientCount = activeClients.reduce((count, client) => {
-      const isConnected = isUnifiedTopology
-        ? unifiedTopologyIsConnected(client)
-        : client.isConnected();
-
-      if (isConnected) {
+      if (unifiedTopologyIsConnected(client)) {
         traces.push(client.trace);
         return count + 1;
       }
diff --git a/test/tools/runner/plugins/session_leak_checker.js b/test/tools/runner/plugins/session_leak_checker.js
index 96c0167d7aa..58e8b0d7aea 100644
--- a/test/tools/runner/plugins/session_leak_checker.js
+++ b/test/tools/runner/plugins/session_leak_checker.js
@@ -2,9 +2,9 @@
 
 const expect = require('chai').expect;
 const sinon = require('sinon');
-const core = require('../../../../lib/core');
+const { Topology } = require('../../../../lib/sdam/topology');
 const MongoClient = require('../../../../lib/mongo_client');
-const ServerSessionPool = core.Sessions.ServerSessionPool;
+const { ServerSessionPool } = require('../../../../lib/sessions');
 
 const sandbox = sinon.createSandbox();
 let activeSessions, pooledSessions, activeSessionsBeforeClose;
@@ -54,7 +54,7 @@ beforeEach('Session Leak Before Each - setup session tracking', function() {
     return _endAllPooledSessions.apply(this, arguments);
   });
 
-  [core.Server, core.ReplSet, core.Mongos].forEach(topology => {
+  [Topology].forEach(topology => {
     const _endSessions = topology.prototype.endSessions;
     sandbox.stub(topology.prototype, 'endSessions').callsFake(function(sessions) {
       sessions = Array.isArray(sessions) ? sessions : [sessions];
diff --git a/test/tools/sdam_viz b/test/tools/sdam_viz
index 31b69e81453..2426ee13000 100755
--- a/test/tools/sdam_viz
+++ b/test/tools/sdam_viz
@@ -9,8 +9,6 @@ const argv = require('yargs')
   .demandCommand(1)
   .help('h')
   .describe('workload', 'Simulate a read workload')
-  .describe('legacy', 'Use the legacy topology types')
-  .alias('l', 'legacy')
   .alias('w', 'workload')
   .alias('h', 'help').argv;
 
@@ -19,17 +17,10 @@ function print(msg) {
 }
 
 const uri = argv._[0];
-const client = new MongoClient(uri, {
-  useNewUrlParser: true,
-  useUnifiedTopology: !argv.legacy
-});
+const client = new MongoClient(uri);
 
 async function run() {
-  print(
-    `connecting to: ${chalk.bold(uri)} using ${chalk.bold(
-      argv.legacy ? 'legacy' : 'unified'
-    )} topology`
-  );
+  print(`connecting to: ${chalk.bold(uri)}`);
 
   visualizeMonitoringEvents(client);
   await client.connect();
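A pattern worth noting across the hunks above: with only one topology implementation left, the transitional `useNewUrlParser`/`useUnifiedTopology` flags disappear from every `MongoClient` call site. A before/after sketch (the URI is illustrative):

```js
const { MongoClient } = require('mongodb');

// Before: opt-in flags were needed to select the unified topology
// const client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true });

// After this change, a bare construction is all that's required:
const client = new MongoClient('mongodb://localhost:27017');
```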
diff --git a/test/tools/utils.js b/test/tools/utils.js
index a766d8cda57..c7d91f98caf 100644
--- a/test/tools/utils.js
+++ b/test/tools/utils.js
@@ -1,9 +1,7 @@
 'use strict';
 
-const Logger = require('../../lib/core').Logger;
-const deprecateOptions = require('../../lib/utils').deprecateOptions;
-const arrayStrictEqual = require('../../lib/core/utils').arrayStrictEqual;
-const errorStrictEqual = require('../../lib/core/utils').errorStrictEqual;
+const Logger = require('../../lib/logger');
+const { deprecateOptions, arrayStrictEqual, errorStrictEqual } = require('../../lib/utils');
 const chalk = require('chalk');
 const chai = require('chai');
 const expect = chai.expect;
diff --git a/test/unit/bypass_validation.test.js b/test/unit/bypass_validation.test.js
index 628cb260d68..e5a1fd75dcb 100644
--- a/test/unit/bypass_validation.test.js
+++ b/test/unit/bypass_validation.test.js
@@ -79,7 +79,7 @@ describe('bypass document validation', function() {
 
     test.server.setMessageHandler(request => {
       const doc = request.document;
-      if (doc.mapreduce) {
+      if (doc.mapReduce) {
        try {
          expect(doc.bypassDocumentValidation).equal(config.expected);
          request.reply({
diff --git a/test/unit/change_stream_resume.test.js b/test/unit/change_stream_resume.test.js
index 61efd11a3f2..6b86cd98b26 100644
--- a/test/unit/change_stream_resume.test.js
+++ b/test/unit/change_stream_resume.test.js
@@ -183,10 +183,7 @@ describe('Change Stream Resume Tests', function() {
       metadata: { requires: { topology: 'single' } },
       test: function() {
         const configuration = this.configuration;
-        if (!configuration.usingUnifiedTopology()) {
-          // These tests take way too long with the non-unified topology, so we will skip them
-          return this.skip();
-        }
+
         test.server.setMessageHandler(makeServerHandler(config));
         client = configuration.newClient(`mongodb://${test.server.uri()}`, {
           socketTimeoutMS: 300
diff --git a/test/unit/client.test.js b/test/unit/client.test.js
index 5901c3c98c9..f86c074b410 100644
--- a/test/unit/client.test.js
+++ b/test/unit/client.test.js
@@ -24,7 +24,6 @@ describe('Client (unit)', function() {
     });
 
     const client = this.configuration.newClient(`mongodb://${server.uri()}/`, {
-      useUnifiedTopology: true,
       driverInfo: {
         name: 'mongoose',
         version: '5.7.10',
diff --git a/test/unit/client_metadata.test.js b/test/unit/client_metadata.test.js
deleted file mode 100644
index 21b51274189..00000000000
--- a/test/unit/client_metadata.test.js
+++ /dev/null
@@ -1,51 +0,0 @@
-'use strict';
-const mock = require('mongodb-mock-server');
-const expect = require('chai').expect;
-
-describe('Client Metadata', function() {
-  let mockServer;
-  before(() => mock.createServer().then(server => (mockServer = server)));
-  after(() => mock.cleanup());
-
-  it('should report the correct platform in client metadata', function(done) {
-    const ismasters = [];
-    mockServer.setMessageHandler(request => {
-      const doc = request.document;
-      if (doc.ismaster) {
-        ismasters.push(doc);
-        request.reply(mock.DEFAULT_ISMASTER);
-      } else {
-        request.reply({ ok: 1 });
-      }
-    });
-
-    const isUnifiedTopology = this.configuration.usingUnifiedTopology();
-    const client = this.configuration.newClient(`mongodb://${mockServer.uri()}/`);
-    client.connect(err => {
-      expect(err).to.not.exist;
-      this.defer(() => client.close());
-
-      client.db().command({ ping: 1 }, err => {
-        expect(err).to.not.exist;
-
-        if (isUnifiedTopology) {
-          expect(ismasters).to.have.length.greaterThan(1);
-          ismasters.forEach(ismaster =>
-            expect(ismaster)
-              .nested.property('client.platform')
-              .to.match(/unified/)
-          );
-        } else {
-          expect(ismasters).to.have.length(1);
-          ismasters.forEach(ismaster =>
-            expect(ismaster)
-              .nested.property('client.platform')
-              .to.match(/legacy/)
-          );
-        }
-
-        done();
-      });
-    });
-  });
-});
diff --git a/test/unit/cmap/connection.test.js b/test/unit/cmap/connection.test.js
index d944c6c4099..91b468528a8 100644
--- a/test/unit/cmap/connection.test.js
+++ b/test/unit/cmap/connection.test.js
@@ -2,7 +2,7 @@
 
 const BSON = require('bson');
 const mock = require('mongodb-mock-server');
-const connect = require('../../../lib/core/connection/connect');
+const connect = require('../../../lib/cmap/connect');
 const Connection = require('../../../lib/cmap/connection').Connection;
 const expect = require('chai').expect;
diff --git a/test/unit/cmap/message_stream.test.js b/test/unit/cmap/message_stream.test.js
index aed67e2c4ec..d2a82461ac2 100644
--- a/test/unit/cmap/message_stream.test.js
+++ b/test/unit/cmap/message_stream.test.js
@@ -3,7 +3,7 @@ const BSON = require('bson');
 const Readable = require('stream').Readable;
 const Writable = require('stream').Writable;
 const MessageStream = require('../../../lib/cmap/message_stream');
-const Msg = require('../../../lib/core/connection/msg').Msg;
+const { Msg } = require('../../../lib/cmap/commands');
 const expect = require('chai').expect;
 
 function bufferToStream(buffer) {
diff --git a/test/unit/core/apm.test.js b/test/unit/core/apm.test.js
index 56fade72e8d..701c2a38565 100644
--- a/test/unit/core/apm.test.js
+++ b/test/unit/core/apm.test.js
@@ -1,17 +1,12 @@
 'use strict';
 
-const Pool = require('../../../lib/core/connection/pool');
 const BSON = require('bson');
-const apm = require('../../../lib/core/connection/apm');
-const expect = require('chai').expect;
-
-const commands = require('../../../lib/core/connection/commands');
-const Query = commands.Query;
-const KillCursor = commands.KillCursor;
-const GetMore = commands.GetMore;
+const { expect } = require('chai');
+const { Query, KillCursor, GetMore } = require('../../../lib/cmap/commands');
+const { CommandStartedEvent } = require('../../../lib/cmap/events');
 
 const bson = new BSON();
-const pool = new Pool({}, { bson });
+const conn = { id: '<monitor>', address: '' };

 describe('APM tests', function() {
   describe('CommandStartedEvent', function() {
@@ -32,8 +27,7 @@ describe('APM tests', function() {
       {}
     );
 
-    const startEvent = new apm.CommandStartedEvent(pool, query);
-
+    const startEvent = new CommandStartedEvent(conn, query);
     expect(startEvent).to.have.property('commandName', 'testCmd');
     expect(startEvent).to.have.property('databaseName', db);
     expect(startEvent).to.have.property('requestId', query.requestId);
@@ -50,7 +44,7 @@ describe('APM tests', function() {
     const coll = 'testingKillCursors';
     const killCursor = new KillCursor(bson, `${db}.${coll}`, [12, 42, 57]);
 
-    const startEvent = new apm.CommandStartedEvent(pool, killCursor);
+    const startEvent = new CommandStartedEvent(conn, killCursor);
 
     expect(startEvent).to.have.property('commandName', 'killCursors');
     expect(startEvent).to.have.property('databaseName', db);
@@ -72,7 +66,7 @@ describe('APM tests', function() {
     const numberToReturn = 321;
     const getMore = new GetMore(bson, `${db}.${coll}`, 5525, { numberToReturn });
 
-    const startEvent = new apm.CommandStartedEvent(pool, getMore);
+    const startEvent = new CommandStartedEvent(conn, getMore);
 
     expect(startEvent).to.have.property('commandName', 'getMore');
     expect(startEvent).to.have.property('databaseName', db);
@@ -110,7 +104,7 @@ describe('APM tests', function() {
       {}
     );
 
-    const startEvent = new apm.CommandStartedEvent(pool, query);
+    const startEvent = new CommandStartedEvent(conn, query);
 
     expect(startEvent).to.have.property('commandName', 'testCmd');
     expect(startEvent).to.have.property('databaseName', db);
@@ -140,7 +134,7 @@ describe('APM tests', function() {
       {}
     );
 
-    const startEvent = new apm.CommandStartedEvent(pool, query);
+    const startEvent = new CommandStartedEvent(conn, query);
 
     expect(startEvent).to.have.property('commandName', 'find');
     expect(startEvent).to.have.property('databaseName', db);
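The apm.test.js hunks above encode a small but important shape change: the command-monitoring event constructors now accept any connection-like object carrying `id` and `address`, where they previously required a core `Pool`. A sketch, mirroring the test's own usage (the `conn` fields here are placeholder values):

```js
const BSON = require('bson');
const { Query } = require('../../../lib/cmap/commands');
const { CommandStartedEvent } = require('../../../lib/cmap/events');

const bson = new BSON();
// Any object with `id` and `address` will do; no Pool instance needed.
const conn = { id: 42, address: 'localhost:27017' };

const query = new Query(bson, 'admin.$cmd', { testCmd: 1 }, {});
const event = new CommandStartedEvent(conn, query);
// event.commandName === 'testCmd', event.databaseName === 'admin',
// and the connection metadata comes straight from `conn`.
```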
diff --git a/test/unit/core/common.js b/test/unit/core/common.js
index afa956d5b3f..4fa8def73c2 100644
--- a/test/unit/core/common.js
+++ b/test/unit/core/common.js
@@ -13,7 +13,7 @@ class ReplSetFixture {
   setup(options) {
     options = options || {};
-    const ismaster = options.ismaster ? options.ismaster : mock.DEFAULT_ISMASTER;
+    const ismaster = options.ismaster ? options.ismaster : mock.DEFAULT_ISMASTER_36;
 
     return Promise.all([mock.createServer(), mock.createServer(), mock.createServer()]).then(
       servers => {
diff --git a/test/unit/core/connect.test.js b/test/unit/core/connect.test.js
index 312553771cc..1de82946d43 100644
--- a/test/unit/core/connect.test.js
+++ b/test/unit/core/connect.test.js
@@ -2,13 +2,13 @@
 
 const BSON = require('bson');
 const mock = require('mongodb-mock-server');
-const expect = require('chai').expect;
+const { expect } = require('chai');
 const EventEmitter = require('events');
 
-const connect = require('../../../lib/core/connection/connect');
-const MongoCredentials = require('../../../lib/core/auth/mongo_credentials').MongoCredentials;
-const genClusterTime = require('./common').genClusterTime;
-const MongoNetworkError = require('../../../lib/core/error').MongoNetworkError;
+const connect = require('../../../lib/cmap/connect');
+const { MongoCredentials } = require('../../../lib/cmap/auth/mongo_credentials');
+const { genClusterTime } = require('./common');
+const { MongoNetworkError } = require('../../../lib/error');
 
 describe('Connect Tests', function() {
   const test = {};
@@ -112,66 +112,4 @@ describe('Connect Tests', function() {
       done();
     });
   });
-
-  describe('runCommand', function() {
-    const metadata = { requires: { topology: 'single' } };
-    class MockConnection extends EventEmitter {
-      constructor(conn) {
-        super();
-        this.options = { bson: new BSON() };
-        this.conn = conn;
-      }
-
-      get address() {
-        return 'mocked';
-      }
-
-      setSocketTimeout() {}
-      resetSocketTimeout() {}
-      destroy() {
-        this.conn.destroy();
-      }
-    }
-
-    it('should treat non-Error generating error-like events as errors', metadata, function(done) {
-      class ConnectionFailingWithClose extends MockConnection {
-        write() {
-          this.emit('close');
-        }
-      }
-
-      connect(
-        { host: '127.0.0.1', port: 27017, connectionType: ConnectionFailingWithClose },
-        (err, conn) => {
-          expect(err).to.exist;
-          expect(err.message).to.match(/runCommand failed/);
-          expect(conn).to.not.exist;
-          done();
-        }
-      );
-    });
-
-    it(
-      'should not crash the application if multiple error-like events are emitted on `runCommand`',
-      metadata,
-      function(done) {
-        class ConnectionFailingWithAllEvents extends MockConnection {
-          write() {
-            this.emit('close');
-            this.emit('timeout');
-            this.emit('error');
-          }
-        }
-
-        connect(
-          { host: '127.0.0.1', port: 27017, connectionType: ConnectionFailingWithAllEvents },
-          (err, conn) => {
-            expect(err).to.exist;
-            expect(conn).to.not.exist;
-            done();
-          }
-        );
-      }
-    );
-  });
 });
diff --git a/test/unit/core/connection_string.test.js b/test/unit/core/connection_string.test.js
index 6e5c914216c..3ddb05b7e2b 100644
--- a/test/unit/core/connection_string.test.js
+++ b/test/unit/core/connection_string.test.js
@@ -1,9 +1,9 @@
 'use strict';
 
-const parseConnectionString = require('../../../lib/core/uri_parser');
+const { parseConnectionString } = require('../../../lib/connection_string');
 const punycode = require('punycode');
-const MongoParseError = require('../../../lib/core/error').MongoParseError;
-const loadSpecTests = require('../../spec').loadSpecTests;
+const { MongoParseError } = require('../../../lib/error');
+const { loadSpecTests } = require('../../spec');
 const chai = require('chai');
 const expect = chai.expect;
 chai.use(require('chai-subset'));
diff --git a/test/unit/core/mongodb_srv.test.js b/test/unit/core/mongodb_srv.test.js
index 846d3715d24..6d45fd554c9 100644
--- a/test/unit/core/mongodb_srv.test.js
+++ b/test/unit/core/mongodb_srv.test.js
@@ -1,7 +1,7 @@
 'use strict';
 const fs = require('fs');
 const path = require('path');
-const parseConnectionString = require('../../../lib/core/uri_parser');
+const parseConnectionString = require('../../../lib/connection_string').parseConnectionString;
 const expect = require('chai').expect;
 
 describe('mongodb+srv', function() {
diff --git a/test/unit/core/response_test.js.test.js b/test/unit/core/response_test.js.test.js
index 4a6dd68d486..000e9d36c07 100644
--- a/test/unit/core/response_test.js.test.js
+++ b/test/unit/core/response_test.js.test.js
@@ -1,10 +1,10 @@
 'use strict';
 const expect = require('chai').expect;
-const MongoError = require('../../../lib/core/error').MongoError;
+const { MongoError } = require('../../../lib/error');
 const mock = require('mongodb-mock-server');
-const Server = require('../../../lib/core/topologies/server');
-const Long = require('bson').Long;
+const { Topology } = require('../../../lib/sdam/topology');
+const { Long } = require('bson');
 
 const test = {};
 describe('Response', function() {
@@ -22,7 +22,7 @@ describe('Response', function() {
       errmsg: 'Cursor not found (namespace: "liveearth.entityEvents", id: 2018648316188432590).'
     };
 
-    const client = new Server(test.server.address());
+    const client = new Topology(test.server.address());
 
     test.server.setMessageHandler(request => {
       const doc = request.document;
diff --git a/test/unit/core/scram_iterations.test.js b/test/unit/core/scram_iterations.test.js
index 1464fe3b7e4..d4d30ad2147 100644
--- a/test/unit/core/scram_iterations.test.js
+++ b/test/unit/core/scram_iterations.test.js
@@ -1,10 +1,10 @@
 'use strict';
 
-const expect = require('chai').expect;
+const { expect } = require('chai');
 const mock = require('mongodb-mock-server');
-const Server = require('../../../lib/core/topologies/server');
-const Buffer = require('safe-buffer').Buffer;
-const MongoCredentials = require('../../../lib/core/auth/mongo_credentials').MongoCredentials;
+const { Topology } = require('../../../lib/sdam/topology');
+const { Buffer } = require('safe-buffer');
+const { MongoCredentials } = require('../../../lib/cmap/auth/mongo_credentials');
 
 describe('SCRAM Iterations Tests', function() {
   const test = {};
@@ -48,7 +48,7 @@ describe('SCRAM Iterations Tests', function() {
     }
   });
 
-  const client = new Server(Object.assign({}, test.server.address(), { credentials }));
+  const client = new Topology(test.server.uri(), { credentials });
   client.on('error', err => {
     let testErr;
     try {
@@ -100,7 +100,7 @@ describe('SCRAM Iterations Tests', function() {
     }
   });
 
-  const client = new Server(Object.assign({}, test.server.address(), { credentials }));
+  const client = new Topology(test.server.uri(), { credentials });
   client.on('error', err => {
     expect(err).to.not.be.null;
     expect(err)
@@ -143,12 +143,12 @@ describe('SCRAM Iterations Tests', function() {
     }
   });
 
-  const client = new Server(Object.assign({}, test.server.address(), { credentials }));
+  const client = new Topology(test.server.uri(), { credentials });
   client.on('error', err => {
     expect(err).to.not.be.null;
     expect(err)
       .to.have.property('message')
-      .that.matches(/failed to connect to server/);
+      .that.matches(/connection(.+)closed/);
 
     client.destroy(done);
   });
diff --git a/test/unit/core/sessions.test.js b/test/unit/core/sessions.test.js
index bf58185ba4f..df1bda4991e 100644
--- a/test/unit/core/sessions.test.js
+++ b/test/unit/core/sessions.test.js
@@ -1,15 +1,10 @@
 'use strict';
 
 const mock = require('mongodb-mock-server');
-const expect = require('chai').expect;
-const genClusterTime = require('./common').genClusterTime;
-const sessionCleanupHandler = require('./common').sessionCleanupHandler;
-
-const core = require('../../../lib/core');
-const Server = core.Server;
-const ServerSessionPool = core.Sessions.ServerSessionPool;
-const ServerSession = core.Sessions.ServerSession;
-const ClientSession = core.Sessions.ClientSession;
+const { expect } = require('chai');
+const { genClusterTime, sessionCleanupHandler } = require('./common');
+const { Topology } = require('../../../lib/sdam/topology');
+const { ServerSessionPool, ServerSession, ClientSession } = require('../../../lib/sessions');
 
 let test = {};
 describe('Sessions', function() {
@@ -34,8 +29,8 @@ describe('Sessions', function() {
     it('should default to `null` for `clusterTime`', {
       metadata: { requires: { topology: 'single' } },
       test: function(done) {
-        const client = new Server();
-        const sessionPool = new ServerSessionPool(client);
+        const client = new Topology('localhost:27017');
+        const sessionPool = client.s.sessionPool;
         const session = new ClientSession(client, sessionPool);
         done = sessionCleanupHandler(session, sessionPool, done);
 
@@ -48,8 +43,8 @@ describe('Sessions', function() {
       metadata: { requires: { topology: 'single' } },
       test: function(done) {
         const clusterTime = genClusterTime(Date.now());
-        const client = new Server();
-        const sessionPool = new ServerSessionPool(client);
+        const client = new Topology('localhost:27017');
+        const sessionPool = client.s.sessionPool;
         const session = new ClientSession(client, sessionPool, { initialClusterTime: clusterTime });
         done = sessionCleanupHandler(session, sessionPool, done);
 
@@ -80,7 +75,7 @@ describe('Sessions', function() {
         });
       })
       .then(() => {
-        test.client = new Server(test.server.address());
+        test.client = new Topology(test.server.address());
 
         return new Promise((resolve, reject) => {
           test.client.once('error', reject);
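As the sessions.test.js hunks show, unit tests now build the unified `Topology` directly and borrow its internal session pool instead of constructing one by hand. A minimal sketch of that pattern, assuming the lib/ paths from this PR (`client.s` is the driver's internal state bag, so this access is test-only):

```js
const { Topology } = require('../../../lib/sdam/topology');
const { ClientSession } = require('../../../lib/sessions');

const client = new Topology('localhost:27017');
// Previously: const sessionPool = new ServerSessionPool(client);
const sessionPool = client.s.sessionPool;
const session = new ClientSession(client, sessionPool);
```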
diff --git a/test/unit/core/write_concern_error.test.js b/test/unit/core/write_concern_error.test.js
index b7374d44f43..965e90e091b 100644
--- a/test/unit/core/write_concern_error.test.js
+++ b/test/unit/core/write_concern_error.test.js
@@ -1,20 +1,12 @@
 'use strict';
-const ReplSet = require('../../../lib/core/topologies/replset');
+const { Topology } = require('../../../lib/sdam/topology');
 const mock = require('mongodb-mock-server');
-const ReplSetFixture = require('./common').ReplSetFixture;
-const MongoWriteConcernError = require('../../../lib/core/error').MongoWriteConcernError;
-const expect = require('chai').expect;
+const { ReplSetFixture } = require('./common');
+const { MongoWriteConcernError } = require('../../../lib/error');
+const { expect } = require('chai');
 
 describe('WriteConcernError', function() {
   let test;
-
-  // mock ops store from node-mongodb-native
-  const mockDisconnectHandler = {
-    add: () => {},
-    execute: () => {},
-    flush: () => {}
-  };
-
   const RAW_USER_WRITE_CONCERN_CMD = {
     createUser: 'foo2',
     pwd: 'pwd',
@@ -44,16 +36,12 @@ describe('WriteConcernError', function() {
   function makeAndConnectReplSet(cb) {
     let invoked = false;
-    const replSet = new ReplSet(
+    console.log({
+      uri: `mongodb://${test.primaryServer.uri()},${test.firstSecondaryServer.uri()}/?replicaSet=rs`
+    });
+    const replSet = new Topology(
       [test.primaryServer.address(), test.firstSecondaryServer.address()],
-      {
-        setName: 'rs',
-        haInterval: 10000,
-        connectionTimeout: 3000,
-        disconnectHandler: mockDisconnectHandler,
-        secondaryOnlyConnectionAllowed: true,
-        size: 1
-      }
+      { replicaSet: 'rs' }
     );
 
     replSet.once('error', err => {
@@ -61,14 +49,16 @@ describe('WriteConcernError', function() {
         return;
       }
       invoked = true;
-      cb(err, null);
+      cb(err);
     });
+
     replSet.on('connect', () => {
-      if (invoked || !replSet.s.replicaSetState.hasPrimary()) {
+      if (invoked) {
         return;
       }
+
       invoked = true;
-      cb(null, replSet);
+      cb(undefined, replSet);
     });
 
     replSet.connect();
@@ -86,7 +76,7 @@ describe('WriteConcernError', function() {
 
     makeAndConnectReplSet((err, replSet) => {
       // cleanup the server before calling done
-      const cleanup = err => replSet.destroy(err2 => done(err || err2));
+      const cleanup = err => replSet.close({ force: true }, err2 => done(err || err2));
 
       if (err) {
         return cleanup(err);
diff --git a/test/unit/db.test.js b/test/unit/db.test.js
index aef7e4b4c44..b28c889a486 100644
--- a/test/unit/db.test.js
+++ b/test/unit/db.test.js
@@ -24,6 +24,10 @@ class MockTopology extends EventEmitter {
     return false;
   }
 
+  shouldCheckForSessionSupport() {
+    return false;
+  }
+
   command(namespace, command, options, callback) {
     callback(null, { result: { ok: 1 } });
   }
diff --git a/test/unit/sdam/monitoring.test.js b/test/unit/sdam/monitoring.test.js
index 2aceaae6ade..0c8d8f4c7d9 100644
--- a/test/unit/sdam/monitoring.test.js
+++ b/test/unit/sdam/monitoring.test.js
@@ -1,10 +1,10 @@
 'use strict';
 const mock = require('mongodb-mock-server');
 const BSON = require('bson');
-const Topology = require('../../../lib/core/sdam/topology').Topology;
-const Monitor = require('../../../lib/core/sdam/monitor').Monitor;
-const ServerType = require('../../../lib/core/sdam/common').ServerType;
-const expect = require('chai').expect;
+const { ServerType } = require('../../../lib/sdam/common');
+const { Topology } = require('../../../lib/sdam/topology');
+const { Monitor } = require('../../../lib/sdam/monitor');
+const { expect } = require('chai');
 
 class MockServer {
   constructor(options) {
diff --git a/test/unit/sdam/server_description.test.js b/test/unit/sdam/server_description.test.js
index 30de1592691..ff1bbc60f5e 100644
--- a/test/unit/sdam/server_description.test.js
+++ b/test/unit/sdam/server_description.test.js
@@ -1,5 +1,5 @@
 'use strict';
-const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription;
+const { ServerDescription } = require('../../../lib/sdam/server_description');
 const expect = require('chai').expect;
 
 describe('ServerDescription', function() {
diff --git a/test/unit/sdam/server_selection/select_servers.test.js b/test/unit/sdam/server_selection/select_servers.test.js
index 218b3f76f61..d931934d4a5 100644
--- a/test/unit/sdam/server_selection/select_servers.test.js
+++ b/test/unit/sdam/server_selection/select_servers.test.js
@@ -1,8 +1,8 @@
 'use strict';
-const ReadPreference = require('../../../../lib/core/topologies/read_preference');
-const Topology = require('../../../../lib/core/sdam/topology').Topology;
-const Server = require('../../../../lib/core/sdam/server').Server;
-const expect = require('chai').expect;
+const { Topology } = require('../../../../lib/sdam/topology');
+const { Server } = require('../../../../lib/sdam/server');
+const ReadPreference = require('../../../../lib/read_preference');
+const { expect } = require('chai');
 const sinon = require('sinon');
 
 describe('selectServer', function() {
@@ -85,7 +85,6 @@ describe('selectServer', function() {
       let completed = 0;
       function finish() {
         completed++;
-        console.log(completed);
         if (completed === toSelect) done();
       }
diff --git a/test/unit/sdam/server_selection/spec.test.js b/test/unit/sdam/server_selection/spec.test.js
index c30a2054c5c..4d7e8abb72d 100644
--- a/test/unit/sdam/server_selection/spec.test.js
+++ b/test/unit/sdam/server_selection/spec.test.js
@@ -1,16 +1,13 @@
 'use strict';
 const path = require('path');
 const fs = require('fs');
-const core = require('../../../../lib/core');
-const Topology = core.Topology;
-const MongoServerSelectionError = core.MongoServerSelectionError;
-const ReadPreference = core.ReadPreference;
-
-// TODO: these should be from `core` when legacy topologies are removed
-const Server = require('../../../../lib/core/sdam/server').Server;
-const ServerType = require('../../../../lib/core/sdam/common').ServerType;
-const ServerDescription = require('../../../../lib/core/sdam/server_description').ServerDescription;
-const ServerSelectors = require('../../../../lib/core/sdam/server_selection');
+const { Topology } = require('../../../../lib/sdam/topology');
+const { Server } = require('../../../../lib/sdam/server');
+const { ServerType } = require('../../../../lib/sdam/common');
+const { ServerDescription } = require('../../../../lib/sdam/server_description');
+const ReadPreference = require('../../../../lib/read_preference');
+const { MongoServerSelectionError } = require('../../../../lib/error');
+const ServerSelectors = require('../../../../lib/sdam/server_selection');
 
 const EJSON = require('mongodb-extjson');
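The write_concern_error.test.js hunk above is the clearest instance of the consolidation this PR repeats everywhere: the legacy `ReplSet`/`Mongos`/`Server` classes collapse into one `Topology`, whose mode is driven by the seed list and options such as `replicaSet`. A sketch of the replacement pattern, with illustrative host values (the `close({ force: true }, cb)` signature is taken from the diff itself):

```js
const { Topology } = require('../../../lib/sdam/topology');

// Legacy (removed): new ReplSet(hosts, { setName: 'rs', haInterval: 10000, ... })
const replSet = new Topology(
  [{ host: 'localhost', port: 31000 }, { host: 'localhost', port: 31001 }],
  { replicaSet: 'rs' }
);

replSet.on('connect', () => {
  // shutdown now goes through close(); destroy() was the legacy call
  replSet.close({ force: true }, () => {});
});

replSet.connect();
```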
diff --git a/test/unit/sdam/spec.test.js b/test/unit/sdam/spec.test.js
index 8d8eaa3f705..a3217ea2346 100644
--- a/test/unit/sdam/spec.test.js
+++ b/test/unit/sdam/spec.test.js
@@ -1,11 +1,11 @@
 'use strict';
 const fs = require('fs');
 const path = require('path');
-const Topology = require('../../../lib/core/sdam/topology').Topology;
-const Server = require('../../../lib/core/sdam/server').Server;
-const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription;
-const sdamEvents = require('../../../lib/core/sdam/events');
-const parse = require('../../../lib/core/uri_parser');
+const { Topology } = require('../../../lib/sdam/topology');
+const { Server } = require('../../../lib/sdam/server');
+const { ServerDescription } = require('../../../lib/sdam/server_description');
+const sdamEvents = require('../../../lib/sdam/events');
+const parse = require('../../../lib/connection_string').parseConnectionString;
 const sinon = require('sinon');
 const EJSON = require('mongodb-extjson');
diff --git a/test/unit/sdam/srv_polling.test.js b/test/unit/sdam/srv_polling.test.js
index ccf824a1976..97fc9a45e30 100644
--- a/test/unit/sdam/srv_polling.test.js
+++ b/test/unit/sdam/srv_polling.test.js
@@ -1,12 +1,10 @@
 'use strict';
 
-const Topology = require('../../../lib/core/sdam/topology').Topology;
-const TopologyDescription = require('../../../lib/core/sdam/topology_description')
-  .TopologyDescription;
-const TopologyType = require('../../../lib/core/sdam/common').TopologyType;
-const sdamEvents = require('../../../lib/core/sdam/events');
-const SrvPoller = require('../../../lib/core/sdam/srv_polling').SrvPoller;
-const SrvPollingEvent = require('../../../lib/core/sdam/srv_polling').SrvPollingEvent;
+const { Topology } = require('../../../lib/sdam/topology');
+const { TopologyDescription } = require('../../../lib/sdam/topology_description');
+const { TopologyType } = require('../../../lib/sdam/common');
+const { SrvPoller, SrvPollingEvent } = require('../../../lib/sdam/srv_polling');
+const sdamEvents = require('../../../lib/sdam/events');
 
 const dns = require('dns');
 const EventEmitter = require('events').EventEmitter;
diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js
index b84502ef757..775554138c8 100644
--- a/test/unit/sdam/topology.test.js
+++ b/test/unit/sdam/topology.test.js
@@ -1,12 +1,70 @@
 'use strict';
-const Topology = require('../../../lib/core/sdam/topology').Topology;
-const Server = require('../../../lib/core/sdam/server').Server;
-const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription;
+
 const mock = require('mongodb-mock-server');
-const expect = require('chai').expect;
+const { expect } = require('chai');
 const sinon = require('sinon');
+const { Topology } = require('../../../lib/sdam/topology');
+const { Server } = require('../../../lib/sdam/server');
+const { ServerDescription } = require('../../../lib/sdam/server_description');
+const BSON = require('../../../lib/utils').retrieveBSON();
 
 describe('Topology (unit)', function() {
+  describe('client metadata', function() {
+    let mockServer;
+    before(() => mock.createServer().then(server => (mockServer = server)));
+    after(() => mock.cleanup());
+
+    it('should correctly pass appname', {
+      metadata: { requires: { topology: 'single' } },
+
+      test: function(done) {
+        // Attempt to connect
+        var server = new Topology(
+          [{ host: this.configuration.host, port: this.configuration.port }],
+          {
+            bson: new BSON(),
+            appname: 'My application name'
+          }
+        );
+
+        expect(server.clientMetadata.application.name).to.equal('My application name');
+        done();
+      }
+    });
+
+    it('should report the correct platform in client metadata', function(done) {
+      const ismasters = [];
+      mockServer.setMessageHandler(request => {
+        const doc = request.document;
+        if (doc.ismaster) {
+          ismasters.push(doc);
+          request.reply(mock.DEFAULT_ISMASTER);
+        } else {
+          request.reply({ ok: 1 });
+        }
+      });
+
+      const client = this.configuration.newClient(`mongodb://${mockServer.uri()}/`);
+      client.connect(err => {
+        expect(err).to.not.exist;
+        this.defer(() => client.close());
+
+        client.db().command({ ping: 1 }, err => {
+          expect(err).to.not.exist;
+
+          expect(ismasters).to.have.length.greaterThan(1);
+          ismasters.forEach(ismaster =>
+            expect(ismaster)
+              .nested.property('client.platform')
+              .to.match(/unified/)
+          );
+
+          done();
+        });
+      });
+    });
+  });
+
   describe('shouldCheckForSessionSupport', function() {
     beforeEach(function() {
       this.sinon = sinon.sandbox.create();
diff --git a/test/unit/utils.test.js b/test/unit/utils.test.js
index 367e43625b7..443464261d6 100644
--- a/test/unit/utils.test.js
+++ b/test/unit/utils.test.js
@@ -1,5 +1,5 @@
 'use strict';
-const eachAsync = require('../../lib/core/utils').eachAsync;
+const { eachAsync } = require('../../lib/utils');
 const expect = require('chai').expect;
 
 describe('utils', function() {