diff --git a/.evergreen/atlas b/.evergreen/atlas new file mode 120000 index 0000000000..ef871b9a28 --- /dev/null +++ b/.evergreen/atlas @@ -0,0 +1 @@ +../.mod/drivers-evergreen-tools/.evergreen/atlas \ No newline at end of file diff --git a/.evergreen/config.yml b/.evergreen/config.yml index bf5671c769..934829aae4 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -123,7 +123,14 @@ functions: export ATLAS_SERVERLESS_URI="${atlas_serverless_uri}" export ATLAS_SERVERLESS_LB_URI="${atlas_serverless_lb_uri}" export RVM_RUBY="${RVM_RUBY}" - export SERVERLESS_DRIVERS_GROUP="${SERVERLESS_DRIVERS_GROUP}" + + if [[ -n "${USE_PROXY_SERVERLESS}" ]]; + then + export SERVERLESS_DRIVERS_GROUP="${PROXY_SERVERLESS_DRIVERS_GROUP}" + else + export SERVERLESS_DRIVERS_GROUP="${SERVERLESS_DRIVERS_GROUP}" + fi + export SERVERLESS_API_PUBLIC_KEY="${SERVERLESS_API_PUBLIC_KEY}" export SERVERLESS_API_PRIVATE_KEY="${SERVERLESS_API_PRIVATE_KEY}" export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" @@ -179,7 +186,7 @@ functions: IAM_AUTH_ECS_SECURITY_GROUP="${iam_auth_ecs_security_group}" IAM_AUTH_ECS_SUBNET_A="${iam_auth_ecs_subnet_a}" IAM_AUTH_ECS_SUBNET_B="${iam_auth_ecs_subnet_b}" - IAM_AUTH_ECS_TASK_DEFINITION="${iam_auth_ecs_task_definition}" + IAM_AUTH_ECS_TASK_DEFINITION="${iam_auth_ecs_task_definition_ubuntu2004}" IAM_WEB_IDENTITY_ISSUER="${iam_web_identity_issuer}" IAM_WEB_IDENTITY_JWKS_URI="${iam_web_identity_jwks_uri}" @@ -453,7 +460,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - SERVERLESS=1 SSL=ssl RVM_RUBY="${RVM_RUBY}" SINGLE_MONGOS="${SINGLE_MONGOS}" SERVERLESS_URI="${SERVERLESS_URI}" FLE="${FLE}" SERVERLESS_MONGODB_VERSION="${SERVERLESS_MONGODB_VERSION}" .evergreen/run-tests-serverless.sh + CRYPT_SHARED_LIB_PATH="${CRYPT_SHARED_LIB_PATH}" SERVERLESS=1 SSL=ssl RVM_RUBY="${RVM_RUBY}" SINGLE_MONGOS="${SINGLE_MONGOS}" SERVERLESS_URI="${SERVERLESS_URI}" FLE="${FLE}" SERVERLESS_MONGODB_VERSION="${SERVERLESS_MONGODB_VERSION}" .evergreen/run-tests-serverless.sh pre: - func: "fetch source" @@ -482,6 +489,8 @@ task_groups: script: | ${PREPARE_SHELL} + echo "Setting up Atlas cluster" + DRIVERS_ATLAS_PUBLIC_API_KEY="${DRIVERS_ATLAS_PUBLIC_API_KEY}" \ DRIVERS_ATLAS_PRIVATE_API_KEY="${DRIVERS_ATLAS_PRIVATE_API_KEY}" \ DRIVERS_ATLAS_GROUP_ID="${DRIVERS_ATLAS_GROUP_ID}" \ @@ -492,6 +501,8 @@ task_groups: task_id="${task_id}" \ execution="${execution}" \ $DRIVERS_TOOLS/.evergreen/atlas/setup-atlas-cluster.sh + + echo "MONGODB_URI=${MONGODB_URI}" - command: expansions.update params: file: src/atlas-expansion.yml @@ -513,6 +524,52 @@ task_groups: tasks: - test-full-atlas-task + - name: test_aws_lambda_task_group + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: fetch source + - func: create expansions + - command: shell.exec + params: + shell: "bash" + working_dir: "src" + script: | + ${PREPARE_SHELL} + + echo "Setting up Atlas cluster" + + DRIVERS_ATLAS_PUBLIC_API_KEY="${DRIVERS_ATLAS_PUBLIC_API_KEY}" \ + DRIVERS_ATLAS_PRIVATE_API_KEY="${DRIVERS_ATLAS_PRIVATE_API_KEY}" \ + DRIVERS_ATLAS_GROUP_ID="${DRIVERS_ATLAS_GROUP_ID}" \ + DRIVERS_ATLAS_LAMBDA_USER="${DRIVERS_ATLAS_LAMBDA_USER}" \ + DRIVERS_ATLAS_LAMBDA_PASSWORD="${DRIVERS_ATLAS_LAMBDA_PASSWORD}" \ + LAMBDA_STACK_NAME="dbx-ruby-lambda" \ + MONGODB_VERSION="7.0" \ + task_id="${task_id}" \ + execution="${execution}" \ + $DRIVERS_TOOLS/.evergreen/atlas/setup-atlas-cluster.sh + - command: expansions.update + params: + file: src/atlas-expansion.yml + teardown_group: + - command: 
shell.exec + params: + shell: "bash" + working_dir: "src" + script: | + ${PREPARE_SHELL} + + DRIVERS_ATLAS_PUBLIC_API_KEY="${DRIVERS_ATLAS_PUBLIC_API_KEY}" \ + DRIVERS_ATLAS_PRIVATE_API_KEY="${DRIVERS_ATLAS_PRIVATE_API_KEY}" \ + DRIVERS_ATLAS_GROUP_ID="${DRIVERS_ATLAS_GROUP_ID}" \ + LAMBDA_STACK_NAME="dbx-ruby-lambda" \ + task_id="${task_id}" \ + execution="${execution}" \ + $DRIVERS_TOOLS/.evergreen/atlas/teardown-atlas-cluster.sh + tasks: + - test-aws-lambda-deployed + - name: testgcpkms_task_group setup_group_can_fail_task: true setup_group_timeout_secs: 1800 # 30 minutes @@ -702,6 +759,38 @@ tasks: export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} export AZUREKMS_PRIVATEKEYPATH="/tmp/testazurekms_private_key_file" AZUREKMS_CMD="TEST_FLE_AZURE_AUTO=1 RVM_RUBY=ruby-3.1 FLE=helper TOPOLOGY=standalone MONGODB_VERSION=6.0 MONGO_RUBY_DRIVER_AZURE_TENANT_ID="${MONGO_RUBY_DRIVER_AZURE_TENANT_ID}" MONGO_RUBY_DRIVER_AZURE_CLIENT_ID="${MONGO_RUBY_DRIVER_AZURE_CLIENT_ID}" MONGO_RUBY_DRIVER_AZURE_CLIENT_SECRET="${MONGO_RUBY_DRIVER_AZURE_CLIENT_SECRET}" MONGO_RUBY_DRIVER_AZURE_IDENTITY_PLATFORM_ENDPOINT="${MONGO_RUBY_DRIVER_AZURE_IDENTITY_PLATFORM_ENDPOINT}" MONGO_RUBY_DRIVER_AZURE_KEY_VAULT_ENDPOINT="${testazurekms_keyvaultendpoint}" MONGO_RUBY_DRIVER_AZURE_KEY_NAME="${testazurekms_keyname}" ./.evergreen/run-tests-azure.sh" .evergreen/csfle/azurekms/run-command.sh + + - name: "test-aws-lambda-deployed" + commands: + - command: ec2.assume_role + params: + role_arn: ${LAMBDA_AWS_ROLE_ARN} + duration_seconds: 3600 + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + export MONGODB_URI=${MONGODB_URI} + .evergreen/run-tests-deployed-lambda.sh + env: + TEST_LAMBDA_DIRECTORY: ${PROJECT_DIRECTORY}/spec/faas/ruby-sam-app + AWS_REGION: us-east-1 + PROJECT_DIRECTORY: ${PROJECT_DIRECTORY} + DRIVERS_TOOLS: ${DRIVERS_TOOLS} + DRIVERS_ATLAS_PUBLIC_API_KEY: ${DRIVERS_ATLAS_PUBLIC_API_KEY} + DRIVERS_ATLAS_PRIVATE_API_KEY: ${DRIVERS_ATLAS_PRIVATE_API_KEY} + DRIVERS_ATLAS_LAMBDA_USER: ${DRIVERS_ATLAS_LAMBDA_USER} + DRIVERS_ATLAS_LAMBDA_PASSWORD: ${DRIVERS_ATLAS_LAMBDA_PASSWORD} + DRIVERS_ATLAS_GROUP_ID: ${DRIVERS_ATLAS_GROUP_ID} + AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID} + AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY} + AWS_SESSION_TOKEN: ${AWS_SESSION_TOKEN} + LAMBDA_STACK_NAME: "dbx-ruby-lambda" + RVM_RUBY: ruby-3.2 + MONGODB_URI: ${MONGODB_URI} axes: - id: preload @@ -890,7 +979,7 @@ axes: display_name: jruby-9.4 variables: RVM_RUBY: "jruby-9.4" - + - id: "os" display_name: OS values: @@ -900,6 +989,9 @@ axes: - id: ubuntu2204 display_name: "Ubuntu 22.04" run_on: ubuntu2204-small + - id: ubuntu2004 + display_name: "Ubuntu 20.04" + run_on: ubuntu2004-small - id: rhel8 display_name: "RHEL 8" run_on: rhel80-small @@ -1112,6 +1204,16 @@ axes: - id: no display_name: No + - id: serverless-project + display_name: serverless project + values: + - id: original + display_name: Original + - id: proxy + display_name: Proxy + variables: + USE_PROXY_SERVERLESS: 1 + buildvariants: - matrix_name: "auth/ssl" matrix_spec: @@ -1304,24 +1406,21 @@ buildvariants: tasks: - name: "test-mlaunch" - # The X.509 tests are failing with exceptions being expected, but never - # raised. (RUBY-3267) - # - # - matrix_name: "x509-tests" - # matrix_spec: - # auth-and-ssl: "x509" - # ruby: "ruby-3.2" - # # needs the latest_5x_mdb because run-tests.sh uses `mongo` to configure - # # the server for certain auth mechanisms. 
Once run-tests.sh is made smart - # # enough to install mongosh, and then use either mongo or mongosh - # # (depending on server version and what's available), we can bump this to - # # the latest stable db version. - # mongodb-version: "5.3" - # topology: standalone - # os: rhel8 - # display_name: "${mongodb-version} ${topology} ${auth-and-ssl} ${ruby}" - # tasks: - # - name: "test-mlaunch" + - matrix_name: "x509-tests" + matrix_spec: + auth-and-ssl: "x509" + ruby: "ruby-3.2" + # needs the latest_5x_mdb because run-tests.sh uses `mongo` to configure + # the server for certain auth mechanisms. Once run-tests.sh is made smart + # enough to install mongosh, and then use either mongo or mongosh + # (depending on server version and what's available), we can bump this to + # the latest stable db version. + mongodb-version: "5.3" + topology: standalone + os: rhel8 + display_name: "${mongodb-version} ${topology} ${auth-and-ssl} ${ruby}" + tasks: + - name: "test-mlaunch" - matrix_name: "jruby-auth" matrix_spec: @@ -1451,7 +1550,7 @@ buildvariants: # (depending on server version and what's available), we can bump this to # the latest stable db version. mongodb-version: "5.3" - os: rhel8 + os: ubuntu2004 display_name: "AWS ${auth-and-ssl} ${mongodb-version} ${ruby}" tasks: - name: "test-aws-auth" @@ -1689,6 +1788,15 @@ buildvariants: ruby: ["ruby-3.2", "ruby-3.1", "ruby-3.0", "ruby-2.7", "ruby-2.6", "ruby-2.5"] fle: path os: rhel8 - display_name: "Atlas serverless ${ruby} single mongos" + serverless-project: [ original, proxy ] + display_name: "Atlas serverless ${ruby} ${serverless-project}" tasks: - name: test-serverless + + - matrix_name: "aws-lambda" + matrix_spec: + ruby: "ruby-3.2" + os: ubuntu2204 + display_name: "AWS Lambda" + tasks: + - name: test_aws_lambda_task_group diff --git a/.evergreen/config/axes.yml.erb b/.evergreen/config/axes.yml.erb index e6f101e8bd..074cba7a06 100644 --- a/.evergreen/config/axes.yml.erb +++ b/.evergreen/config/axes.yml.erb @@ -186,7 +186,7 @@ axes: display_name: jruby-9.4 variables: RVM_RUBY: "jruby-9.4" - + - id: "os" display_name: OS values: @@ -196,6 +196,9 @@ axes: - id: ubuntu2204 display_name: "Ubuntu 22.04" run_on: ubuntu2204-small + - id: ubuntu2004 + display_name: "Ubuntu 20.04" + run_on: ubuntu2004-small - id: rhel8 display_name: "RHEL 8" run_on: rhel80-small @@ -402,3 +405,13 @@ axes: API_VERSION_REQUIRED: 1 - id: no display_name: No + + - id: serverless-project + display_name: serverless project + values: + - id: original + display_name: Original + - id: proxy + display_name: Proxy + variables: + USE_PROXY_SERVERLESS: 1 diff --git a/.evergreen/config/common.yml.erb b/.evergreen/config/common.yml.erb index 3892f442dc..54eee122a1 100644 --- a/.evergreen/config/common.yml.erb +++ b/.evergreen/config/common.yml.erb @@ -120,7 +120,14 @@ functions: export ATLAS_SERVERLESS_URI="${atlas_serverless_uri}" export ATLAS_SERVERLESS_LB_URI="${atlas_serverless_lb_uri}" export RVM_RUBY="${RVM_RUBY}" - export SERVERLESS_DRIVERS_GROUP="${SERVERLESS_DRIVERS_GROUP}" + + if [[ -n "${USE_PROXY_SERVERLESS}" ]]; + then + export SERVERLESS_DRIVERS_GROUP="${PROXY_SERVERLESS_DRIVERS_GROUP}" + else + export SERVERLESS_DRIVERS_GROUP="${SERVERLESS_DRIVERS_GROUP}" + fi + export SERVERLESS_API_PUBLIC_KEY="${SERVERLESS_API_PUBLIC_KEY}" export SERVERLESS_API_PRIVATE_KEY="${SERVERLESS_API_PRIVATE_KEY}" export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" @@ -176,7 +183,7 @@ functions: IAM_AUTH_ECS_SECURITY_GROUP="${iam_auth_ecs_security_group}" 
IAM_AUTH_ECS_SUBNET_A="${iam_auth_ecs_subnet_a}" IAM_AUTH_ECS_SUBNET_B="${iam_auth_ecs_subnet_b}" - IAM_AUTH_ECS_TASK_DEFINITION="${iam_auth_ecs_task_definition}" + IAM_AUTH_ECS_TASK_DEFINITION="${iam_auth_ecs_task_definition_ubuntu2004}" IAM_WEB_IDENTITY_ISSUER="${iam_web_identity_issuer}" IAM_WEB_IDENTITY_JWKS_URI="${iam_web_identity_jwks_uri}" @@ -450,7 +457,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - SERVERLESS=1 SSL=ssl RVM_RUBY="${RVM_RUBY}" SINGLE_MONGOS="${SINGLE_MONGOS}" SERVERLESS_URI="${SERVERLESS_URI}" FLE="${FLE}" SERVERLESS_MONGODB_VERSION="${SERVERLESS_MONGODB_VERSION}" .evergreen/run-tests-serverless.sh + CRYPT_SHARED_LIB_PATH="${CRYPT_SHARED_LIB_PATH}" SERVERLESS=1 SSL=ssl RVM_RUBY="${RVM_RUBY}" SINGLE_MONGOS="${SINGLE_MONGOS}" SERVERLESS_URI="${SERVERLESS_URI}" FLE="${FLE}" SERVERLESS_MONGODB_VERSION="${SERVERLESS_MONGODB_VERSION}" .evergreen/run-tests-serverless.sh pre: - func: "fetch source" @@ -479,6 +486,8 @@ task_groups: script: | ${PREPARE_SHELL} + echo "Setting up Atlas cluster" + DRIVERS_ATLAS_PUBLIC_API_KEY="${DRIVERS_ATLAS_PUBLIC_API_KEY}" \ DRIVERS_ATLAS_PRIVATE_API_KEY="${DRIVERS_ATLAS_PRIVATE_API_KEY}" \ DRIVERS_ATLAS_GROUP_ID="${DRIVERS_ATLAS_GROUP_ID}" \ @@ -489,6 +498,8 @@ task_groups: task_id="${task_id}" \ execution="${execution}" \ $DRIVERS_TOOLS/.evergreen/atlas/setup-atlas-cluster.sh + + echo "MONGODB_URI=${MONGODB_URI}" - command: expansions.update params: file: src/atlas-expansion.yml @@ -510,6 +521,52 @@ task_groups: tasks: - test-full-atlas-task + - name: test_aws_lambda_task_group + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: fetch source + - func: create expansions + - command: shell.exec + params: + shell: "bash" + working_dir: "src" + script: | + ${PREPARE_SHELL} + + echo "Setting up Atlas cluster" + + DRIVERS_ATLAS_PUBLIC_API_KEY="${DRIVERS_ATLAS_PUBLIC_API_KEY}" \ + DRIVERS_ATLAS_PRIVATE_API_KEY="${DRIVERS_ATLAS_PRIVATE_API_KEY}" \ + DRIVERS_ATLAS_GROUP_ID="${DRIVERS_ATLAS_GROUP_ID}" \ + DRIVERS_ATLAS_LAMBDA_USER="${DRIVERS_ATLAS_LAMBDA_USER}" \ + DRIVERS_ATLAS_LAMBDA_PASSWORD="${DRIVERS_ATLAS_LAMBDA_PASSWORD}" \ + LAMBDA_STACK_NAME="dbx-ruby-lambda" \ + MONGODB_VERSION="7.0" \ + task_id="${task_id}" \ + execution="${execution}" \ + $DRIVERS_TOOLS/.evergreen/atlas/setup-atlas-cluster.sh + - command: expansions.update + params: + file: src/atlas-expansion.yml + teardown_group: + - command: shell.exec + params: + shell: "bash" + working_dir: "src" + script: | + ${PREPARE_SHELL} + + DRIVERS_ATLAS_PUBLIC_API_KEY="${DRIVERS_ATLAS_PUBLIC_API_KEY}" \ + DRIVERS_ATLAS_PRIVATE_API_KEY="${DRIVERS_ATLAS_PRIVATE_API_KEY}" \ + DRIVERS_ATLAS_GROUP_ID="${DRIVERS_ATLAS_GROUP_ID}" \ + LAMBDA_STACK_NAME="dbx-ruby-lambda" \ + task_id="${task_id}" \ + execution="${execution}" \ + $DRIVERS_TOOLS/.evergreen/atlas/teardown-atlas-cluster.sh + tasks: + - test-aws-lambda-deployed + - name: testgcpkms_task_group setup_group_can_fail_task: true setup_group_timeout_secs: 1800 # 30 minutes @@ -699,3 +756,35 @@ tasks: export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} export AZUREKMS_PRIVATEKEYPATH="/tmp/testazurekms_private_key_file" AZUREKMS_CMD="TEST_FLE_AZURE_AUTO=1 RVM_RUBY=ruby-3.1 FLE=helper TOPOLOGY=standalone MONGODB_VERSION=6.0 MONGO_RUBY_DRIVER_AZURE_TENANT_ID="${MONGO_RUBY_DRIVER_AZURE_TENANT_ID}" MONGO_RUBY_DRIVER_AZURE_CLIENT_ID="${MONGO_RUBY_DRIVER_AZURE_CLIENT_ID}" MONGO_RUBY_DRIVER_AZURE_CLIENT_SECRET="${MONGO_RUBY_DRIVER_AZURE_CLIENT_SECRET}" 
MONGO_RUBY_DRIVER_AZURE_IDENTITY_PLATFORM_ENDPOINT="${MONGO_RUBY_DRIVER_AZURE_IDENTITY_PLATFORM_ENDPOINT}" MONGO_RUBY_DRIVER_AZURE_KEY_VAULT_ENDPOINT="${testazurekms_keyvaultendpoint}" MONGO_RUBY_DRIVER_AZURE_KEY_NAME="${testazurekms_keyname}" ./.evergreen/run-tests-azure.sh" .evergreen/csfle/azurekms/run-command.sh + + - name: "test-aws-lambda-deployed" + commands: + - command: ec2.assume_role + params: + role_arn: ${LAMBDA_AWS_ROLE_ARN} + duration_seconds: 3600 + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + export MONGODB_URI=${MONGODB_URI} + .evergreen/run-tests-deployed-lambda.sh + env: + TEST_LAMBDA_DIRECTORY: ${PROJECT_DIRECTORY}/spec/faas/ruby-sam-app + AWS_REGION: us-east-1 + PROJECT_DIRECTORY: ${PROJECT_DIRECTORY} + DRIVERS_TOOLS: ${DRIVERS_TOOLS} + DRIVERS_ATLAS_PUBLIC_API_KEY: ${DRIVERS_ATLAS_PUBLIC_API_KEY} + DRIVERS_ATLAS_PRIVATE_API_KEY: ${DRIVERS_ATLAS_PRIVATE_API_KEY} + DRIVERS_ATLAS_LAMBDA_USER: ${DRIVERS_ATLAS_LAMBDA_USER} + DRIVERS_ATLAS_LAMBDA_PASSWORD: ${DRIVERS_ATLAS_LAMBDA_PASSWORD} + DRIVERS_ATLAS_GROUP_ID: ${DRIVERS_ATLAS_GROUP_ID} + AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID} + AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY} + AWS_SESSION_TOKEN: ${AWS_SESSION_TOKEN} + LAMBDA_STACK_NAME: "dbx-ruby-lambda" + RVM_RUBY: ruby-3.2 + MONGODB_URI: ${MONGODB_URI} diff --git a/.evergreen/config/standard.yml.erb b/.evergreen/config/standard.yml.erb index 3bcd1b9acb..1964946d4c 100644 --- a/.evergreen/config/standard.yml.erb +++ b/.evergreen/config/standard.yml.erb @@ -232,24 +232,21 @@ buildvariants: tasks: - name: "test-mlaunch" - # The X.509 tests are failing with exceptions being expected, but never - # raised. (RUBY-3267) - # - # - matrix_name: "x509-tests" - # matrix_spec: - # auth-and-ssl: "x509" - # ruby: <%= latest_ruby %> - # # needs the latest_5x_mdb because run-tests.sh uses `mongo` to configure - # # the server for certain auth mechanisms. Once run-tests.sh is made smart - # # enough to install mongosh, and then use either mongo or mongosh - # # (depending on server version and what's available), we can bump this to - # # the latest stable db version. - # mongodb-version: <%= latest_5x_mdb %> - # topology: standalone - # os: rhel8 - # display_name: "${mongodb-version} ${topology} ${auth-and-ssl} ${ruby}" - # tasks: - # - name: "test-mlaunch" + - matrix_name: "x509-tests" + matrix_spec: + auth-and-ssl: "x509" + ruby: <%= latest_ruby %> + # needs the latest_5x_mdb because run-tests.sh uses `mongo` to configure + # the server for certain auth mechanisms. Once run-tests.sh is made smart + # enough to install mongosh, and then use either mongo or mongosh + # (depending on server version and what's available), we can bump this to + # the latest stable db version. + mongodb-version: <%= latest_5x_mdb %> + topology: standalone + os: rhel8 + display_name: "${mongodb-version} ${topology} ${auth-and-ssl} ${ruby}" + tasks: + - name: "test-mlaunch" - matrix_name: "jruby-auth" matrix_spec: @@ -379,7 +376,7 @@ buildvariants: # (depending on server version and what's available), we can bump this to # the latest stable db version. 
mongodb-version: <%= latest_5x_mdb %> - os: rhel8 + os: ubuntu2004 display_name: "AWS ${auth-and-ssl} ${mongodb-version} ${ruby}" tasks: - name: "test-aws-auth" @@ -520,6 +517,15 @@ buildvariants: ruby: <%= supported_mri_rubies %> fle: path os: rhel8 - display_name: "Atlas serverless ${ruby} single mongos" + serverless-project: [ original, proxy ] + display_name: "Atlas serverless ${ruby} ${serverless-project}" tasks: - name: test-serverless + + - matrix_name: "aws-lambda" + matrix_spec: + ruby: <%= latest_ruby %> + os: ubuntu2204 + display_name: "AWS Lambda" + tasks: + - name: test_aws_lambda_task_group diff --git a/.evergreen/provision-local b/.evergreen/provision-local index 3ad2654b4b..1ce14d240c 100755 --- a/.evergreen/provision-local +++ b/.evergreen/provision-local @@ -27,5 +27,5 @@ sudo env DEBIAN_FRONTEND=noninteractive \ # Need binutils for `strings` utility per # https://aws.amazon.com/premiumsupport/knowledge-center/ecs-iam-task-roles-config-errors/ sudo env DEBIAN_FRONTEND=noninteractive \ - apt-get install -y libsnmp30 libyaml-0-2 gcc make git lsb-release \ - krb5-user bzip2 libgmp-dev python-pip python2.7-dev binutils + apt-get install -y libsnmp35 libyaml-0-2 gcc make git lsb-release \ + krb5-user bzip2 libgmp-dev python3-pip python2.7-dev binutils diff --git a/.evergreen/run-deployed-lambda-aws-tests.sh b/.evergreen/run-deployed-lambda-aws-tests.sh new file mode 100755 index 0000000000..32eebfbf2b --- /dev/null +++ b/.evergreen/run-deployed-lambda-aws-tests.sh @@ -0,0 +1,117 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +# Explanation of required environment variables: +# +# TEST_LAMBDA_DIRECTORY: The root of the project's Lambda sam project. +# DRIVERS_ATLAS_PUBLIC_API_KEY: The public Atlas key for the drivers org. +# DRIVERS_ATLAS_PRIVATE_API_KEY: The private Atlas key for the drivers org. +# DRIVERS_ATLAS_LAMBDA_USER: The user for the lambda cluster. +# DRIVERS_ATLAS_LAMBDA_PASSWORD: The password for the user. +# DRIVERS_ATLAS_GROUP_ID: The id of the individual projects under the drivers org, per language. +# LAMBDA_STACK_NAME: The name of the stack on lambda "dbx--lambda" +# AWS_REGION: The region for the function - generally us-east-1 + +VARLIST=( +TEST_LAMBDA_DIRECTORY +DRIVERS_ATLAS_PUBLIC_API_KEY +DRIVERS_ATLAS_PRIVATE_API_KEY +DRIVERS_ATLAS_LAMBDA_USER +DRIVERS_ATLAS_LAMBDA_PASSWORD +DRIVERS_ATLAS_GROUP_ID +LAMBDA_STACK_NAME +AWS_REGION +) + +# Ensure that all variables required to run the test are set, otherwise throw +# an error. +for VARNAME in ${VARLIST[*]}; do +[[ -z "${!VARNAME}" ]] && echo "ERROR: $VARNAME not set" && exit 1; +done + +# Set up the common variables +. `dirname "$0"`/atlas/setup-variables.sh + +# Restarts the cluster's primary node. +restart_cluster_primary () +{ + echo "Testing Atlas primary restart..." + curl \ + --digest -u ${DRIVERS_ATLAS_PUBLIC_API_KEY}:${DRIVERS_ATLAS_PRIVATE_API_KEY} \ + -X POST \ + "${ATLAS_BASE_URL}/groups/${DRIVERS_ATLAS_GROUP_ID}/clusters/${FUNCTION_NAME}/restartPrimaries" +} + +# Deploys a lambda function to the set stack name. +deploy_lambda_function () +{ + echo "Deploying Lambda function..." + sam deploy \ + --stack-name "${FUNCTION_NAME}" \ + --capabilities CAPABILITY_IAM \ + --resolve-s3 \ + --parameter-overrides "MongoDbUri=${MONGODB_URI}" \ + --region ${AWS_REGION} +} + +# Get the ARN for the Lambda function we created and export it. +get_lambda_function_arn () +{ + echo "Getting Lambda function ARN..." 
+ LAMBDA_FUNCTION_ARN=$(sam list stack-outputs \ + --stack-name ${FUNCTION_NAME} \ + --region ${AWS_REGION} \ + --output json | jq '.[] | select(.OutputKey == "MongoDBFunction") | .OutputValue' | tr -d '"' + ) + echo "Lambda function ARN: $LAMBDA_FUNCTION_ARN" + export LAMBDA_FUNCTION_ARN=$LAMBDA_FUNCTION_ARN +} + +delete_lambda_function () +{ + echo "Deleting Lambda Function..." + sam delete --stack-name ${FUNCTION_NAME} --no-prompts --region us-east-1 +} + +cleanup () +{ + delete_lambda_function +} + +trap cleanup EXIT SIGHUP + +cd "${TEST_LAMBDA_DIRECTORY}" + +sam build --use-container + +deploy_lambda_function + +get_lambda_function_arn + + +check_lambda_output () { + if grep -q FunctionError output.json + then + echo "Exiting due to FunctionError!" + exit 1 + fi + cat output.json | jq -r '.LogResult' | base64 --decode +} + +aws lambda invoke --function-name ${LAMBDA_FUNCTION_ARN} --log-type Tail lambda-invoke-standard.json > output.json +cat lambda-invoke-standard.json +check_lambda_output + +echo "Sleeping 1 minute to build up some streaming protocol heartbeats..." +sleep 60 +aws lambda invoke --function-name ${LAMBDA_FUNCTION_ARN} --log-type Tail lambda-invoke-frozen.json > output.json +cat lambda-invoke-frozen.json +check_lambda_output + +restart_cluster_primary + +echo "Sleeping 1 minute to build up some streaming protocol heartbeats..." +sleep 60 +aws lambda invoke --function-name ${LAMBDA_FUNCTION_ARN} --log-type Tail lambda-invoke-outage.json > output.json +cat lambda-invoke-outage.json +check_lambda_output diff --git a/.evergreen/run-tests-aws-auth.sh b/.evergreen/run-tests-aws-auth.sh index 1813400700..c9649e4a64 100755 --- a/.evergreen/run-tests-aws-auth.sh +++ b/.evergreen/run-tests-aws-auth.sh @@ -97,9 +97,11 @@ case "$AUTH" in aws-web-identity) cd `dirname "$0"`/auth_aws - . ./activate_venv.sh + echo "Activating virtual environment 'authawsvenv'..." + . ./activate-authawsvenv.sh export AWS_ACCESS_KEY_ID="`get_var IAM_AUTH_EC2_INSTANCE_ACCOUNT`" export AWS_SECRET_ACCESS_KEY="`get_var IAM_AUTH_EC2_INSTANCE_SECRET_ACCESS_KEY`" + echo "Unassigning instance profile..." python -u lib/aws_unassign_instance_profile.py unset AWS_ACCESS_KEY_ID unset AWS_SECRET_ACCESS_KEY @@ -113,6 +115,7 @@ case "$AUTH" in unset IDP_JWKS_URI unset IDP_RSA_KEY + deactivate cd - export MONGO_RUBY_DRIVER_AWS_AUTH_ACCESS_KEY_ID="`get_var IAM_AUTH_EC2_INSTANCE_ACCOUNT`" export MONGO_RUBY_DRIVER_AWS_AUTH_SECRET_ACCESS_KEY="`get_var IAM_AUTH_EC2_INSTANCE_SECRET_ACCESS_KEY`" diff --git a/.evergreen/run-tests-deployed-lambda.sh b/.evergreen/run-tests-deployed-lambda.sh new file mode 100755 index 0000000000..9b5d01d526 --- /dev/null +++ b/.evergreen/run-tests-deployed-lambda.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -ex + +. `dirname "$0"`/../spec/shared/shlib/distro.sh +. `dirname "$0"`/../spec/shared/shlib/set_env.sh +. `dirname "$0"`/functions.sh + +set_env_vars +set_env_python +set_env_ruby + +export MONGODB_URI=${MONGODB_URI} +export TEST_LAMBDA_DIRECTORY=`dirname "$0"`/../spec/faas/ruby-sam-app + +. 
`dirname "$0"`/run-deployed-lambda-aws-tests.sh diff --git a/.evergreen/run-tests-serverless.sh b/.evergreen/run-tests-serverless.sh index 4bcd983ade..cd71e41d6e 100755 --- a/.evergreen/run-tests-serverless.sh +++ b/.evergreen/run-tests-serverless.sh @@ -16,14 +16,19 @@ export MONGODB_URI=`echo ${SERVERLESS_URI} | sed -r 's/mongodb\+srv:\/\//mongodb export TOPOLOGY="load-balanced" -python3 -u .evergreen/mongodl.py --component crypt_shared -V ${SERVERLESS_MONGODB_VERSION} --out `pwd`/csfle_lib --target `host_distro` || true -if test -f `pwd`/csfle_lib/lib/mongo_crypt_v1.so -then - echo Usinn crypt shared library version ${SERVERLESS_MONGODB_VERSION} - export MONGO_RUBY_DRIVER_CRYPT_SHARED_LIB_PATH=`pwd`/csfle_lib/lib/mongo_crypt_v1.so +if [ -n "${CRYPT_SHARED_LIB_PATH}" ]; then + echo crypt_shared already present at ${CRYPT_SHARED_LIB_PATH} -- using this version + export MONGO_RUBY_DRIVER_CRYPT_SHARED_LIB_PATH=$CRYPT_SHARED_LIB_PATH else - echo Failed to download crypt shared library - exit -1 + python3 -u .evergreen/mongodl.py --component crypt_shared -V ${SERVERLESS_MONGODB_VERSION} --out `pwd`/csfle_lib --target `host_distro` || true + if test -f `pwd`/csfle_lib/lib/mongo_crypt_v1.so + then + echo Usinn crypt shared library version ${SERVERLESS_MONGODB_VERSION} + export MONGO_RUBY_DRIVER_CRYPT_SHARED_LIB_PATH=`pwd`/csfle_lib/lib/mongo_crypt_v1.so + else + echo Failed to download crypt shared library + exit -1 + fi fi if ! ( test -f /etc/os-release & grep -q ^ID.*rhel /etc/os-release & grep -q ^VERSION_ID.*8.0 /etc/os-release ); then @@ -70,7 +75,8 @@ wait_for_kms_server 5698 echo "Waiting for mock KMS servers to start... done." # Obtain temporary AWS credentials -. ./set-temp-creds.sh +pip3 install boto3 +PYTHON=python3 . ./set-temp-creds.sh cd - echo "Running specs" diff --git a/.gitignore b/.gitignore index 24a54bc470..f1acca9738 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,6 @@ profile/benchmarking/data secrets-export.sh secrets-expansion.yml atlas-expansion.yml +# AWS SAM-generated files +spec/faas/ruby-sam-app/.aws-sam +spec/faas/ruby-sam-app/events/event.json diff --git a/.rubocop.yml b/.rubocop.yml index c76890187f..fa7d04f8c6 100644 --- a/.rubocop.yml +++ b/.rubocop.yml @@ -8,6 +8,7 @@ AllCops: NewCops: enable Exclude: - 'spec/shared/**/*' + - 'spec/faas/**/*' - 'vendor/**/*' Bundler: @@ -50,6 +51,9 @@ Layout/SpaceInsideArrayLiteralBrackets: Layout/SpaceInsidePercentLiteralDelimiters: Enabled: false +Metrics/ClassLength: + Max: 200 + Metrics/ModuleLength: Enabled: false @@ -86,6 +90,9 @@ Style/Documentation: Exclude: - 'spec/**/*' +Style/FormatStringToken: + Enabled: false + Style/ModuleFunction: EnforcedStyle: extend_self diff --git a/Rakefile b/Rakefile index f64a790c12..4a4458070d 100644 --- a/Rakefile +++ b/Rakefile @@ -131,157 +131,4 @@ namespace :docs do end end -require_relative "profile/benchmarking" - -# Some require data files, available from the drivers team. See the comments above each task for details." -namespace :benchmark do - desc "Run the driver benchmark tests." - - namespace :micro do - desc "Run the common driver micro benchmarking tests" - - namespace :flat do - desc "Benchmarking for flat bson documents." - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called flat_bson.json. - task :encode do - puts "MICRO BENCHMARK:: FLAT:: ENCODE" - Mongo::Benchmarking::Micro.run(:flat, :encode) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called flat_bson.json. 
- task :decode do - puts "MICRO BENCHMARK:: FLAT:: DECODE" - Mongo::Benchmarking::Micro.run(:flat, :decode) - end - end - - namespace :deep do - desc "Benchmarking for deep bson documents." - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called deep_bson.json. - task :encode do - puts "MICRO BENCHMARK:: DEEP:: ENCODE" - Mongo::Benchmarking::Micro.run(:deep, :encode) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called deep_bson.json. - task :decode do - puts "MICRO BENCHMARK:: DEEP:: DECODE" - Mongo::Benchmarking::Micro.run(:deep, :decode) - end - end - - namespace :full do - desc "Benchmarking for full bson documents." - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called full_bson.json. - task :encode do - puts "MICRO BENCHMARK:: FULL:: ENCODE" - Mongo::Benchmarking::Micro.run(:full, :encode) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called full_bson.json. - task :decode do - puts "MICRO BENCHMARK:: FULL:: DECODE" - Mongo::Benchmarking::Micro.run(:full, :decode) - end - end - end - - namespace :single_doc do - desc "Run the common driver single-document benchmarking tests" - task :command do - puts "SINGLE DOC BENCHMARK:: COMMAND" - Mongo::Benchmarking::SingleDoc.run(:command) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called TWEET.json. - task :find_one do - puts "SINGLE DOC BENCHMARK:: FIND ONE BY ID" - Mongo::Benchmarking::SingleDoc.run(:find_one) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called SMALL_DOC.json. - task :insert_one_small do - puts "SINGLE DOC BENCHMARK:: INSERT ONE SMALL DOCUMENT" - Mongo::Benchmarking::SingleDoc.run(:insert_one_small) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called LARGE_DOC.json. - task :insert_one_large do - puts "SINGLE DOC BENCHMARK:: INSERT ONE LARGE DOCUMENT" - Mongo::Benchmarking::SingleDoc.run(:insert_one_large) - end - end - - namespace :multi_doc do - desc "Run the common driver multi-document benchmarking tests" - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called TWEET.json. - task :find_many do - puts "MULTI DOCUMENT BENCHMARK:: FIND MANY" - Mongo::Benchmarking::MultiDoc.run(:find_many) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called SMALL_DOC.json. - task :bulk_insert_small do - puts "MULTI DOCUMENT BENCHMARK:: BULK INSERT SMALL" - Mongo::Benchmarking::MultiDoc.run(:bulk_insert_small) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called LARGE_DOC.json. - task :bulk_insert_large do - puts "MULTI DOCUMENT BENCHMARK:: BULK INSERT LARGE" - Mongo::Benchmarking::MultiDoc.run(:bulk_insert_large) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called GRIDFS_LARGE. - task :gridfs_upload do - puts "MULTI DOCUMENT BENCHMARK:: GRIDFS UPLOAD" - Mongo::Benchmarking::MultiDoc.run(:gridfs_upload) - end - - # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called GRIDFS_LARGE. - task :gridfs_download do - puts "MULTI DOCUMENT BENCHMARK:: GRIDFS DOWNLOAD" - Mongo::Benchmarking::MultiDoc.run(:gridfs_download) - end - end - - namespace :parallel do - desc "Run the common driver paralell ETL benchmarking tests" - - # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called LDJSON_MULTI, - # with the files used in this task. 
-    task :import do
-      puts "PARALLEL ETL BENCHMARK:: IMPORT"
-      Mongo::Benchmarking::Parallel.run(:import)
-    end
-
-    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called LDJSON_MULTI,
-    # with the files used in this task.
-    # Requirement: Another directory in "#{Mongo::Benchmarking::DATA_PATH}/LDJSON_MULTI"
-    # called 'output'.
-    task :export do
-      puts "PARALLEL ETL BENCHMARK:: EXPORT"
-      Mongo::Benchmarking::Parallel.run(:export)
-    end
-
-    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called GRIDFS_MULTI,
-    # with the files used in this task.
-    task :gridfs_upload do
-      puts "PARALLEL ETL BENCHMARK:: GRIDFS UPLOAD"
-      Mongo::Benchmarking::Parallel.run(:gridfs_upload)
-    end
-
-    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called GRIDFS_MULTI,
-    # with the files used in this task.
-    # Requirement: Another directory in "#{Mongo::Benchmarking::DATA_PATH}/GRIDFS_MULTI"
-    # called 'output'.
-    task :gridfs_download do
-      puts "PARALLEL ETL BENCHMARK:: GRIDFS DOWNLOAD"
-      Mongo::Benchmarking::Parallel.run(:gridfs_download)
-    end
-  end
-end
+load 'profile/benchmarking/rake/tasks.rake'
diff --git a/docs/reference/driver-compatibility.txt b/docs/reference/driver-compatibility.txt
index 0402c17f83..664b19149d 100644
--- a/docs/reference/driver-compatibility.txt
+++ b/docs/reference/driver-compatibility.txt
@@ -28,6 +28,13 @@ particular version of the driver will generally work with newer versions of the
 server but may not take advantage of the functionality released in the newer
 version of the server.
 
+.. important::
+
+   MongoDB ensures compatibility between the MongoDB Server and the drivers
+   for three years after the server version's end of life (EOL) date. To learn
+   more about the MongoDB release and EOL dates, see
+   `MongoDB Software Lifecycle Schedules `__.
+
 The first column lists the driver versions. “D” in other columns means support
 for that MongoDB version is deprecated and will be removed in a future driver
 version.
diff --git a/docs/reference/transactions.txt b/docs/reference/transactions.txt
index a4e8571320..722038ca8f 100644
--- a/docs/reference/transactions.txt
+++ b/docs/reference/transactions.txt
@@ -76,6 +76,31 @@ which are read concern, write concern and read preference:
     collection.insert_one({hello: 'world'}, session: session)
   end
 
+Handling Errors Within the ``with_transaction`` Block
+-----------------------------------------------------
+
+If a command inside the ``with_transaction`` block fails, it may cause
+the transaction on the server to be aborted. This situation is normally handled
+transparently by the driver. However, if the application catches such an error
+and does not re-raise it, the driver will not be able to determine whether
+the transaction was aborted or not. The driver will then retry the block
+indefinitely.
+
+To avoid this situation, the application must not silently handle errors within
+the ``with_transaction`` block. If the application needs to handle errors within
+the block, it must re-raise the errors.
+
+.. code-block:: ruby
+
+  session.with_transaction do
+    collection.insert_one({hello: 'world'}, session: session)
+  rescue Mongo::Error::OperationFailure => e
+    # Do something in response to the error
+    raise e
+  end
+
+If the application needs to handle errors in a custom way, it should use
+the low level API instead.
Low Level API ============= @@ -145,6 +170,14 @@ session if one is in progress: # ok c2.database.drop +Handling Errors +--------------- + +If a command inside the transaction fails, the transaction may be aborted +on the server. Errors that abort transactions do not have +``TransientTransactionError`` in their error labels. An attempt to commit such a +transaction will be rejected with ``NoSuchTransaction`` error. + Retrying Commits ================ diff --git a/lib/mongo/cluster.rb b/lib/mongo/cluster.rb index b5981c7471..eac6d6229d 100644 --- a/lib/mongo/cluster.rb +++ b/lib/mongo/cluster.rb @@ -157,7 +157,7 @@ def initialize(seeds, monitoring, options = Options::Redacted.new) # @sdam_flow_lock covers just the sdam flow. Note it does not apply # to @topology replacements which are done under @update_lock. @sdam_flow_lock = Mutex.new - Session::SessionPool.create(self) + @session_pool = Session::SessionPool.new(self) if seeds.empty? && load_balanced? raise ArgumentError, 'Load-balanced clusters with no seeds are prohibited' @@ -186,6 +186,8 @@ def initialize(seeds, monitoring, options = Options::Redacted.new) recreate_topology(topology, opening_topology) end + possibly_warn_about_compatibility! + if load_balanced? # We are required by the specifications to produce certain SDAM events # when in load-balanced topology. @@ -1082,6 +1084,30 @@ def recreate_topology(new_topology_template, previous_topology) Monitoring::Event::TopologyChanged.new(previous_topology, @topology) ) end + + COSMOSDB_HOST_PATTERNS = %w[ .cosmos.azure.com ] + COSMOSDB_LOG_MESSAGE = 'You appear to be connected to a CosmosDB cluster. ' \ + 'For more information regarding feature compatibility and support please visit ' \ + 'https://www.mongodb.com/supportability/cosmosdb' + + DOCUMENTDB_HOST_PATTERNS = %w[ .docdb.amazonaws.com .docdb-elastic.amazonaws.com ] + DOCUMENTDB_LOG_MESSAGE = 'You appear to be connected to a DocumentDB cluster. ' \ + 'For more information regarding feature compatibility and support please visit ' \ + 'https://www.mongodb.com/supportability/documentdb' + + # Compares the server hosts with address suffixes of known services + # that provide limited MongoDB API compatibility, and warns about them. + def possibly_warn_about_compatibility! + if topology.server_hosts_match_any?(COSMOSDB_HOST_PATTERNS) + log_info COSMOSDB_LOG_MESSAGE + return + end + + if topology.server_hosts_match_any?(DOCUMENTDB_HOST_PATTERNS) + log_info DOCUMENTDB_LOG_MESSAGE + return + end + end end end diff --git a/lib/mongo/cluster/topology/base.rb b/lib/mongo/cluster/topology/base.rb index e92181e4fa..999d14bf75 100644 --- a/lib/mongo/cluster/topology/base.rb +++ b/lib/mongo/cluster/topology/base.rb @@ -211,6 +211,22 @@ def new_max_set_version(description) end end + # Compares each server address against the list of patterns. + # + # @param [ Array ] patterns the URL suffixes to compare + # each server against. + # + # @return [ true | false ] whether any of the addresses match any of + # the patterns or not. + # + # @api private + def server_hosts_match_any?(patterns) + server_descriptions.any? do |addr_spec, _desc| + addr, _port = addr_spec.split(/:/) + patterns.any? { |pattern| addr.end_with?(pattern) } + end + end + private # Validates and/or transforms options as necessary for the topology. diff --git a/lib/mongo/collection.rb b/lib/mongo/collection.rb index 90514f203f..a2b2076b7d 100644 --- a/lib/mongo/collection.rb +++ b/lib/mongo/collection.rb @@ -339,7 +339,9 @@ def capped? 
     #   inserted or updated documents where the clustered index key value
     #   matches an existing value in the index.
     #   - *:name* -- Optional. A name that uniquely identifies the clustered index.
-    # @option opts [ Hash ] :collation The collation to use.
+    # @option opts [ Hash ] :collation The collation to use when creating the
+    #   collection. This option will not be sent to the server when calling
+    #   collection methods.
     # @option opts [ Hash ] :encrypted_fields Hash describing encrypted fields
     #   for queryable encryption.
     # @option opts [ Integer ] :expire_after Number indicating
@@ -788,7 +790,7 @@ def inspect
   def insert_one(document, opts = {})
     QueryCache.clear_namespace(namespace)
 
-    client.send(:with_session, opts) do |session|
+    client.with_session(opts) do |session|
       write_concern = if opts[:write_concern]
         WriteConcern.get(opts[:write_concern])
       else
diff --git a/lib/mongo/collection/view/iterable.rb b/lib/mongo/collection/view/iterable.rb
index f16569efac..83ec0e458b 100644
--- a/lib/mongo/collection/view/iterable.rb
+++ b/lib/mongo/collection/view/iterable.rb
@@ -162,6 +162,7 @@ def initial_query_op(session)
         let: options[:let],
         limit: limit,
         allow_disk_use: options[:allow_disk_use],
+        allow_partial_results: options[:allow_partial_results],
         read: read,
         read_concern: options[:read_concern] || read_concern,
         batch_size: batch_size,
diff --git a/lib/mongo/error.rb b/lib/mongo/error.rb
index 36296735d7..92d6d5f4b3 100644
--- a/lib/mongo/error.rb
+++ b/lib/mongo/error.rb
@@ -217,6 +217,7 @@ def write_concern_error_labels
 require 'mongo/error/server_api_conflict'
 require 'mongo/error/server_api_not_supported'
 require 'mongo/error/server_not_usable'
+require 'mongo/error/transactions_not_supported'
 require 'mongo/error/unknown_payload_type'
 require 'mongo/error/unmet_dependency'
 require 'mongo/error/unsupported_option'
diff --git a/lib/mongo/error/transactions_not_supported.rb b/lib/mongo/error/transactions_not_supported.rb
new file mode 100644
index 0000000000..bbaa6c7c58
--- /dev/null
+++ b/lib/mongo/error/transactions_not_supported.rb
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+# Copyright (C) 2019-2020 MongoDB Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Mongo
+  class Error
+    # Transactions are not supported by the cluster. There might be the
+    # following reasons:
+    # - topology is standalone
+    # - topology is replica set and server version is < 4.0
+    # - topology is sharded and server version is < 4.2
+    #
+    # @param [ String ] reason The reason why transactions are not supported.
+ # + # @since 2.7.0 + class TransactionsNotSupported < Error + def initialize(reason) + super("Transactions are not supported for the cluster: #{reason}") + end + end + end +end diff --git a/lib/mongo/monitoring/event/secure.rb b/lib/mongo/monitoring/event/secure.rb index 79abfd5a1b..0ad628e518 100644 --- a/lib/mongo/monitoring/event/secure.rb +++ b/lib/mongo/monitoring/event/secure.rb @@ -58,7 +58,7 @@ def sensitive?(command_name:, document:) # According to Command Monitoring spec,for hello/legacy hello commands # when speculativeAuthenticate is present, their commands AND replies # MUST be redacted from the events. - # See https://github.com/mongodb/specifications/blob/master/source/command-monitoring/command-monitoring.rst#security + # See https://github.com/mongodb/specifications/blob/master/source/command-logging-and-monitoring/command-logging-and-monitoring.rst#security true else false diff --git a/lib/mongo/operation/shared/executable.rb b/lib/mongo/operation/shared/executable.rb index 229479e24b..9b61476631 100644 --- a/lib/mongo/operation/shared/executable.rb +++ b/lib/mongo/operation/shared/executable.rb @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +require 'mongo/error' + module Mongo module Operation @@ -30,40 +32,42 @@ def do_execute(connection, context, options = {}) session&.materialize_if_needed unpin_maybe(session, connection) do add_error_labels(connection, context) do - add_server_diagnostics(connection) do - get_result(connection, context, options).tap do |result| - if session - if session.in_transaction? && - connection.description.load_balancer? - then - if session.pinned_connection_global_id - unless session.pinned_connection_global_id == connection.global_id - raise( - Error::InternalDriverError, - "Expected operation to use connection #{session.pinned_connection_global_id} but it used #{connection.global_id}" - ) + check_for_network_error do + add_server_diagnostics(connection) do + get_result(connection, context, options).tap do |result| + if session + if session.in_transaction? && + connection.description.load_balancer? + then + if session.pinned_connection_global_id + unless session.pinned_connection_global_id == connection.global_id + raise( + Error::InternalDriverError, + "Expected operation to use connection #{session.pinned_connection_global_id} but it used #{connection.global_id}" + ) + end + else + session.pin_to_connection(connection.global_id) + connection.pin end - else - session.pin_to_connection(connection.global_id) - connection.pin end - end - if session.snapshot? && !session.snapshot_timestamp - session.snapshot_timestamp = result.snapshot_timestamp + if session.snapshot? && !session.snapshot_timestamp + session.snapshot_timestamp = result.snapshot_timestamp + end end - end - if result.has_cursor_id? && - connection.description.load_balancer? - then - if result.cursor_id == 0 - connection.unpin - else - connection.pin + if result.has_cursor_id? && + connection.description.load_balancer? + then + if result.cursor_id == 0 + connection.unpin + else + connection.pin + end end + process_result(result, connection) end - process_result(result, connection) end end end @@ -144,6 +148,18 @@ def process_result_for_sdam(result, connection) connection.server.scan_semaphore.signal end end + + NETWORK_ERRORS = [ + Error::SocketError, + Error::SocketTimeoutError + ].freeze + + def check_for_network_error + yield + rescue *NETWORK_ERRORS + session&.dirty! 
+ raise + end end end end diff --git a/lib/mongo/operation/shared/response_handling.rb b/lib/mongo/operation/shared/response_handling.rb index af4c83ca75..799721a8de 100644 --- a/lib/mongo/operation/shared/response_handling.rb +++ b/lib/mongo/operation/shared/response_handling.rb @@ -50,35 +50,33 @@ def validate_result(result, connection, context) # the operation is performed. # @param [ Mongo::Operation::Context ] context The operation context. def add_error_labels(connection, context) - begin - yield - rescue Mongo::Error::SocketError => e - if context.in_transaction? && !context.committing_transaction? - e.add_label('TransientTransactionError') - end - if context.committing_transaction? - e.add_label('UnknownTransactionCommitResult') - end + yield + rescue Mongo::Error::SocketError => e + if context.in_transaction? && !context.committing_transaction? + e.add_label('TransientTransactionError') + end + if context.committing_transaction? + e.add_label('UnknownTransactionCommitResult') + end - maybe_add_retryable_write_error_label!(e, connection, context) - - raise e - rescue Mongo::Error::SocketTimeoutError => e - maybe_add_retryable_write_error_label!(e, connection, context) - raise e - rescue Mongo::Error::OperationFailure => e - if context.committing_transaction? - if e.write_retryable? || e.wtimeout? || (e.write_concern_error? && - !Session::UNLABELED_WRITE_CONCERN_CODES.include?(e.write_concern_error_code) - ) || e.max_time_ms_expired? - e.add_label('UnknownTransactionCommitResult') - end + maybe_add_retryable_write_error_label!(e, connection, context) + + raise e + rescue Mongo::Error::SocketTimeoutError => e + maybe_add_retryable_write_error_label!(e, connection, context) + raise e + rescue Mongo::Error::OperationFailure => e + if context.committing_transaction? + if e.write_retryable? || e.wtimeout? || (e.write_concern_error? && + !Session::UNLABELED_WRITE_CONCERN_CODES.include?(e.write_concern_error_code) + ) || e.max_time_ms_expired? + e.add_label('UnknownTransactionCommitResult') end + end - maybe_add_retryable_write_error_label!(e, connection, context) + maybe_add_retryable_write_error_label!(e, connection, context) - raise e - end + raise e end # Unpins the session and/or the connection if the yielded to block diff --git a/lib/mongo/retryable.rb b/lib/mongo/retryable.rb index c6330bf486..2508e13efd 100644 --- a/lib/mongo/retryable.rb +++ b/lib/mongo/retryable.rb @@ -46,8 +46,8 @@ module Retryable # @api private # # @return [ Mongo::Server ] A server matching the server preference. - def select_server(cluster, server_selector, session) - server_selector.select_server(cluster, nil, session) + def select_server(cluster, server_selector, session, failed_server = nil) + server_selector.select_server(cluster, nil, session, deprioritized: [failed_server].compact) end # Returns the read worker for handling retryable reads. diff --git a/lib/mongo/retryable/read_worker.rb b/lib/mongo/retryable/read_worker.rb index 763fa0f75b..a82e67a051 100644 --- a/lib/mongo/retryable/read_worker.rb +++ b/lib/mongo/retryable/read_worker.rb @@ -190,12 +190,13 @@ def deprecated_legacy_read_with_retry(&block) # # @return [ Result ] The result of the operation. 
def modern_read_with_retry(session, server_selector, &block) - yield select_server(cluster, server_selector, session) + server = select_server(cluster, server_selector, session) + yield server rescue *retryable_exceptions, Error::OperationFailure, Auth::Unauthorized, Error::PoolError => e e.add_notes('modern retry', 'attempt 1') raise e if session.in_transaction? raise e if !is_retryable_exception?(e) && !e.write_retryable? - retry_read(e, session, server_selector, &block) + retry_read(e, session, server_selector, failed_server: server, &block) end # Attempts to do a "legacy" read with retry. The operation will be @@ -257,12 +258,14 @@ def read_without_retry(session, server_selector, &block) # being run on. # @param [ Mongo::ServerSelector::Selectable ] server_selector Server # selector for the operation. + # @param [ Mongo::Server ] failed_server The server on which the original + # operation failed. # @param [ Proc ] block The block to execute. # # @return [ Result ] The result of the operation. - def retry_read(original_error, session, server_selector, &block) + def retry_read(original_error, session, server_selector, failed_server: nil, &block) begin - server = select_server(cluster, server_selector, session) + server = select_server(cluster, server_selector, session, failed_server) rescue Error, Error::AuthError => e original_error.add_note("later retry failed: #{e.class}: #{e}") raise original_error @@ -289,8 +292,6 @@ def retry_read(original_error, session, server_selector, &block) raise original_error end end - end - end end diff --git a/lib/mongo/retryable/write_worker.rb b/lib/mongo/retryable/write_worker.rb index c21265ed56..339a28b2f1 100644 --- a/lib/mongo/retryable/write_worker.rb +++ b/lib/mongo/retryable/write_worker.rb @@ -103,8 +103,9 @@ def write_with_retry(write_concern, ending_transaction: false, context:, &block) def nro_write_with_retry(write_concern, context:, &block) session = context.session server = select_server(cluster, ServerSelector.primary, session) + options = session&.client&.options || {} - if session&.client.options[:retry_writes] + if options[:retry_writes] begin server.with_connection(connection_global_id: context.connection_global_id) do |connection| yield connection, nil, context @@ -240,7 +241,7 @@ def modern_write_with_retry(session, server, context, &block) # Context#with creates a new context, which is not necessary here # but the API is less prone to misuse this way. - retry_write(e, txn_num, context: context.with(is_retry: true), &block) + retry_write(e, txn_num, context: context.with(is_retry: true), failed_server: server, &block) end # Called after a failed write, this will retry the write no more than @@ -250,9 +251,11 @@ def modern_write_with_retry(session, server, context, &block) # retry. # @param [ Number ] txn_num The transaction number. # @param [ Operation::Context ] context The context for the operation. + # @param [ Mongo::Server ] failed_server The server on which the original + # operation failed. # # @return [ Result ] The result of the operation. - def retry_write(original_error, txn_num, context:, &block) + def retry_write(original_error, txn_num, context:, failed_server: nil, &block) session = context.session # We do not request a scan of the cluster here, because error handling @@ -260,7 +263,7 @@ def retry_write(original_error, txn_num, context:, &block) # server description and/or topology as necessary (specifically, # a socket error or a not master error should have marked the respective # server unknown). 
Here we just need to wait for server selection. - server = select_server(cluster, ServerSelector.primary, session) + server = select_server(cluster, ServerSelector.primary, session, failed_server) unless server.retry_writes? # Do not need to add "modern retry" here, it should already be on diff --git a/lib/mongo/server/app_metadata.rb b/lib/mongo/server/app_metadata.rb index 2d982c4f4b..81e595d9aa 100644 --- a/lib/mongo/server/app_metadata.rb +++ b/lib/mongo/server/app_metadata.rb @@ -187,13 +187,14 @@ def os_doc } end - # Returns the environment doc describing the current FaaS environment. + # Returns the environment doc describing the current execution + # environment. # - # @return [ Hash | nil ] the environment doc (or nil if not in a FaaS - # environment). + # @return [ Hash | nil ] the environment doc (or nil if no relevant + # environment info was detected) def env_doc env = Environment.new - env.faas? ? env.to_h : nil + env.present? ? env.to_h : nil end def type diff --git a/lib/mongo/server/app_metadata/environment.rb b/lib/mongo/server/app_metadata/environment.rb index fafb1c73e7..029257faec 100644 --- a/lib/mongo/server/app_metadata/environment.rb +++ b/lib/mongo/server/app_metadata/environment.rb @@ -18,9 +18,12 @@ module Mongo class Server class AppMetadata # Implements the logic from the handshake spec, for deducing and - # reporting the current FaaS environment in which the program is + # reporting the current environment in which the program is # executing. # + # This includes FaaS environment checks, as well as checks for the + # presence of a container (Docker) and/or orchestrator (Kubernetes). + # # @api private class Environment # Error class for reporting that too many discriminators were found @@ -39,6 +42,10 @@ class TypeMismatch < Mongo::Error; end # Error class for reporting that the value for a field is too long. class ValueTooLong < Mongo::Error; end + # The name and location of the .dockerenv file that will signal the + # presence of Docker. + DOCKERENV_PATH = '/.dockerenv' + # This value is not explicitly specified in the spec, only implied to be # less than 512. MAXIMUM_VALUE_LENGTH = 500 @@ -102,9 +109,11 @@ class ValueTooLong < Mongo::Error; end # if the environment contains invalid or contradictory state, it will # be initialized with {{name}} set to {{nil}}. def initialize + @fields = {} @error = nil @name = detect_environment - populate_fields + populate_faas_fields + detect_container rescue TooManyEnvironments => e self.error = "too many environments detected: #{e.message}" rescue MissingVariable => e @@ -115,6 +124,23 @@ def initialize self.error = "value for #{e.message} is too long" end + # Queries the detected container information. + # + # @return [ Hash | nil ] the detected container information, or + # nil if no container was detected. + def container + fields[:container] + end + + # Queries whether any environment information was able to be + # detected. + # + # @return [ true | false ] if any environment information was + # detected. + def present? + @name || fields.any? + end + # Queries whether the current environment is a valid FaaS environment. # # @return [ true | false ] whether the environment is a FaaS @@ -159,14 +185,11 @@ def vercel? @name == 'vercel' end - # Compiles the detected environment information into a Hash. It will - # always include a {{name}} key, but may include other keys as well, - # depending on the detected FaaS environment. (See the handshake - # spec for details.) 
+ # Compiles the detected environment information into a Hash. # # @return [ Hash ] the detected environment information. def to_h - fields.merge(name: name) + name ? fields.merge(name: name) : fields end private @@ -192,6 +215,38 @@ def detect_environment names.first end + # Looks for the presence of a container. Currently can detect + # Docker (by the existence of a .dockerenv file in the root + # directory) and Kubernetes (by the existence of the KUBERNETES_SERVICE_HOST + # environment variable). + def detect_container + runtime = docker_present? && 'docker' + orchestrator = kubernetes_present? && 'kubernetes' + + return unless runtime || orchestrator + + fields[:container] = {} + fields[:container][:runtime] = runtime if runtime + fields[:container][:orchestrator] = orchestrator if orchestrator + end + + # Checks for the existence of a .dockerenv in the root directory. + def docker_present? + File.exist?(dockerenv_path) + end + + # Implementing this as a method so that it can be mocked in tests, to + # test the presence or absence of Docker. + def dockerenv_path + DOCKERENV_PATH + end + + # Checks for the presence of a non-empty KUBERNETES_SERVICE_HOST + # environment variable. + def kubernetes_present? + !ENV['KUBERNETES_SERVICE_HOST'].to_s.empty? + end + # Determines whether the named environment variable exists, and (if # a pattern has been declared for that descriminator) whether the # pattern matches the value of the variable. @@ -212,10 +267,10 @@ def discriminator_matches?(var) # Extracts environment information from the current environment # variables, based on the detected FaaS environment. Populates the # {{@fields}} instance variable. - def populate_fields + def populate_faas_fields return unless name - @fields = FIELDS[name].each_with_object({}) do |(var, defn), fields| + FIELDS[name].each_with_object(@fields) do |(var, defn), fields| fields[defn[:field]] = extract_field(var, defn) end end diff --git a/lib/mongo/server/description/features.rb b/lib/mongo/server/description/features.rb index 849374546c..97a222713b 100644 --- a/lib/mongo/server/description/features.rb +++ b/lib/mongo/server/description/features.rb @@ -48,6 +48,7 @@ class Features # provided by the client during findAndModify operations, requiring the # driver to raise client-side errors when those options are provided. find_and_modify_option_validation: 8, + sharded_transactions: 8, transactions: 7, scram_sha_256: 7, array_filters: 6, diff --git a/lib/mongo/server_selector/base.rb b/lib/mongo/server_selector/base.rb index b883785129..10eb478449 100644 --- a/lib/mongo/server_selector/base.rb +++ b/lib/mongo/server_selector/base.rb @@ -164,6 +164,10 @@ def ==(other) # for mongos pinning. Added in version 2.10.0. # @param [ true | false ] write_aggregation Whether we need a server that # supports writing aggregations (e.g. with $merge/$out) on secondaries. + # @param [ Array ] deprioritized A list of servers that should + # be selected from only if no other servers are available. This is + # used to avoid selecting the same server twice in a row when + # retrying a command. # # @return [ Mongo::Server ] A server matching the server preference. # @@ -174,8 +178,8 @@ def ==(other) # lint mode is enabled. 
# # @since 2.0.0 - def select_server(cluster, ping = nil, session = nil, write_aggregation: false) - select_server_impl(cluster, ping, session, write_aggregation).tap do |server| + def select_server(cluster, ping = nil, session = nil, write_aggregation: false, deprioritized: []) + select_server_impl(cluster, ping, session, write_aggregation, deprioritized).tap do |server| if Lint.enabled? && !server.pool.ready? raise Error::LintError, 'Server selector returning a server with a pool which is not ready' end @@ -183,7 +187,7 @@ def select_server(cluster, ping = nil, session = nil, write_aggregation: false) end # Parameters and return values are the same as for select_server. - private def select_server_impl(cluster, ping, session, write_aggregation) + private def select_server_impl(cluster, ping, session, write_aggregation, deprioritized) if cluster.topology.is_a?(Cluster::Topology::LoadBalanced) return cluster.servers.first end @@ -266,7 +270,7 @@ def select_server(cluster, ping = nil, session = nil, write_aggregation: false) end end - server = try_select_server(cluster, write_aggregation: write_aggregation) + server = try_select_server(cluster, write_aggregation: write_aggregation, deprioritized: deprioritized) if server unless cluster.topology.compatible? @@ -321,11 +325,15 @@ def select_server(cluster, ping = nil, session = nil, write_aggregation: false) # an eligible server. # @param [ true | false ] write_aggregation Whether we need a server that # supports writing aggregations (e.g. with $merge/$out) on secondaries. + # @param [ Array ] deprioritized A list of servers that should + # be selected from only if no other servers are available. This is + # used to avoid selecting the same server twice in a row when + # retrying a command. # # @return [ Server | nil ] A suitable server, if one exists. # # @api private - def try_select_server(cluster, write_aggregation: false) + def try_select_server(cluster, write_aggregation: false, deprioritized: []) servers = if write_aggregation && cluster.replica_set? # 1. Check if ALL servers in cluster support secondary writes. is_write_supported = cluster.servers.reduce(true) do |res, server| @@ -347,7 +355,7 @@ def try_select_server(cluster, write_aggregation: false) # by the selector (e.g. for secondary preferred, the first # server may be a secondary and the second server may be primary) # and we should take the first server here respecting the order - server = servers.first + server = suitable_server(servers, deprioritized) if server if Lint.enabled? @@ -418,6 +426,24 @@ def suitable_servers(cluster) private + # Returns a server from the list of servers that is suitable for + # executing the operation. + # + # @param [ Array ] servers The candidate servers. + # @param [ Array ] deprioritized A list of servers that should + # be selected from only if no other servers are available. + # + # @return [ Server | nil ] The suitable server or nil if no suitable + # server is available. + def suitable_server(servers, deprioritized) + preferred = servers - deprioritized + if preferred.empty? + servers.first + else + preferred.first + end + end + # Convert this server preference definition into a format appropriate # for sending to a MongoDB server (i.e., as a command field). # diff --git a/lib/mongo/session.rb b/lib/mongo/session.rb index bc12896af0..b22519efc1 100644 --- a/lib/mongo/session.rb +++ b/lib/mongo/session.rb @@ -123,6 +123,23 @@ def snapshot? 
# @since 2.5.0 attr_reader :operation_time + # Sets the dirty state to the given value for the underlying server + # session. If there is no server session, this does nothing. + # + # @param [ true | false ] mark whether to mark the server session as + # dirty, or not. + def dirty!(mark = true) + @server_session&.dirty!(mark) + end + + # @return [ true | false | nil ] whether the underlying server session is + # dirty. If no server session exists for this session, returns nil. + # + # @api private + def dirty? + @server_session&.dirty? + end + # @return [ Hash ] The options for the transaction currently being executed # on this session. # @@ -538,6 +555,8 @@ def with_transaction(options=nil) # # @since 2.6.0 def start_transaction(options = nil) + check_transactions_supported! + if options Lint.validate_read_concern_option(options[:read_concern]) @@ -1185,5 +1204,18 @@ def check_matching_cluster!(client) raise Mongo::Error::InvalidSession.new(MISMATCHED_CLUSTER_ERROR_MSG) end end + + def check_transactions_supported! + raise Mongo::Error::TransactionsNotSupported, "standalone topology" if cluster.single? + + cluster.next_primary.with_connection do |conn| + if cluster.replica_set? && !conn.features.transactions_enabled? + raise Mongo::Error::TransactionsNotSupported, "server version is < 4.0" + end + if cluster.sharded? && !conn.features.sharded_transactions_enabled? + raise Mongo::Error::TransactionsNotSupported, "sharded transactions require server version >= 4.2" + end + end + end end end diff --git a/lib/mongo/session/server_session.rb b/lib/mongo/session/server_session.rb index 6d0410903a..6f7283c79e 100644 --- a/lib/mongo/session/server_session.rb +++ b/lib/mongo/session/server_session.rb @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +require 'mongo/session/server_session/dirtyable' + module Mongo class Session @@ -25,6 +27,7 @@ class Session # # @since 2.5.0 class ServerSession + include Dirtyable # Regex for removing dashes from the UUID string. # diff --git a/lib/mongo/session/server_session/dirtyable.rb b/lib/mongo/session/server_session/dirtyable.rb new file mode 100644 index 0000000000..0df262c85b --- /dev/null +++ b/lib/mongo/session/server_session/dirtyable.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +# Copyright (C) 2024 MongoDB Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module Mongo + class Session + class ServerSession + # Functionality for manipulating and querying a session's + # "dirty" state, per the last paragraph at + # https://github.com/mongodb/specifications/blob/master/source/sessions/driver-sessions.rst#server-session-pool + # + # If a driver has a server session pool and a network error is + # encountered when executing any command with a ClientSession, the + # driver MUST mark the associated ServerSession as dirty. Dirty server + # sessions are discarded when returned to the server session pool. It is + # valid for a dirty session to be used for subsequent commands (e.g. 
an + # implicit retry attempt, a later command in a bulk write, or a later + # operation on an explicit session), however, it MUST remain dirty for + # the remainder of its lifetime regardless if later commands succeed. + # + # @api private + module Dirtyable + # Query whether the server session has been marked dirty or not. + # + # @return [ true | false ] the server session's dirty state + def dirty? + @dirty + end + + # Mark the server session as dirty (the default) or clean. + # + # @param [ true | false ] mark whether the mark the server session + # dirty or not. + def dirty!(mark = true) + @dirty = mark + end + end + end + end +end diff --git a/lib/mongo/session/session_pool.rb b/lib/mongo/session/session_pool.rb index e89d634c31..4fb832a813 100644 --- a/lib/mongo/session/session_pool.rb +++ b/lib/mongo/session/session_pool.rb @@ -25,21 +25,6 @@ class Session # # @since 2.5.0 class SessionPool - - # Create a SessionPool. - # - # @example - # SessionPool.create(cluster) - # - # @param [ Mongo::Cluster ] cluster The cluster that will be associated with this - # session pool. - # - # @since 2.5.0 - def self.create(cluster) - pool = new(cluster) - cluster.instance_variable_set(:@session_pool, pool) - end - # Initialize a SessionPool. # # @example @@ -105,9 +90,7 @@ def checkin(session) @mutex.synchronize do prune! - unless about_to_expire?(session) - @queue.unshift(session) - end + @queue.unshift(session) if return_to_queue?(session) end end @@ -136,6 +119,17 @@ def end_sessions private + # Query whether the given session is okay to return to the + # pool's queue. + # + # @param [ Session::ServerSession ] session the session to query + # + # @return [ true | false ] whether to return the session to the + # queue. + def return_to_queue?(session) + !session.dirty? && !about_to_expire?(session) + end + def about_to_expire?(session) if session.nil? raise ArgumentError, 'session cannot be nil' diff --git a/lib/mongo/uri.rb b/lib/mongo/uri.rb index 899ec94d0a..0b891a858c 100644 --- a/lib/mongo/uri.rb +++ b/lib/mongo/uri.rb @@ -377,10 +377,6 @@ def parse!(remaining) raise_invalid_error!("Options contain an unescaped question mark (?), or the database name contains a question mark and was not escaped") end - if options && !hosts_and_db.index('/') - raise_invalid_error!("MongoDB URI must have a slash (/) after the hosts if options are given") - end - hosts, db = hosts_and_db.split('/', 2) if db && db.index('/') raise_invalid_error!("Database name contains an unescaped slash (/): #{db}") diff --git a/profile/benchmarking.rb b/profile/benchmarking.rb index a054b3906b..08f6b20423 100644 --- a/profile/benchmarking.rb +++ b/profile/benchmarking.rb @@ -1,5 +1,4 @@ # frozen_string_literal: true -# rubocop:todo all # Copyright (C) 2015-2020 MongoDB Inc. # @@ -17,113 +16,65 @@ require 'benchmark' require_relative 'benchmarking/helper' -require_relative 'benchmarking/micro' +require_relative 'benchmarking/bson' require_relative 'benchmarking/single_doc' require_relative 'benchmarking/multi_doc' require_relative 'benchmarking/parallel' module Mongo - # Module with all functionality for running driver benchmark tests. # # @since 2.2.3 module Benchmarking - extend self - # The current path. - # - # @return [ String ] The current path. - # - # @since 2.2.3 - CURRENT_PATH = File.expand_path(File.dirname(__FILE__)).freeze - - # The path to data files used in Benchmarking tests. - # # @return [ String ] Path to Benchmarking test files. 
- # - # @since 2.2.3 - DATA_PATH = [CURRENT_PATH, 'benchmarking', 'data'].join('/').freeze + DATA_PATH = [ __dir__, 'benchmarking', 'data' ].join('/').freeze - # The file containing the single tweet document. - # - # @return [ String ] The file containing the tweet document. - # - # @since 2.2.3 - TWEET_DOCUMENT_FILE = [DATA_PATH, 'TWEET.json'].join('/').freeze + # @return [ String ] The file containing the single tweet document. + TWEET_DOCUMENT_FILE = [ DATA_PATH, 'TWEET.json' ].join('/').freeze - # The file containing the single small document. - # - # @return [ String ] The file containing the small document. - # - # @since 2.2.3 - SMALL_DOCUMENT_FILE = [DATA_PATH, 'SMALL_DOC.json'].join('/').freeze + # @return [ String ] The file containing the single small document. + SMALL_DOCUMENT_FILE = [ DATA_PATH, 'SMALL_DOC.json' ].join('/').freeze - # The file containing the single large document. - # - # @return [ String ] The file containing the large document. - # - # @since 2.2.3 - LARGE_DOCUMENT_FILE = [DATA_PATH, 'LARGE_DOC.json'].join('/').freeze + # @return [ String ] The file containing the single large document. + LARGE_DOCUMENT_FILE = [ DATA_PATH, 'LARGE_DOC.json' ].join('/').freeze - # The file to upload when testing GridFS. - # - # @return [ String ] The file containing the GridFS test data. - # - # @since 2.2.3 - GRIDFS_FILE = [DATA_PATH, 'GRIDFS_LARGE'].join('/').freeze + # @return [ String ] The file to upload when testing GridFS. + GRIDFS_FILE = [ DATA_PATH, 'GRIDFS_LARGE' ].join('/').freeze - # The file path and base name for the LDJSON files. - # # @return [ String ] The file path and base name for the LDJSON files. - # - # @since 2.2.3 - LDJSON_FILE_BASE = [DATA_PATH, 'LDJSON_MULTI', 'LDJSON'].join('/').freeze + LDJSON_FILE_BASE = [ DATA_PATH, 'LDJSON_MULTI', 'LDJSON' ].join('/').freeze - # The file path and base name for the outputted LDJSON files. - # - # @return [ String ] The file path and base name for the outputted LDJSON files. - # - # @since 2.2.3 - LDJSON_FILE_OUTPUT_BASE = [DATA_PATH, 'LDJSON_MULTI', 'output', 'LDJSON'].join('/').freeze + # @return [ String ] The file path and base name for the emitted LDJSON files. + LDJSON_FILE_OUTPUT_BASE = [ DATA_PATH, 'LDJSON_MULTI', 'output', 'LDJSON' ].join('/').freeze - # The file path and base name for the GRIDFS files to upload. - # # @return [ String ] The file path and base name for the GRIDFS files to upload. - # - # @since 2.2.3 - GRIDFS_MULTI_BASE = [DATA_PATH, 'GRIDFS_MULTI', 'file'].join('/').freeze + GRIDFS_MULTI_BASE = [ DATA_PATH, 'GRIDFS_MULTI', 'file' ].join('/').freeze - # The file path and base name for the outputted GRIDFS downloaded files. - # - # @return [ String ] The file path and base name for the outputted GRIDFS downloaded files. - # - # @since 2.2.3 - GRIDFS_MULTI_OUTPUT_BASE = [DATA_PATH, 'GRIDFS_MULTI', 'output', 'file-output'].join('/').freeze + # @return [ String ] The file path and base name for the emitted GRIDFS downloaded files. + GRIDFS_MULTI_OUTPUT_BASE = [ DATA_PATH, 'GRIDFS_MULTI', 'output', 'file-output' ].join('/').freeze - # The default number of test repetitions. - # # @return [ Integer ] The number of test repetitions. - # - # @since 2.2.3 - TEST_REPETITIONS = 100.freeze + TEST_REPETITIONS = 100 - # The number of default warmup repetitions of the test to do before - # recording times. - # - # @return [ Integer ] The default number of warmup repetitions. + # Convenience helper for loading the single tweet document. 
# - # @since 2.2.3 - WARMUP_REPETITIONS = 10.freeze - + # @return [ Hash ] a single parsed JSON document def tweet_document Benchmarking.load_file(TWEET_DOCUMENT_FILE).first end + # Convenience helper for loading the single small document. + # + # @return [ Hash ] a single parsed JSON document def small_document Benchmarking.load_file(SMALL_DOCUMENT_FILE).first end + # Convenience helper for loading the single large document. + # + # @return [ Hash ] a single parsed JSON document def large_document Benchmarking.load_file(LARGE_DOCUMENT_FILE).first end diff --git a/profile/benchmarking/bson.rb b/profile/benchmarking/bson.rb new file mode 100644 index 0000000000..415e58114e --- /dev/null +++ b/profile/benchmarking/bson.rb @@ -0,0 +1,151 @@ +# frozen_string_literal: true + +# Copyright (C) 2015-2020 MongoDB Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +require_relative 'percentiles' +require_relative 'summary' + +module Mongo + module Benchmarking + # These tests focus on BSON encoding and decoding; they are client-side only and + # do not involve any transmission of data to or from the server. + module BSON + extend self + + # Runs all of the benchmarks specified by the given mapping. + # + # @example Run a collection of benchmarks. + # Benchmarking::BSON.run_all( + # flat: %i[ encode decode ], + # deep: %i[ encode decode ], + # full: %i[ encode decode ] + # ) + # + # @return [ Hash ] a hash of the results for each benchmark + def run_all(map) + {}.tap do |results| + map.each do |type, actions| + results[type] = {} + + actions.each do |action| + results[type][action] = run(type, action) + end + end + end + end + + # As defined by the spec, the score for a given benchmark is the + # size of the task (in MB) divided by the median wall clock time. + # + # @param [ Symbol ] type the type of the task + # @param [ Mongo::Benchmarking::Percentiles ] percentiles the Percentiles + # object to query for the median time. + # @param [ Numeric ] scale the number of times the operation is performed + # per iteration, used to scale the task size. + # + # @return [ Numeric ] the score for the given task. + def score_for(type, percentiles, scale: 10_000) + task_size(type, scale) / percentiles[50] + end + + # Run a BSON benchmark test. + # + # @example Run a test. + # Benchmarking::BSON.run(:flat) + # + # @param [ Symbol ] type The type of test to run. + # @param [ :encode | :decode ] action The action to perform. + # + # @return [ Hash<:timings,:percentiles,:score> ] The test results for + # the requested benchmark. + def run(type, action) + timings = send(action, file_for(type)) + percentiles = Percentiles.new(timings) + score = score_for(type, percentiles) + + Summary.new(timings, percentiles, score) + end + + # Run an encoding BSON benchmark test. + # + # @example Run an encoding test. + # Benchmarking::BSON.encode(file_name) + # + # @param [ String ] file_name The name of the file with data for the test. + # @param [ Integer ] repetitions The number of test repetitions. 
+ # + # @return [ Array ] The list of the results for each iteration + def encode(file_name) + data = Benchmarking.load_file(file_name) + document = ::BSON::Document.new(data.first) + + Benchmarking.benchmark do + 10_000.times { document.to_bson } + end + end + + # Run a decoding BSON benchmark test. + # + # @example Run an decoding test. + # Benchmarking::BSON.decode(file_name) + # + # @param [ String ] file_name The name of the file with data for the test. + # @param [ Integer ] repetitions The number of test repetitions. + # + # @return [ Array ] The list of the results for each iteration + def decode(file_name) + data = Benchmarking.load_file(file_name) + buffer = ::BSON::Document.new(data.first).to_bson + + Benchmarking.benchmark do + 10_000.times do + ::BSON::Document.from_bson(buffer) + buffer.rewind! + end + end + end + + private + + # The path to the source file for the given task type. + # + # @param [ Symbol ] type the task type + # + # @return [ String ] the path to the source file. + def file_for(type) + File.join(Benchmarking::DATA_PATH, "#{type}_bson.json") + end + + # As defined by the spec, the size of a BSON task is the size of the + # file, multipled by the scale (the number of times the file is processed + # per iteration), divided by a million. + # + # "the dataset size for a task is the size of the single-document source + # file...times 10,000 operations" + # + # "Each task will have defined for it an associated size in + # megabytes (MB)" + # + # @param [ Symbol ] type the type of the task + # @param [ Numeric ] scale the number of times the operation is performed + # per iteration (e.g. 10,000) + # + # @return [ Numeric ] the score for the task, reported in MB + def task_size(type, scale) + File.size(file_for(type)) * scale / 1_000_000.0 + end + end + end +end diff --git a/profile/benchmarking/helper.rb b/profile/benchmarking/helper.rb index 11d9f670ab..70d4e34312 100644 --- a/profile/benchmarking/helper.rb +++ b/profile/benchmarking/helper.rb @@ -1,11 +1,8 @@ # frozen_string_literal: true -# rubocop:todo all module Mongo - # Helper functions used by benchmarking tasks module Benchmarking - extend self # Load a json file and represent each document as a Hash. @@ -19,7 +16,7 @@ module Benchmarking # # @since 2.2.3 def load_file(file_name) - File.open(file_name, "r") do |f| + File.open(file_name, 'r') do |f| f.each_line.collect do |line| parse_json(line) end @@ -39,8 +36,74 @@ def load_file(file_name) # @since 2.2.3 def parse_json(document) JSON.parse(document).tap do |doc| - if doc['_id'] && doc['_id']['$oid'] - doc['_id'] = BSON::ObjectId.from_string(doc['_id']['$oid']) + doc['_id'] = ::BSON::ObjectId.from_string(doc['_id']['$oid']) if doc['_id'] && doc['_id']['$oid'] + end + end + + # The spec requires that most benchmarks use a variable number of + # iterations, defined as follows: + # + # * iterations should loop for at least 1 minute cumulative execution + # time + # * iterations should stop after 100 iterations or 5 minutes cumulative + # execution time, whichever is shorter + # + # This method will yield once for each iteration. + # + # @param [ Integer ] max_iterations the maximum number of iterations to + # attempt (default: 100) + # @param [ Integer ] min_time the minimum number of seconds to spend + # iterating + # @param [ Integer ] max_time the maximum number of seconds to spend + # iterating. 
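+    # @param [ Proc | nil ] progress an optional callback invoked with
+    #   :start before the first iteration, :step after each iteration, and
+    #   :end when the run finishes. Defaults to the callback chosen via the
+    #   PROGRESS environment variable; pass nil to disable progress output.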
+ # + # @return [ Array ] the timings for each iteration + def benchmark(max_iterations: Benchmarking::TEST_REPETITIONS, + min_time: 60, + max_time: 5 * 60, + progress: default_progress_callback, + &block) + progress ||= ->(state) {} # fallback to a no-op callback + progress[:start] + + [].tap do |results| + iteration_count = 0 + cumulative_time = 0 + + loop do + timing = without_gc { Benchmark.realtime(&block) } + progress[:step] + + iteration_count += 1 + cumulative_time += timing + results.push timing + + # always stop after the maximum time has elapsed, regardless of + # iteration count. + break if cumulative_time > max_time + + # otherwise, break if the minimum time has elapsed, and the maximum + # number of iterations have been reached. + break if cumulative_time >= min_time && iteration_count >= max_iterations + end + + progress[:end] + end + end + + # Formats and displays a report of the given results. + # + # @param [ Hash ] results the results of a benchmarking run. + # @param [ Integer ] indent how much the report should be indented. + # @param [ Array ] percentiles the percentile values to report + def report(results, indent: 0, percentiles: [ 10, 25, 50, 75, 90, 95, 98, 99 ]) + results.each do |key, value| + puts format('%*s%s:', indent, '', key) + + if value.respond_to?(:summary) + puts value.summary(indent + 2, percentiles) + else + report(value, indent: indent + 2, percentiles: percentiles) end end end @@ -50,13 +113,52 @@ def parse_json(document) # @example Get the median. # Benchmarking.median(values) # - # @param [ Array ] The values to get the median of. + # @param [ Array ] values The values to get the median of. # # @return [ Numeric ] The median of the list. - # - # @since 2.2.3 def median(values) - values.sort![values.size/2-1] + i = (values.size / 2) - 1 + values.sort[i] + end + + # Runs a given block with GC disabled. + def without_gc + GC.disable + yield + ensure + GC.enable + end + + private + + # Returns the proc object (or nil) corresponding to the "PROGRESS" + # environment variable. + # + # @return [ Proc | nil ] the callback proc to use (or nil if none should + # be used) + def default_progress_callback + case ENV['PROGRESS'] + when '0', 'false', 'none' + nil + when nil, '1', 'true', 'minimal' + method(:minimal_progress_callback).to_proc + else + raise ArgumentError, "unsupported progress callback #{ENV['PROGRESS'].inspect}" + end + end + + # A minimal progress callback implementation, printing '|' when a benchmark + # starts and '.' for each iteration. + # + # @param [ :start | :step | :end ] state the current progress state + def minimal_progress_callback(state) + case state + when :start then print '|' + when :step then print '.' + when :end then puts + end + + $stdout.flush end end end diff --git a/profile/benchmarking/micro.rb b/profile/benchmarking/micro.rb deleted file mode 100644 index 2f560bb310..0000000000 --- a/profile/benchmarking/micro.rb +++ /dev/null @@ -1,107 +0,0 @@ -# frozen_string_literal: true -# rubocop:todo all - -# Copyright (C) 2015-2020 MongoDB Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -module Mongo - module Benchmarking - - # These tests focus on BSON encoding and decoding; they are client-side only and - # do not involve any transmission of data to or from the server. - # - # @since 2.2.3 - module Micro - - extend self - - # Run a micro benchmark test. - # - # @example Run a test. - # Benchmarking::Micro.run(:flat) - # - # @param [ Symbol ] type The type of test to run. - # @param [ Integer ] repetitions The number of test repetitions. - # - # @return [ Numeric ] The test results. - # - # @since 2.2.3 - def run(type, action, repetitions = Benchmarking::TEST_REPETITIONS) - file_name = type.to_s << "_bson.json" - GC.disable - file_path = [Benchmarking::DATA_PATH, file_name].join('/') - puts "#{action} : #{send(action, file_path, repetitions)}" - end - - # Run an encoding micro benchmark test. - # - # @example Run an encoding test. - # Benchmarking::Micro.encode(file_name) - # - # @param [ String ] file_name The name of the file with data for the test. - # @param [ Integer ] repetitions The number of test repetitions. - # - # @return [ Numeric ] The median of the results. - # - # @since 2.2.3 - def encode(file_name, repetitions) - data = Benchmarking.load_file(file_name) - document = BSON::Document.new(data.first) - - # WARMUP_REPETITIONS.times do - # doc.to_bson - # end - - results = repetitions.times.collect do - Benchmark.realtime do - 10_000.times do - document.to_bson - end - end - end - Benchmarking.median(results) - end - - # Run a decoding micro benchmark test. - # - # @example Run an decoding test. - # Benchmarking::Micro.decode(file_name) - # - # @param [ String ] file_name The name of the file with data for the test. - # @param [ Integer ] repetitions The number of test repetitions. - # - # @return [ Numeric ] The median of the results. - # - # @since 2.2.3 - def decode(file_name, repetitions) - data = Benchmarking.load_file(file_name) - buffer = BSON::Document.new(data.first).to_bson - - # WARMUP_REPETITIONS.times do - # BSON::Document.from_bson(buffers.shift) - # end - - results = repetitions.times.collect do - Benchmark.realtime do - 10_000.times do - BSON::Document.from_bson(buffer) - buffer.rewind! - end - end - end - Benchmarking.median(results) - end - end - end -end diff --git a/profile/benchmarking/percentiles.rb b/profile/benchmarking/percentiles.rb new file mode 100644 index 0000000000..aeebe9d1d9 --- /dev/null +++ b/profile/benchmarking/percentiles.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Mongo + module Benchmarking + # A utility class for returning the list item at a given percentile + # value. + class Percentiles + # @return [ Array ] the sorted list of numbers to consider + attr_reader :list + + # Create a new Percentiles object that encapsulates the given list of + # numbers. + # + # @param [ Array ] list the list of numbers to considier + def initialize(list) + @list = list.sort + end + + # Finds and returns the element in the list that represents the given + # percentile value. + # + # @param [ Number ] percentile a number in the range [1,100] + # + # @return [ Number ] the element of the list for the given percentile. 
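+      #
+      # @example Fetch the median of a five-element list (illustrative):
+      #   Mongo::Benchmarking::Percentiles.new([ 5, 1, 3, 2, 4 ])[50] # => 3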
+ def [](percentile) + i = (list.size * percentile / 100.0).ceil - 1 + list[i] + end + end + end +end diff --git a/profile/benchmarking/rake/bson.rake b/profile/benchmarking/rake/bson.rake new file mode 100644 index 0000000000..4d5bdb1c04 --- /dev/null +++ b/profile/benchmarking/rake/bson.rake @@ -0,0 +1,122 @@ +# frozen_string_literal: true + +# rubocop:disable Layout/FirstHashElementIndentation + +desc 'Run the full BSON benchmarking suite' +task :bson do + puts 'BSON BENCHMARK SUITE' + Mongo::Benchmarking.report({ + bson: Mongo::Benchmarking::BSON.run_all( + flat: %i[ encode decode ], + deep: %i[ encode decode ], + full: %i[ encode decode ] + ) + }) +end + +namespace :bson do # rubocop:disable Metrics/BlockLength + # a convenience task for running all of the bson benchmark tasks; this is + # only useful for testing that they all work. + task test: %w[ + bson + bson:flat bson:flat:encode bson:flat:decode + bson:deep bson:deep:encode bson:deep:decode + bson:full bson:full:encode bson:full:decode + ] + + desc 'Learn how to run the BSON benchmarks' + task :help do + puts <<~HELP + The BSON micro benchmarks require a set of data files that are stored in + the specifications repository, here: + + https://github.com/mongodb/specifications/tree/master/source/benchmarking/data + + Download the `extended_bson.tgz` file and extract its contents. It should + contain a single folder (`extended_bson`) with several files in it. Move + those files to: + + #{Mongo::Benchmarking::DATA_PATH} + + Once there, you may run any of the BSON benchmarking tasks: + + $ rake benchmark:bson:flat:encode + + Tasks may be run in aggregate, as well, by specifying the namespace + directly: + + $ rake benchmark:bson:flat # runs all flat BSON benchmarks + $ rake benchmark:bson:deep # runs all deep BSON benchmarks + $ rake benchmark:bson:full # runs all full BSON benchmarks + # rake benchmark:bson # runs all BSON benchmarks + HELP + end + + desc 'Run the `flat` BSON benchmarking suite' + task :flat do + puts 'BSON BENCHMARK :: FLAT' + Mongo::Benchmarking.report({ + bson: Mongo::Benchmarking::BSON.run_all(flat: %i[ encode decode ]) + }) + end + + namespace :flat do + desc 'Run the `flat` encoding BSON benchmark' + task :encode do + puts 'BSON BENCHMARK :: FLAT :: ENCODE' + Mongo::Benchmarking.report({ bson: { flat: { encode: Mongo::Benchmarking::BSON.run(:flat, :encode) } } }) + end + + desc 'Run the `flat` decoding BSON benchmark' + task :decode do + puts 'BSON BENCHMARK :: FLAT :: DECODE' + Mongo::Benchmarking.report({ bson: { flat: { decode: Mongo::Benchmarking::BSON.run(:flat, :decode) } } }) + end + end + + desc 'Run the `deep` BSON benchmarking suite' + task :deep do + puts 'BSON BENCHMARK :: DEEP' + Mongo::Benchmarking.report({ + bson: Mongo::Benchmarking::BSON.run_all(deep: %i[ encode decode ]) + }) + end + + namespace :deep do + desc 'Run the `deep` encoding BSON benchmark' + task :encode do + puts 'BSON BENCHMARK :: DEEP :: ENCODE' + Mongo::Benchmarking.report({ bson: { deep: { encode: Mongo::Benchmarking::BSON.run(:deep, :encode) } } }) + end + + desc 'Run the `deep` decoding BSON benchmark' + task :decode do + puts 'BSON BENCHMARK :: DEEP :: DECODE' + Mongo::Benchmarking.report({ bson: { deep: { decode: Mongo::Benchmarking::BSON.run(:deep, :decode) } } }) + end + end + + desc 'Run the `full` BSON benchmarking suite' + task :full do + puts 'BSON BENCHMARK :: FULL' + Mongo::Benchmarking.report({ + bson: Mongo::Benchmarking::BSON.run_all({ full: %i[ encode decode ] }) + }) + end + + namespace :full do + desc 'Run 
the `full` encoding BSON benchmark' + task :encode do + puts 'BSON BENCHMARK :: FULL :: ENCODE' + Mongo::Benchmarking.report({ bson: { full: { encode: Mongo::Benchmarking::BSON.run(:full, :encode) } } }) + end + + desc 'Run the `full` decoding BSON benchmark' + task :decode do + puts 'BSON BENCHMARK :: FULL :: DECODE' + Mongo::Benchmarking.report({ bson: { full: { decode: Mongo::Benchmarking::BSON.run(:full, :decode) } } }) + end + end +end + +# rubocop:enable Layout/FirstHashElementIndentation diff --git a/profile/benchmarking/rake/multi_doc.rake b/profile/benchmarking/rake/multi_doc.rake new file mode 100644 index 0000000000..86c190ef1f --- /dev/null +++ b/profile/benchmarking/rake/multi_doc.rake @@ -0,0 +1,34 @@ +# frozen_string_literal: true +# rubocop:todo all + +namespace :multi_doc do + # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called TWEET.json. + task :find_many do + puts 'MULTI DOCUMENT BENCHMARK :: FIND MANY' + Mongo::Benchmarking::MultiDoc.run(:find_many) + end + + # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called SMALL_DOC.json. + task :bulk_insert_small do + puts 'MULTI DOCUMENT BENCHMARK :: BULK INSERT SMALL' + Mongo::Benchmarking::MultiDoc.run(:bulk_insert_small) + end + + # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called LARGE_DOC.json. + task :bulk_insert_large do + puts 'MULTI DOCUMENT BENCHMARK :: BULK INSERT LARGE' + Mongo::Benchmarking::MultiDoc.run(:bulk_insert_large) + end + + # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called GRIDFS_LARGE. + task :gridfs_upload do + puts 'MULTI DOCUMENT BENCHMARK :: GRIDFS UPLOAD' + Mongo::Benchmarking::MultiDoc.run(:gridfs_upload) + end + + # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called GRIDFS_LARGE. + task :gridfs_download do + puts 'MULTI DOCUMENT BENCHMARK :: GRIDFS DOWNLOAD' + Mongo::Benchmarking::MultiDoc.run(:gridfs_download) + end +end diff --git a/profile/benchmarking/rake/parallel.rake b/profile/benchmarking/rake/parallel.rake new file mode 100644 index 0000000000..98752e231e --- /dev/null +++ b/profile/benchmarking/rake/parallel.rake @@ -0,0 +1,36 @@ +# frozen_string_literal: true +# rubocop:todo all + +namespace :parallel do + # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called LDJSON_MULTI, + # with the files used in this task. + task :import do + puts 'PARALLEL ETL BENCHMARK :: IMPORT' + Mongo::Benchmarking::Parallel.run(:import) + end + + # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called LDJSON_MULTI, + # with the files used in this task. + # Requirement: Another directory in '#{Mongo::Benchmarking::DATA_PATH}/LDJSON_MULTI' + # called 'output'. + task :export do + puts 'PARALLEL ETL BENCHMARK :: EXPORT' + Mongo::Benchmarking::Parallel.run(:export) + end + + # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called GRIDFS_MULTI, + # with the files used in this task. + task :gridfs_upload do + puts 'PARALLEL ETL BENCHMARK :: GRIDFS UPLOAD' + Mongo::Benchmarking::Parallel.run(:gridfs_upload) + end + + # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called GRIDFS_MULTI, + # with the files used in this task. + # Requirement: Another directory in '#{Mongo::Benchmarking::DATA_PATH}/GRIDFS_MULTI' + # called 'output'. 
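+  # The output directory can be created with, for example (illustrative):
+  #   require 'fileutils'
+  #   FileUtils.mkdir_p(File.join(Mongo::Benchmarking::DATA_PATH, 'GRIDFS_MULTI', 'output'))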
+ task :gridfs_download do + puts 'PARALLEL ETL BENCHMARK :: GRIDFS DOWNLOAD' + Mongo::Benchmarking::Parallel.run(:gridfs_download) + end +end diff --git a/profile/benchmarking/rake/single_doc.rake b/profile/benchmarking/rake/single_doc.rake new file mode 100644 index 0000000000..803e28d593 --- /dev/null +++ b/profile/benchmarking/rake/single_doc.rake @@ -0,0 +1,27 @@ +# frozen_string_literal: true +# rubocop:todo all + +namespace :single_doc do + task :command do + puts 'SINGLE DOC BENCHMARK :: COMMAND' + Mongo::Benchmarking::SingleDoc.run(:command) + end + + # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called TWEET.json. + task :find_one do + puts 'SINGLE DOC BENCHMARK :: FIND ONE BY ID' + Mongo::Benchmarking::SingleDoc.run(:find_one) + end + + # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called SMALL_DOC.json. + task :insert_one_small do + puts 'SINGLE DOC BENCHMARK :: INSERT ONE SMALL DOCUMENT' + Mongo::Benchmarking::SingleDoc.run(:insert_one_small) + end + + # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called LARGE_DOC.json. + task :insert_one_large do + puts 'SINGLE DOC BENCHMARK :: INSERT ONE LARGE DOCUMENT' + Mongo::Benchmarking::SingleDoc.run(:insert_one_large) + end +end diff --git a/profile/benchmarking/rake/tasks.rake b/profile/benchmarking/rake/tasks.rake new file mode 100644 index 0000000000..7feae8d2ab --- /dev/null +++ b/profile/benchmarking/rake/tasks.rake @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require_relative '../../benchmarking' + +# Some require data files, available from the drivers team. +# See the comments above each task for details. +namespace :benchmark do + %w[ bson single_doc multi_doc parallel ].each do |group| + load File.join(__dir__, "#{group}.rake") + end +end diff --git a/profile/benchmarking/summary.rb b/profile/benchmarking/summary.rb new file mode 100644 index 0000000000..93fddf5435 --- /dev/null +++ b/profile/benchmarking/summary.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +module Mongo + module Benchmarking + # A utility class for encapsulating the summary information for a + # benchmark, including behaviors for reporting on the summary. + class Summary + # @return [ Array ] the timings of each iteration in the + # benchmark + attr_reader :timings + + # @return [ Percentiles ] the percentiles object for querying the + # timing at a given percentile value. + attr_reader :percentiles + + # @return [ Numeric ] the composite score for the benchmark + attr_reader :score + + # Construct a new Summary object with the given timings, percentiles, + # and score. + # + # @param [ Array ] timings the timings of each iteration in the + # benchmark + # @param [ Percentiles ] percentiles the percentiles object for querying + # the timing at a given percentile value + # @param [ Numeric ] score the composite score for the benchmark + def initialize(timings, percentiles, score) + @timings = timings + @percentiles = percentiles + @score = score + end + + # @return [ Numeric ] the median timing for the benchmark. + def median + percentiles[50] + end + + # Formats and displays the results of a single benchmark run. 
+ # + # @param [ Integer ] indent how much the report should be indented + # @param [ Array ] points the percentile points to report + # + # @return [ String ] a YAML-formatted summary + def summary(indent, points) + [].tap do |lines| + lines << format('%*sscore: %g', indent, '', score) + lines << format('%*smedian: %g', indent, '', median) + lines << format('%*spercentiles:', indent, '') + points.each do |pct| + lines << format('%*s%g: %g', indent + 2, '', pct, percentiles[pct]) + end + end.join("\n") + end + end + end +end diff --git a/spec/faas/ruby-sam-app/.gitignore b/spec/faas/ruby-sam-app/.gitignore new file mode 100644 index 0000000000..4bccb52c85 --- /dev/null +++ b/spec/faas/ruby-sam-app/.gitignore @@ -0,0 +1,345 @@ + +# Created by https://www.toptal.com/developers/gitignore/api/osx,linux,python,windows,pycharm,visualstudiocode,sam +# Edit at https://www.toptal.com/developers/gitignore?templates=osx,linux,python,windows,pycharm,visualstudiocode,sam + +### Linux ### +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### OSX ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### PyCharm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint +.idea/**/sonarlint/ + +# SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml +.idea/**/markdown-navigator/ + +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +pytestdebug.log + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +doc/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +#poetry.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +# .env +.env/ +.venv/ +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +pythonenv* + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# operating system-related files +# file properties cache/storage on macOS +*.DS_Store +# thumbnail cache on Windows +Thumbs.db + +# profiling data +.prof + + +### SAM ### +# Ignore build directories for the AWS Serverless Application Model (SAM) +# Info: https://aws.amazon.com/serverless/sam/ +# Docs: https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-reference.html + +**/.aws-sam + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +*.code-workspace + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +### Windows ### +# Windows thumbnail cache files +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# End of https://www.toptal.com/developers/gitignore/api/osx,linux,python,windows,pycharm,visualstudiocode,sam diff --git a/spec/faas/ruby-sam-app/Gemfile b/spec/faas/ruby-sam-app/Gemfile new file mode 100644 index 0000000000..64004bea4c --- /dev/null +++ b/spec/faas/ruby-sam-app/Gemfile @@ -0,0 +1,9 @@ +source "https://rubygems.org" + +gem "httparty" +gem "mongo" + +group :test do + gem "test-unit" + gem "mocha" +end diff --git a/spec/faas/ruby-sam-app/mongodb/Gemfile b/spec/faas/ruby-sam-app/mongodb/Gemfile new file mode 100644 index 0000000000..8eee1f8e03 --- /dev/null +++ b/spec/faas/ruby-sam-app/mongodb/Gemfile @@ -0,0 +1,4 @@ +source "https://rubygems.org" + +gem "httparty" +gem "mongo" diff --git a/spec/faas/ruby-sam-app/mongodb/app.rb b/spec/faas/ruby-sam-app/mongodb/app.rb new file mode 100644 index 0000000000..0eb7e9e46a --- /dev/null +++ b/spec/faas/ruby-sam-app/mongodb/app.rb @@ -0,0 +1,149 @@ +# frozen_string_literal: true + +require 'mongo' +require 'json' + +class StatsAggregator + + def initialize + @open_connections = 0 + @heartbeats_count = 0 + @total_heartbeat_time = 0 + @commands_count = 0 + @total_command_time = 0 + end + + def add_command(duration) + @commands_count += 1 + @total_command_time += duration + end + + def add_heartbeat(duration) + @heartbeats_count += 1 + @total_heartbeat_time += duration + end + + def add_connection + @open_connections += 1 + end + + def remove_connection + @open_connections -= 1 + end + + def average_heartbeat_time + if @heartbeats_count == 0 + 0 + else + @total_heartbeat_time / @heartbeats_count + end + end + + def average_command_time + if @commands_count == 0 + 0 + else + @total_command_time / @commands_count + end + end + + def reset + @open_connections = 0 + @heartbeats_count = 0 + @total_heartbeat_time = 0 + @commands_count = 0 + @total_command_time = 0 + end + + def result + { + average_heartbeat_time: average_heartbeat_time, + average_command_time: average_command_time, + heartbeats_count: @heartbeats_count, + open_connections: @open_connections, + } + end +end + +class 
CommandMonitor + + def initialize(stats_aggregator) + @stats_aggregator = stats_aggregator + end + + def started(event); end + + def failed(event) + @stats_aggregator.add_command(event.duration) + end + + def succeeded(event) + @stats_aggregator.add_command(event.duration) + end +end + +class HeartbeatMonitor + + def initialize(stats_aggregator) + @stats_aggregator = stats_aggregator + end + + def started(event); end + + def succeeded(event) + @stats_aggregator.add_heartbeat(event.duration) + end + + def failed(event) + @stats_aggregator.add_heartbeat(event.duration) + end +end + +class PoolMonitor + + def initialize(stats_aggregator) + @stats_aggregator = stats_aggregator + end + + def published(event) + case event + when Mongo::Monitoring::Event::Cmap::ConnectionCreated + @stats_aggregator.add_connection + when Mongo::Monitoring::Event::Cmap::ConnectionClosed + @stats_aggregator.remove_connection + end + end +end + +$stats_aggregator = StatsAggregator.new + +command_monitor = CommandMonitor.new($stats_aggregator) +heartbeat_monitor = HeartbeatMonitor.new($stats_aggregator) +pool_monitor = PoolMonitor.new($stats_aggregator) + +sdam_proc = proc do |client| + client.subscribe(Mongo::Monitoring::COMMAND, command_monitor) + client.subscribe(Mongo::Monitoring::SERVER_HEARTBEAT, heartbeat_monitor) + client.subscribe(Mongo::Monitoring::CONNECTION_POOL, pool_monitor) +end + +puts 'Connecting' +$client = Mongo::Client.new(ENV['MONGODB_URI'], sdam_proc: sdam_proc) +# Populate the connection pool +$client.use('lambda_test').database.list_collections +puts 'Connected' + +def lambda_handler(event:, context:) + db = $client.use('lambda_test') + collection = db[:test_collection] + result = collection.insert_one({ name: 'test' }) + collection.delete_one({ _id: result.inserted_id }) + response = $stats_aggregator.result.to_json + $stats_aggregator.reset + puts "Response: #{response}" + + { + statusCode: 200, + body: response + } +end diff --git a/spec/faas/ruby-sam-app/template.yaml b/spec/faas/ruby-sam-app/template.yaml new file mode 100644 index 0000000000..c42df95e3b --- /dev/null +++ b/spec/faas/ruby-sam-app/template.yaml @@ -0,0 +1,48 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Description: > + Sample SAM Template for ruby-sam-app + +# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst +Globals: + Function: + Timeout: 30 + MemorySize: 128 + +Parameters: + MongoDbUri: + Type: String + Description: The MongoDB connection string. 
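+    # Typically supplied at deploy time, e.g. (illustrative):
+    #   sam deploy --parameter-overrides MongoDbUri="$MONGODB_URI"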
+ +Resources: + MongoDBFunction: + Type: AWS::Serverless::Function # More info about Function Resource: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction + Properties: + CodeUri: mongodb/ + Environment: + Variables: + MONGODB_URI: !Ref MongoDbUri + Handler: app.lambda_handler + Runtime: ruby3.2 + Architectures: + - x86_64 + Events: + MongoDB: + Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api + Properties: + Path: /mongodb + Method: get + +Outputs: + # ServerlessRestApi is an implicit API created out of Events key under Serverless::Function + # Find out more about other implicit resources you can reference within SAM + # https://github.com/awslabs/serverless-application-model/blob/master/docs/internals/generated_resources.rst#api + MongoDBApi: + Description: "API Gateway endpoint URL for Prod stage for MongoDB function" + Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/mongodb/" + MongoDBFunction: + Description: "MongoDB Lambda Function ARN" + Value: !GetAtt MongoDBFunction.Arn + MongoDBFunctionIamRole: + Description: "Implicit IAM Role created for MongoDB function" + Value: !GetAtt MongoDBFunctionRole.Arn diff --git a/spec/integration/find_options_spec.rb b/spec/integration/find_options_spec.rb index 519a164b70..e5a822a3ea 100644 --- a/spec/integration/find_options_spec.rb +++ b/spec/integration/find_options_spec.rb @@ -13,20 +13,10 @@ [ SpecConfig.instance.addresses.first ] end - let(:client_options) do - {} - end - - let(:collection_options) do - {} - end - let(:client) do ClientRegistry.instance.new_local_client( seeds, - SpecConfig.instance.test_options - .merge(database: SpecConfig.instance.test_db) - .merge(client_options) + SpecConfig.instance.test_options.merge(client_options) ).tap do |client| client.subscribe(Mongo::Monitoring::COMMAND, subscriber) end @@ -40,11 +30,8 @@ subscriber.started_events.find { |cmd| cmd.command_name == 'find' } end - let(:should_create_collection) { true } - before do - client['find_options'].drop - collection.create if should_create_collection + ClientRegistry.instance.global_client('authorized')['find_options'].drop collection.insert_many([ { a: 1 }, { a: 2 }, { a: 3 } ]) end @@ -84,8 +71,6 @@ { 'locale' => 'de_AT' } end - let(:should_create_collection) { false } - it 'uses the collation defined on the collection' do collection.find({}, collation: collation).to_a expect(find_command.command['collation']).to eq(collation) @@ -202,26 +187,4 @@ end end end - - describe 'cursor type' do - let(:collection_options) do - { capped: true, size: 1000 } - end - - context 'when cursor type is :tailable' do - it 'sets the cursor type to tailable' do - collection.find({}, cursor_type: :tailable).first - expect(find_command.command['tailable']).to be true - expect(find_command.command['awaitData']).to be_falsey - end - end - - context 'when cursor type is :tailable_await' do - it 'sets the cursor type to tailable' do - collection.find({}, cursor_type: :tailable_await).first - expect(find_command.command['tailable']).to be true - expect(find_command.command['awaitData']).to be true - end - end - end end diff --git a/spec/integration/retryable_reads_errors_spec.rb b/spec/integration/retryable_reads_errors_spec.rb index 4d662c4bf5..81402c8f4d 100644 --- a/spec/integration/retryable_reads_errors_spec.rb +++ b/spec/integration/retryable_reads_errors_spec.rb @@ -20,14 +20,14 @@ 
let(:failpoint) do { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: [ "find" ], - errorCode: 91, - blockConnection: true, - blockTimeMS: 1000 - } + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: [ "find" ], + errorCode: 91, + blockConnection: true, + blockTimeMS: 1000 + } } end @@ -107,4 +107,157 @@ }) end end + + context 'Retries in a sharded cluster' do + require_topology :sharded + min_server_version '4.2' + require_no_auth + + let(:subscriber) { Mrss::EventSubscriber.new } + + let(:find_started_events) do + subscriber.started_events.select { |e| e.command_name == "find" } + end + + let(:find_failed_events) do + subscriber.failed_events.select { |e| e.command_name == "find" } + end + + let(:find_succeeded_events) do + subscriber.succeeded_events.select { |e| e.command_name == "find" } + end + + context 'when another mongos is available' do + + let(:first_mongos) do + Mongo::Client.new( + [SpecConfig.instance.addresses.first], + direct_connection: true, + database: 'admin' + ) + end + + let(:second_mongos) do + Mongo::Client.new( + [SpecConfig.instance.addresses.last], + direct_connection: false, + database: 'admin' + ) + end + + let(:client) do + new_local_client( + [ + SpecConfig.instance.addresses.first, + SpecConfig.instance.addresses.last, + ], + SpecConfig.instance.test_options.merge(retry_reads: true) + ) + end + + let(:expected_servers) do + [ + SpecConfig.instance.addresses.first.to_s, + SpecConfig.instance.addresses.last.to_s + ].sort + end + + before do + skip 'This test requires at least two mongos' if SpecConfig.instance.addresses.length < 2 + + first_mongos.database.command( + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: %w(find), + closeConnection: false, + errorCode: 6 + } + ) + + second_mongos.database.command( + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: %w(find), + closeConnection: false, + errorCode: 6 + } + ) + end + + after do + [first_mongos, second_mongos].each do |admin_client| + admin_client.database.command( + configureFailPoint: 'failCommand', + mode: 'off' + ) + admin_client.close + end + client.close + end + + it 'retries on different mongos' do + client.subscribe(Mongo::Monitoring::COMMAND, subscriber) + expect { collection.find.first }.to raise_error(Mongo::Error::OperationFailure) + expect(find_started_events.map { |e| e.address.to_s }.sort).to eq(expected_servers) + expect(find_failed_events.map { |e| e.address.to_s }.sort).to eq(expected_servers) + end + end + + context 'when no other mongos is available' do + let(:mongos) do + Mongo::Client.new( + [SpecConfig.instance.addresses.first], + direct_connection: true, + database: 'admin' + ) + end + + let(:client) do + new_local_client( + [ + SpecConfig.instance.addresses.first + ], + SpecConfig.instance.test_options.merge(retry_reads: true) + ) + end + + before do + mongos.database.command( + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: %w(find), + closeConnection: false, + errorCode: 6 + } + ) + end + + after do + mongos.database.command( + configureFailPoint: 'failCommand', + mode: 'off' + ) + mongos.close + client.close + end + + it 'retries on the same mongos' do + client.subscribe(Mongo::Monitoring::COMMAND, subscriber) + expect { collection.find.first }.not_to raise_error + expect(find_started_events.map { |e| e.address.to_s }.sort).to eq([ + SpecConfig.instance.addresses.first.to_s, + 
SpecConfig.instance.addresses.first.to_s + ]) + expect(find_failed_events.map { |e| e.address.to_s }.sort).to eq([ + SpecConfig.instance.addresses.first.to_s + ]) + expect(find_succeeded_events.map { |e| e.address.to_s }.sort).to eq([ + SpecConfig.instance.addresses.first.to_s + ]) + end + end + end end diff --git a/spec/integration/retryable_writes_errors_spec.rb b/spec/integration/retryable_writes_errors_spec.rb index 2769089b11..25b5cba8aa 100644 --- a/spec/integration/retryable_writes_errors_spec.rb +++ b/spec/integration/retryable_writes_errors_spec.rb @@ -189,4 +189,160 @@ }) end end + + context 'Retries in a sharded cluster' do + require_topology :sharded + min_server_version '4.2' + require_no_auth + + let(:subscriber) { Mrss::EventSubscriber.new } + + let(:insert_started_events) do + subscriber.started_events.select { |e| e.command_name == "insert" } + end + + let(:insert_failed_events) do + subscriber.failed_events.select { |e| e.command_name == "insert" } + end + + let(:insert_succeeded_events) do + subscriber.succeeded_events.select { |e| e.command_name == "insert" } + end + + context 'when another mongos is available' do + + let(:first_mongos) do + Mongo::Client.new( + [SpecConfig.instance.addresses.first], + direct_connection: true, + database: 'admin' + ) + end + + let(:second_mongos) do + Mongo::Client.new( + [SpecConfig.instance.addresses.last], + direct_connection: false, + database: 'admin' + ) + end + + let(:client) do + new_local_client( + [ + SpecConfig.instance.addresses.first, + SpecConfig.instance.addresses.last, + ], + SpecConfig.instance.test_options.merge(retry_writes: true) + ) + end + + let(:expected_servers) do + [ + SpecConfig.instance.addresses.first.to_s, + SpecConfig.instance.addresses.last.to_s + ].sort + end + + before do + skip 'This test requires at least two mongos' if SpecConfig.instance.addresses.length < 2 + + first_mongos.database.command( + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: %w(insert), + closeConnection: false, + errorCode: 6, + errorLabels: ['RetryableWriteError'] + } + ) + + second_mongos.database.command( + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: %w(insert), + closeConnection: false, + errorCode: 6, + errorLabels: ['RetryableWriteError'] + } + ) + end + + after do + [first_mongos, second_mongos].each do |admin_client| + admin_client.database.command( + configureFailPoint: 'failCommand', + mode: 'off' + ) + admin_client.close + end + client.close + end + + it 'retries on different mongos' do + client.subscribe(Mongo::Monitoring::COMMAND, subscriber) + expect { collection.insert_one(x: 1) }.to raise_error(Mongo::Error::OperationFailure) + expect(insert_started_events.map { |e| e.address.to_s }.sort).to eq(expected_servers) + expect(insert_failed_events.map { |e| e.address.to_s }.sort).to eq(expected_servers) + end + end + + context 'when no other mongos is available' do + let(:mongos) do + Mongo::Client.new( + [SpecConfig.instance.addresses.first], + direct_connection: true, + database: 'admin' + ) + end + + let(:client) do + new_local_client( + [ + SpecConfig.instance.addresses.first + ], + SpecConfig.instance.test_options.merge(retry_writes: true) + ) + end + + before do + mongos.database.command( + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: %w(insert), + closeConnection: false, + errorCode: 6, + errorLabels: ['RetryableWriteError'] + } + ) + end + + after do + mongos.database.command( + configureFailPoint: 
'failCommand', + mode: 'off' + ) + mongos.close + client.close + end + + it 'retries on the same mongos' do + client.subscribe(Mongo::Monitoring::COMMAND, subscriber) + expect { collection.insert_one(x: 1) }.not_to raise_error + expect(insert_started_events.map { |e| e.address.to_s }.sort).to eq([ + SpecConfig.instance.addresses.first.to_s, + SpecConfig.instance.addresses.first.to_s + ]) + expect(insert_failed_events.map { |e| e.address.to_s }.sort).to eq([ + SpecConfig.instance.addresses.first.to_s + ]) + expect(insert_succeeded_events.map { |e| e.address.to_s }.sort).to eq([ + SpecConfig.instance.addresses.first.to_s + ]) + end + end + end end diff --git a/spec/mongo/cluster_spec.rb b/spec/mongo/cluster_spec.rb index b93f529a0e..b87733f261 100644 --- a/spec/mongo/cluster_spec.rb +++ b/spec/mongo/cluster_spec.rb @@ -1,6 +1,7 @@ # frozen_string_literal: true require 'spec_helper' +require 'support/recording_logger' # let these existing styles stand, rather than going in for a deep refactoring # of these specs. @@ -84,6 +85,41 @@ ) end end + + context 'when a non-genuine host is detected' do + before { described_class.new(host_names, monitoring, logger: logger, monitoring_io: false) } + + let(:logger) { RecordingLogger.new } + + shared_examples 'an action that logs' do + it 'writes a warning to the log' do + expect(logger.lines).to include(a_string_matching(expected_log_output)) + end + end + + context 'when CosmosDB is detected' do + let(:host_names) { %w[ xyz.cosmos.azure.com ] } + let(:expected_log_output) { %r{https://www.mongodb.com/supportability/cosmosdb} } + + it_behaves_like 'an action that logs' + end + + context 'when DocumentDB is detected' do + let(:expected_log_output) { %r{https://www.mongodb.com/supportability/documentdb} } + + context 'with docdb uri' do + let(:host_names) { [ 'xyz.docdb.amazonaws.com' ] } + + it_behaves_like 'an action that logs' + end + + context 'with docdb-elastic uri' do + let(:host_names) { [ 'xyz.docdb-elastic.amazonaws.com' ] } + + it_behaves_like 'an action that logs' + end + end + end end describe '#==' do diff --git a/spec/mongo/collection/view/aggregation_spec.rb b/spec/mongo/collection/view/aggregation_spec.rb index 7657460697..da363d9992 100644 --- a/spec/mongo/collection/view/aggregation_spec.rb +++ b/spec/mongo/collection/view/aggregation_spec.rb @@ -321,7 +321,12 @@ min_server_fcv '4.2' let(:result) do - aggregation.explain['queryPlanner']['collation']['locale'] + if aggregation.explain.key?('queryPlanner') + aggregation.explain['queryPlanner']['collation']['locale'] + else + # 7.2+ sharded cluster + aggregation.explain['shards'].first.last['queryPlanner']['collation']['locale'] + end end it_behaves_like 'applies the collation' diff --git a/spec/mongo/collection_crud_spec.rb b/spec/mongo/collection_crud_spec.rb index 2f26179ffc..7a5651d1d6 100644 --- a/spec/mongo/collection_crud_spec.rb +++ b/spec/mongo/collection_crud_spec.rb @@ -2491,7 +2491,7 @@ def generate end let(:updated) do - authorized_collection.find.to_a.last + authorized_collection.find.sort(_id: 1).to_a.last end it 'reports that a document was written' do diff --git a/spec/mongo/operation/insert_spec.rb b/spec/mongo/operation/insert_spec.rb index 2c15d0fef6..2a01fa6e17 100644 --- a/spec/mongo/operation/insert_spec.rb +++ b/spec/mongo/operation/insert_spec.rb @@ -177,7 +177,7 @@ end it 'inserts the documents into the collection' do - expect(authorized_collection.find.to_a). to eq(documents) + expect(authorized_collection.find.sort(_id: 1).to_a). 
to eq(documents) end end diff --git a/spec/mongo/retryable/write_worker_spec.rb b/spec/mongo/retryable/write_worker_spec.rb new file mode 100644 index 0000000000..db0cd9ffaa --- /dev/null +++ b/spec/mongo/retryable/write_worker_spec.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Mongo::Retryable::WriteWorker do + describe '#nro_write_with_retry' do + context 'when session is nil' do + let(:retryable) do + authorized_client['write_worker_test'] + end + + let(:write_concern) do + Mongo::WriteConcern.get(w: 0) + end + + let(:write_worker) do + described_class.new(retryable) + end + + let(:context) do + instance_double(Mongo::Operation::Context).tap do |context| + allow(context).to receive(:session).and_return(nil) + end + end + + before do + # We avoid actual execution of the operation to speed up and simplify + # the spec. + allow(write_worker).to receive(:legacy_write_with_retry).and_return(nil) + end + + it 'does not raise' do + expect do + write_worker.nro_write_with_retry(write_concern, context: context) + end.not_to raise_error + end + end + end +end diff --git a/spec/mongo/server/app_metadata/environment_spec.rb b/spec/mongo/server/app_metadata/environment_spec.rb index b63e174e16..0b0b844b59 100644 --- a/spec/mongo/server/app_metadata/environment_spec.rb +++ b/spec/mongo/server/app_metadata/environment_spec.rb @@ -1,8 +1,52 @@ # frozen_string_literal: true +# rubocop:todo all require 'spec_helper' +require 'fileutils' + +MOCKED_DOCKERENV_PATH = File.expand_path(File.join(Dir.pwd, '.dockerenv-mocked')) + +module ContainerChecking + def mock_dockerenv_path + before do + allow_any_instance_of(Mongo::Server::AppMetadata::Environment) + .to receive(:dockerenv_path) + .and_return(MOCKED_DOCKERENV_PATH) + end + end + + def with_docker + mock_dockerenv_path + + around do |example| + File.write(MOCKED_DOCKERENV_PATH, 'placeholder') + example.run + ensure + File.delete(MOCKED_DOCKERENV_PATH) + end + end + + def without_docker + mock_dockerenv_path + + around do |example| + FileUtils.rm_f(MOCKED_DOCKERENV_PATH) + example.run + end + end + + def with_kubernetes + local_env 'KUBERNETES_SERVICE_HOST' => 'kubernetes.default.svc.cluster.local' + end + + def without_kubernetes + local_env 'KUBERNETES_SERVICE_HOST' => nil + end +end describe Mongo::Server::AppMetadata::Environment do + extend ContainerChecking + let(:env) { described_class.new } shared_examples_for 'running in a FaaS environment' do @@ -17,6 +61,30 @@ end end + shared_examples_for 'not running in a Docker container' do + it 'does not detect Docker' do + expect(env.container || {}).not_to include :runtime + end + end + + shared_examples_for 'not running under Kubernetes' do + it 'does not detect Kubernetes' do + expect(env.container || {}).not_to include :orchestrator + end + end + + shared_examples_for 'running under Kubernetes' do + it 'detects that Kubernetes is present' do + expect(env.container[:orchestrator]).to be == 'kubernetes' + end + end + + shared_examples_for 'running in a Docker container' do + it 'detects that Docker is present' do + expect(env.container[:runtime]).to be == 'docker' + end + end + context 'when run outside of a FaaS environment' do + it_behaves_like 'running outside a FaaS environment' + end @@ -204,6 +272,67 @@ timeout_sec: 60, region: 'us-central1', } end + + context 'when a container is present' 
do + with_kubernetes + with_docker + + it 'includes a container key' do + expect(env.to_h[:container]).to be == { + runtime: 'docker', + orchestrator: 'kubernetes' + } + end + end + + context 'when no container is present' do + without_kubernetes + without_docker + + it 'does not include a container key' do + expect(env.to_h).not_to include(:container) + end + end + end + end + + # have a specific test for this, since the tests that check + # for Docker use a mocked value for the .dockerenv path. + it 'should look for dockerenv in root directory' do + expect(described_class::DOCKERENV_PATH).to be == '/.dockerenv' + end + + context 'when no container is present' do + without_kubernetes + without_docker + + it_behaves_like 'not running in a Docker container' + it_behaves_like 'not running under Kubernetes' + end + + context 'when container is present' do + context 'when kubernetes is present' do + without_docker + with_kubernetes + + it_behaves_like 'not running in a Docker container' + it_behaves_like 'running under Kubernetes' + end + + context 'when docker is present' do + with_docker + without_kubernetes + + it_behaves_like 'running in a Docker container' + it_behaves_like 'not running under Kubernetes' + end + + context 'when both kubernetes and docker are present' do + with_docker + with_kubernetes + + it_behaves_like 'running in a Docker container' + it_behaves_like 'running under Kubernetes' end end end diff --git a/spec/mongo/server/app_metadata_spec.rb b/spec/mongo/server/app_metadata_spec.rb index 47da5fa8bb..a3b8619b63 100644 --- a/spec/mongo/server/app_metadata_spec.rb +++ b/spec/mongo/server/app_metadata_spec.rb @@ -87,8 +87,18 @@ end context 'when run outside of a FaaS environment' do - it 'excludes the :env key from the client document' do - expect(app_metadata.client_document.key?(:env)).to be false + context 'when a container is present' do + local_env 'KUBERNETES_SERVICE_HOST' => 'something' + + it 'includes the :env key in the client document' do + expect(app_metadata.client_document.key?(:env)).to be true + end + end + + context 'when no container is present' do + it 'excludes the :env key from the client document' do + expect(app_metadata.client_document.key?(:env)).to be false + end end end diff --git a/spec/mongo/server/connection_spec.rb b/spec/mongo/server/connection_spec.rb index f1e0dabc56..a1acf2f69c 100644 --- a/spec/mongo/server/connection_spec.rb +++ b/spec/mongo/server/connection_spec.rb @@ -265,6 +265,10 @@ class ConnectionSpecTestException < Exception; end context 'when #authenticate! raises an exception' do require_auth + # because the mock/stub flow here doesn't cover the flow used by + # the X.509 authentication mechanism... 
+ forbid_x509_auth + let(:server_options) do Mongo::Client.canonicalize_ruby_options( SpecConfig.instance.all_test_options, diff --git a/spec/mongo/session/session_pool_spec.rb b/spec/mongo/session/session_pool_spec.rb index 1dedebed25..caac0117be 100644 --- a/spec/mongo/session/session_pool_spec.rb +++ b/spec/mongo/session/session_pool_spec.rb @@ -17,21 +17,6 @@ end end - describe '.create' do - - let!(:pool) do - described_class.create(cluster) - end - - it 'creates a session pool' do - expect(pool).to be_a(Mongo::Session::SessionPool) - end - - it 'adds the pool as an instance variable on the cluster' do - expect(cluster.session_pool).to eq(pool) - end - end - describe '#initialize' do let(:pool) do @@ -181,7 +166,7 @@ describe '#end_sessions' do let(:pool) do - described_class.create(client.cluster) + client.cluster.session_pool end let!(:session_a) do diff --git a/spec/mongo/session_transaction_spec.rb b/spec/mongo/session_transaction_spec.rb index 851b08433f..37fcc92f08 100644 --- a/spec/mongo/session_transaction_spec.rb +++ b/spec/mongo/session_transaction_spec.rb @@ -26,6 +26,17 @@ class SessionTransactionSpecError < StandardError; end collection.delete_many end + describe 'start_transaction' do + context 'when topology is sharded and server is < 4.2' do + max_server_fcv '4.1' + require_topology :sharded + + it 'raises an error' do + expect { session.start_transaction }.to raise_error(Mongo::Error::TransactionsNotSupported, /sharded transactions require server version/) + end + end + end + describe '#abort_transaction' do require_topology :replica_set @@ -75,6 +86,8 @@ class SessionTransactionSpecError < StandardError; end end describe '#with_transaction' do + require_topology :replica_set + context 'callback successful' do it 'commits' do session.with_transaction do @@ -123,6 +136,7 @@ class SessionTransactionSpecError < StandardError; end expect(Mongo::Utils).to receive(:monotonic_time).ordered.and_return(start + 1) expect(Mongo::Utils).to receive(:monotonic_time).ordered.and_return(start + 2) expect(Mongo::Utils).to receive(:monotonic_time).ordered.and_return(start + 200) + allow(session).to receive('check_transactions_supported!').and_return true expect do session.with_transaction do @@ -156,6 +170,7 @@ class SessionTransactionSpecError < StandardError; end expect(Mongo::Utils).to receive(:monotonic_time).ordered.and_return(start + i) end expect(Mongo::Utils).to receive(:monotonic_time).ordered.and_return(start + 200) + allow(session).to receive('check_transactions_supported!').and_return true exc = Mongo::Error::OperationFailure.new('timeout test') exc.add_label(label) diff --git a/spec/mongo/uri_spec.rb b/spec/mongo/uri_spec.rb index f2e2e55e4f..0fffa6ef62 100644 --- a/spec/mongo/uri_spec.rb +++ b/spec/mongo/uri_spec.rb @@ -266,15 +266,6 @@ end end - context 'no slash after hosts, and options' do - - let(:string) { 'mongodb://example.com?tls=true' } - - it 'raises an error' do - expect { uri }.to raise_error(Mongo::Error::InvalidURI, %r,MongoDB URI must have a slash \(/\) after the hosts if options are given,) - end - end - context 'mongodb://example.com/?w' do let(:string) { 'mongodb://example.com/?w' } diff --git a/spec/runners/crud.rb b/spec/runners/crud.rb index 765597bac0..b0a76b5aca 100644 --- a/spec/runners/crud.rb +++ b/spec/runners/crud.rb @@ -26,7 +26,7 @@ require 'runners/crud/verifier' def collection_data(collection) - collection.find.to_a + collection.find.sort(_id: 1).to_a end def crud_execute_operations(spec, test, num_ops, event_subscriber, expect_error, diff 
--git a/spec/runners/crud/test.rb b/spec/runners/crud/test.rb index 57793e23c6..ae210b584f 100644 --- a/spec/runners/crud/test.rb +++ b/spec/runners/crud/test.rb @@ -110,14 +110,6 @@ def setup_test(spec, client) end setup_fail_point(client) end - - def actual_collection_contents(client) - unless @spec.collection_name - raise ArgumentError, 'Spec does not specify a global collection' - end - - client[@spec.collection_name, read_concern: {level: :majority}].find.to_a - end end end end diff --git a/spec/runners/unified/assertions.rb b/spec/runners/unified/assertions.rb index 722b11ad83..908fe80fbb 100644 --- a/spec/runners/unified/assertions.rb +++ b/spec/runners/unified/assertions.rb @@ -251,11 +251,11 @@ def assert_matches(actual, expected, msg) end when Hash if expected.keys == %w($$unsetOrMatches) && expected.values.first.keys == %w(insertedId) - actual_v = actual.inserted_id + actual_v = get_actual_value(actual, 'inserted_id') expected_v = expected.values.first.values.first assert_value_matches(actual_v, expected_v, 'inserted_id') elsif expected.keys == %w(insertedId) - actual_v = actual.inserted_id + actual_v = get_actual_value(actual, 'inserted_id') expected_v = expected.values.first assert_value_matches(actual_v, expected_v, 'inserted_id') else @@ -270,7 +270,7 @@ def assert_matches(actual, expected, msg) if k.start_with?('$$') assert_value_matches(actual, expected, k) else - actual_v = actual[k] + actual_v = get_actual_value(actual, k) if Hash === expected_v && expected_v.length == 1 && expected_v.keys.first.start_with?('$$') assert_value_matches(actual_v, expected_v, k) else @@ -290,6 +290,19 @@ def assert_matches(actual, expected, msg) end end + # The actual value may be of different types depending on the operation. + # In order to avoid having to write a lot of code to handle the different + # types, we use this method to get the actual value. + def get_actual_value(actual, key) + if Hash === actual + actual[key] + elsif Mongo::Operation::Result === actual && !actual.respond_to?(key.to_sym) + actual.documents.first[key] + else + actual.send(key) + end + end + def assert_type(object, type) ok = [*type].reduce(false) { |acc, x| acc || type_matches?(object, x) } diff --git a/spec/runners/unified/crud_operations.rb b/spec/runners/unified/crud_operations.rb index 9274bf62df..69e35513a8 100644 --- a/spec/runners/unified/crud_operations.rb +++ b/spec/runners/unified/crud_operations.rb @@ -32,6 +32,18 @@ def get_find_view(op) if session = args.use('session') opts[:session] = entities.get(:session, session) end + if collation = args.use('collation') + opts[:collation] = collation + end + if args.key?('noCursorTimeout') + opts[:no_cursor_timeout] = args.use('noCursorTimeout') + end + if args.key?('oplogReplay') + opts[:oplog_replay] = args.use('oplogReplay') + end + if args.key?('allowPartialResults') + opts[:allow_partial_results] = args.use('allowPartialResults') + end req = collection.find(args.use!('filter'), **opts) if batch_size = args.use('batchSize') req = req.batch_size(batch_size) diff --git a/spec/runners/unified/support_operations.rb b/spec/runners/unified/support_operations.rb index f8b4e53af3..a99d310958 100644 --- a/spec/runners/unified/support_operations.rb +++ b/spec/runners/unified/support_operations.rb @@ -70,8 +70,7 @@ def assert_session_dirty(op) consume_test_runner(op) use_arguments(op) do |args| session = entities.get(:session, args.use!('session')) - # https://jira.mongodb.org/browse/RUBY-1813 - true + session.dirty? 
|| raise(Error::ResultMismatch, 'expected session to be dirty') end end @@ -79,8 +78,7 @@ def assert_session_not_dirty(op) consume_test_runner(op) use_arguments(op) do |args| session = entities.get(:session, args.use!('session')) - # https://jira.mongodb.org/browse/RUBY-1813 - true + session.dirty? && raise(Error::ResultMismatch, 'expected session to be not dirty') end end @@ -92,7 +90,7 @@ def assert_same_lsid_on_last_two_commands(op, expected: true) unless subscriber.started_events.length >= 2 raise Error::ResultMismatch, "Must have at least 2 events, have #{subscriber.started_events.length}" end - lsids = subscriber.started_events[-2...-1].map do |cmd| + lsids = subscriber.started_events[-2..-1].map do |cmd| cmd.command.fetch('lsid') end if expected diff --git a/spec/shared b/spec/shared index ce1f8945bd..bd968a969a 160000 --- a/spec/shared +++ b/spec/shared @@ -1 +1 @@ -Subproject commit ce1f8945bd61e614ca4cc7a24d95d16071c46a59 +Subproject commit bd968a969aed7ae4d579855f2ce5b3e3201444a4 diff --git a/spec/spec_tests/data/client_side_encryption/explain.yml b/spec/spec_tests/data/client_side_encryption/explain.yml index c0dd9c57c0..5d1874565b 100644 --- a/spec/spec_tests/data/client_side_encryption/explain.yml +++ b/spec/spec_tests/data/client_side_encryption/explain.yml @@ -1,5 +1,5 @@ runOn: - - minServerVersion: "4.1.10" + - minServerVersion: "7.0.0" database_name: &database_name "default" collection_name: &collection_name "default" @@ -54,4 +54,4 @@ tests: # Outcome is checked using a separate MongoClient without auto encryption. data: - *doc0_encrypted - - *doc1_encrypted \ No newline at end of file + - *doc1_encrypted diff --git a/spec/spec_tests/data/cmap/pool-clear-interrupt-immediately.yml b/spec/spec_tests/data/cmap/pool-clear-interrupt-immediately.yml deleted file mode 100644 index afbb880df0..0000000000 --- a/spec/spec_tests/data/cmap/pool-clear-interrupt-immediately.yml +++ /dev/null @@ -1,49 +0,0 @@ -version: 1 -style: unit -description: Connections MUST be interrupted as soon as possible (interruptInUseConnections=true) -# Remove the topology runOn requirement when cmap specs are adjusted for lbs -runOn: - - topology: [ "single", "replicaset", "sharded" ] -poolOptions: - # ensure it's not involved by default - backgroundThreadIntervalMS: 10000 -operations: - - name: ready - - name: checkOut - - name: checkOut - label: conn - - name: clear - interruptInUseConnections: true - - name: waitForEvent - event: ConnectionPoolCleared - count: 1 - timeout: 1000 - - name: waitForEvent - event: ConnectionClosed - count: 2 - timeout: 1000 - - name: close -events: - - type: ConnectionCheckedOut - connectionId: 1 - address: 42 - - type: ConnectionCheckedOut - connectionId: 2 - address: 42 - - type: ConnectionPoolCleared - interruptInUseConnections: true - - type: ConnectionClosed - reason: stale - address: 42 - - type: ConnectionClosed - reason: stale - address: 42 - - type: ConnectionPoolClosed - address: 42 -ignore: - - ConnectionCreated - - ConnectionPoolReady - - ConnectionReady - - ConnectionCheckOutStarted - - ConnectionPoolCreated - - ConnectionCheckedIn diff --git a/spec/spec_tests/data/connection_string/invalid-uris.yml b/spec/spec_tests/data/connection_string/invalid-uris.yml index 395e60eed9..dd4d4ce31c 100644 --- a/spec/spec_tests/data/connection_string/invalid-uris.yml +++ b/spec/spec_tests/data/connection_string/invalid-uris.yml @@ -143,14 +143,6 @@ tests: hosts: ~ auth: ~ options: ~ - - - description: "Missing delimiting slash between hosts and options" - uri: 
"mongodb://example.com?w=1" - valid: false - warning: ~ - hosts: ~ - auth: ~ - options: ~ - description: "Incomplete key value pair for option" uri: "mongodb://example.com/?w" @@ -257,5 +249,3 @@ tests: hosts: ~ auth: ~ options: ~ - - diff --git a/spec/spec_tests/data/connection_string/valid-options.yml b/spec/spec_tests/data/connection_string/valid-options.yml index e1b94039c8..8cb0dea3a6 100644 --- a/spec/spec_tests/data/connection_string/valid-options.yml +++ b/spec/spec_tests/data/connection_string/valid-options.yml @@ -15,3 +15,16 @@ tests: db: "admin" options: authmechanism: "MONGODB-CR" + - + description: "Missing delimiting slash between hosts and options" + uri: "mongodb://example.com?tls=true" + valid: true + warning: false + hosts: + - + type: "hostname" + host: "example.com" + port: ~ + auth: ~ + options: + tls: true diff --git a/spec/spec_tests/data/crud_unified/find-test-all-options.yml b/spec/spec_tests/data/crud_unified/find-test-all-options.yml new file mode 100644 index 0000000000..0f456b9cdf --- /dev/null +++ b/spec/spec_tests/data/crud_unified/find-test-all-options.yml @@ -0,0 +1,348 @@ +description: "find options" + +schemaVersion: "1.0" +runOnRequirements: + - serverless: 'forbid' + + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name find-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +tests: + - description: "sort" + operations: + - name: find + arguments: + filter: &filter { _name: "John" } + sort: &sort { _id: 1 } + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + sort: *sort + commandName: find + + - description: "projection" + operations: + - name: find + arguments: + filter: *filter + projection: &projection { _id: 1 } + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + projection: *projection + commandName: find + databaseName: *database0Name + + - description: "hint" + operations: + - name: find + arguments: + filter: *filter + hint: &hint { _id: 1 } + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + hint: *hint + commandName: find + databaseName: *database0Name + + - description: "skip" + operations: + - name: find + arguments: + filter: *filter + skip: &skip 10 + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + skip: *skip + commandName: find + databaseName: *database0Name + + - description: "limit" + operations: + - name: find + arguments: + filter: *filter + limit: &limit 10 + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + limit: *limit + commandName: find + databaseName: *database0Name + + - description: "batchSize" + operations: + - name: find + arguments: + filter: *filter + batchSize: &batchSize 10 + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + batchSize: *batchSize + commandName: find + databaseName: *database0Name + + - description: "comment" + 
operations: + - name: find + arguments: + filter: *filter + comment: &comment 'comment' + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + comment: *comment + commandName: find + databaseName: *database0Name + + - description: "maxTimeMS" + operations: + - name: find + arguments: + filter: *filter + maxTimeMS: &maxTimeMS 1000 + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + maxTimeMS: *maxTimeMS + commandName: find + databaseName: *database0Name + + - description: "max" + operations: + - name: find + arguments: + filter: *filter + max: &max { _id: 10 } + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + max: *max + commandName: find + databaseName: *database0Name + + - description: "min" + operations: + - name: find + arguments: + filter: *filter + hint: { name: 1 } + min: &min { name: 'John' } + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + min: *min + commandName: find + databaseName: *database0Name + + - description: "returnKey" + operations: + - name: find + arguments: + filter: *filter + returnKey: &returnKey false + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + returnKey: *returnKey + commandName: find + databaseName: *database0Name + + - description: "showRecordId" + operations: + - name: find + arguments: + filter: *filter + showRecordId: &showRecordId false + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + showRecordId: *showRecordId + commandName: find + databaseName: *database0Name + + - description: "oplogReplay" + operations: + - name: find + arguments: + filter: *filter + oplogReplay: &oplogReplay false + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + oplogReplay: *oplogReplay + commandName: find + databaseName: *database0Name + + - description: "noCursorTimeout" + operations: + - name: find + arguments: + filter: *filter + noCursorTimeout: &noCursorTimeout false + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + noCursorTimeout: *noCursorTimeout + commandName: find + databaseName: *database0Name + + - description: "allowPartialResults" + operations: + - name: find + arguments: + filter: *filter + allowPartialResults: &allowPartialResults false + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + allowPartialResults: *allowPartialResults + commandName: find + databaseName: *database0Name + + - description: "collation" + operations: + - name: find + arguments: + filter: *filter + collation: &collation { locale: "en" } + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + collation: *collation + commandName: find + databaseName: *database0Name + + - description: "allowDiskUse" + 
runOnRequirements: + - minServerVersion: 4.4 + operations: + - name: find + arguments: + filter: *filter + allowDiskUse: &allowDiskUse true + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + allowDiskUse: *allowDiskUse + commandName: find + databaseName: *database0Name + + - description: "let" + runOnRequirements: + - minServerVersion: "5.0" + operations: + - name: find + arguments: + filter: *filter + let: &let { name: "Mary" } + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + let: *let + commandName: find + databaseName: *database0Name diff --git a/spec/spec_tests/data/index_management/createSearchIndex.yml b/spec/spec_tests/data/index_management/createSearchIndex.yml index 6aa56f3bc4..2e3cf50f8d 100644 --- a/spec/spec_tests/data/index_management/createSearchIndex.yml +++ b/spec/spec_tests/data/index_management/createSearchIndex.yml @@ -30,8 +30,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: @@ -50,8 +51,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: @@ -59,4 +61,4 @@ tests: command: createSearchIndexes: *collection0 indexes: [ { definition: *definition, name: 'test index' } ] - $db: *database0 \ No newline at end of file + $db: *database0 diff --git a/spec/spec_tests/data/index_management/createSearchIndexes.yml b/spec/spec_tests/data/index_management/createSearchIndexes.yml index 54a6e84ccb..db8f02e551 100644 --- a/spec/spec_tests/data/index_management/createSearchIndexes.yml +++ b/spec/spec_tests/data/index_management/createSearchIndexes.yml @@ -30,8 +30,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: @@ -51,8 +52,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: @@ -71,8 +73,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. 
The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: @@ -80,4 +83,4 @@ tests: command: createSearchIndexes: *collection0 indexes: [ { definition: *definition, name: 'test index' } ] - $db: *database0 \ No newline at end of file + $db: *database0 diff --git a/spec/spec_tests/data/index_management/dropSearchIndex.yml b/spec/spec_tests/data/index_management/dropSearchIndex.yml index e384cf26c5..8a8e829454 100644 --- a/spec/spec_tests/data/index_management/dropSearchIndex.yml +++ b/spec/spec_tests/data/index_management/dropSearchIndex.yml @@ -30,8 +30,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: diff --git a/spec/spec_tests/data/index_management/listSearchIndexes.yml b/spec/spec_tests/data/index_management/listSearchIndexes.yml index a50becdf1d..f05a368585 100644 --- a/spec/spec_tests/data/index_management/listSearchIndexes.yml +++ b/spec/spec_tests/data/index_management/listSearchIndexes.yml @@ -28,8 +28,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: @@ -48,8 +49,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: @@ -71,8 +73,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. 
isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: @@ -82,4 +85,4 @@ tests: cursor: { batchSize: 10 } pipeline: - $listSearchIndexes: { name: *indexName } - $db: *database0 \ No newline at end of file + $db: *database0 diff --git a/spec/spec_tests/data/index_management/updateSearchIndex.yml b/spec/spec_tests/data/index_management/updateSearchIndex.yml index bb18ab512e..2c56e75ef6 100644 --- a/spec/spec_tests/data/index_management/updateSearchIndex.yml +++ b/spec/spec_tests/data/index_management/updateSearchIndex.yml @@ -31,8 +31,9 @@ tests: expectError: # This test always errors in a non-Atlas environment. The test functions as a unit test by asserting # that the driver constructs and sends the correct command. + # The expected error message was changed in SERVER-83003. Check for the substring "Atlas" shared by both error messages. isError: true - errorContains: Search index commands are only supported with Atlas + errorContains: Atlas expectEvents: - client: *client0 events: diff --git a/spec/spec_tests/data/retryable_writes/unified/bulkWrite-serverErrors.yml b/spec/spec_tests/data/retryable_writes/unified/bulkWrite-serverErrors.yml index dc664ab76b..7d1375793e 100644 --- a/spec/spec_tests/data/retryable_writes/unified/bulkWrite-serverErrors.yml +++ b/spec/spec_tests/data/retryable_writes/unified/bulkWrite-serverErrors.yml @@ -3,8 +3,10 @@ description: "retryable-writes bulkWrite serverErrors" schemaVersion: "1.0" runOnRequirements: - - minServerVersion: "3.6" + - minServerVersion: "4.0" topologies: [ replicaset ] + - minServerVersion: "4.1.7" + topologies: [ sharded ] createEntities: - client: @@ -29,11 +31,6 @@ initialData: tests: - description: "BulkWrite succeeds after retryable writeConcernError in first batch" - runOnRequirements: - - minServerVersion: "4.0" - topologies: [ replicaset ] - - minServerVersion: "4.1.7" - topologies: [ sharded ] operations: - name: failPoint object: testRunner diff --git a/spec/spec_tests/data/retryable_writes/unified/insertOne-serverErrors.yml b/spec/spec_tests/data/retryable_writes/unified/insertOne-serverErrors.yml index 689328818b..231569fb0d 100644 --- a/spec/spec_tests/data/retryable_writes/unified/insertOne-serverErrors.yml +++ b/spec/spec_tests/data/retryable_writes/unified/insertOne-serverErrors.yml @@ -3,8 +3,10 @@ description: "retryable-writes insertOne serverErrors" schemaVersion: "1.0" runOnRequirements: - - minServerVersion: "3.6" + - minServerVersion: "4.0" topologies: [ replicaset ] + - minServerVersion: "4.1.7" + topologies: [ sharded ] createEntities: - client: @@ -29,11 +31,6 @@ initialData: tests: - description: "InsertOne succeeds after retryable writeConcernError" - runOnRequirements: - - minServerVersion: "4.0" - topologies: [ replicaset ] - - minServerVersion: "4.1.7" - topologies: [ sharded ] operations: - name: failPoint object: testRunner diff --git a/spec/spec_tests/data/run_command_unified/runCommand.yml b/spec/spec_tests/data/run_command_unified/runCommand.yml new file mode 100644 index 0000000000..9b0bf1ad63 --- /dev/null +++ b/spec/spec_tests/data/run_command_unified/runCommand.yml @@ -0,0 +1,319 @@ +description: runCommand + +schemaVersion: "1.3" + +createEntities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: [commandStartedEvent] + - database: + id: &db db + client: *client + databaseName: *db + - collection: + id: &collection collection + database: *db + collectionName: *collection + - 
database: + id: &dbWithRC dbWithRC + client: *client + databaseName: *dbWithRC + databaseOptions: + readConcern: { level: 'local' } + - database: + id: &dbWithWC dbWithWC + client: *client + databaseName: *dbWithWC + databaseOptions: + writeConcern: { w: 0 } + - session: + id: &session session + client: *client + # Stable API test + - client: + id: &clientWithStableApi clientWithStableApi + observeEvents: [commandStartedEvent] + serverApi: + version: "1" + strict: true + - database: + id: &dbWithStableApi dbWithStableApi + client: *clientWithStableApi + databaseName: *dbWithStableApi + +initialData: +- collectionName: *collection + databaseName: *db + documents: [] + +tests: + - description: always attaches $db and implicit lsid to given command and omits default readPreference + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + $db: *db + lsid: { $$exists: true } + $readPreference: { $$exists: false } + commandName: ping + + - description: always gossips the $clusterTime on the sent command + runOnRequirements: + # Only replicasets and sharded clusters have a $clusterTime + - topologies: [ replicaset, sharded ] + operations: + # We have to run one command to obtain a clusterTime to gossip + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + expectResult: { ok: 1 } + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + # Only check the shape of the second ping which should have the $clusterTime received from the first operation + - commandStartedEvent: + command: + ping: 1 + $clusterTime: { $$exists: true } + commandName: ping + + - description: attaches the provided session lsid to given command + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + session: *session + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + lsid: { $$sessionLsid: *session } + $db: *db + commandName: ping + + - description: attaches the provided $readPreference to given command + runOnRequirements: + # Exclude single topology, which is most likely a standalone server + - topologies: [ replicaset, load-balanced, sharded ] + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + readPreference: &readPreference { mode: 'nearest' } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + $readPreference: *readPreference + $db: *db + commandName: ping + + - description: does not attach $readPreference to given command on standalone + runOnRequirements: + # This test assumes that the single topology contains a standalone server; + # however, it is possible for a single topology to contain a direct + # connection to another server type. 
+ # See: https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst#topology-type-single + - topologies: [ single ] + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + readPreference: { mode: 'nearest' } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + $readPreference: { $$exists: false } + $db: *db + commandName: ping + + - description: does not attach primary $readPreference to given command + operations: + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + readPreference: { mode: 'primary' } + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + ping: 1 + $readPreference: { $$exists: false } + $db: *db + commandName: ping + + - description: does not inherit readConcern specified at the db level + operations: + - name: runCommand + object: *dbWithRC + # Test with a command that supports a readConcern option. + # expectResult is intentionally omitted because some drivers + # may automatically convert command responses into cursors. + arguments: + commandName: aggregate + command: { aggregate: *collection, pipeline: [], cursor: {} } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + aggregate: *collection + readConcern: { $$exists: false } + $db: *dbWithRC + commandName: aggregate + + - description: does not inherit writeConcern specified at the db level + operations: + - name: runCommand + object: *dbWithWC + arguments: + commandName: insert + command: + insert: *collection + documents: [ { foo: 'bar' } ] + ordered: true + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + insert: *collection + writeConcern: { $$exists: false } + $db: *dbWithWC + commandName: insert + + - description: does not retry retryable errors on given command + runOnRequirements: + - minServerVersion: "4.2" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ping] + closeConnection: true + - name: runCommand + object: *db + arguments: + commandName: ping + command: { ping: 1 } + expectError: + isClientError: true + + - description: attaches transaction fields to given command + runOnRequirements: + - minServerVersion: "4.0" + topologies: [ replicaset ] + - minServerVersion: "4.2" + topologies: [ sharded, load-balanced ] + operations: + - name: withTransaction + object: *session + arguments: + callback: + - name: runCommand + object: *db + arguments: + session: *session + commandName: insert + command: + insert: *collection + documents: [ { foo: 'transaction' } ] + ordered: true + expectResult: { $$unsetOrMatches: { insertedId: { $$unsetOrMatches: 1 } } } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + insert: *collection + documents: [ { foo: 'transaction' } ] + ordered: true + lsid: { $$sessionLsid: *session } + txnNumber: 1 + startTransaction: true + autocommit: false + # omitted fields + readConcern: { $$exists: false } + writeConcern: { $$exists: false } + commandName: insert + databaseName: *db + - commandStartedEvent: + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session } + txnNumber: 1 + autocommit: false + # omitted fields + writeConcern: { $$exists: false } + readConcern: { $$exists: 
false } + commandName: commitTransaction + databaseName: admin + + - description: attaches apiVersion fields to given command when stableApi is configured on the client + runOnRequirements: + - minServerVersion: "5.0" + operations: + - name: runCommand + object: *dbWithStableApi + arguments: + commandName: ping + command: + ping: 1 + expectResult: { ok: 1 } + expectEvents: + - client: *clientWithStableApi + events: + - commandStartedEvent: + command: + ping: 1 + $db: *dbWithStableApi + apiVersion: "1" + apiStrict: true + apiDeprecationErrors: { $$unsetOrMatches: false } + commandName: ping diff --git a/spec/spec_tests/data/sessions_unified/driver-sessions-dirty-session-errors.yml b/spec/spec_tests/data/sessions_unified/driver-sessions-dirty-session-errors.yml new file mode 100644 index 0000000000..b7f2917efc --- /dev/null +++ b/spec/spec_tests/data/sessions_unified/driver-sessions-dirty-session-errors.yml @@ -0,0 +1,351 @@ +description: "driver-sessions-dirty-session-errors" + +schemaVersion: "1.0" + +runOnRequirements: + - minServerVersion: "4.0" + topologies: [ replicaset ] + - minServerVersion: "4.1.8" + topologies: [ sharded ] + +createEntities: + - client: + id: &client0 client0 + useMultipleMongoses: false + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name session-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name test + - session: + id: &session0 session0 + client: *client0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1 } + +tests: + - description: "Dirty explicit session is discarded (insert)" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ insert ] + closeConnection: true + - name: assertSessionNotDirty + object: testRunner + arguments: + session: *session0 + - name: insertOne + object: *collection0 + arguments: + session: *session0 + document: { _id: 2 } + expectResult: { $$unsetOrMatches: { insertedId: { $$unsetOrMatches: 2 } } } + - name: assertSessionDirty + object: testRunner + arguments: + session: *session0 + - name: insertOne + object: *collection0 + arguments: + session: *session0 + document: { _id: 3 } + expectResult: { $$unsetOrMatches: { insertedId: { $$unsetOrMatches: 3 } } } + - name: assertSessionDirty + object: testRunner + arguments: + session: *session0 + - name: endSession + object: *session0 + - &find_with_implicit_session + name: find + object: *collection0 + arguments: + filter: { _id: -1 } + expectResult: [] + - name: assertDifferentLsidOnLastTwoCommands + object: testRunner + arguments: + client: *client0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: &insert_attempt + command: + insert: *collection0Name + documents: + - { _id: 2 } + ordered: true + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + commandName: insert + databaseName: *database0Name + - commandStartedEvent: *insert_attempt + - commandStartedEvent: + command: + insert: *collection0Name + documents: + - { _id: 3 } + ordered: true + lsid: { $$sessionLsid: *session0 } + txnNumber: 2 + commandName: insert + databaseName: *database0Name + - commandStartedEvent: &find_with_implicit_session_event + command: + find: *collection0Name + filter: { _id: -1 } + # There is no explicit session to use with $$sessionLsid, so + # just assert an 
arbitrary lsid document + lsid: { $$type: object } + commandName: find + databaseName: *database0Name + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1 } + - { _id: 2 } + - { _id: 3 } + + - description: "Dirty explicit session is discarded (findAndModify)" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ findAndModify ] + closeConnection: true + - name: assertSessionNotDirty + object: testRunner + arguments: + session: *session0 + - name: findOneAndUpdate + object: *collection0 + arguments: + session: *session0 + filter: { _id: 1 } + update: { $inc: { x: 1 } } + returnDocument: Before + expectResult: { _id: 1 } + - name: assertSessionDirty + object: testRunner + arguments: + session: *session0 + - name: endSession + object: *session0 + - *find_with_implicit_session + - name: assertDifferentLsidOnLastTwoCommands + object: testRunner + arguments: + client: *client0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: &findAndModify_attempt + command: + findAndModify: *collection0Name + query: { _id: 1 } + update: { $inc: { x: 1 } } + new: false + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + readConcern: { $$exists: false } + writeConcern: { $$exists: false } + commandName: findAndModify + databaseName: *database0Name + - commandStartedEvent: *findAndModify_attempt + - commandStartedEvent: *find_with_implicit_session_event + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 1 } + + - description: "Dirty implicit session is discarded (insert)" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ insert ] + closeConnection: true + - name: insertOne + object: *collection0 + arguments: + document: { _id: 2 } + expectResult: { $$unsetOrMatches: { insertedId: { $$unsetOrMatches: 2 } } } + - *find_with_implicit_session + - name: assertDifferentLsidOnLastTwoCommands + object: testRunner + arguments: + client: *client0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: &insert_attempt + command: + insert: *collection0Name + documents: + - { _id: 2 } + ordered: true + lsid: { $$type: object } + txnNumber: 1 + commandName: insert + databaseName: *database0Name + - commandStartedEvent: *insert_attempt + - commandStartedEvent: *find_with_implicit_session_event + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1 } + - { _id: 2 } + + - description: "Dirty implicit session is discarded (findAndModify)" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ findAndModify ] + closeConnection: true + - name: findOneAndUpdate + object: *collection0 + arguments: + filter: { _id: 1 } + update: { $inc: { x: 1 } } + returnDocument: Before + expectResult: { _id: 1 } + - *find_with_implicit_session + - name: assertDifferentLsidOnLastTwoCommands + object: testRunner + arguments: + client: *client0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: &findAndModify_attempt + command: + findAndModify: *collection0Name + query: { _id: 1 } + update: { $inc: { x: 1 } } + new: false + lsid: { $$type: object } + 
txnNumber: 1 + readConcern: { $$exists: false } + writeConcern: { $$exists: false } + commandName: findAndModify + databaseName: *database0Name + - commandStartedEvent: *findAndModify_attempt + - commandStartedEvent: *find_with_implicit_session_event + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 1 } + + - description: "Dirty implicit session is discarded (read returning cursor)" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ aggregate ] + closeConnection: true + - name: aggregate + object: *collection0 + arguments: + pipeline: [ { $project: { _id: 1 } } ] + expectResult: [ { _id: 1 } ] + - *find_with_implicit_session + - name: assertDifferentLsidOnLastTwoCommands + object: testRunner + arguments: + client: *client0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: &aggregate_attempt + command: + aggregate: *collection0Name + pipeline: [ { $project: { _id: 1 } } ] + lsid: { $$type: object } + commandName: aggregate + databaseName: *database0Name + - commandStartedEvent: *aggregate_attempt + - commandStartedEvent: *find_with_implicit_session_event + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1 } + + - description: "Dirty implicit session is discarded (read not returning cursor)" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ aggregate ] + closeConnection: true + - name: countDocuments + object: *collection0 + arguments: + filter: {} + expectResult: 1 + - *find_with_implicit_session + - name: assertDifferentLsidOnLastTwoCommands + object: testRunner + arguments: + client: *client0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: &countDocuments_attempt + command: + aggregate: *collection0Name + pipeline: [ { $match: {} }, { $group: { _id: 1, n: { $sum: 1 } } } ] + lsid: { $$type: object } + commandName: aggregate + databaseName: *database0Name + - commandStartedEvent: *countDocuments_attempt + - commandStartedEvent: *find_with_implicit_session_event + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1 } diff --git a/spec/spec_tests/data/unified/valid-pass/poc-crud.yml b/spec/spec_tests/data/unified/valid-pass/poc-crud.yml index b7d05d75af..5748c0779f 100644 --- a/spec/spec_tests/data/unified/valid-pass/poc-crud.yml +++ b/spec/spec_tests/data/unified/valid-pass/poc-crud.yml @@ -143,7 +143,7 @@ tests: - description: "readConcern majority with out stage" runOnRequirements: - minServerVersion: "4.1.0" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] serverless: "forbid" operations: - name: aggregate diff --git a/spec/spec_tests/data/unified/valid-pass/poc-retryable-writes.yml b/spec/spec_tests/data/unified/valid-pass/poc-retryable-writes.yml index fa882e2836..258043764a 100644 --- a/spec/spec_tests/data/unified/valid-pass/poc-retryable-writes.yml +++ b/spec/spec_tests/data/unified/valid-pass/poc-retryable-writes.yml @@ -2,10 +2,6 @@ description: "poc-retryable-writes" schemaVersion: "1.0" -runOnRequirements: - - minServerVersion: "3.6" - topologies: [ replicaset ] - createEntities: - client: id: &client0 client0 @@ -42,6 +38,9 @@ initialData: tests: - description: "FindOneAndUpdate 
is committed on first attempt" + runOnRequirements: &onPrimaryTransactionalWrite_requirements + - minServerVersion: "3.6" + topologies: [ replicaset ] operations: - name: failPoint object: testRunner @@ -65,6 +64,7 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndUpdate is not committed on first attempt" + runOnRequirements: *onPrimaryTransactionalWrite_requirements operations: - name: failPoint object: testRunner @@ -89,6 +89,7 @@ tests: - { _id: 2, x: 22 } - description: "FindOneAndUpdate is never committed" + runOnRequirements: *onPrimaryTransactionalWrite_requirements operations: - name: failPoint object: testRunner @@ -117,9 +118,7 @@ tests: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.7" - # Original test uses "sharded", but retryable writes requires a sharded - # cluster backed by replica sets - topologies: [ sharded-replicaset ] + topologies: [ sharded ] operations: - name: failPoint object: testRunner @@ -192,6 +191,7 @@ tests: mode: { times: 2 } data: failCommands: [ insert ] + errorLabels: [ RetryableWriteError ] writeConcernError: code: 91 # ShutdownInProgress errmsg: "Replication is being shut down" diff --git a/spec/spec_tests/data/unified/valid-pass/poc-sessions.yml b/spec/spec_tests/data/unified/valid-pass/poc-sessions.yml index e770f7fc6d..a93096a222 100644 --- a/spec/spec_tests/data/unified/valid-pass/poc-sessions.yml +++ b/spec/spec_tests/data/unified/valid-pass/poc-sessions.yml @@ -125,12 +125,11 @@ tests: - description: "Dirty explicit session is discarded" skipReason: RUBY-1813 # Original test specified retryWrites=true, but that is now the default. - # Retryable writes will require a sharded-replicaset, though. runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] operations: - name: failPoint object: testRunner @@ -181,7 +180,7 @@ tests: - commandStartedEvent: &insert_attempt command: insert: *collection0Name - documents: + documents: - { _id: 2 } ordered: true lsid: { $$sessionLsid: *session0 } @@ -192,7 +191,7 @@ tests: - commandStartedEvent: command: insert: *collection0Name - documents: + documents: - { _id: 3 } ordered: true lsid: { $$sessionLsid: *session0 } diff --git a/spec/spec_tests/data/unified/valid-pass/poc-transactions-convenient-api.yml b/spec/spec_tests/data/unified/valid-pass/poc-transactions-convenient-api.yml index 4f981d15dd..94fadda0aa 100644 --- a/spec/spec_tests/data/unified/valid-pass/poc-transactions-convenient-api.yml +++ b/spec/spec_tests/data/unified/valid-pass/poc-transactions-convenient-api.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/spec/spec_tests/data/unified/valid-pass/poc-transactions-mongos-pin-auto.yml b/spec/spec_tests/data/unified/valid-pass/poc-transactions-mongos-pin-auto.yml index 47db7c3188..33cd2a2521 100644 --- a/spec/spec_tests/data/unified/valid-pass/poc-transactions-mongos-pin-auto.yml +++ b/spec/spec_tests/data/unified/valid-pass/poc-transactions-mongos-pin-auto.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/spec/spec_tests/data/unified/valid-pass/poc-transactions.yml b/spec/spec_tests/data/unified/valid-pass/poc-transactions.yml index 
index 0a66b9bd7f..8a12c8b39a 100644
--- a/spec/spec_tests/data/unified/valid-pass/poc-transactions.yml
+++ b/spec/spec_tests/data/unified/valid-pass/poc-transactions.yml
@@ -6,7 +6,7 @@ runOnRequirements:
   - minServerVersion: "4.0"
     topologies: [ replicaset ]
   - minServerVersion: "4.1.8"
-    topologies: [ sharded-replicaset ]
+    topologies: [ sharded ]
 
 createEntities:
   - client:
@@ -51,7 +51,7 @@ tests:
   - description: "explicitly create collection using create command"
     runOnRequirements:
       - minServerVersion: "4.3.4"
-        topologies: [ replicaset, sharded-replicaset ]
+        topologies: [ replicaset, sharded ]
     operations:
       - name: dropCollection
         object: *database0
@@ -109,7 +109,7 @@ tests:
   - description: "create index on a non-existing collection"
     runOnRequirements:
       - minServerVersion: "4.3.4"
-        topologies: [ replicaset, sharded-replicaset ]
+        topologies: [ replicaset, sharded ]
     operations:
       - name: dropCollection
         object: *database0
diff --git a/spec/spec_tests/run_command_unified_spec.rb b/spec/spec_tests/run_command_unified_spec.rb
new file mode 100644
index 0000000000..50878ca4fe
--- /dev/null
+++ b/spec/spec_tests/run_command_unified_spec.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+# rubocop:todo all
+
+require 'spec_helper'
+
+require 'runners/unified'
+
+base = "#{CURRENT_PATH}/spec_tests/data/run_command_unified"
+RUN_COMMAND_UNIFIED_TESTS = Dir.glob("#{base}/**/*.yml").sort
+
+describe 'runCommand unified spec tests' do
+  define_unified_spec_tests(base, RUN_COMMAND_UNIFIED_TESTS)
+end
diff --git a/spec/spec_tests/sdam_unified_spec.rb b/spec/spec_tests/sdam_unified_spec.rb
index 8454edc004..21d6e1cbb3 100644
--- a/spec/spec_tests/sdam_unified_spec.rb
+++ b/spec/spec_tests/sdam_unified_spec.rb
@@ -9,5 +9,7 @@
 SDAM_UNIFIED_TESTS = Dir.glob("#{base}/**/*.yml").sort
 
 describe 'SDAM unified spec tests' do
+  forbid_x509_auth
+
   define_unified_spec_tests(base, SDAM_UNIFIED_TESTS)
 end
diff --git a/spec/support/constraints.rb b/spec/support/constraints.rb
index 47057743fb..6d92409937 100644
--- a/spec/support/constraints.rb
+++ b/spec/support/constraints.rb
@@ -17,6 +17,12 @@ def require_local_tls
     end
   end
 
+  def forbid_x509_auth
+    before(:all) do
+      skip 'X.509 auth not allowed' if SpecConfig.instance.x509_auth?
+    end
+  end
+
   def max_bson_version(version)
     required_version = version.split('.').map(&:to_i)
     actual_version = bson_version(required_version.length)
diff --git a/spec/support/recording_logger.rb b/spec/support/recording_logger.rb
new file mode 100644
index 0000000000..cfed9b0f28
--- /dev/null
+++ b/spec/support/recording_logger.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+# rubocop:todo all
+
+require 'stringio'
+
+# A "Logger-alike" class, quacking like ::Logger, used for recording messages
+# as they are written to the log
+class RecordingLogger < Logger
+  def initialize(*args, **kwargs)
+    @buffer = StringIO.new
+    super(@buffer, *args, **kwargs)
+  end
+
+  # Accesses the raw contents of the log
+  #
+  # @return [ String ] the raw contents of the log
+  def contents
+    @buffer.string
+  end
+
+  # Returns the contents of the log as individual lines.
+  #
+  # @return [ Array ] the individual log lines
+  def lines
+    contents.split(/\n/)
+  end
+end
diff --git a/upload-api-docs b/upload-api-docs
new file mode 100755
index 0000000000..1915efabb0
--- /dev/null
+++ b/upload-api-docs
@@ -0,0 +1,121 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'bundler/inline'
+
+gemfile true do
+  source 'https://rubygems.org'
+  gem 'nokogiri'
+  gem 'aws-sdk-s3'
+  gem 'yard'
+end
+
+require 'aws-sdk-s3'
+require 'optparse'
+require 'yard'
+
+# This class contains logic for uploading API docs to S3.
+class FileUploader
+  def initialize(options)
+    Aws.config.update({
+      region: options[:region],
+      credentials: Aws::Credentials.new(options[:access_key], options[:secret_key])
+    })
+    Aws.use_bundled_cert!
+    @s3 = Aws::S3::Client.new
+    @bucket = options[:bucket]
+    @prefix = options[:prefix]
+    @docs_path = options[:docs_path]
+  end
+
+  def upload_docs
+    Dir.glob("#{@docs_path}/**/*").each do |file|
+      next if File.directory?(file)
+
+      upload_file(file, key(file))
+      print '.'
+      $stdout.flush
+    end
+    puts "\nDone!"
+  end
+
+  private
+
+  def key(file)
+    File.join(@prefix, file.gsub("#{@docs_path}/", ''))
+  end
+
+  def upload_file(file, key)
+    mime_type = mime_type(file)
+    @s3.put_object(bucket: @bucket, key: key, body: File.read(file), content_type: mime_type)
+  end
+
+  def mime_type(file)
+    {
+      '.html' => 'text/html',
+      '.css' => 'text/css',
+      '.js' => 'application/javascript',
+    }.fetch(File.extname(file))
+  end
+end
+
+# This class contains logic for parsing CLI and ENV options.
+class Options
+  def initialize
+    @options = {}
+    parse_cli_options!
+    parse_env_options!
+    @options[:prefix] = 'docs/ruby-driver/current/api'
+    @options[:docs_path] = 'build/public/current/api'
+  end
+
+  def [](key)
+    @options[key]
+  end
+
+  private
+
+  def parse_cli_options!
+    OptionParser.new do |opts|
+      opts.banner = 'Usage: upload-api-docs [options]'
+
+      opts.on('-b BUCKET', '--bucket=BUCKET', 'S3 Bucket to upload') do |b|
+        @options[:bucket] = b
+      end
+      opts.on('-r REGION', '--region=REGION', 'AWS region') do |r|
+        @options[:region] = r
+      end
+    end.parse!
+    %i[bucket region].each do |opt|
+      raise OptionParser::MissingArgument, "Option --#{opt} is required" unless @options[opt]
+    end
+  end
+
+  def parse_env_options!
+    @options[:access_key] = ENV.fetch('DOCS_AWS_ACCESS_KEY_ID') do
+      raise ArgumentError, 'Please provide aws access key via DOCS_AWS_ACCESS_KEY_ID env variable'
+    end
+    @options[:secret_key] = ENV.fetch('DOCS_AWS_SECRET_ACCESS_KEY') do
+      raise ArgumentError, 'Please provide aws secret key via DOCS_AWS_SECRET_ACCESS_KEY env variable'
+    end
+  end
+end
+
+def generate_docs(options)
+  YARD::CLI::Yardoc.run(
+    '.',
+    '--exclude', './.evergreen',
+    '--exclude', './.mod',
+    '--exclude', './examples',
+    '--exclude', './profile',
+    '--exclude', './release',
+    '--exclude', './spec',
+    '--readme', './README.md',
+    '-o', options[:docs_path]
+  )
+end
+
+options = Options.new
+generate_docs(options)
+FileUploader.new(options).upload_docs
+return