diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 61def5908..000000000
--- a/.gitmodules
+++ /dev/null
@@ -1,8 +0,0 @@
-[submodule "mongo-ruby-driver"]
- path = mongo-ruby-driver
- url = https://github.com/mongodb/mongo-ruby-driver.git
- branch = 2.19-stable
-[submodule "bson-ruby"]
- path = bson-ruby
- url = https://github.com/mongodb/bson-ruby.git
- branch = master
diff --git a/bson-ruby b/bson-ruby
deleted file mode 160000
index ea6fe692f..000000000
--- a/bson-ruby
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit ea6fe692f174574deb800bdf909e9c4060c61b77
diff --git a/mongo-ruby-driver b/mongo-ruby-driver
deleted file mode 160000
index 517891b56..000000000
--- a/mongo-ruby-driver
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 517891b56c5e7ef1fd25e825a0c8ff4e3afd5d84
diff --git a/source/.gitignore b/source/.gitignore
new file mode 100644
index 000000000..e35d8850c
--- /dev/null
+++ b/source/.gitignore
@@ -0,0 +1 @@
+_build
diff --git a/source/Makefile b/source/Makefile
new file mode 100644
index 000000000..d4bb2cbb9
--- /dev/null
+++ b/source/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/source/README.md b/source/README.md
new file mode 100644
index 000000000..338b3c297
--- /dev/null
+++ b/source/README.md
@@ -0,0 +1,17 @@
+Ruby MongoDB Driver Documentation
+=================================
+
+This subdirectory contains the high-level driver documentation, including
+tutorials and the reference.
+
+Building the documentation for publishing is done via the
+[docs-ruby repo](https://github.com/mongodb/docs-ruby).
+
+To build the documentation locally for review, install `sphinx` and
+`sphinx-book-theme`, then execute `make html` in this directory:
+
+ pip install sphinx sphinx-book-theme
+ make html
+
+Note that documentation generated in this manner will not include the
+BSON documentation, nor are intersphinx links currently handled.
diff --git a/source/conf.py b/source/conf.py
new file mode 100644
index 000000000..1a27671f0
--- /dev/null
+++ b/source/conf.py
@@ -0,0 +1,57 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'Ruby MongoDB Driver'
+copyright = '2021, MongoDB'
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+source_suffix = {
+ '.txt': 'restructuredtext',
+}
+
+html_theme = 'sphinx_book_theme'
diff --git a/source/contribute.txt b/source/contribute.txt
new file mode 100644
index 000000000..a541b15fa
--- /dev/null
+++ b/source/contribute.txt
@@ -0,0 +1,81 @@
+************************
+Contribute to the Driver
+************************
+
+.. default-domain:: mongodb
+
+Report Bugs and Request Ruby Driver-Specific Features
+=====================================================
+
+To report a bug in the driver or request a feature specific to the Ruby driver:
+
+1. Visit `our issue tracker `_ and login
+ (or create an account if you do not have one already).
+2. Navigate to the `RUBY project `_.
+3. Click :guilabel:`Create Issue` and fill out all of the applicable form
+ fields.
+
+When creating an issue, please keep in mind that all information in JIRA
+for the RUBY project, as well as the core server (the SERVER project),
+is publicly visible.
+
+**PLEASE DO:**
+
+- Provide as much information as possible about the issue.
+- Provide detailed steps for reproducing the issue.
+- Provide any applicable code snippets, stack traces and log data.
+ Do not include any sensitive data or server logs.
+- Specify version numbers of the driver and MongoDB server.
+
+**PLEASE DO NOT:**
+
+- Provide any sensitive data or server logs.
+- Report potential security issues publicly (see 'Security Issues' below).
+
+.. note::
+
+ Bug reports in JIRA for the Ruby driver and the core server (the **SERVER**)
+ projects are public.
+
+If you have identified a potential security vulnerability in the Ruby driver or
+any other MongoDB product, please report it according to the instructions found
+in the :manual:`Create a Vulnerability Report
+`.
+
+
+Request Product Features
+========================
+
+To request a feature which is not specific to the Ruby driver, or which
+affects more than the driver alone (for example, a feature which requires
+MongoDB server support), please submit your idea through the
+`MongoDB Feedback Forum `_.
+
+
+Contribute Code
+===============
+
+The MongoDB Ruby driver source is located
+`at GitHub `_.
+
+The list of known issues in the driver is available
+`in JIRA `_.
+
+We recommend creating a JIRA ticket before starting work on a bug fix or
+an improvement to the driver, to obtain feedback from the Ruby driver team
+on the proposed changes. A JIRA ticket is not required to submit
+a pull request, but it is appreciated, especially for non-trivial changes.
+
+Pull requests should be made against the ``master`` branch and
+include relevant tests, if applicable. The Ruby driver team will backport
+the changes to the stable branches, if needed.
+
+A MongoDB deployment is required to run the tests. Setup procedures and
+recommendations for various deployments, as well as how to configure the
+driver's test suite for the deployments, are covered in the `spec
+readme `__.
+
+The driver is tested on `Evergreen `_,
+MongoDB's in-house continuous integration platform. After a pull request
+is created, one of the Ruby driver team engineers will schedule an Evergreen
+build.
diff --git a/source/getting-started.txt b/source/getting-started.txt
new file mode 100644
index 000000000..b92298a54
--- /dev/null
+++ b/source/getting-started.txt
@@ -0,0 +1,17 @@
+.. _getting-started:
+
+***************
+Getting Started
+***************
+
+.. default-domain:: mongodb
+
+This section describes how to install the driver, the installation
+prerequisites, and compatibility considerations.
+
+.. toctree::
+ :titlesonly:
+
+ installation
+ reference/driver-compatibility
+ support
diff --git a/source/includes/unicode-checkmark.rst b/source/includes/unicode-checkmark.rst
new file mode 100644
index 000000000..16f4e9476
--- /dev/null
+++ b/source/includes/unicode-checkmark.rst
@@ -0,0 +1 @@
+.. |checkmark| unicode:: U+2713
diff --git a/source/includes/unicode-nbsp.rst b/source/includes/unicode-nbsp.rst
new file mode 100644
index 000000000..aa0e52ea7
--- /dev/null
+++ b/source/includes/unicode-nbsp.rst
@@ -0,0 +1,2 @@
+.. |nbsp| unicode:: 0xA0
+ :trim:
diff --git a/source/index.txt b/source/index.txt
new file mode 100644
index 000000000..d8ab98a35
--- /dev/null
+++ b/source/index.txt
@@ -0,0 +1,67 @@
+.. http://www.mongodb.org/display/DOCS/Ruby+Language+Center
+
+.. _ruby-language-center:
+
+*******************
+Ruby MongoDB Driver
+*******************
+
+.. default-domain:: mongodb
+
+Welcome to the documentation site for the official MongoDB Ruby driver.
+You can add the driver to your application to work with MongoDB in
+Ruby.
+
+Get Started
+===========
+
+To get started with the Ruby driver, see :doc:`/installation` and
+:doc:`/tutorials/quick-start`. Continue to :doc:`/tutorials`
+for high level documentation for common operations.
+
+BSON
+====
+
+The Ruby BSON implementation is packaged in a separate gem, with C and
+Java extensions for speed depending on the runtime environment.
+
+For reference on the Ruby BSON gem, see :doc:`/tutorials/bson`.
+
+Object Mappers
+==============
+
+Because MongoDB is so easy to use, the basic Ruby driver can be the
+best solution for many applications. But if you need validations,
+associations, and other high-level data modeling functions, then you
+may need an Object Document Mapper.
+
+In the context of a Rails application, an Object Document Mapper
+provides functionality equivalent to, but distinct from, ActiveRecord.
+Because MongoDB is a document-based database, these mappers are called
+Object Document Mappers (ODM) as opposed to Object Relational Mappers
+(ORM).
+
+The ODM officially supported by MongoDB is Mongoid, originally written
+by Durran Jordan.
+
+For tutorials on Mongoid, see the `Mongoid Manual `_.
+
+.. COMMENT For the actual build, see mongodb/docs-ruby repo which pulls the documentation source from:
+.. mongo-ruby-driver,
+.. bson-ruby, and
+.. mongoid repos.
+
+.. class:: hidden
+
+ .. toctree::
+ :titlesonly:
+
+ getting-started
+ tutorials
+ reference/connection-and-configuration
+ reference/working-with-data
+ reference/schema-operations
+ API
+ release-notes
+ reference/additional-resources
+ contribute
diff --git a/source/installation.txt b/source/installation.txt
new file mode 100644
index 000000000..a5f404b04
--- /dev/null
+++ b/source/installation.txt
@@ -0,0 +1,56 @@
+************
+Installation
+************
+
+.. default-domain:: mongodb
+
+The Ruby driver is released as a gem hosted on `Rubygems
+`_.
+
+
+Prerequisites
+=============
+
+Please see the :ref:`compatibility ` page for the list of
+Ruby versions and MongoDB server versions that this release of the Ruby
+driver is compatible with.
+
+The driver itself is written entirely in Ruby; however, it depends on the
+`bson library `_, which includes a C extension
+for MRI and a compiled Java extension for JRuby. A working C compiler and Ruby
+development headers and libraries are required when installing on MRI.
+When installing on JRuby, a JRE is sufficient because the ``bson`` gem includes
+the compiled extension.
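+
+If you need to manage the ``bson`` dependency explicitly (for example, to
+pin its version), it can be listed in the ``Gemfile`` alongside the driver.
+A minimal sketch; the version constraint shown here is illustrative only:
+
+.. code-block:: ruby
+
+ gem 'mongo', '~> 2'
+ # Illustrative pin of the bson dependency; Bundler normally resolves a
+ # compatible bson version automatically.
+ gem 'bson', '~> 4'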
+
+Connecting to TLS-enabled MongoDB servers, using SCRAM authentication
+(both SCRAM-SHA-1 and SCRAM-SHA-256), and using X.509 authentication (which
+is performed over a TLS connection) all require the Ruby ``openssl`` extension
+to be present and working. The :ref:`TLS compatibility `
+section provides further details on the use of newer TLS protocols such as TLS 1.1.
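+
+To verify that the ``openssl`` extension is available in your Ruby
+installation, a quick check along these lines can be used (a minimal
+sketch, not part of the driver API):
+
+.. code-block:: ruby
+
+ # Raises LoadError if the Ruby openssl extension is not available.
+ require 'openssl'
+ puts OpenSSL::OPENSSL_VERSION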
+
+
+.. _installation:
+
+Install the Gem
+===============
+
+Add ``mongo`` to your ``Gemfile``:
+
+.. code-block:: ruby
+
+ gem "mongo", "~> 2"
+
+To install the driver manually:
+
+.. code-block:: sh
+
+ gem install mongo -v '~> 2'
+
+
+What's New
+==========
+
+Please see the :ref:`release notes ` for the major changes
+in each driver release and the `releases page on GitHub
+`_ for the complete
+list of changes for each release of the driver.
diff --git a/source/meta/404.txt b/source/meta/404.txt
new file mode 100644
index 000000000..4143ded8d
--- /dev/null
+++ b/source/meta/404.txt
@@ -0,0 +1,7 @@
+:orphan:
+
+**************
+File not found
+**************
+
+The URL you requested does not exist or has been removed.
diff --git a/source/nesting-levels.txt b/source/nesting-levels.txt
new file mode 100644
index 000000000..6df05d1cb
--- /dev/null
+++ b/source/nesting-levels.txt
@@ -0,0 +1,17 @@
+This file is not part of the Ruby driver documentation proper; it is an internal
+reference for the nesting levels that other files should use.
+
+Ruby driver documentation nesting levels:
+
+**********
+Page Title
+**********
+
+First Level Heading
+===================
+
+Second Level Heading
+--------------------
+
+Third Level Heading
+```````````````````
diff --git a/source/reference/additional-resources.txt b/source/reference/additional-resources.txt
new file mode 100644
index 000000000..218218f1c
--- /dev/null
+++ b/source/reference/additional-resources.txt
@@ -0,0 +1,200 @@
+.. _ruby-external-resources:
+
+********************
+Additional Resources
+********************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: twocols
+
+There are a number of good resources appearing all over the web for
+learning about MongoDB and Ruby. A useful selection is listed below. If
+you know of others, do let us know.
+
+Screencasts
+===========
+
+- `Introduction to MongoDB - Part I
+ `_
+
+ An introduction to MongoDB via the MongoDB shell.
+
+- `Introduction to MongoDB - Part II
+ `_
+
+ In this screencast, Joon You teaches how to use the Ruby driver to
+ build a simple Sinatra app.
+
+- `Introduction to MongoDB - Part III
+ `_
+
+ For the final screencast in the series, Joon You introduces
+ MongoMapper and Rails.
+
+- `RailsCasts: MongoDB & MongoMapper
+ `_
+
+ Ryan Bates' RailsCast introducing MongoDB and MongoMapper.
+
+- `RailsCasts: Mongoid `_
+
+ Ryan Bates' RailsCast introducing Mongoid.
+
+Presentations
+=============
+
+- `Introduction to MongoDB (Video) `_
+
+ Mike Dirolf's introduction to MongoDB at Pivotal Labs, SF.
+
+- `MongoDB: A Ruby Document Store that doesn't rhyme with 'Ouch'
+ (Slides)
+ `_
+
+ Wynn Netherland's introduction to MongoDB with some comparisons to
+ CouchDB.
+
+- `MongoDB (is) for Rubyists (Slides)
+ `_
+
+ Kyle Banker's presentation on why MongoDB is for Rubyists (and all
+ human-oriented programmers).
+
+Articles
+========
+
+- `Why I Think Mongo is to Databases What Rails was to Frameworks
+ `_
+
+- `What if a key-value store mated with a relational database system?
+ `_
+
+- `Mongo Tips `_
+
+ John Nunemaker's articles on MongoDB and his Mongo Tips blog.
+
+- A series of articles on aggregation with MongoDB and Ruby:
+
+ 1. `Part I: Introduction of Aggregation in MongoDB
+ `_
+
+ #. `Part II: MongoDB Grouping Elaborated
+ `_
+
+ #. `Part III: Introduction to Map-Reduce in MongoDB
+ `_
+
+- `Does the MongoDB Driver Support Feature X?
+ `_
+
+ An explanation of how the MongoDB drivers usually automatically
+ support new database features.
+
+Projects
+========
+
+- `Capistrano Mongo Sync `_
+
+ Sync your local development database with your remote production database using Capistrano.
+
+- `Simple Pub/Sub `_
+
+ A very simple pub/sub system.
+
+- `Mongo Queue `_
+
+ An extensible, thread-safe job/message queueing system that uses
+ MongoDB as the persistent storage engine.
+
+- `Resque-mongo `_
+
+ A port of GitHub's Resque to MongoDB.
+
+- `Mongo Admin `_
+
+ A Rails plugin for browsing and managing MongoDB data. See the `live
+ demo `_.
+
+- `Sinatra Resource `_
+
+ Resource Oriented Architecture (REST) for Sinatra and MongoMapper.
+
+- `NewsMonger `_
+
+ A simple social news application demonstrating MongoMapper and Rails.
+
+- `Data Catalog API `_
+
+ From `Sunlight Labs `_, a non-trivial
+ application using MongoMapper and Sinatra.
+
+- `Watchtower `_
+
+ An example application using Mustache, MongoDB, and Sinatra.
+
+- `Shapado `_
+
+ A question and answer site similar to Stack Overflow. Live version at
+ `shapado.com `_.
+
+.. Does not seem to exist
+.. - `Shorty `_
+.. A URL-shortener written with Sinatra and the MongoDB Ruby driver.
+
+Libraries
+=========
+
+- `ActiveExpando `_
+
+ An extension to ActiveRecord to allow the storage of arbitrary
+ attributes in MongoDB.
+
+- `ActsAsTree (MongoMapper)
+ `_
+
+ ActsAsTree implementation for MongoMapper.
+
+- `Machinist adapter (MongoMapper)
+ `_
+
+ Machinist adapter using MongoMapper.
+
+- `Mongo-Delegate `_
+
+ A delegation library for experimenting with production data without
+ altering it. A quite useful pattern.
+
+- `Remarkable Matchers (MongoMapper)
+ `_
+
+ Testing / Matchers library using MongoMapper.
+
+- `OpenIdAuthentication, supporting MongoDB as the datastore
+ `_
+
+ Brandon Keepers' fork of OpenIdAuthentication supporting MongoDB.
+
+- `MongoTree (MongoRecord)
+ `_
+
+ MongoTree adds parent / child relationships to MongoRecord.
+
+- `Merb_MongoMapper
+ `_
+
+ A plugin for the Merb framework for supporting MongoMapper models.
+
+- `Mongolytics (MongoMapper)
+ `_
+
+ A web analytics tool.
+
+- `Rack-GridFS `_
+
+ A Rack middleware component that creates HTTP endpoints for files
+ stored in GridFS.
diff --git a/source/reference/aggregation.txt b/source/reference/aggregation.txt
new file mode 100644
index 000000000..f9e63f995
--- /dev/null
+++ b/source/reference/aggregation.txt
@@ -0,0 +1,118 @@
+.. _aggregation:
+
+***********
+Aggregation
+***********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 2
+ :class: singlecol
+
+:manual:`Aggregation framework`
+operations process data records and return
+computed results. Aggregation operations group values from
+multiple documents together, and can perform a variety of
+operations on the grouped data to return a single result.
+
+The Aggregation Pipeline
+````````````````````````
+
+The aggregation pipeline is a framework for data aggregation
+modeled on the concept of data processing pipelines. Documents
+enter a multi-stage pipeline that transforms the documents into
+aggregated results.
+
+For a full explanation and a complete list of pipeline stages
+and operators, see the
+:manual:`manual`.
+
+The following example uses the aggregation pipeline on the
+``restaurants`` sample dataset to find
+the total number of 5-star restaurants, grouped by restaurant
+category.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ coll = client['restaurants']
+ aggregation = coll.aggregate([
+ { '$match'=> { 'stars'=> 5 } },
+ { '$unwind'=> '$categories'},
+ { '$group'=> { '_id'=> '$categories', 'fiveStars'=> { '$sum'=> 1 } } }
+ ])
+
+ aggregation.each do |doc|
+ #=> Yields a BSON::Document.
+ end
+
+Inside the ``aggregate`` method, the first pipeline stage filters out
+all documents except those with ``5`` in the ``stars`` field. The
+second stage unwinds the ``categories`` field, which is an array, and
+treats each item in the array as a separate document. The third stage
+groups the documents by category and adds up the number of matching
+5-star results.
+
+Aggregation pipeline stages have a
+:manual:`maximum memory use limit`.
+To handle large datasets, set the ``allowDiskUse`` option to true to enable
+writing data to temporary files.
+
+- You can call the ``allow_disk_use`` method on the ``aggregation``
+ object to get a new object with the option set:
+
+.. code-block:: ruby
+
+ aggregation = coll.aggregate([ ])
+ aggregation_with_disk_use = aggregation.allow_disk_use(true)
+
+- Or you can pass an option to the ``aggregate`` method:
+
+.. code-block:: ruby
+
+ aggregation = coll.aggregate([ ],
+ :allow_disk_use => true)
+
+Single Purpose Aggregation Operations
+`````````````````````````````````````
+
+MongoDB provides helper methods for some aggregation functions,
+including :manual:`count`
+and :manual:`distinct`.
+
+Count
+~~~~~
+
+The following example demonstrates how to use the ``count`` method to
+find the total number of documents which have the exact array
+``[ 'Chinese', 'Seafood' ]`` in the ``categories`` field.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ coll = client['restaurants']
+ count = coll.count({ 'categories' => [ 'Chinese', 'Seafood' ] })
+
+Distinct
+~~~~~~~~
+
+The ``distinct`` helper method eliminates duplicate values from the results
+and returns one record for each unique value.
+
+The following example returns a list of unique values for the
+``categories`` field in the ``restaurants`` collection:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ coll = client['restaurants']
+ aggregation = coll.distinct('categories')
+
+ aggregation.each do |value|
+ #=> Yields each distinct value in the 'categories' field.
+ end
diff --git a/source/reference/authentication.txt b/source/reference/authentication.txt
new file mode 100644
index 000000000..0c30c7056
--- /dev/null
+++ b/source/reference/authentication.txt
@@ -0,0 +1,534 @@
+**************
+Authentication
+**************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 2
+ :class: singlecol
+
+MongoDB supports a variety of
+:manual:`authentication mechanisms `.
+
+For more information about configuring your MongoDB server for each of
+these authentication mechanisms see MongoDB's
+:manual:`online documentation `.
+
+For more information about users and the Ruby driver's helpers for
+user management, see the :ref:`User Management tutorial`.
+
+
+Providing credentials
+=====================
+
+If authentication is enabled, provide credentials when creating a new
+client:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ user: 'test',
+ password: '123',
+ database: 'mydb' )
+
+ # If using a URI:
+ client = Mongo::Client.new("mongodb://test:123@127.0.0.1:27017/mydb")
+
+Authentication credentials can be changed on a client instance to obtain
+a new client using the ``Client#with`` method:
+
+.. code-block:: ruby
+
+ authenticated_client = client.with( user: 'another-user',
+ password: '123' )
+
+It is also possible to change the client's database and credentials in
+one step:
+
+.. code-block:: ruby
+
+ authenticated_music_client = client.with( database: 'music',
+ user: 'test',
+ password: '123' )
+
+
+.. _auth-source:
+
+Auth Source
+===========
+
+A user's auth source is the database where that user's authentication
+credentials are stored.
+
+The user's auth source may be specified whenever the credentials are specified:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ database: 'mydb',
+ user: 'test',
+ password: '123',
+ auth_source: 'admin' )
+
+ # If using a URI:
+ client = Mongo::Client.new("mongodb://test:123@127.0.0.1:27017/mydb?authSource=admin")
+
+If no auth source is specified, then a default will be assumed by the client.
+The default auth source depends on the authentication mechanism that is being
+used to connect.
+
+For the ``MONGODB-CR``, ``SCRAM-SHA-1``, and ``SCRAM-SHA-256`` authentication
+mechanisms, the default auth source is the database to which the client is
+connecting; if no database is specified, the ``admin`` database is the default
+database and hence the default auth source. For the ``PLAIN`` mechanism (LDAP),
+the default auth source is the database to which the client is connecting;
+if no database is specified, the ``$external`` database is used as the
+auth source. For the ``AWS``, ``GSSAPI`` and ``MONGODB_X509`` mechanisms, the
+auth source is always ``$external``.
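+
+For example (a minimal sketch; the credentials shown are placeholders), an
+LDAP user whose credentials are stored in the ``$external`` database can
+specify the auth source explicitly:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ auth_mech: :plain,
+ user: 'ldap-user',
+ password: 'ldap-password',
+ auth_source: '$external',
+ database: 'mydb' )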
+
+When a client is constructed using an SRV URI, the driver will look for URI
+options in a TXT DNS record that corresponds to the SRV record. Thus, for
+example, MongoDB Atlas generally uses the ``admin`` database as its auth
+source, but this is not specified in SRV URIs because the database is given
+as a URI option on the TXT records.
+
+Note that when using SRV URIs, the SRV query and the TXT query are performed
+separately. On systems where DNS resolution is not 100% reliable, the
+failure to look up TXT records can cause authentication errors, as the driver
+may end up using an incorrect auth source. If reliable DNS resolution cannot
+be guaranteed, the auth source can be specified explicitly in SRV URIs as
+a URI option:
+
+.. code-block:: ruby
+
+ Mongo::Client.new("mongodb+srv://username:myRealPassword@cluster0.mongodb.net/test?w=majority&authSource=admin")
+
+.. note::
+
+ When changing the database using the ``with`` method, the auth source is
+ determined in the new ``Client`` instance using the full set of options
+ that applies to it. For example, if the original client had an auth source
+ specified, this auth source would take precedence over the database
+ given in the ``with`` call. If the original client did not have an auth
+ source specified, the new database would be the new auth source, subject
+ to the rules of the authentication mechanism used.
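+
+For example (a minimal sketch; the credential values are placeholders), a
+client constructed with an explicit auth source keeps that auth source when
+the database is changed via ``with``:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ database: 'mydb',
+ user: 'test',
+ password: '123',
+ auth_source: 'admin' )
+
+ # The new client still authenticates against the 'admin' auth source.
+ other_client = client.with(database: 'other_db')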
+
+
+Authentication Mechanisms
+=========================
+
+MongoDB supports several authentication mechanisms, as detailed in this section.
+The authentication mechanism to use can be explicitly specified when a client is
+created; if no authentication mechanism is provided by the application, it is
+selected as follows:
+
+- For MongoDB 4.0 and higher, the client performs SCRAM mechanism negotiation
+ with the server. If the user specified in client configuration permits
+ authentication with SCRAM-SHA-256, then SCRAM-SHA-256 is used for
+ authentication. Otherwise SCRAM-SHA-1 is used.
+- For MongoDB 3.0 through 3.6, SCRAM-SHA-1 is used.
+- For MongoDB 2.6, MONGODB-CR is used.
+
+Note that:
+
+- X.509, AWS, LDAP and Kerberos authentication mechanisms must always be
+ explicitly requested.
+- If the MongoDB server that the client is connecting to supports SCRAM,
+ the client will attempt to authenticate using SCRAM if no authentication
+ mechanism is explicitly specified. To authenticate to MongoDB 3.0 and
+ higher servers using MONGODB-CR, the MONGODB-CR mechanism must be
+ explicitly requested.
+
+.. _scram:
+
+SCRAM
+`````
+
+:manual:`SCRAM authentication ` is the default
+authentication mechanism for MongoDB. There are two SCRAM mechanisms in
+MongoDB: SCRAM-SHA-1 (available as of MongoDB 3.0) and SCRAM-SHA-256
+(available as of MongoDB 4.0). If an authentication mechanism is not
+specified but user credentials are, the driver will attempt to use SCRAM
+authentication on server 3.0 or newer and will negotiate the mechanism
+to use based on the server version and the mechanisms defined for a
+particular user (it is possible to configure a user in the server to only
+allow SCRAM-SHA-1 mechanism, only SCRAM-SHA-256 mechanism or both).
+
+To explicitly specify SCRAM-SHA-1 as the authentication mechanism, use the
+``auth_mech: :scram`` Ruby client option or ``SCRAM-SHA-1`` as the value
+for the ``authMechanism`` URI option, as follows:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ database: 'mydb',
+ user: 'test',
+ password: '123',
+ auth_mech: :scram )
+
+ client = Mongo::Client.new("mongodb://test:123@127.0.0.1:27017/mydb?authMechanism=SCRAM-SHA-1")
+
+To explicitly specify SCRAM-SHA-256 as the authentication mechanism, use the
+``auth_mech: :scram256`` Ruby client option or ``SCRAM-SHA-256`` as the
+value for the ``authMechanism`` URI option, as follows:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ database: 'mydb',
+ user: 'test',
+ password: '123',
+ auth_mech: :scram256 )
+
+ client = Mongo::Client.new("mongodb://test:123@127.0.0.1:27017/mydb?authMechanism=SCRAM-SHA-256")
+
+
+.. _x.509:
+
+Client Certificate (X.509)
+``````````````````````````
+
+The driver presents an X.509 certificate during TLS negotiation.
+The MONGODB-X509 authentication mechanism authenticates a username
+retrieved from the distinguished subject name of this certificate.
+
+.. note::
+
+ Since the username is retrieved from the certificate, a username does not
+ need to be specified. If a username is specified, it will be sent to the
+ server verbatim. If a password is provided, an error will be raised.
+
+This authentication method requires the use of TLS connections with
+certificate validation.
+
+To authenticate the client, you will need a valid TLS certificate
+and private encryption key. These can be stored in separate files,
+or together in one file (in the PEM format). Even if the certificate
+and private key are stored in the same file, you must specify the path to
+that file by passing both the ``ssl_cert`` and ``ssl_key`` options
+to the client.
+
+For more information about configuring X.509 authentication in MongoDB,
+see the :manual:`X.509 tutorial in the MongoDB Manual
+`.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ auth_mech: :mongodb_x509,
+ ssl: true,
+ ssl_cert: '/path/to/client.pem',
+ ssl_key: '/path/to/client.pem',
+ ssl_ca_cert: '/path/to/ca.pem' )
+
+
+.. _aws-auth:
+
+AWS
+```
+
+*Requires MongoDB Enterprise Edition and server version 4.4 or later.*
+
+The AWS authentication mechanism uses AWS `Identity and Access Management (IAM)
+`_
+and AWS `Security Token Service (STS)
+`_
+to prove the client's identity to a MongoDB server. Briefly, AWS authentication
+works as follows:
+
+1. The client uses AWS IAM credentials to create a signature that is sent to
+ the MongoDB server.
+2. The server sends a request to AWS STS using the client's signature.
+3. A successful STS request returns the username (technically, the ARN of
+ the IAM user or role) corresponding to the credentials that the client used.
+ The IAM user ARN is used by the server to look up a defined user, and the
+ client is considered to have authenticated as this user.
+
+.. note::
+
+ Unlike other authentication mechanisms, the username that the application
+ provides when creating a client and the username of the server user are
+ different: the username on the client is the AWS access key ID, but the
+ username on the server is the ARN of the IAM user or role corresponding
+ to the access key ID.
+
+AWS credentials consist of:
+
+- The access key ID.
+- The secret access key.
+- The optional session token.
+
+Authentication with `AWS IAM credentials
+`_
+uses the access key ID and the secret access key. Authentication with
+`temporary AWS IAM credentials
+`_
+uses all three components.
+
+.. note::
+
+ The driver never sends the secret access key or the session token over
+ the network.
+
+Temporary credentials are used with:
+
+- STS `Assume Role `_
+ requests.
+- `EC2 instance roles `_.
+- `ECS task roles `_.
+- `AWS Lambda environment `_.
+- `IAM roles for service accounts `_.
+
+The Ruby driver allows providing both regular and temporary credentials
+explicitly as Ruby options or URI options. If credentials are not explicitly
+provided, the driver will attempt to retrieve them from the environment variables
+described below and from EC2 instance and ECS task metadata endpoints.
+
+Providing Credentials Explicitly
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Regular (non-temporary) IAM credentials can be provided as Ruby options,
+as follows:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['mongodb.example.com'],
+ auth_mech: :aws,
+ user: '<AWS-ACCESS-KEY-ID>',
+ password: '<AWS-SECRET-ACCESS-KEY>',
+ database: 'mydb',
+ )
+
+They can also be provided via a URI:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(
+ 'mongodb://<AWS-ACCESS-KEY-ID>:<AWS-SECRET-ACCESS-KEY>@mongodb.example.com/mydb?authMechanism=MONGODB-AWS')
+
+.. note::
+
+ When credentials are provided via a URI, they must be percent-escaped.
+
+To provide temporary credentials, specify the session token in the
+authentication mechanism properties as follows:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['mongodb.example.com'],
+ auth_mech: :aws,
+ user: '<AWS-ACCESS-KEY-ID>',
+ password: '<AWS-SECRET-ACCESS-KEY>',
+ auth_mech_properties: {
+ aws_session_token: '<AWS-SESSION-TOKEN>',
+ },
+ database: 'mydb',
+ )
+
+The temporary credentials can also be provided via a URI:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(
+ 'mongodb://<AWS-ACCESS-KEY-ID>:<AWS-SECRET-ACCESS-KEY>@mongodb.example.com/mydb?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:<AWS-SESSION-TOKEN>')
+
+.. _auto-retrieve-aws-credentials:
+
+Automatically Retrieving Credentials
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The client can retrieve credentials from the environment or from EC2 or ECS
+metadata endpoints. To retrieve credentials automatically, specify the
+AWS authentication mechanism but do not specify a username or a password:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['mongodb.example.com'],
+ auth_mech: :aws,
+ database: 'mydb',
+ )
+
+ # Using a URI:
+ client = Mongo::Client.new(
+ 'mongodb://mongodb.example.com/mydb?authMechanism=MONGODB-AWS')
+
+The driver will try to obtain credentials from the following sources, in
+the specified order:
+
+- ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY`` and ``AWS_SESSION_TOKEN``
+ environment variables. These environment variables are recognized by
+ a variety of AWS-related libraries and tools such as the official
+ AWS Ruby SDK and the AWS CLI. They are also defined when running in an
+ AWS Lambda environment.
+- The AWS STS `AssumeRoleWithWebIdentity action
+ `_.
+ This returns credentials associated with the service account token. This mechanism
+ requires the following environment variables to be set:
+
+ - ``AWS_WEB_IDENTITY_TOKEN_FILE`` - path to a file containing the service
+ account token.
+ - ``AWS_ROLE_ARN`` - the Amazon Resource Name (ARN) of the role that the
+ caller is assuming.
+ - ``AWS_ROLE_SESSION_NAME`` (optional) - An identifier for the assumed role
+ session. If omitted, a random name will be generated by the driver.
+
+- The AWS `ECS task metadata endpoint
+ `_.
+ This returns credentials associated with the ECS task role assigned to
+ the container.
+- The AWS `EC2 instance metadata endpoint
+ `_.
+ This returns credentials associated with the EC2 instance role assigned to
+ the instance.
+
+.. note::
+
+ A credentials source that provides any credentials must provide a complete
+ set of credentials. For example, the driver will raise an error if only
+ one of the ``AWS_ACCESS_KEY_ID`` or ``AWS_SECRET_ACCESS_KEY`` environment
+ variables is populated but not the other.
+
+.. note::
+
+ If an application is running in an ECS container on an EC2 instance and
+ `the container is allowed access to the instance metadata
+ `_,
+ the driver will attempt to retrieve credentials for the AWS authentication
+ mechanism from the EC2 instance metadata endpoint, thus potentially
+ authenticating as the IAM role assigned to the EC2 instance, if it was not
+ able to retrieve ECS task role credentials from the ECS task endpoint.
+
+
+.. _plain:
+
+LDAP (SASL PLAIN)
+`````````````````
+
+*Requires MongoDB Enterprise Edition.*
+
+MongoDB Enterprise Edition supports the LDAP authentication mechanism,
+which allows you to delegate authentication to a Lightweight Directory
+Access Protocol `LDAP `_ server.
+
+.. warning::
+
+ When using LDAP, passwords are sent to the server in plain text. For this
+ reason, we strongly recommend enabling TLS when using LDAP as your
+ authentication mechanism.
+
+For more information about configuring LDAP authentication in
+MongoDB, see the :manual:`SASL/LDAP tutorial in the MongoDB Manual
+`.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ auth_mech: :plain,
+ ssl: true,
+ ssl_verify: true,
+ ssl_cert: '/path/to/client.pem',
+ ssl_ca_cert: '/path/to/ca.pem' )
+
+
+.. _kerberos:
+
+Kerberos (GSSAPI)
+`````````````````
+
+*Requires MongoDB Enterprise Edition.*
+
+To configure the MongoDB server to use Kerberos, please refer to the
+:manual:`server Kerberos documentation
+`.
+
+To use the Kerberos authentication mechanism with the Ruby MongoDB driver,
+an additional library implementing the Kerberos authenticator -
+`mongo_kerberos `_ - must be
+installed and loaded. To do so, add to your ``Gemfile``:
+
+.. code-block:: ruby
+
+ gem 'mongo', '~> 2'
+ gem 'mongo_kerberos', '~> 2'
+
+... and add to your application code:
+
+.. code-block:: ruby
+
+ require 'mongo'
+ require 'mongo_kerberos'
+
+If using Kerberos authentication with **MRI**, the password is not specified
+in driver configuration and it is not sent to the MongoDB server by the driver.
+Instead a Kerberos session must be established externally to the driver
+and this session is used by the driver to prove the user's identity to
+the server. Establishing this session requires that the host system is
+configured for Kerberos authentication; refer to the `Kerberos documentation
+`_
+or your operating system documentation for details. Use the `kinit utility
+`_
+to establish a Kerberos session.
+
+If using Kerberos authentication with **JRuby**, the Kerberos session may
+be established externally to the driver using the process described above
+for MRI; alternatively, the password may be provided directly to the driver
+via client configuration, or the path to a keytab file may be provided via
+configuration stored in the ``java.security.auth.login.config`` system property.
+Additionally, the Java runtime environment must be configured for Kerberos;
+please refer to the `MongoDB Java Driver Kerberos documentation
+`_
+for more information.
+
+.. note::
+
+ As per the server Kerberos documentation, the FQDN of the host
+ running MongoDB must be specified when using Kerberos authentication.
+
+.. note::
+
+ If using MongoDB URIs, be sure to percent-escape special characters like
+ ``/`` and ``@`` when they appear in the username.
+
+.. code-block:: ruby
+
+ # Authenticate as appuser@MYREALM:
+ client = Mongo::Client.new("mongodb://appuser%40MYREALM@myserver.mycompany.com:27017/mydb?authMechanism=GSSAPI")
+
+ # Authenticate as myapp/appuser@MYREALM:
+ client = Mongo::Client.new("mongodb://myapp%2Fappuser%40MYREALM@myserver.mycompany.com:27017/mydb?authMechanism=GSSAPI")
+
+ # Authenticate using Ruby options:
+ client = Mongo::Client.new(['myserver.mycompany.com:27017'],
+ auth_mech: :gssapi,
+ user: 'myapp/appuser@MYREALM')
+
+
+MONGODB-CR
+``````````
+
+*Deprecated:* The MONGODB-CR mechanism is deprecated as of MongoDB 3.6 and
+removed as of MongoDB 4.0. Please use `SCRAM authentication <#scram>`_ instead.
+
+MONGODB-CR was the default authentication mechanism for MongoDB through
+version 2.6.
+
+The mechanism can be explicitly set with the credentials:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ database: 'mydb',
+ user: 'test',
+ password: '123',
+ auth_mech: :mongodb_cr )
+
+.. note::
+
+ If the MongoDB server that the client is connecting to supports SCRAM,
+ the client will attempt to authenticate using SCRAM if no authentication
+ mechanism is explicitly specified. To authenticate to MongoDB 3.0 and
+ higher servers using MONGODB-CR, the MONGODB-CR mechanism must be
+ explicitly requested.
diff --git a/source/reference/bulk-operations.txt b/source/reference/bulk-operations.txt
new file mode 100644
index 000000000..7f4d59b12
--- /dev/null
+++ b/source/reference/bulk-operations.txt
@@ -0,0 +1,169 @@
+***********
+Bulk Writes
+***********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+.. _bulk-operations:
+
+The bulk write API sends several write operations to the server in a single
+command. Use the bulk write API to reduce the number of network round-trips
+when performing several writes at a time. For example, to efficiently perform
+multiple updates, one might do:
+
+.. code-block:: ruby
+
+ collection = client['colors']
+ collection.bulk_write([
+ {
+ update_one: {
+ filter: {name: 'yellow'},
+ update: {'$set' => {hex: 'ffff00'}},
+ },
+ },
+ {
+ update_one: {
+ filter: {name: 'purple'},
+ update: {'$set' => {hex: '800080'}},
+ },
+ },
+ ], ordered: true, write_concern: {w: :majority})
+
+The following example shows how to execute different types of operations
+in the same request:
+
+.. code-block:: ruby
+
+ collection.bulk_write([
+ { insert_one: { x: 1 } },
+ { update_one: {
+ filter: { x: 1 },
+ update: {'$set' => { x: 2 } },
+ } },
+ { replace_one: {
+ filter: { x: 2 },
+ replacement: { x: 3 },
+ } },
+ ], :ordered => true)
+
+The first argument to ``bulk_write`` is the list of operations to perform.
+Each operation must be specified as a hash with exactly one key which is
+the operation name and the operation specification as the corresponding
+value. The supported operations are detailed below. The ``bulk_write`` method
+also accepts the following options:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``bypass_document_validation``
+ - ``true`` or ``false``. Whether to bypass document validation.
+ * - ``ordered``
+ - If the ``ordered`` option is set to ``true`` (which is the default),
+ the operations are applied in order and if any operation fails, subsequent
+ operations are not attempted. If the ``ordered`` option is set to ``false``,
+ all specified operations are attempted.
+ * - ``write_concern``
+ - The write concern for the operation, specified as a hash.
+
+Valid bulk write operations are the following:
+
+
+insert_one
+==========
+
+.. code-block:: ruby
+
+ { insert_one: { x: 1 } }
+
+.. note::
+
+ There is no ``insert_many`` bulk operation. To insert multiple documents,
+ specify multiple ``insert_one`` operations.
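+
+For example, a minimal sketch inserting several documents in one bulk
+request (reusing the ``collection`` object from the earlier examples):
+
+.. code-block:: ruby
+
+ collection.bulk_write([
+ { insert_one: { x: 1 } },
+ { insert_one: { x: 2 } },
+ { insert_one: { x: 3 } },
+ ])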
+
+
+update_one
+==========
+
+.. code-block:: ruby
+
+ { update_one: {
+ filter: { x: 1 },
+ update: { '$set' => { x: 2 } },
+ # upsert is optional and defaults to false
+ upsert: true,
+ } }
+
+
+update_many
+===========
+
+.. code-block:: ruby
+
+ { update_many: {
+ filter: { x: 1 },
+ update: { '$set' => { x: 2 } },
+ # upsert is optional and defaults to false
+ :upsert => true,
+ } }
+
+
+replace_one
+===========
+
+.. code-block:: ruby
+
+ { replace_one: {
+ filter: { x: 1 },
+ replacement: { x: 2 },
+ # upsert is optional and defaults to false
+ upsert: true,
+ } }
+
+.. note::
+
+ The ``:replace_one`` operation requires that the replacement value is a
+ document. ``:replace_one`` does not recognize MongoDB update operators in
+ the replacement value. In a future release the driver is expected to
+ prohibit using keys beginning with ``$`` in the replacement document.
+
+
+delete_one
+==========
+
+.. code-block:: ruby
+
+ { delete_one: {
+ filter: { x: 1 },
+ } }
+
+
+delete_many
+===========
+
+.. code-block:: ruby
+
+ { delete_many: {
+ filter: { x: 1 },
+ } }
+
+
+Bulk Write Splitting
+====================
+
+The driver allows the application to submit arbitrarily large bulk write
+requests. However, since the MongoDB server limits the size of command documents
+(currently this limit is 48 MiB), bulk writes that exceed this limit will be
+split into multiple requests.
+
+When :ref:`client-side encryption ` is used, the
+threshold used for bulk write splitting is reduced to allow for overhead in
+the ciphertext.
diff --git a/source/reference/change-streams.txt b/source/reference/change-streams.txt
new file mode 100644
index 000000000..e8f896a8c
--- /dev/null
+++ b/source/reference/change-streams.txt
@@ -0,0 +1,213 @@
+.. _change-streams:
+
+**************
+Change Streams
+**************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+As of version 3.6 of the MongoDB server, a new ``$changeStream`` pipeline stage
+is supported in the aggregation framework. Specifying this stage first in an
+aggregation pipeline allows users to request that notifications are sent for all
+changes to a particular collection. As of MongoDB 4.0, change streams are
+supported on databases and clusters in addition to collections.
+
+The Ruby driver provides an API for
+receiving notifications of changes to a particular collection, database
+or cluster using this
+new pipeline stage. Although you can create a change stream using the pipeline
+operator and the aggregation framework directly, it is recommended to use the
+driver API described below, as the driver resumes the change stream once
+if there is a timeout, a network error, a server error indicating that a
+failover is taking place, or another type of resumable error.
+
+Change streams on the server require a ``"majority"`` read concern or no
+read concern.
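+
+For example (a minimal sketch using the driver's standard ``read_concern``
+collection option), a collection handle that reads with the ``"majority"``
+read concern can be obtained as follows:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ collection = client[:test, read_concern: { level: :majority }]
+ stream = collection.watch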
+
+Change streams do not work properly with JRuby because of the issue documented here_.
+Namely, JRuby eagerly evaluates ``#next`` on an Enumerator in a background
+green thread; therefore, calling ``#next`` on the change stream will cause
+getMores to be called in a loop in the background.
+
+.. _here: https://github.com/jruby/jruby/issues/4212
+
+Watching for Changes on a Collection
+====================================
+
+A collection change stream is created by calling the ``#watch`` method on a
+collection:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ collection = client[:test]
+ stream = collection.watch
+ collection.insert_one(a: 1)
+ doc = stream.to_enum.next
+ process(doc)
+
+
+You can also receive the notifications as they become available:
+
+.. code-block:: ruby
+
+ stream = collection.watch
+ enum = stream.to_enum
+ while doc = enum.next
+ process(doc)
+ end
+
+The ``next`` method blocks and polls the cluster until a change is available.
+Use the ``try_next`` method to iterate a change stream without blocking; this
+method will wait up to ``max_await_time_ms`` milliseconds for changes from the server,
+and if no changes are received it will return ``nil``. If there is a non-resumable
+error, both ``next`` and ``try_next`` will raise an exception.
+See the Resuming a Change Stream section below for an example that reads
+changes from a collection indefinitely.
+
+The change stream can take filters in the aggregation framework pipeline
+operator format:
+
+.. code-block:: ruby
+
+ stream = collection.watch([{'$match' => { 'operationType' => {'$in' => ['insert', 'replace'] } } },
+ {'$match' => { 'fullDocument.n' => { '$gte' => 1 } } }
+ ])
+ enum = stream.to_enum
+ while doc = enum.next
+ process(doc)
+ end
+
+Watching for Changes on a Database
+==================================
+
+A database change stream notifies on changes on any collection within the
+database as well as database-wide events, such as the database being dropped.
+
+A database change stream is created by calling the ``#watch`` method on a
+database object:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ database = client.database
+ stream = database.watch
+ client[:test].insert_one(a: 1)
+ doc = stream.to_enum.next
+ process(doc)
+
+
+Watching for Changes on a Cluster
+=================================
+
+A cluster change stream notifies on changes on any collection, any database
+within the cluster as well as cluster-wide events.
+
+A cluster change stream is created by calling the ``#watch`` method on a
+client object (not the cluster object):
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ stream = client.watch
+ client[:test].insert_one(a: 1)
+ doc = stream.to_enum.next
+ process(doc)
+
+
+Closing a Change Stream
+=======================
+
+You can close a change stream by calling its ``#close`` method:
+
+.. code-block:: ruby
+
+ stream.close
+
+
+Resuming a Change Stream
+========================
+
+A change stream consists of two types of operations: the initial aggregation
+and ``getMore`` requests to receive the next batch of changes.
+
+The driver will automatically retry each ``getMore`` operation once on
+network errors and when the server returns an error indicating it changed
+state (for example, it is no longer the primary). The driver does not retry
+the initial aggregation.
+
+In practical terms this means that, for example:
+
+- Calling ``collection.watch`` will fail if the cluster does not have
+ enough available nodes to satisfy the ``"majority"`` read concern.
+- Once ``collection.watch`` successfully returns, if the cluster subsequently
+ experiences an election or loses a node, but heals quickly enough,
+ change stream reads via ``next`` or ``each`` methods will continue
+ transparently to the application.
+
+To indefinitely and reliably watch for changes without losing any changes or
+processing a change more than once, the application must track the resume
+token for the change stream and restart the change stream when it experiences
+extended error conditions that cause the driver's automatic resume to also
+fail. The following code snippet shows an example of iterating a change stream
+indefinitely, retrieving the resume token using the ``resume_token`` change
+stream method and restarting the change stream using the ``:resume_after``
+option on all MongoDB or network errors:
+
+.. code-block:: ruby
+
+ token = nil
+ loop do
+ begin
+ stream = collection.watch([], resume_after: token)
+ enum = stream.to_enum
+ while doc = enum.next
+ process(doc)
+ token = stream.resume_token
+ end
+ rescue Mongo::Error
+ sleep 1
+ end
+ end
+
+The above iteration blocks at the ``enum.next`` call and does not
+permit resuming processing in the event the Ruby process running this code
+is terminated. The driver also provides the ``try_next`` method, which returns
+``nil`` (after a small waiting period) instead of blocking indefinitely when
+there are no changes in the change stream. Using the ``try_next`` method,
+the resume token may be persisted after each ``getMore`` request, even when
+a particular request does not return any changes, so that the resume token
+remains at the top of the oplog and the application has an opportunity to
+persist it should the process handling changes terminate:
+
+.. code-block:: ruby
+
+ token = nil
+ loop do
+ begin
+ stream = collection.watch([], resume_after: token)
+ enum = stream.to_enum
+ doc = enum.try_next
+ if doc
+ process(doc)
+ end
+ token = stream.resume_token
+ # Persist +token+ to support resuming processing upon process restart
+ rescue Mongo::Error
+ sleep 1
+ end
+ end
+
+Note that the resume token should be retrieved from the change stream after
+every ``try_next`` call, even if the call returned no document.
+
+The resume token is also provided in the ``_id`` field of each change stream
+document. Reading the ``_id`` field is not recommended because it may be
+projected out by the application, and because using only the ``_id`` field
+would not advance the resume token when a ``getMore`` returns no documents.
diff --git a/source/reference/collations.txt b/source/reference/collations.txt
new file mode 100644
index 000000000..f58b63f0c
--- /dev/null
+++ b/source/reference/collations.txt
@@ -0,0 +1,311 @@
+**********
+Collations
+**********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+Overview
+========
+
+.. versionadded:: 3.4
+
+Collations are sets of rules for how to compare strings, typically in a
+particular natural language.
+
+For example, in Canadian French, the last accent in a given word
+determines the sorting order.
+
+Consider the following French words:
+
+.. code-block:: none
+
+ cote < coté < côte < côté
+
+The sort order using the Canadian French collation would result in
+the following:
+
+.. code-block:: none
+
+ cote < côte < coté < côté
+
+If collation is unspecified, MongoDB uses the simple binary comparison for
+strings. As such, the sort order of the words would be:
+
+.. code-block:: none
+
+ cote < coté < côte < côté
+
+Usage
+=====
+
+You can specify a default collation for collections and indexes when
+they are created, or specify a collation for CRUD operations and
+aggregations. For operations that support collation, MongoDB uses the
+collection's default collation unless the operation specifies a
+different collation.
+
+Collation Parameters
+~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: ruby
+
+ 'collation' => {
+ 'locale' => <string>,
+ 'caseLevel' => <boolean>,
+ 'caseFirst' => <string>,
+ 'strength' => <integer>,
+ 'numericOrdering' => <boolean>,
+ 'alternate' => <string>,
+ 'maxVariable' => <string>,
+ 'normalization' => <boolean>,
+ 'backwards' => <boolean>
+ }
+
+The only required parameter is ``locale``, which the server parses as
+an `ICU format locale ID `_.
+For example, set ``locale`` to ``en_US`` to represent US English
+or ``fr_CA`` to represent Canadian French.
+
+For a complete description of the available parameters, see the
+:manual:`MongoDB manual entry`.
+
+.. _collation-on-collection:
+
+Assign a Default Collation to a Collection
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following example creates a new collection
+called ``contacts`` on the ``test`` database and assigns a default
+collation with the ``fr_CA`` locale. Specifying a collation when you
+create the collection ensures that all operations involving a query
+that are run against the
+``contacts`` collection use the ``fr_CA`` collation, unless the query
+specifies another collation. Any indexes on the new collection also
+inherit the default collation, unless the creation command specifies
+another collation.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ "127.0.0.1:27017" ], :database => "test")
+ client[:contacts, { "collation" => { "locale" => "fr_CA" } } ].create
+
+.. _collation-on-index:
+
+Assign a Collation to an Index
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To specify a collation for an index, use the ``collation``
+option when you create the index.
+
+The following example creates an index on the ``first_name``
+field of the ``address_book`` collection, with the ``unique`` parameter
+enabled and a default collation with ``locale`` set to ``en_US``.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ "127.0.0.1:27017" ], :database => "test")
+ client[:address_book].indexes.create_one( { "first_name" => 1 },
+ "unique" => true,
+ "collation" => { "locale" => "en_US" }
+ )
+
+To use this index, make sure your queries also specify the same
+collation. The following query uses the above index:
+
+.. code-block:: ruby
+
+ client[:address_book].find({"first_name" : "Adam" },
+ "collation" => { "locale" => "en_US" })
+
+The following queries do **NOT** use the index. The first query uses no
+collation, and the second uses a collation with a different ``strength``
+value than the collation on the index.
+
+.. code-block:: ruby
+
+ client[:address_book].find({"first_name" : "Adam" })
+
+ client[:address_book].find({"first_name" : "Adam" },
+ "collation" => { "locale" => "en_US", "strength" => 2 })
+
+Operations that Support Collation
+=================================
+
+All reading, updating, and deleting methods support collation. Some
+examples are listed below.
+
+``find()`` and ``sort()``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Individual queries can specify a collation to use when matching
+and sorting results. The following query and sort operation uses
+a German collation with the ``locale`` parameter set to ``de``.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ "127.0.0.1:27017" ], :database => "test")
+ docs = client[:contacts].find({ "city" => "New York" },
+ { "collation" => { "locale" => "de" } }).sort( "name" => 1 )
+
+``find_one_and_update()``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A collection called ``names`` contains the following documents:
+
+.. code-block:: javascript
+
+ { "_id" : 1, "first_name" : "Hans" }
+ { "_id" : 2, "first_name" : "Gunter" }
+ { "_id" : 3, "first_name" : "Günter" }
+ { "_id" : 4, "first_name" : "Jürgen" }
+
+The following ``find_one_and_update`` operation on the collection
+does not specify a collation.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ "127.0.0.1:27017" ], :database => "test")
+ doc = client[:names].find_one_and_update( {"first_name" => { "$lt" => "Gunter" }},
+ { "$set" => { "verified" => true } })
+
+Because ``Gunter`` is lexically first in the collection,
+the above operation returns no results and updates no documents.
+
+Consider the same ``find_one_and_update`` operation but with the
+collation specified. The locale is set to ``de@collation=phonebook``.
+
+.. note::
+
+ Some locales have a ``collation=phonebook`` option available for
+ use with languages which sort proper nouns differently from other
+ words. According to the ``de@collation=phonebook`` collation,
+ characters with umlauts come before the same characters without
+ umlauts.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ "127.0.0.1:27017" ], :database => "test")
+ doc = client[:names].find_one_and_update( { "first_name" => { "$lt" => "Gunter" } },
+ { "$set" => { "verified" => true } }, { "collation" => { "locale" => "de@collation=phonebook" },
+ :return_document => :after } )
+
+The operation returns the following updated document:
+
+.. code-block:: javascript
+
+ { "_id" => 3, "first_name" => "Günter", "verified" => true }
+
+``find_one_and_delete()``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set the ``numericOrdering`` collation parameter to ``true``
+to compare numeric strings by their numeric values.
+
+The collection ``numbers`` contains the following documents:
+
+.. code-block:: javascript
+
+ { "_id" : 1, "a" : "16" }
+ { "_id" : 2, "a" : "84" }
+ { "_id" : 3, "a" : "179" }
+
+The following example matches the first document in which field ``a``
+has a numeric value greater than 100 and deletes it.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ "127.0.0.1:27017" ], :database => "test")
+ numbers = client[:numbers]
+ docs = numbers.find_one_and_delete({ "a" => { "$gt" => "100" } },
+ { "collation" => { "locale" => "en", "numericOrdering" => true } })
+
+After the above operation, the following documents remain in the
+collection:
+
+.. code-block:: javascript
+
+ { "_id" : 1, "a" : "16" }
+ { "_id" : 2, "a" : "84" }
+
+If you perform the same operation without collation, the server deletes
+the first document it finds in which the lexical value of ``a`` is
+greater than ``"100"``.
+
+.. code-block:: ruby
+
+ numbers = client[:numbers]
+ docs = numbers.find_one_and_delete({ "a" => { "$gt" => "100" } })
+
+After the above operation, the document in which ``a`` was equal to
+``"16"`` has been deleted, and the following documents remain in the
+collection:
+
+.. code-block:: javascript
+
+ { "_id" : 2, "a" : "84" }
+ { "_id" : 3, "a" : "179" }
+
+``delete_many()``
+~~~~~~~~~~~~~~~~~
+
+You can use collations with all of the bulk write operations available
+in the Ruby driver.
+
+The collection ``recipes`` contains the following documents:
+
+.. code-block:: javascript
+
+ { "_id" : 1, "dish" : "veggie empanadas", "cuisine" : "Spanish" }
+ { "_id" : 2, "dish" : "beef bourgignon", "cuisine" : "French" }
+ { "_id" : 3, "dish" : "chicken molé", "cuisine" : "Mexican" }
+ { "_id" : 4, "dish" : "chicken paillard", "cuisine" : "french" }
+ { "_id" : 5, "dish" : "pozole verde", "cuisine" : "Mexican" }
+
+Setting the ``strength`` parameter of the collation document to ``1``
+or ``2`` causes the server to disregard case in the query filter. The
+following example uses a case-insensitive query filter
+to delete all records in which the ``cuisine`` field matches
+``French``.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ "127.0.0.1:27017" ], :database => "test")
+ recipes = client[:recipes]
+ docs = recipes.delete_many({ "cuisine" => "French" },
+ "collation" => { "locale" => "en_US", "strength" => 1 })
+
+After the above operation runs, the documents with ``_id`` values of
+``2`` and ``4`` are deleted from the collection.
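+
+To verify the result, you could count the remaining documents; only the
+three non-French dishes (``_id`` values ``1``, ``3`` and ``5``) would be
+left. A minimal sketch, assuming the ``recipes`` collection object from
+the example above:
+
+.. code-block:: ruby
+
+  recipes.count_documents({}) # => 3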
+
+Aggregation
+~~~~~~~~~~~
+
+To use collation with an aggregation operation, specify a collation in
+the aggregation options.
+
+The following aggregation example uses a collection called ``names``.
+It groups the documents by the ``first_name`` field, counts the total
+number of results in each group, and sorts the results in German
+phonebook order.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ "127.0.0.1:27017" ], :database => "test")
+ names = client[:names]
+ aggregation = names.aggregate(
+   [
+     { "$group" => { "_id" => "$first_name", "name_count" => { "$sum" => 1 } } },
+     { "$sort" => { "_id" => 1 } }
+   ],
+   { "collation" => { "locale" => "de@collation=phonebook" } }
+ )
+
+ aggregation.each do |doc|
+ #=> Yields a BSON::Document.
+ end
diff --git a/source/reference/collection-tasks.txt b/source/reference/collection-tasks.txt
new file mode 100644
index 000000000..e5437c81c
--- /dev/null
+++ b/source/reference/collection-tasks.txt
@@ -0,0 +1,334 @@
+***********
+Collections
+***********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+MongoDB stores documents in collections. If a collection does not
+exist, MongoDB creates the collection when you first insert a
+document in that collection.
+
+You can also explicitly create a collection with various options,
+such as setting the maximum size or the document validation rules.
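+
+For example, a collection can be created implicitly by a first insert, or
+explicitly with options via ``create``. A minimal sketch (the collection
+names here are arbitrary):
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+
+  # Implicit creation: the collection appears on first insert.
+  client[:events].insert_one(type: 'signup')
+
+  # Explicit creation with options (here, a capped collection).
+  client[:logs, capped: true, size: 10_000].create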
+
+Time Series Collections
+```````````````````````
+
+Time series collections were added in MongoDB 5.0. You can read the documentation
+`here `_.
+
+Time series collections efficiently store sequences of measurements over a
+period of time. Time series data is any data that is collected over time and is
+uniquely identified by one or more unchanging parameters. The unchanging
+parameters that identify your time series data are generally your data
+source's metadata.
+
+Creating a Time Series Collection
+---------------------------------
+
+To create a time series collection, you must explicitly create the
+collection using the time series options:
+
+.. code-block:: ruby
+
+ opts = {
+ time_series: {
+ timeField: "timestamp",
+ metaField: "metadata",
+ granularity: "hours"
+ },
+ expire_after: 604800
+ }
+
+ db['weather', opts].create
+
+When creating a time series collection, specify the following options:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Field
+ - Description
+ * - ``time_series[:timeField]``
+ - Required. The name of the field which contains the date in each time series document.
+ * - ``time_series[:metaField]``
+ - Optional. The name of the field which contains metadata in each time series document. The metadata in the specified field should be data that is used to label a unique series of documents. The metadata should rarely, if ever, change.
+ * - ``time_series[:granularity]``
+ - Optional. Possible values are "seconds", "minutes", and "hours". By default, MongoDB sets the granularity to "seconds" for high-frequency ingestion.
+ * - ``:expire_after``
+ - Optional. Enable the automatic deletion of documents in a time series collection by specifying the number of seconds after which documents expire. MongoDB deletes expired documents automatically.
+
+See the MongoDB `docs `_
+for more information about time series collection options.
+
+Inserting into a Time Series Collection
+---------------------------------------
+
+Inserting into a time series collection is similar to inserting into a regular collection:
+
+.. code-block:: ruby
+
+ db['weather'].insert_many([
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 18, 0, 0, 0),
+ temp: 12
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 18, 4, 0, 0),
+ temp: 11
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 18, 8, 0, 0),
+ temp: 11
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 18, 12, 0, 0),
+ temp: 12
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 18, 16, 0, 0),
+ temp: 16
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 18, 20, 0, 0),
+ temp: 15
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 19, 0, 0, 0),
+ temp: 13
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 19, 4, 0, 0),
+ temp: 12
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 19, 8, 0, 0),
+ temp: 11
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 19, 12, 0, 0),
+ temp: 12
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 19, 16, 0, 0),
+ temp: 17
+ },
+ {
+ metadata: { sensorId: 5578, type: "temperature" },
+ timestamp: Time.utc(2021, 5, 19, 20, 0, 0),
+ temp: 12
+ }
+ ])
+
+
+Querying a Time Series Collection
+---------------------------------
+
+Querying a time series collection is very similar to querying a regular
+collection:
+
+.. code-block:: ruby
+
+ weather = db['weather']
+ weather.find(timestamp: Time.utc(2021, 5, 18, 0, 0, 0)).first
+
+The result of this query:
+
+.. code-block:: ruby
+
+ {
+ "timestamp" => 2021-05-18 00:00:00 UTC,
+ "metadata" => {
+ "sensorId" => 5578,
+ "type" => "temperature"
+ },
+ "temp" => 12,
+ "_id" => BSON::ObjectId('624dfb87d1327a60aeb048d2')
+ }
+
+
+Using the Aggregation Pipeline on a Time Series Collection
+----------------------------------------------------------
+
+The aggregation pipeline can also be used for additional query functionality:
+
+.. code-block:: ruby
+
+ weather.aggregate([
+ {
+ "$project": {
+ date: {
+ "$dateToParts": { date: "$timestamp" }
+ },
+ temp: 1
+ }
+ },
+ {
+ "$group": {
+ _id: {
+ date: {
+ year: "$date.year",
+ month: "$date.month",
+ day: "$date.day"
+ }
+ },
+ avgTmp: { "$avg": "$temp" }
+ }
+ }
+ ]).to_a
+
+The example aggregation pipeline groups all documents by the date of the
+measurement and then returns the average of the temperature measurements
+for each day:
+
+.. code-block:: ruby
+
+ [{
+ "_id" => {
+ "date" => {
+ "year" => 2021,
+ "month" => 5,
+ "day" => 18
+ }
+ },
+ "avgTmp" => 12.833333333333334
+ },
+ {
+ "_id" => {
+ "date" => {
+ "year" => 2021,
+ "month" => 5,
+ "day" => 19
+ }
+ },
+ "avgTmp" => 12.833333333333334
+ }]
+
+See the MongoDB documentation on `time series collections `_
+for more information.
+
+Capped Collections
+``````````````````
+
+Capped collections have maximum size or document counts that prevent
+them from growing beyond maximum thresholds. All capped collections must
+specify a maximum size and may also specify a maximum document count.
+MongoDB removes older documents if a collection reaches the maximum size
+limit before it reaches the maximum document count.
+
+To create a :manual:`capped collection`, use
+the ``capped: true`` option along with a ``size`` in bytes.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ collection = client[:artists, capped: true, size: 10000]
+ collection.create
+ collection.capped? # => true
+
+Convert an Existing Collection to Capped
+````````````````````````````````````````
+
+To convert an existing collection from non-capped to capped, use
+the ``convertToCapped`` command.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ db = client.database
+ db.command({ 'convertToCapped' => 'artists', 'size' => 10000 })
+
+
+Document Validation
+```````````````````
+
+If you're using MongoDB version 3.2 or later, you can use
+:manual:`document validation`.
+Collections with validations compare each inserted or updated
+document against the criteria specified in the validator option.
+Depending on the ``validationLevel`` and ``validationAction``, MongoDB
+either returns a warning, or refuses to insert or update the document
+if it fails to meet the specified criteria.
+
+The following example creates a ``contacts`` collection with a validator
+that specifies that inserted or updated documents should match at
+least one of the following three conditions:
+
+- the ``phone`` field is a string
+- the ``email`` field matches the regular expression ``/@mongodb\.com$/``
+- the ``status`` field is either ``Unknown`` or ``Incomplete``
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ client[:contacts,
+   {
+     'validator' => { '$or' =>
+       [
+         { 'phone' => { '$type' => "string" } },
+         { 'email' => { '$regex' => /@mongodb\.com$/ } },
+         { 'status' => { '$in' => [ "Unknown", "Incomplete" ] } }
+       ]
+     }
+   }
+ ].create
+
+Add Validation to an Existing Collection
+````````````````````````````````````````
+
+To add document validation criteria to an existing collection, use the
+``collMod`` command. The example below demonstrates how to add
+validation to the ``contacts`` collection, requiring that all new
+documents contain an ``age`` field whose value is a number.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ db = client.database
+ db.command({ 'collMod' => 'contacts',
+ 'validator' =>
+ { 'age' =>
+ { '$type' => "number" }
+ }
+ })
+
+Listing Collections
+```````````````````
+
+Use the ``collections`` or ``collection_names`` methods on a database
+object to list collections:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ database = client.database
+
+ database.collections # Returns an array of Collection objects.
+ database.collection_names # Returns an array of collection names as strings.
+
+Dropping Collections
+````````````````````
+
+To drop a collection, call ``drop`` on the collection object.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ artists = client[:artists]
+ artists.drop
diff --git a/source/reference/connection-and-configuration.txt b/source/reference/connection-and-configuration.txt
new file mode 100644
index 000000000..7532e73f9
--- /dev/null
+++ b/source/reference/connection-and-configuration.txt
@@ -0,0 +1,18 @@
+.. _connection-and-configuration:
+
+**************************
+Connection & Configuration
+**************************
+
+.. default-domain:: mongodb
+
+This section describes how to create the client objects and what configuration
+options the driver provides, including authentication.
+
+.. toctree::
+ :titlesonly:
+
+ /reference/create-client
+ /reference/authentication
+ /reference/monitoring
+ /reference/user-management
diff --git a/source/reference/create-client.txt b/source/reference/create-client.txt
new file mode 100644
index 000000000..44630ee55
--- /dev/null
+++ b/source/reference/create-client.txt
@@ -0,0 +1,2161 @@
+*****************
+Creating a Client
+*****************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 2
+ :class: singlecol
+
+Using ``Mongo::Client``
+=======================
+
+To connect to a MongoDB deployment, create a ``Mongo::Client`` object.
+Provide a list of hosts and options or a :manual:`connection string URI
+` to the ``Mongo::Client`` constructor.
+The client's selected database defaults to ``admin``.
+
+By default, the driver will automatically detect the topology used by the
+deployment and connect appropriately.
+
+To connect to a local standalone MongoDB deployment, specify the host and
+port of the server. In most cases you would also specify the database name
+to connect to; if no database name is specified, the client will use the
+``admin`` database:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '127.0.0.1:27017' ], database: 'mydb')
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://127.0.0.1:27017/mydb")
+
+.. note::
+
+ The hostname ``localhost`` is treated specially by the driver and will
+ be resolved to IPv4 addresses only.
+
+To `connect to MongoDB Atlas `_,
+specify the Atlas deployment URI:
+
+.. code-block:: ruby
+
+ Mongo::Client.new("mongodb+srv://username:myRealPassword@cluster0.mongodb.net/mydb?w=majority")
+
+The driver will discover all nodes in the cluster and connect to them as
+needed.
+
+Block Syntax
+------------
+
+Another way to create a ``Mongo::Client`` object is to use the block syntax:
+
+.. code-block:: ruby
+
+ Mongo::Client.new(...) do |client|
+ # work with the client
+ end
+
+Note that when creating a client using this syntax, the client is automatically closed after the block finishes executing.
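+
+For example, a concrete (hypothetical) use of the block syntax might look
+like this; the client is closed automatically when the block returns:
+
+.. code-block:: ruby
+
+  Mongo::Client.new([ '127.0.0.1:27017' ], database: 'mydb') do |client|
+    client[:artists].find.to_a
+  end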
+
+Database Selection
+==================
+
+By default, the client will connect to the ``admin`` database.
+
+The ``admin`` database is a special database in MongoDB often used for
+administrative tasks and storing administrative data such as users and
+roles (although users and roles may also be defined in other databases).
+In a sharded cluster, the ``admin`` database
+:manual:`exists on the config servers `
+rather than the shard servers. Although it is possible to use the ``admin``
+database for ordinary operations (such as storing application data), this
+is not recommended and the application should explicitly specify the
+database it wishes to use.
+
+The database can be specified during ``Client`` construction:
+
+.. code-block:: ruby
+
+ # Using Ruby client options:
+ client = Mongo::Client.new(['localhost'], database: 'mydb')
+
+ # Using a MongoDB URI:
+ client = Mongo::Client.new('mongodb://localhost/mydb')
+
+Given a ``Client`` instance, the ``use`` method can be invoked to obtain a
+new ``Client`` instance configured with the specified database:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['localhost'], database: 'mydb')
+
+ admin_client = client.use('admin')
+
+ # Issue an administrative command
+ admin_client.database.command(replSetGetConfig: 1).documents.first
+
+There are other special databases in MongoDB which should be only used for
+their stated purposes:
+
+- The :manual:`config ` database.
+- The :manual:`local ` database.
+- The ``$external`` database, which is used with :ref:`PLAIN `,
+ :ref:`Kerberos ` and :ref:`X.509 ` authentication
+ mechanisms.
+
+
+Connection Types
+================
+
+The driver will, by default, discover the type of deployment it is instructed
+to connect to (except for load-balanced deployments)
+and behave in the manner that matches the deployment type.
+The subsections below describe how the driver behaves in each of the deployment
+types as well as how to force particular behavior, bypassing automatic
+deployment type detection.
+
+Note that the detection of deployment type happens when the driver receives
+the first reply from any of the servers it is instructed to connect to
+(unless the load-balancing mode is requested, see below). The driver will
+remain in the discovered or configured topology even if the underlying
+deployment is replaced by one of a different type. In particular, when
+replacing a replica set with a sharded cluster at the same address
+the client instance must be recreated (such as by restarting the application)
+for it to communicate with the sharded cluster.
+
+Automatic discovery of load-balanced deployments is currently not supported.
+Load-balanced deployments will be treated as deployments of their underlying
+type, which would generally be sharded clusters. The driver will fail to
+correctly operate when treating a load-balanced deployment as a sharded
+cluster; therefore, when the deployment is load-balanced, the client
+must be explicitly configured to :ref:`connect to a load balancer
+`.
+
+
+Standalone Server Connection
+----------------------------
+
+If the deployment is a single server, also known as a standalone deployment,
+all operations will be directed to the specified server.
+
+If the server is shut down and replaced by a replica set node, the driver
+will continue sending all operations to that node, even if the node is or
+becomes a secondary.
+
+To force a standalone connection, see the :ref:`direct connection
+` section below.
+
+
+.. _connect-replica-set:
+
+Replica Set Connection
+----------------------
+
+When connecting to a :manual:`replica set`, it is sufficient
+to pass the address of any node in the replica set to the driver.
+The node does not have to be the primary and it may be a hidden node.
+The driver will then automatically discover the remaining nodes.
+
+However, it is recommended to specify all nodes that are part of the
+replica set, so that in the event of one or more nodes being unavailable
+(for example, due to maintenance or reconfiguration) the driver can still
+connect to the replica set.
+
+Replica set connection examples:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '127.0.0.1:27017' ], database: 'mydb')
+
+ Mongo::Client.new([ '127.0.0.1:27017', '127.0.0.1:27018' ], database: 'mydb')
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://127.0.0.1:27017,127.0.0.1:27018/mydb")
+
+To make the driver verify the replica set name upon connection, pass it using
+the ``replica_set`` Ruby option or the ``replicaSet`` URI option:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '127.0.0.1:27017', '127.0.0.1:27018' ],
+ database: 'mydb', replica_set: 'myapp')
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://127.0.0.1:27017,127.0.0.1:27018/mydb?replicaSet=myapp")
+
+If the deployment is not a replica set or uses a different replica set name,
+all operations will fail (until the expected replica set is returned by
+the servers).
+
+It is also possible to force a replica set connection without specifying
+the replica set name. Doing so is generally unnecessary and is deprecated:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '127.0.0.1:27017', '127.0.0.1:27018' ],
+ database: 'mydb', connect: :replica_set)
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://127.0.0.1:27017,127.0.0.1:27018/mydb?connect=replica_set")
+
+To connect to a MongoDB Atlas cluster which is deployed as a replica set,
+connect to the URI:
+
+.. code-block:: ruby
+
+ Mongo::Client.new("mongodb+srv://username:myRealPassword@cluster0.mongodb.net/test?w=majority")
+
+Please review the :ref:`SRV URI notes ` if using SRV URIs.
+
+
+.. _connect-sharded-cluster:
+
+Sharded Cluster Connection
+--------------------------
+
+To connect to a :manual:`sharded cluster` deployment, specify
+the addresses of the ``mongos`` routers:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '1.2.3.4:27017', '1.2.3.5:27017' ], database: 'mydb')
+
+ Mongo::Client.new("mongodb://1.2.3.4:27017,1.2.3.5:27017/mydb")
+
+Note that unlike a replica set connection, you may choose to connect to a
+subset of the ``mongos`` routers that exist in the deployment. The driver
+will monitor each router and will use the ones that are available
+(i.e., the driver will generally handle individual routers becoming
+unavailable due to failures or maintenance). When specifying the list of
+routers explicitly, the driver will not discover remaining routers that
+may be configured and will not attempt to connect to them.
+
+The driver will automatically balance the operation load among the routers
+it is aware of.
+
+To connect to a MongoDB Atlas cluster which is deployed as a sharded cluster,
+connect to the URI:
+
+.. code-block:: ruby
+
+ Mongo::Client.new("mongodb+srv://username:myRealPassword@cluster0.mongodb.net/test?w=majority")
+
+When the driver connects to a sharded cluster via an SRV URI, it will
+periodically poll the SRV records of the address specified in the URI
+for changes and will automatically add and remove the ``mongos`` hosts
+to/from its list of servers as they are added and removed to/from the
+sharded cluster.
+
+To force a sharded cluster connection, use the ``connect: :sharded``
+option. Doing so is generally unnecessary and is deprecated:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '127.0.0.1:27017', '127.0.0.1:27018' ],
+ database: 'mydb', connect: :sharded)
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://127.0.0.1:27017,127.0.0.1:27018/mydb?connect=sharded")
+
+Please review the :ref:`SRV URI notes ` if using SRV URIs.
+
+
+.. _direct-connection:
+
+Direct Connection
+-----------------
+
+To disable the deployment type discovery and force all operations to be
+performed on a particular server, specify the ``direct_connection`` option:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '1.2.3.4:27017' ], database: 'mydb', direct_connection: true)
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://1.2.3.4:27017/mydb?directConnection=true")
+
+Alternatively, the deprecated ``connect: :direct`` option is equivalent:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '1.2.3.4:27017' ], database: 'mydb', connect: :direct)
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://1.2.3.4:27017/mydb?connect=direct")
+
+The direct connection mode is most useful for performing operations on a
+particular replica set node, although it also permits the underlying server
+to change type (e.g. from a replica set node to a ``mongos`` router, or vice
+versa).
+
+
+.. _load-balancer-connection:
+
+Load Balancer Connection
+------------------------
+
+Unlike other deployment types, the driver does not currently automatically
+detect a load-balanced deployment.
+
+To connect to a load balancer, specify the ``load_balanced: true`` Ruby option
+or the ``loadBalanced=true`` URI option:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '1.2.3.4:27017' ], database: 'mydb', load_balanced: true)
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://1.2.3.4:27017/mydb?loadBalanced=true")
+
+When using these options, if the specified server is not a load balancer,
+the client will fail all operations (until the server becomes a load balancer).
+
+To treat the server as a load balancer even if it doesn't identify as such,
+use the ``connect: :load_balanced`` Ruby option or the ``connect=load_balanced``
+URI option:
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '1.2.3.4:27017' ],
+ database: 'mydb', load_balanced: true, connect: :load_balanced)
+
+ # Or using the URI syntax:
+ Mongo::Client.new("mongodb://1.2.3.4:27017/mydb?loadBalanced=true&connect=load_balanced")
+
+MongoDB Atlas Connection
+------------------------
+
+To connect to a MongoDB deployment on Atlas, first create a ``Mongo::Client`` instance using your
+cluster's connection string and other client options.
+
+You can set the `Stable API `_ version as
+a client option to avoid breaking changes when you upgrade to a new server version.
+
+The following code shows how you can specify the connection string and the Stable API client option
+when connecting to a MongoDB deployment and verify that the connection is successful:
+
+.. code-block:: ruby
+
+ require 'mongo'
+
+ # Replace the placeholders with your credentials
+ uri = "mongodb+srv://:@cluster0.sample.mongodb.net/?retryWrites=true&w=majority"
+
+ # Set the server_api field of the options object to Stable API version 1
+ options = { server_api: { version: "1" } }
+
+ # Create a new client and connect to the server
+ client = Mongo::Client.new(uri, options)
+
+ # Send a ping to confirm a successful connection
+ begin
+ admin_client = client.use('admin')
+ result = admin_client.database.command(ping: 1).documents.first
+ puts "Pinged your deployment. You successfully connected to MongoDB!"
+ rescue Mongo::Error::OperationFailure => ex
+ puts ex
+ ensure
+ client.close
+ end
+
+Connect to MongoDB Atlas from AWS Lambda
+----------------------------------------
+
+To learn how to connect to Atlas from AWS Lambda, see the
+`Manage Connections with AWS Lambda `__
+documentation.
+
+.. _srv-uri-notes:
+
+SRV URI Notes
+=============
+
+When the driver connects to a
+:manual:`mongodb+srv protocol `
+URI, keep in mind the following:
+
+1. SRV URI lookup is performed synchronously when the client is constructed.
+ If this lookup fails for any reason, client construction will fail with an
+ exception. When a client is constructed with a list of hosts, the driver
+ will attempt to contact and monitor those hosts for as long as the client
+ object exists. If one of these hosts does not resolve initially but becomes
+ resolvable later, the driver will be able to establish a connection to such
+ a host when it becomes available. The initial SRV URI lookup must succeed
+ on the first attempt; subsequent host lookups will be retried by the driver
+ as needed.
+2. The driver looks up URI options in the DNS TXT records corresponding to the
+ SRV records. These options can be overridden by URI options specified in the
+ URI and by Ruby options, in this order.
+3. Because the URI options are retrieved in a separate DNS query from the
+ SRV lookup, in environments with unreliable network connectivity
+ the URI option query may fail when the SRV lookup succeeds. Such a failure
+ would cause the driver to use the wrong auth source leading to
+ authentication failures. This can be worked around by explicitly specifying
+ the auth source:
+
+ .. code-block:: ruby
+
+ Mongo::Client.new("mongodb+srv://username:myRealPassword@cluster0.mongodb.net/test?w=majority&authSource=admin")
+
+4. If the topology of the constructed ``Client`` object is unknown or a
+ sharded cluster, the driver will begin monitoring the specified SRV DNS
+ records for changes and will automatically update the list of servers in the
+ cluster. The updates will stop if the topology becomes a standalone
+ (single server) or a replica set.
+
+
+.. _client-options:
+
+Client Options
+==============
+
+``Mongo::Client``'s constructor accepts a number of options configuring the
+behavior of the driver. The options can be provided in the options hash as
+Ruby options, in the URI as URI options, or both. If both a Ruby option and
+the analogous URI option are provided, the Ruby option takes precedence.
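+
+For example, in the following (hypothetical) client both the URI and the
+Ruby options specify a maximum pool size; the Ruby option wins and the
+resulting pool size is 30:
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new("mongodb://127.0.0.1:27017/mydb?maxPoolSize=10",
+    max_pool_size: 30)
+
+  client.options[:max_pool_size] # => 30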
+
+
+Ruby Options
+------------
+
+.. note::
+
+ The options passed directly should be symbols.
+
+.. note::
+
+ Unless otherwise specified, Ruby options that deal with times are given in
+ seconds.
+
+.. list-table::
+ :header-rows: 1
+ :widths: 25 40 10 15
+
+ * - Option
+ - Description
+ - Type
+ - Default
+
+ * - ``:app_name``
+ - Application name that is printed to the mongod logs upon establishing a connection
+ in server versions >= 3.4.
+ - ``String``
+ - none
+
+ * - ``:auth_mech``
+ - Specifies the authentication mechanism to use. Can be one of:
+ ``:gssapi``, ``:mongodb_cr``, ``:mongodb_x509``, ``:plain``,
+ ``:scram``, ``:scram256``. GSSAPI (Kerberos) authentication
+ :ref:`requires additional dependencies `.
+ - ``Symbol``
+ - If user credentials are not supplied, ``nil``. If user credentials
+ are supplied, the default depends on server version.
+ MongoDB 4.0 and later: ``:scram256`` if user credentials correspond
+ to a user which supports SCRAM-SHA-256 authentication, otherwise
+ ``:scram``.
+ MongoDB 3.0-3.6: ``:scram``.
+ MongoDB 2.6: ``:mongodb_cr``
+
+ * - ``:auth_mech_properties``
+ - Provides additional authentication mechanism properties.
+
+ The keys in properties are interpreted case-insensitively.
+ When the client is created, keys are lowercased.
+
+ - ``Hash``
+ - When using the GSSAPI authentication mechanism, the default properties
+ are ``{service_name: "mongodb"}``. Otherwise the default is nil.
+
+ * - ``:auth_source``
+ - Specifies the authentication source.
+ - ``String``
+ - For MongoDB 2.6 and later: **admin** if credentials are
+ supplied, otherwise the current database
+
+ * - ``:auto_encryption_options``
+ - A ``Hash`` of options for configuring automatic encryption.
+
+ - ``:key_vault_client`` - A client connected to the MongoDB instance
+ storing the encryption data keys (``Mongo::Client``, defaults to the
+ top-level client instance).
+ - ``:key_vault_namespace`` - The namespace of the key vault collection
+ in the format ``"database.collection"`` (``String``, required).
+ - ``:kms_providers`` - Key management service configuration information.
+ One or both of the keys ``:local`` and ``:aws`` must be specified
+ (``Hash``, required). See the "The ``kms_providers`` option" section of the
+ :ref:`Client-Side Encryption tutorial` for more
+ information about this option.
+ - ``:schema_map`` - The JSONSchema for one or more collections specifying
+ which fields should be encrypted (``Hash``, optional, defaults to ``nil``).
+ - ``:bypass_auto_encryption`` - Whether to skip automatic encryption when
+ performing database operations (``Boolean``, defaults to ``false``).
+ - ``:extra_options`` - Options related to spawning mongocryptd (``Hash``,
+ optional, defaults to ``nil``).
+
+ For more information about formatting these options, see the
+ "Auto-Encryption Options" section of the :ref:`Client-Side Encryption tutorial`.
+ - ``Hash``
+ - none
+
+ * - ``:bg_error_backtrace``
+ - Experimental. Controls whether and how backtraces are logged when
+ errors occur in background threads. If ``true``, the driver will log
+ complete backtraces. If set to a positive integer, the driver will
+ log up to that many backtrace lines. If set to ``false`` or ``nil``,
+ no backtraces will be logged. Other values are an error.
+ - ``true``, ``false``, ``nil``, ``Integer``
+ - none
+
+ * - ``:compressors``
+ - A list of potential compressors to use, in order of preference.
+ Please see below for details on how the driver implements compression.
+ - ``Array``
+ - none
+
+ * - ``:connect``
+ - **Deprecated.** Disables deployment topology discovery normally
+ performed by the driver and forces the cluster topology to a specific
+ type. Valid values are ``:direct``, ``:load_balanced``,
+ ``:replica_set`` or ``:sharded``. If ``:load_balanced`` is used,
+ the client will behave as if it is connected to a load balancer
+ regardless of whether the server(s) it connects to advertise themselves
+ as load balancers.
+ - ``Symbol``
+ - none
+
+ * - ``:connect_timeout``
+ - The number of seconds to wait to establish a socket connection
+ before raising an exception. This timeout is also used for SRV DNS
+ record resolution. ``nil`` and ``0`` mean no timeout.
+ Client creation will fail with an error if an invalid timeout value
+ is passed (such as a negative value or a non-numeric value).
+ - ``Float``
+ - 10
+
+ * - ``:database``
+ - The name of the database to connect to.
+ - ``String``
+ - admin
+
+ * - ``:direct_connection``
+ - Connect directly to the specified host, do not discover deployment
+ topology.
+ - ``Boolean``
+ - false
+
+ * - ``:heartbeat_frequency``
+ - The number of seconds for the server monitors to refresh
+ server states asynchronously.
+ - ``Float``
+ - 10
+
+ * - ``:id_generator``
+ - A custom object to generate ids for documents. Must respond to #generate.
+ - ``Object``
+ - none
+
+ * - ``:load_balanced``
+ - Whether to expect to connect to a load balancer.
+ - ``Boolean``
+ - false
+
+ * - ``:local_threshold``
+ - Specifies the maximum latency in seconds between the nearest
+ server and the servers that can be available for selection to operate on.
+ - ``Float``
+ - 0.015
+
+ * - ``:logger``
+ - A custom logger.
+ - ``Object``
+ - ``Logger``
+
+ * - ``:max_connecting``
+ - The maximum number of connections that the connection pool will try to establish in parallel.
+ - ``Integer``
+ - 2
+
+ * - ``:max_idle_time``
+ - The maximum time, in seconds, that a connection can be idle before it
+ is closed by the connection pool.
+
+ *Warning:* when connected to a load balancer, the driver uses existing
+ connections for iterating cursors (which includes change streams)
+ and executing transactions. Setting an idle time via this option may
+ cause the driver to close connections that are needed for subsequent
+ operations, causing those operations to fail.
+ - ``Integer``
+ - none
+
+ * - ``:max_pool_size``
+ - The maximum size of the connection pool for each server.
+ Setting this option to zero removes the max size limit from the connection pool, permitting it to grow to any number of connections.
+ - ``Integer``
+ - 20
+
+ * - ``:max_read_retries``
+ - The maximum number of read retries, when legacy read retries are used.
+ Set to 0 to disable legacy read retries.
+ - ``Integer``
+ - 1
+
+ * - ``:max_write_retries``
+ - The maximum number of write retries, when legacy write retries are used.
+ Set to 0 to disable legacy write retries.
+ - ``Integer``
+ - 1
+
+ * - ``:min_pool_size``
+ - The minimum number of connections in the connection pool for each
+ server. The driver will establish connections in the background until
+ the pool contains this many connections.
+ - ``Integer``
+ - 0
+
+ * - ``:monitoring``
+ - The monitoring object.
+ - ``Object``
+ - none
+
+ * - ``:password``
+ - The password of the user to authenticate with.
+ - ``String``
+ - none
+
+ * - ``:platform``
+ - Platform information to include in the metadata printed to the mongod logs upon establishing a
+ connection in server versions >= 3.4.
+ - ``String``
+ - none
+
+ * - ``:read``
+ - Specifies the read preference mode and tag sets for selecting servers
+ as a ``Hash``. Allowed Keys in the hash are ``:mode``, ``:tag_sets`` and
+ ``:max_staleness``.
+
+ .. code-block:: ruby
+
+ { read:
+ { mode: :secondary,
+ tag_sets: [ "data_center" => "berlin" ],
+ max_staleness: 5,
+ }
+ }
+
+ If tag sets are provided, they must be an array of hashes. A server
+ satisfies the read preference if its tags match any one hash in the
+ provided tag sets.
+
+ Each tag set must be a hash, and will be converted internally to
+ a ``BSON::Document`` instance prior to being used for server selection.
+ Hash keys can be strings or symbols. The keys are case sensitive.
+ Hash values must be strings, and are matched exactly against the values
+ in the replica set configuration.
+
+ - ``Hash``
+ - ``{ :mode => :primary }``
+
+ * - ``:read_concern``
+ - Specifies the read concern options. The only valid key is ``level``,
+ for which the valid values are ``:local``, ``:majority``, and
+ ``:snapshot``.
+ - ``Hash``
+ - none
+
+ * - ``:read_retry_interval``
+ - The interval, in seconds, in which reads on a mongos are retried.
+ - ``Integer``
+ - 5
+
+ * - ``:replica_set``
+ - When connecting to a replica set, this is the name of the set to
+ filter servers by.
+ - ``String``
+ - none
+
+ * - ``:retry_writes``
+ - If a single-statement write operation fails from a network error, the driver automatically retries it once
+ when connected to server versions 3.6+.
+ - ``Boolean``
+ - true
+
+ * - ``:sdam_proc``
+ - Since the client begins monitoring the deployment in background as
+ soon as it is constructed, constructing a client and then subscribing
+ to :ref:`SDAM ` events in a separate statement may result in the
+ subscriber not receiving some of the SDAM events. The ``:sdam_proc``
+ option permits adding event subscribers on the client being constructed
+ before any SDAM events are published.
+
+ Pass a ``Proc`` which will be called with the ``Client`` as the argument
+ after the client's event subscription mechanism has been initialized
+ but before any of the servers are added to the client. Use this
+ ``Proc`` to set up SDAM event subscribers on the client.
+
+ Note: the client is not fully constructed when the ``Proc`` provided in
+ ``:sdam_proc`` is invoked; in particular, the cluster is nil at this time.
+ The ``:sdam_proc`` procedure should limit itself to calling the
+ ``Client#subscribe`` and ``Client#unsubscribe`` methods on the
+ passed client only.
+ - ``Proc``
+ - none
+
+ * - ``:server_api``
+ - The server API version requested.
+ This is a hash with the following allowed items:
+
+ - ``:version`` (String)
+ - ``:strict`` (true or false)
+ - ``:deprecation_errors`` (true or false)
+
+ Note that the server API version can only be specified as a Ruby option,
+ not as a URI option, and it cannot be overridden for database and
+ collection objects.
+
+ If server API version is changed on a client (such as via the ``with``
+ call), the entire API version hash is replaced with the new specification
+ (the old and the new individual fields are NOT merged).
+ - ``Hash``
+ - none
+
+ * - ``:server_selection_timeout``
+ - The number of seconds to wait for an appropriate server to
+ be selected for an operation to be executed before raising an exception.
+ - ``Float``
+ - 30
+
+ * - ``:socket_timeout``
+ - The number of seconds to wait for an operation to execute on a
+ socket before raising an exception. ``nil`` and ``0`` mean no timeout.
+ Client creation will fail with an error if an invalid timeout value
+ is passed (such as a negative value or a non-numeric value).
+ - ``Float``
+ - none
+
+ * - ``:srv_max_hosts``
+ - The maximum number of mongoses that the driver will communicate with
+ for sharded topologies. If this option is set to 0, there will
+ be no maximum number of mongoses. If the given URI resolves
+ to more hosts than ``:srv_max_hosts``, the client will randomly
+ choose an ``:srv_max_hosts`` sized subset of hosts. Note that the
+ hosts that the driver ignores during client construction will never
+ be used. If the hosts chosen by the driver become unavailable, the
+ client will quit working completely, even though the deployment has
+ other functional mongoses.
+ - ``Integer``
+ - 0
+
+ * - ``:srv_service_name``
+ - The service name to use in the SRV DNS query.
+ - ``String``
+ - mongodb
+
+ * - ``:ssl``
+ - Tell the client to connect to the servers via TLS.
+ - ``Boolean``
+ - false
+
+ * - ``:ssl_ca_cert``
+ - The file path containing concatenated certificate authority certificates
+ used to validate certs passed from the other end of the connection.
+ One of ``:ssl_ca_cert``, ``:ssl_ca_cert_string`` or ``:ssl_ca_cert_object``
+ (in order of priority) is required for ``:ssl_verify``.
+ - ``String``
+ - none
+
+ * - ``:ssl_ca_cert_object``
+ - An array of OpenSSL::X509::Certificate representing the certificate
+ authority certificates used to validate certs passed from the other end
+ of the connection. One of ``:ssl_ca_cert``, ``:ssl_ca_cert_string`` or
+ ``:ssl_ca_cert_object`` (in order of priority) is required for ``:ssl_verify``.
+ - ``Array< OpenSSL::X509::Certificate >``
+ - none
+
+ * - ``:ssl_ca_cert_string``
+ - A string containing concatenated certificate authority certificates
+ used to validate certs passed from the other end of the connection.
+ One of ``:ssl_ca_cert``, ``:ssl_ca_cert_string`` or ``:ssl_ca_cert_object``
+ (in order of priority) is required for ``:ssl_verify``.
+ - ``String``
+ - none
+
+ * - ``:ssl_cert``
+ - Path to the client certificate file used to identify the application to
+ the MongoDB servers. The file may also contain the certificate's private
+ key; if so, the private key is ignored by this option. The file may
+ also contain intermediate certificates forming the certificate chain
+ from the client certificate to the CA certificate; any intermediate
+ certificates will be parsed by the driver and provided to the OpenSSL
+ context in ``extra_chain_cert`` attribute. If intermediate certificates
+ are provided, they must follow the client certificate which must be
+ the first certificate in the file.
+
+ This option, if present, takes precedence over ``:ssl_cert_string`` and
+ ``:ssl_cert_object`` options.
+ - ``String``
+ - none
+
+ * - ``:ssl_cert_object``
+ - The OpenSSL::X509::Certificate used to identify the application to
+ the MongoDB servers. Only one certificate may be passed through this
+ option.
+ - ``OpenSSL::X509::Certificate``
+ - none
+
+ * - ``:ssl_cert_string``
+ - A string containing the PEM-encoded certificate used to identify the
+ application to the MongoDB servers. The string may also contain the
+ certificate's private key; if so, the private key is ignored by this
+ option. The string may also contain intermediate certificates forming
+ the certificate chain from the client certificate to the CA certificate;
+ any intermediate certificates will be parsed by the driver and provided
+ to the OpenSSL context in ``extra_chain_cert`` attribute. If intermediate
+ certificates are provided, they must follow the client certificate which
+ must be the first certificate in the string.
+
+ This option, if present, takes precedence over the ``:ssl_cert_object``
+ option.
+ - ``String``
+ - none
+
+ * - ``:ssl_key``
+ - The private keyfile used to identify the connection against MongoDB. Note that even if the key is stored in
+ the same file as the certificate, both need to be explicitly specified. This option, if present, takes
+ precedence over the values of ``:ssl_key_string`` and ``:ssl_key_object``.
+ - ``String``
+ - none
+
+ * - ``:ssl_key_object``
+ - The private key used to identify the connection against MongoDB.
+ - ``OpenSSL::PKey``
+ - none
+
+ * - ``:ssl_key_pass_phrase``
+ - A passphrase for the private key.
+ - ``String``
+ - none
+
+ * - ``:ssl_key_string``
+ - A string containing the PEM-encoded private key used to identify the
+ connection against MongoDB. This parameter, if present, takes precedence
+ over the value of the ``:ssl_key_object`` option.
+ - ``String``
+ - none
+
+ * - ``:ssl_verify``
+ - Whether to perform peer certificate, hostname and OCSP endpoint
+ validation. Note that the decision of whether to validate certificates
+ will be overridden if ``:ssl_verify_certificate`` is set, the decision
+ of whether to validate hostnames will be overridden if
+ ``:ssl_verify_hostname`` is set and the decision of whether to validate
+ OCSP endpoint will be overridden if ``:ssl_verify_ocsp_endpoint`` is set.
+ - ``Boolean``
+ - true
+
+ * - ``:ssl_verify_certificate``
+ - Whether to perform peer certificate validation. This setting overrides
+ the ``:ssl_verify`` setting with respect to whether certificate
+ validation is performed.
+ - ``Boolean``
+ - true
+
+ * - ``:ssl_verify_hostname``
+ - Whether to perform peer hostname validation. This setting overrides
+ the ``:ssl_verify`` setting with respect to whether hostname validation
+ is performed.
+ - ``Boolean``
+ - true
+
+ * - ``:ssl_verify_ocsp_endpoint``
+ - Whether to validate server-supplied certificate against the OCSP
+ endpoint specified in the certificate, if the OCSP endpoint is specified
+ in the certificate. This setting overrides ``:ssl_verify`` with respect to
+ whether OCSP endpoint validation is performed.
+ - ``Boolean``
+ - true
+
+ * - ``:truncate_logs``
+ - Whether to truncate the logs at the default 250 characters.
+ - ``Boolean``
+ - true
+
+ * - ``:user``
+ - The name of the user to authenticate with.
+ - ``String``
+ - none
+
+ * - ``:wait_queue_timeout``
+ - The number of seconds to wait for a connection in the connection
+ pool to become available.
+ - ``Float``
+ - 10
+
+ * - ``:wrapping_libraries``
+ - Information about libraries such as ODMs that are wrapping the driver.
+ Specify the lower level libraries first. Allowed hash keys: ``:name``,
+ ``:version``, ``:platform``. Example: ``[name: 'Mongoid', version: '7.1.2']``
+ - ``Array``
+ - none
+
+ * - ``:write``
+ - Deprecated. Equivalent to ``:write_concern`` option. If both ``:write``
+ and ``:write_concern`` are specified, their values must be identical.
+
+ - ``Hash``
+ - ``{ w: 1 }``
+
+ * - ``:write_concern``
+ - Specifies write concern options as a ``Hash``.
+ Keys in the hash can be ``:w``, ``:wtimeout``, ``:j``, ``:fsync``.
+ Note that ``:wtimeout`` is specified in milliseconds, not seconds.
+
+ .. code-block:: ruby
+
+ { write_concern: { w: 2 } }
+
+ - ``Hash``
+ - ``{ w: 1 }``
+
+ * - ``:zlib_compression_level``
+ - The Zlib compression level to use, if using compression. See Ruby's Zlib module for valid levels.
+ - ``Integer``
+ - none
+
+.. note::
+
+ The Ruby driver does not implement certificate revocation list (CRL)
+ checking.
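+
+As an illustration, several of the options above might be combined when
+constructing a client; the values here are arbitrary:
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new([ '127.0.0.1:27017' ],
+    database: 'mydb',
+    app_name: 'reporting',
+    max_pool_size: 10,
+    read: { mode: :secondary_preferred },
+    write_concern: { w: 'majority' })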
+
+
+URI Options
+-----------
+
+Since the URI options are required to be in camel case, which is not the Ruby
+standard, the following table shows URI options and their corresponding Ruby
+options.
+
+URI options are explained in detail in the :manual:`Connection URI reference
+`.
+
+.. note::
+
+ Options that are set in **milliseconds** in the URI are
+ represented as a ``float`` in Ruby and the units are **seconds**.
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 105
+
+ * - URI Option
+ - Ruby Option
+
+ * - appName=String
+ - ``:app_name => String``
+
+ * - authMechanism=String
+ - ``:auth_mech => Symbol``
+
+ Auth mechanism values are converted as follows from URI options to
+ Ruby options:
+
+ - ``GSSAPI`` => ``:gssapi``
+ - ``MONGODB-CR`` => ``:mongodb_cr``
+ - ``MONGODB-X509`` => ``:mongodb_x509``
+ - ``PLAIN`` => ``:plain``
+ - ``SCRAM-SHA-1`` => ``:scram``
+ - ``SCRAM-SHA-256`` => ``:scram256``
+
+ If a different value is provided for auth mechanism, it is converted
+ to the Ruby option unmodified and retains its ``String`` type.
+ Note that, while currently the driver allows a ``Client`` instance
+ to be constructed with an unrecognized auth mechanism, this behavior
+ `may change in a future version of the driver `_.
+
+ * - authMechanismProperties=Strings
+ - ``{ :auth_mech_properties => { :service_realm => String, :canonicalize_host_name => true|false, :service_name => String } }``
+
+ Specified as comma-separated key:value pairs, e.g. ``"SERVICE_REALM:foo,CANONICALIZE_HOST_NAME:TRUE"``.
+
+ * - authSource=String
+ - ``:auth_source => String``
+
+ * - compressors=Strings
+ - ``:compressors => Array``
+
+ A comma-separated list of potential compressors to use, in order of
+ preference. Please see below for details on how the driver implements
+ compression.
+
+ * - connect=String
+ - ``:connect => Symbol``
+
+ The same values that the ``:connect`` Ruby option accepts are
+ accepted here. For multi-word values, the values must be provided
+ using underscores to separate the words, i.e.
+ ``connect=replica_set`` and ``connect=load_balanced``.
+
+ * - connectTimeoutMS=Integer
+ - ``:connect_timeout => Float``
+
+ Unlike the corresponding Ruby option which fails client creation on
+ invalid values (e.g. negative and non-numeric values), invalid values
+ provided via this URI option are ignored with a warning.
+
+ * - directConnection=Boolean
+ - ``:direct_connection => Boolean``
+
+ * - fsync=Boolean
+ - ``{ :write_concern => { :fsync => true|false }}``
+
+ * - heartbeatFrequencyMS=Integer
+ - ``:heartbeat_frequency => Float``
+
+ * - journal=Boolean
+ - ``{ :write_concern => { :j => true|false }}``
+
+ * - loadBalanced=Boolean
+ - ``:load_balanced => Boolean``
+
+ * - localThresholdMS=Integer
+ - ``:local_threshold => Float``
+
+ * - maxConnecting=Integer
+ - ``:max_connecting => Integer``
+
+ * - maxIdleTimeMS=Integer
+ - ``:max_idle_time => Float``
+
+ * - maxStalenessSeconds=Integer
+ - ``{ :read => { :max_staleness => Integer }}``
+
+ If the maxStalenessSeconds URI option value is -1, the driver treats
+ this as if the option was not given at all. Otherwise,
+ if the option value is numeric, the Ruby option is set to the
+ specified value converted to an ``Integer``.
+ Note that numeric values greater than 0 but less than 90, or less than
+ -1, are accepted by the ``Client`` constructor but will cause server
+ selection to fail (unless the option is changed via, for example, the
+ ``with`` method prior to any operations being performed on the driver).
+ If the option value is non-numeric, it is ignored and the driver
+ treats this case as if the option was not given at all.
+
+ * - maxPoolSize=Integer
+ - ``:max_pool_size => Integer``
+
+ * - minPoolSize=Integer
+ - ``:min_pool_size => Integer``
+
+ * - readConcernLevel=String
+ - ``:read_concern => Hash``
+
+ * - readPreference=String
+ - ``{ :read => { :mode => Symbol }}``
+
+ * - readPreferenceTags=Strings
+ - ``{ :read => { :tag_sets => Array }}``
+
+ Each instance of the ``readPreferenceTags`` field is a comma-separated key:value pair which will appear in the ``:tag_sets`` array in the order they are specified. For instance, ``"readPreferenceTags=dc:ny,rack:1&readPreferenceTags=dc:ny"`` will be converted to ``[ { 'dc' => 'ny', 'rack' => '1' }, { 'dc' => 'ny' }]``.
+
+ * - replicaSet=String
+ - ``:replica_set => String``
+
+ * - retryWrites=Boolean
+ - ``:retry_writes => boolean``
+
+ * - serverSelectionTimeoutMS=Integer
+ - ``:server_selection_timeout => Float``
+
+ * - socketTimeoutMS=Integer
+ - ``:socket_timeout => Float``
+
+ Unlike the corresponding Ruby option which fails client creation on
+ invalid values (e.g. negative and non-numeric values), invalid values
+ provided via this URI option are ignored with a warning.
+
+ * - srvMaxHosts=Integer
+ - ``:srv_max_hosts => Integer``
+
+ * - srvServiceName=String
+ - ``:srv_service_name => String``
+
+ * - ssl=Boolean
+ - ``:ssl => true|false``
+
+ * - tls=Boolean
+ - ``:ssl => boolean``
+
+ * - tlsAllowInvalidCertificates=Boolean
+ - ``:ssl_verify_certificate => boolean``
+
+ Because ``tlsAllowInvalidCertificates`` uses ``true`` to signify that
+ verification should be disabled and ``ssl_verify_certificate`` uses
+ ``false`` to signify that verification should be disabled, the boolean
+ is inverted before being used to set ``ssl_verify_certificate``.
+
+ * - tlsAllowInvalidHostnames=Boolean
+ - ``:ssl_verify_hostname => boolean``
+
+ Because ``tlsAllowInvalidHostnames`` uses ``true`` to signify that
+ verification should be disabled and ``ssl_verify_hostname`` uses
+ ``false`` to signify that verification should be disabled, the boolean
+ is inverted before being used to set ``ssl_verify_hostname``.
+
+ * - tlsCAFile=String
+ - ``:ssl_ca_cert => String``
+
+ * - tlsCertificateKeyFile=String
+ - ``:ssl_cert => String``
+
+ * - tlsCertificateKeyFile=String
+ - ``:ssl_key => String``
+
+ * - tlsCertificateKeyFilePassword=String
+ - ``:ssl_key_pass_phrase => String``
+
+ * - tlsDisableOCSPEndpointCheck=Boolean
+ - ``:ssl_verify_ocsp_endpoint => boolean``
+
+ Because ``tlsDisableOCSPEndpointCheck`` uses ``true`` to signify that
+ verification should be disabled and ``ssl_verify_ocsp_endpoint`` uses
+ ``false`` to signify that verification should be disabled, the boolean
+ is inverted before being used to set ``ssl_verify_ocsp_endpoint``.
+
+ * - tlsInsecure=Boolean
+ - ``:ssl_verify => boolean``
+
+ Because ``tlsInsecure`` uses ``true`` to signify that verification should
+ be disabled and ``ssl_verify`` uses ``false`` to signify that
+ verification should be disabled, the boolean is inverted before being
+ used to set ``ssl_verify``.
+
+ * - w=Integer|String
+ - ``{ :write_concern => { :w => Integer|String }}``
+
+ * - waitQueueTimeoutMS=Integer
+ - ``:wait_queue_timeout => Float``
+
+ * - wtimeoutMS=Integer
+ - ``{ :write_concern => { :wtimeout => Integer }}``
+
+ * - zlibCompressionLevel=Integer
+ - ``:zlib_compression_level => Integer``
+
+.. note::
+
+ The Ruby driver only fails connections when it receives a definitive signed
+ response indicating that the server's certificate has been revoked.
+ Because of this, the driver does not recognize the
+ ``tlsDisableCertificateRevocationCheck`` URI option. If this option is
+ provided in a URI, it will be ignored.
+
+
+Timeout Options
+===============
+
+``server_selection_timeout``
+----------------------------
+
+When executing an operation, the number of seconds to wait for the driver
+to find an appropriate server to send an operation to. Defaults to 30.
+
+A value of 0 means no timeout.
+
+When an invalid value (e.g. a negative value or a non-numeric value) is passed
+via the URI option, the invalid input is ignored with a warning. When an
+invalid value is passed directly to Client via a Ruby option, Client
+construction fails with an error.
+
+In replica set deployments, this timeout should be set to exceed the typical
+:manual:`replica set election times `
+in order for the driver to transparently handle primary changes. This timeout
+also allows the application and the database to be started simultaneously;
+the application will wait up to this much time for the database to become
+available.
+
+If the application server is behind a reverse proxy, server selection timeout
+should be lower than the request timeout configured on the reverse proxy (for
+example, this applies to deployments on Heroku which has a fixed 30 second
+timeout in the routing layer). In development this value can be lowered to
+provide quicker failure when the server is not running.
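+
+A sketch of lowering the timeout in development (values are arbitrary):
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new([ '127.0.0.1:27017' ],
+    database: 'mydb', server_selection_timeout: 3)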
+
+``socket_timeout``
+------------------
+
+The number of seconds to wait for a socket read or write to complete on
+regular (non-monitoring) connections. Default is no timeout.
+
+A value of 0 means no timeout.
+
+When an invalid value (e.g. a negative value or a non-numeric value) is passed
+via the URI option, the invalid input is ignored with a warning. When an
+invalid value is passed directly to Client via a Ruby option, Client
+construction fails with an error.
+
+This timeout should take into account both network latency and operation
+duration. For example, setting this timeout to 5 seconds will abort queries
+taking more than 5 seconds to execute on the server with ``Mongo::Error::SocketTimeoutError``.
+
+Note that even though by default there is no socket timeout set, the
+operating system may still time out read operations depending on its
+configuration. The keepalive settings are intended to detect broken network
+connections (as opposed to aborting operations simply because they take a
+long time to execute).
+
+Note that if an operation is timed out by the driver due to exceeding the
+``socket_timeout`` value, it is not aborted on the server. For this reason
+it is recommended to use ``max_time_ms`` option for potentially long running
+operations, as this will abort their execution on the server.
+
+This option does not apply to monitoring connections.
+
+``connect_timeout``
+-------------------
+
+The number of seconds to wait for a socket connection to be established to
+a server. Defaults to 10.
+
+This timeout is also used as both connect timeout and socket timeout for
+monitoring connections.
+
+When using a ``mongodb+srv://`` URI, this timeout is also used for SRV and TXT
+DNS lookups. Note that the timeout applies per lookup; due to DNS suffix search
+lists, multiple lookups may be performed as part of a single name resolution.
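+
+For example, to lower the connection establishment timeout to five seconds
+(the value is illustrative):
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], connect_timeout: 5)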
+
+``wait_queue_timeout``
+----------------------
+
+The number of seconds to wait for a connection in the connection pool to
+become available. Defaults to 10.
+
+As of driver version 2.11, this timeout should be set to a value at least
+as large as ``connect_timeout``, because the connection pool now fully
+establishes connections prior to returning them, which may require several
+network round trips.
+
+``max_time_ms``
+---------------
+
+Specified as an option on a particular operation, the number of milliseconds
+to allow the operation to execute for on the server. Not set by default.
+
+Consider using this option instead of a ``socket_timeout`` for potentially
+long running operations to be interrupted on the server when they take too
+long.
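+
+For example, a potentially long-running query can be limited to roughly five
+seconds of server-side execution time via the fluent ``max_time_ms`` option
+(the collection and value are illustrative, and ``client`` is assumed to be a
+``Mongo::Client``):
+
+.. code-block:: ruby
+
+ client[:artists].find(name: 'Flying Lotus').max_time_ms(5000).to_a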
+
+``wtimeout``
+------------
+
+The number of milliseconds to wait for a write to be acknowledged by the
+number of servers specified in the write concern. Not set by default, which
+instructs the server to apply its default. This option can be set globally
+on the client or passed to individual operations under ``:write_concern``.
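+
+For example, to require acknowledgment from two servers within five seconds
+(the values are illustrative):
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], write_concern: {w: 2, wtimeout: 5000})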
+
+
+TLS Connections
+===============
+
+To connect to the MongoDB deployment using TLS:
+
+- Enable TLS connections in ``Mongo::Client``.
+- Specify the client TLS certificate.
+- Specify the CA certificate to verify the server's TLS certificate.
+
+.. note::
+
+ When using JRuby, ECDSA certificates are not currently supported.
+
+TLS vs SSL Option Names
+-----------------------
+
+All MongoDB server versions supported by the Ruby driver (2.6 and higher)
+implement only TLS; they do not use SSL.
+
+For historical reasons, the Ruby option names pertaining to TLS configuration
+use the ``ssl`` rather than the ``tls`` prefix. The next major version of
+the Ruby driver (3.0) will use the ``tls`` prefix for Ruby option names.
+
+The URI option names use the ``tls`` prefix, with one exception: there is
+a ``ssl`` URI option that is deprecated and equivalent to the ``tls`` URI
+option.
+
+Enable TLS Connections
+----------------------
+
+TLS must be explicitly requested on the client side when the deployment
+requires TLS connections - there is currently no automatic detection of
+whether the deployment requires TLS.
+
+To request TLS connections, specify one of the following options when
+constructing a ``Mongo::Client``, as shown in the examples below:
+
+- The ``:ssl`` Ruby option.
+- The ``tls`` URI option.
+- The ``ssl`` URI option (deprecated).
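+
+Either of the following constructs a client that uses TLS (certificate-related
+options, covered in the following sections, are omitted for brevity):
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], ssl: true)
+
+ client = Mongo::Client.new("mongodb://localhost:27017/?tls=true")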
+
+Specify Client TLS Certificate
+------------------------------
+
+By default, the MongoDB server will attempt to verify the connecting clients'
+TLS certificates, which requires the clients to specify their TLS certificates
+when connecting. This can be accomplished via:
+
+- The ``:ssl_cert``/``:ssl_cert_object``/``:ssl_cert_string`` and
+ ``:ssl_key``/``:ssl_key_object``/``:ssl_key_string``/``:ssl_key_pass_phrase``
+ Ruby options.
+- The ``tlsCertificateKeyFile`` URI option.
+
+When using the Ruby options, the client TLS certificate and the corresponding
+private key may be provided separately. For example, if the certificate is
+stored in ``client.crt`` and the private key is stored in ``client.key``,
+a ``Mongo::Client`` may be constructed as follows:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"],
+ ssl: true,
+ ssl_cert: 'path/to/client.crt',
+ ssl_key: 'path/to/client.key',
+ ssl_ca_cert: 'path/to/ca.crt',
+ )
+
+``ssl_cert``, ``ssl_cert_string``, ``ssl_key`` and ``ssl_key_string`` Ruby
+options also permit the certificate and the key to be provided in the same
+file or string, respectively. The files containing both certificate and
+private key frequently have the ``.pem`` extension. When both certificate
+and the private key are provided in the same file or string, both the
+certificate and the key options must be utilized, as follows:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"],
+ ssl: true,
+ ssl_cert: 'path/to/client.pem',
+ ssl_key: 'path/to/client.pem',
+ ssl_ca_cert: 'path/to/ca.crt',
+ )
+
+When using the URI option, the certificate and the key must be stored in a
+file and both must be stored in the same file. Example usage:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(
+ "mongodb://localhost:27017/?tls=true&tlsCertificateKeyFile=path%2fto%2fclient.pem&tlsCertificateKeyFile=path%2fto%2fca.crt")
+
+.. note::
+
+ URI option values must be properly URI escaped. This applies, for example, to
+ slashes in the paths.
+
+
+.. _modifying-tls-context:
+
+Modifying ``SSLContext``
+------------------------
+
+It may be desirable to further configure TLS options in the driver, for example
+by enabling or disabling certain ciphers. Currently, the Ruby driver does not
+provide a way to do this when initializing a ``Mongo::Client``.
+
+However, the Ruby driver provides a way to set global "TLS context hooks" --
+these are user-provided ``Proc``s that will be invoked before any TLS socket
+connection and can be used to modify the underlying ``OpenSSL::SSL::SSLContext``
+object used by the socket.
+
+To set the TLS context hooks, add ``Proc``s to the ``Mongo.tls_context_hooks``
+array. This should be done before creating any ``Mongo::Client`` instances.
+For example, in a Rails application this code could be placed in an initializer.
+
+.. code-block:: ruby
+
+ Mongo.tls_context_hooks.push(
+ Proc.new { |context|
+ context.ciphers = ["AES256-SHA"]
+ }
+ )
+
+ # Only the AES256-SHA cipher will be enabled from this point forward
+
+Every ``Proc`` in ``Mongo.tls_context_hooks`` will be passed an
+``OpenSSL::SSL::SSLContext`` object as its sole argument. These ``Proc``s will
+be executed sequentially during the creation of every ``Mongo::Socket::SSL`` object.
+
+It is possible to assign the entire array of hooks by calling
+``Mongo.tls_context_hooks=``, but doing so will remove any previously assigned
+hooks. It is recommended to use the ``Array#push`` or ``Array#unshift`` methods
+to add new hooks.
+
+It is also possible to remove hooks from ``Mongo.tls_context_hooks`` by storing
+a reference to the Procs somewhere else in the application, and then using
+``Array#delete_if`` to remove the desired hooks.
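+
+For instance, a hook can be removed later by keeping a reference to it
+(the variable name is illustrative):
+
+.. code-block:: ruby
+
+ my_hook = Proc.new { |context| context.ciphers = ["AES256-SHA"] }
+ Mongo.tls_context_hooks.push(my_hook)
+
+ # Later, remove only that hook
+ Mongo.tls_context_hooks.delete_if { |hook| hook.equal?(my_hook) }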
+
+.. warning::
+
+ TLS context hooks are global and will affect every instance of ``Mongo::Client``.
+ Any library that allows applications to enable these hooks should expose methods to
+ modify the hooks (which can be called by the application) rather than
+ automatically enabling the hooks when the library is loaded.
+
+Further information on configuring MongoDB server for TLS is available in the
+:manual:`MongoDB manual `.
+
+Using Intermediate Certificates
+```````````````````````````````
+
+It is possible to use certificate chains for both the client and the server
+certificates. When using chains, the certificate authority parameter should
+be configured to contain the trusted root certificates only; the intermediate
+certificates, if any, should be provided in the server or client certificates
+by concatenating them after the leaf server and client certificates, respectively.
+
+``:ssl_cert`` and ``:ssl_cert_string`` Ruby options, as well as
+``tlsCertificateKeyFile`` URI option, support certificate chains.
+``:ssl_cert_object`` Ruby option, which takes an instance of
+``OpenSSL::X509::Certificate``, does not support certificate chains.
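+
+For example, assuming ``client-chain.pem`` contains the client certificate
+followed by its intermediate certificate(s), and the private key is stored
+separately (the file names are illustrative):
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"],
+ ssl: true,
+ ssl_cert: 'path/to/client-chain.pem',
+ ssl_key: 'path/to/client.key',
+ ssl_ca_cert: 'path/to/ca.crt',
+ )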
+
+The Ruby driver performs strict X.509 certificate verification, which requires
+that both of the following fields are set in the intermediate certificate(s):
+
+- X509v3 Basic Constraints: CA: TRUE -- Can sign certificates
+- X509v3 Key Usage: Key Cert Sign -- Can sign certificates
+
+More information about these flags can be found `in this Stack Overflow question
+`_.
+
+It is a common pitfall to concatenate intermediate certificates to the root
+CA certificates passed in ``tlsCAFile`` / ``ssl_ca_cert`` options. By doing
+so, the intermediate certificates are elevated to trusted status and are
+themselves not verified against the actual CA root. More information on this
+issue is available `in this mailing list post
+`_.
+
+Specify CA Certificate
+----------------------
+
+The driver will attempt to verify the server's TLS certificate by default, and
+will abort the connection if this verification fails. By default, the driver
+will use the default system root certificate store as the trust anchor.
+To specify the CA certificate that the server's certificate is signed with,
+use:
+
+- The ``:ssl_ca_cert``/``:ssl_ca_cert_string``/``:ssl_ca_cert_object``
+ Ruby options
+- The ``tlsCAFile`` URI option.
+
+If any of these options are given, the server's certificate will be verified
+only against the specified CA certificate and the default system root
+certificate store will not be used.
+
+To not perform server TLS certificate verification, which is not
+recommended, specify the ``ssl_verify: false`` Ruby option or the
+``tlsInsecure=true`` URI option.
+
+Specifying Multiple CA Certificates
+```````````````````````````````````
+
+The ``:ssl_ca_cert`` Ruby option and ``tlsCAFile`` URI option can be used with
+a file containing multiple certificates. All certificates thus referenced
+will become trust anchors.
+
+The ``:ssl_ca_cert_object`` option takes an array of certificates, and thus
+can also be used to add multiple certificates as certificate authorities.
+
+The ``:ssl_ca_cert_string`` option supports specifying only one CA certificate.
+
+.. warning::
+
+ Intermediate certificates must not be provided in files specified by the
+ CA certificate options. Doing so would elevate the intermediate certificates
+ to the status of root certificates, rather than verifying intermediate
+ certificates against the root certificates.
+
+ If intermediate certificates need to be used, specify them as part of the
+ client or server TLS certificate files.
+
+
+.. _ocsp-verification:
+
+OCSP Verification
+-----------------
+
+If the certificate provided by the server contains an OCSP endpoint URI,
+the driver will issue an OCSP request to the specified endpoint to verify the
+validity of the certificate.
+
+The OCSP endpoint check may be disabled by setting the
+``:ssl_verify_ocsp_endpoint`` Ruby option to ``false`` or by setting the
+``tlsDisableOCSPEndpointCheck`` URI option to ``true`` when creating a client.
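+
+For example, assuming a TLS-enabled deployment:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"],
+ ssl: true,
+ ssl_verify_ocsp_endpoint: false,
+ )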
+
+.. note::
+
+ OCSP endpoint checking is not currently performed when running on JRuby,
+ since JRuby does not correctly expose the OCSP endpoint URI.
+
+
+IPv4/IPv6 Connections
+=====================
+
+When a client is constructed with ``localhost`` as the host name, it will
+attempt an IPv4 connection only (i.e. if ``localhost`` resolves to
+``127.0.0.1`` and ``::1``, the driver will only try to connect to
+``127.0.0.1``).
+
+When a client is constructed with hostnames other than ``localhost``, it will
+attempt both IPv4 and IPv6 connections depending on the addresses that the
+hostnames resolve to. The driver respects the order in which ``getaddrinfo``
+returns the addresses, and will attempt to connect to them sequentially.
+The first successful connection will be used.
+
+The driver does not currently implement the Happy Eyeballs algorithm.
+
+
+TCP Keepalive Configuration
+===========================
+
+Where allowed by system configuration and the Ruby language runtime,
+the driver enables TCP keepalive and, for each of the keepalive parameters
+listed below, sets the value of the respective parameter to the specified
+value if the system value can be determined and is higher than the
+listed driver value:
+
+- ``tcp_keepalive_time``: 120 seconds
+- ``tcp_keepalive_intvl``: 10 seconds
+- ``tcp_keepalive_cnt``: 9 probes
+
+.. note::
+
+ As of JRuby 9.2.14.0, JRuby does not implement the APIs required to
+ set the keepalive parameters. When using JRuby, the driver will not be
+ able to set the keepalive parameters and the system configuration will
+ be in effect.
+
+To use lower values, or to change the parameters in environments like JRuby
+that do not expose the required APIs, please adjust the parameters at the
+system level as described in the `MongoDB Diagnostics FAQ keepalive section
+`_.
+
+
+Connection Pooling
+==================
+
+``Mongo::Client`` instances have a connection pool per server that the client
+is connected to. The pool creates connections on demand to support concurrent
+MongoDB operations issued by the application. There is no thread-affinity
+for connections.
+
+The client instance opens one additional connection per known server
+for monitoring the server's state.
+
+The size of each connection pool is capped at ``max_pool_size``, which defaults
+to 5. When a thread in the application begins an operation on MongoDB, it tries
+to retrieve a connection from the pool to send that operation on. If there
+are some connections available in the pool, it checks out a connection from
+the pool and uses it for the operation. If there are no connections available
+and the size of the pool is less than the ``max_pool_size``, a new connection
+will be created. If all connections are in use and the pool has reached its
+maximum size, the thread waits for a connection to be returned to the pool by
+another thread. If ``max_pool_size`` is set to zero, there is no limit for the
+maximum number of connections in the pool.
+
+Each pool has a limit on the number of connections that can be concurrently
+connecting to a server. This limit is called ``max_connecting`` and defaults to
+2. If the number of connections that are currently connecting to a server
+reaches this limit, the pool will wait for a connection attempt to succeed or
+fail before attempting to create a new connection. If your application
+has a large number of threads, you may want to increase ``max_connecting`` to avoid
+having threads wait for a connection to be established.
+
+The number of seconds the thread will wait for a connection to become available
+is configurable. This setting, called ``wait_queue_timeout``, is defined in
+seconds and described under the timeout options above. If this timeout is
+reached, a ``Timeout::Error`` is raised.
+
+As of driver version 2.11, the driver eagerly creates connections up to
+``min_pool_size`` setting. Prior to driver version 2.11, the driver always
+created connections on demand. In all versions of the driver, once a connection
+is established, it will be kept in the pool by the driver as long as the pool
+size does not exceed ``min_pool_size``.
+
+Note that, if ``min_pool_size`` is set to a value greater than zero, the
+driver will establish that many connections to secondaries in replica set
+deployments even if the application does not perform secondary reads. The
+purpose of these connections is to provide faster failover when the primary
+changes.
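+
+For example, to keep a couple of connections established to each server while
+still capping the pool size (the values are illustrative):
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], min_pool_size: 2, max_pool_size: 10)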
+
+Here is an example of estimating the number of connections a multi-threaded
+application will open: A client connected to a 3-node replica set opens 3
+monitoring sockets. It also opens as many sockets as needed to support a
+multi-threaded application's concurrent operations on each server, up to
+``max_pool_size``. If the application only uses the primary (the default),
+then only the primary connection pool grows and the total number of connections
+is at most 8 (5 connections for the primary pool + 3 monitoring connections).
+If the application uses a read preference to query the secondaries, their
+pools also grow and the total number of connections can reach 18 (5 + 5 + 5 + 3).
+
+The default configuration for a ``Mongo::Client`` works for most applications:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"])
+
+Create this client **once** for each process, and reuse it for all operations.
+It is a common mistake to create a new client for each request, which is very
+inefficient and not what the client was designed for.
+
+To support extremely high numbers of concurrent MongoDB operations within one
+process, increase ``max_pool_size``:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], max_pool_size: 200)
+
+To support extremely high numbers of threads that share the same client
+within one process, increase ``max_connecting``:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], max_pool_size: 200, max_connecting: 10)
+
+
+Any number of threads are allowed to wait for connections to become available,
+and they will wait up to the configured ``wait_queue_timeout``:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], wait_queue_timeout: 0.5)
+
+When ``#close`` is called on a client by any thread, all connections are closed:
+
+.. code-block:: ruby
+
+ client.close
+
+Note that when creating a client using the `block syntax <#block-syntax>`_ described above, the client is automatically closed after the block finishes executing.
+
+.. _forking:
+
+Usage with Forking Servers
+==========================
+
+.. note::
+
+ Applications using Mongoid should follow `Mongoid's forking guidance
+ `_.
+ The guidance and sample code below is provided for applications using the
+ Ruby driver directly.
+
+When using the Mongo Ruby driver in a web application with a forking web server
+such as Unicorn, Puma or Passenger, or when the application otherwise forks,
+each process should generally have its own ``Mongo::Client`` instances.
+This is because:
+
+1. The background threads remain in the parent process and are not transferred
+ to the child process.
+2. File descriptors like network sockets are shared between parent and
+ child processes.
+
+The driver attempts to detect client use from forked processes and
+reestablish network connections when such use is detected, alleviating
+the issue of file descriptor sharing.
+
+If both parent and child processes need to perform MongoDB operations,
+it is recommended for each of the processes to create their own
+``Mongo::Client`` instances. Specifically, the child process should create
+its own client instance and not use any of the instances that were created
+in the parent.
+
+If the parent continues to perform MongoDB operations using an already
+established client instance after forking children, this client instance will
+continue to operate normally as long as no child uses it in any way.
+The child processes will not inherit any of the monitoring threads, and
+will not perform background operations on the client instance.
+
+If the parent does not need to perform MongoDB operations after forking
+children (which is what typically happens in web applications), the parent
+should close all of the client instances it created to free up connections
+and cease background monitoring:
+
+.. code-block:: ruby
+
+ client.close
+
+.. note::
+
+ If the parent process performs operations on the Mongo client and does not
+ close it, the parent process will continue consuming a connection slot
+ in the cluster and will continue monitoring the cluster for as long as the
+ parent remains alive.
+
+Reconnecting Client Instances
+-----------------------------
+
+When the Ruby driver is used in a web application, it is recommended to not
+create any ``Mongo::Client`` instances in the management processes (prior to
+the workers being forked), and instead only create client instances in the
+workers.
+
+It is possible, although not recommended, to use the same ``Mongo::Client``
+instances in parent and child processes. In order to do so, the instance
+must be closed and reconnected in the child process so that the background
+threads can be recreated:
+
+.. code-block:: ruby
+
+ client.close
+ client.reconnect
+
+.. note::
+
+ This pattern should be used with Ruby driver version 2.6.2 or higher.
+ Previous driver versions did not recreate monitoring threads when
+ reconnecting.
+
+.. note::
+
+ When closing and reconnecting the client instance in the child,
+ due to file descriptor sharing, the parent process may experience network
+ and monitoring errors.
+
+Web servers generally provide hooks that can be used by applications to
+perform actions when the worker processes are forked. The recommended hooks
+to use are:
+
+- For `Puma `_, ``before_fork`` to close clients in the
+ parent process and ``on_worker_boot`` to reconnect in the child processes.
+- For `Unicorn `_,
+ ``before_fork`` to close clients in the parent process and
+ ``after_fork`` to reconnect clients in the child processes.
+- For `Passenger `_,
+ ``starting_worker_process`` to reconnect clients in the child processes
+ (Passenger does not appear to have a pre-fork hook).
+
+This documentation does not provide example code for using the aforementioned
+hooks, because there is no standard for client instance management when
+using the Ruby driver directly. `Mongoid documentation
+`_
+however provides examples for closing clients in the parent process and
+reconnecting clients in the child processes.
+
+Troubleshooting
+---------------
+
+The client's ``summary`` method returns the current state of the client,
+including servers that the client is monitoring and their state. If any of
+the servers are not being monitored, this is indicated by the ``NO-MONITORING``
+flag.
+
+A normally operating client will produce a summary that lists every server
+with its address and state and contains no ``NO-MONITORING`` flags:
+
+.. code-block:: ruby
+
+ client.summary
+ # => A string listing each monitored server with its address and state.
+
+A client that is missing background threads will produce a summary in which
+the affected servers are flagged with ``NO-MONITORING``:
+
+.. code-block:: ruby
+
+ client.summary
+ # => A string in which servers missing monitoring threads are flagged
+ # with NO-MONITORING.
+
+
+Retryable Reads
+===============
+
+The driver implements two mechanisms for retrying reads: modern and legacy.
+As of driver version 2.9.0, the modern mechanism is used by default, and the
+legacy mechanism is deprecated.
+
+Modern Retryable Reads
+----------------------
+
+When the modern mechanism is used, read operations are retried once in the
+event of a network error, a "not master" error, or a "node is recovering" error.
+The following operations are covered:
+
+- `Collection#find `_
+ and related methods
+- `Collection#aggregate `_
+- `Collection#count `_,
+ `Collection#count_documents `_
+- Change stream helpers: `Collection#watch `_,
+ `Database#watch `_,
+ `Client#watch `_
+- Enumeration commands: `Client#list_mongo_databases `_,
+ `Client#list_databases `_,
+ `Client#database_names `_,
+ `Database#collection_names `_,
+ `Database#collections `_,
+ `Database#list_collections `_,
+ `Collection#indexes `_
+
+When an operation returns a cursor, only the initial read command can be retried.
+``getMore`` operations on cursors are not retried by driver version 2.9.0 or
+newer. Additionally, when a read operation is retried, a new server for the
+operation is selected; this may result in the retry being sent to a different
+server from the one which received the first read.
+
+The behavior of modern retryable reads is covered in detail by the
+`retryable reads specification
+`_.
+
+Note that the modern retryable reads mechanism can only be used with MongoDB
+3.6 and higher servers. When used with MongoDB 3.4 and lower servers, Ruby
+driver version 2.9.0 and higher will not retry reads by default - the
+application must explicitly request legacy retryable reads by setting the
+``retry_reads: false`` client option or using the ``retryReads=false`` URI
+option.
+
+Legacy Retryable Reads
+----------------------
+
+The legacy read retry behavior of the Ruby driver is available by setting the
+``retry_reads: false`` client option or passing the ``retryReads=false`` URI
+option to the client.
+
+When using legacy read retry behavior, the number of retries can be set
+by specifying the ``max_read_retries`` client option. When using driver version
+2.9.0 or higher, the set of operations which would be retried with legacy
+retryable reads is identical to the one described above for modern retryable
+reads. In older driver versions the behavior of legacy retryable reads was
+different in that some of the operations were not retried.
+
+As of driver version 2.9.0, legacy read retries perform server selection prior
+to retrying the operation, as modern retryable reads do. In older driver
+versions read retries would be sent to the same server that the initial read
+was sent to.
+
+Disabling Retryable Reads
+-------------------------
+
+To disable all read retries, set the following client options:
+``retry_reads: false, max_read_retries: 0``.
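+
+For example:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], retry_reads: false, max_read_retries: 0)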
+
+
+Retryable Writes
+================
+
+The driver implements two mechanisms for retrying writes: modern and legacy.
+As of driver version 2.9.0, the modern mechanism is used by default on servers
+that support it, and the legacy mechanism is deprecated and disabled by default
+on all server versions.
+
+The following write methods used in day-to-day operations on collections
+are subject to write retries:
+
+- ``collection#insert_one``
+- ``collection#update_one``
+- ``collection#delete_one``
+- ``collection#replace_one``
+- ``collection#find_one_and_update``
+- ``collection#find_one_and_replace``
+- ``collection#find_one_and_delete``
+- ``collection#bulk_write`` (for all single statement ops, i.e. not for ``update_many`` or ``delete_many``)
+
+Modern Retryable Writes
+-----------------------
+
+The modern mechanism will retry failing writes once when the driver is
+connected to a MongoDB 3.6 or higher replica set or sharded cluster,
+because retryable writes require an oplog on the server. The modern mechanism
+will not retry writes when the driver is connected to a standalone MongoDB
+server or to server versions 3.4 or older.
+
+The following errors will cause writes to be retried:
+
+- Network errors including timeouts
+- "not master" errors
+- "node is recovering" errors
+
+Prior to retrying the write the driver will perform server selection,
+since the server that the original write was sent to is likely no longer
+usable.
+
+Legacy Retryable Writes
+-----------------------
+
+If the modern retryable writes mechanism is disabled by setting the client
+option ``retry_writes: false`` or by using the ``retryWrites=false``
+URI option, the driver will utilize the legacy retryable writes mechanism.
+The legacy mechanism retries writes for the same operations as the modern
+mechanism. By default the legacy mechanism retries once, like the modern
+mechanism does; to change the number of retries, set the ``:max_write_retries``
+client option.
+
+The difference between legacy and modern retry mechanisms is that the
+legacy mechanism retries writes for a different set
+of errors compared to the modern mechanism, and specifically does not
+retry writes when a network timeout is encountered.
+
+Disabling Retryable Writes
+--------------------------
+
+To disable all write retries, set the following client options:
+``retry_writes: false, max_write_retries: 0``.
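+
+For example:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"], retry_writes: false, max_write_retries: 0)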
+
+Logging
+=======
+
+You can either use the default global driver logger or set your own. To set your own:
+
+.. code-block:: ruby
+
+ Mongo::Logger.logger = other_logger
+
+See the `Ruby Logger documentation `_
+for more information on the default logger API and available levels.
+
+Changing the Logger Level
+-------------------------
+
+To change the logger level:
+
+.. code-block:: ruby
+
+ Mongo::Logger.logger.level = Logger::WARN
+
+For more control, a logger can be passed to a client for per-client control over logging.
+
+.. code-block:: ruby
+
+ my_logger = Logger.new(STDOUT)
+ Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test', :logger => my_logger )
+
+Truncation
+----------
+
+The default logging truncates logs at 250 characters by default. To turn this off pass an
+option to the client instance.
+
+.. code-block:: ruby
+
+ Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test', :truncate_logs => false )
+
+
+.. _compression:
+
+Compression
+===========
+
+To use wire protocol compression, at least one compressor must be explicitly
+requested using either the ``:compressors`` Ruby option or the ``compressors``
+URI option. If no compressors are explicitly requested, the driver will not
+use compression, even if the required dependencies for one or more compressors
+are present on the system.
+
+The driver chooses the first requested compressor that is also supported by
+the server. The driver currently supports the ``zstd``, ``snappy`` and
+``zlib`` compressors. The ``zstd`` compressor is recommended, as it provides
+the highest compression ratio for comparable CPU consumption. For maximum
+server compatibility all three compressors can be specified, e.g. as
+``compressors: ["zstd", "snappy", "zlib"]``.
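+
+For example (the host is illustrative):
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(["localhost:27017"],
+ compressors: ["zstd", "snappy", "zlib"])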
+
+``zstd`` compressor requires the
+`zstd-ruby `_ library to be installed.
+``snappy`` compressor requires the
+`snappy `_ library to be installed.
+If ``zstd`` or ``snappy`` compression is requested, and the respective
+library is not loadable, the driver will raise an error during
+``Mongo::Client`` creation. ``zlib`` compression requires the ``zlib``
+standard library extension to be present.
+
+The server support for various compressors is as follows:
+
+- ``zstd`` requires MongoDB 4.2 or higher, where it is enabled by default.
+- ``snappy`` requires MongoDB 3.4 or higher and is enabled by default in
+ MongoDB 3.6 or higher.
+- ``zlib`` requires MongoDB 3.6 or higher and is enabled by default in
+ MongoDB 4.2 and higher.
+
+
+.. _server-api-parameters:
+
+Server API Parameters
+=====================
+
+Starting with MongoDB 5.0, applications can request that the server behaves
+in accordance with a particular server API version.
+
+Server API parameters can be specified via the ``:server_api`` option to
+``Client``. These parameters cannot be provided via a URI.
+
+Currently the only defined API version is ``"1"``. It can be requested
+as follows:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['localhost'], server_api: {version: "1"})
+
+MongoDB server defines API versions as string values. For convenience, if the
+API version is provided as an integer, the Ruby driver will stringify it and
+send it to the server as a string:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['localhost'], server_api: {version: 1})
+
+Note that the server may define API versions that are not stringified integers.
+Applications must not assume that all legal API versions can be expressed
+as integers.
+
+When a particular API version is requested, operations which are part of that
+API version behave as specified in that API version. Operations which are not
+part of the specified API version behave as they would had the API version
+not been specified at all. Operations whose behavior is subject to the
+configured API version include commands and their arguments, queries, and
+aggregation pipeline stages and their arguments.
+
+Applications may request that the server rejects all operations which are not
+part of the specified API version by setting the ``:strict`` option:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['localhost'], server_api: {version: "1", strict: true})
+
+For example, since the ``:tailable`` option is not part of the server API
+version 1, the following query would fail:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['localhost'], server_api: {version: "1", strict: true})
+ client['collection'].find({}, tailable: true)
+ # => Mongo::Error::OperationFailure (BSON field 'FindCommand.tailable' is not allowed with apiStrict:true. (323) (on localhost:27017, modern retry, attempt 1))
+
+Applications may request that the server rejects all operations which are
+deprecated in the specified API version by setting the ``:deprecation_errors``
+option:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['localhost'], server_api: {version: "1", deprecation_errors: true})
+
+Note that, as of this writing, there are no deprecated operations in API
+version ``"1"``.
+
+If the server API parameters have been defined on a ``Client`` object,
+they will be sent by the client as part of each [*]_ executed operation.
+
+.. [*] ``getMore`` commands and commands in transactions do not accept
+ API parameters, thus the driver will not send them in these cases.
+
+MongoDB servers prior to 5.0 do not recognize the API parameters, and will
+produce a variety of errors should the application configure them.
+The Ruby driver will send the API parameters to all MongoDB 3.6 and newer
+servers, but the API parameters should only be configured when the application
+is communicating with MongoDB 5.0 or newer servers. The API parameters
+cannot be sent to MongoDB 3.4 and older servers that use the legacy wire
+protocol; if an application configures the API parameters and connects to
+MongoDB 3.4 or older servers, the driver will produce an error on every
+operation.
+
+The :ref:`command helper ` permits the application to
+send manually constructed commands to the server. If the client is not
+configured with server API parameters, the command helper may be used to
+issue commands with API parameters:
+
+.. code-block:: ruby
+
+ client.database.command(
+ ping: 1,
+ apiVersion: "1",
+ apiStrict: false,
+ apiDeprecationErrors: false,
+ )
+
+If the client is configured with server API parameters, the command helper
+may not be used to issue commands with server API parameters. This includes the
+case when the server API parameters provided to the client and to the
+command helper are identical. If a client is constructed with server API
+parameters, to send different API parameters (or none at all) a new client
+must be constructed, either from scratch or using the ``with`` method.
+
+The server API parameters may only be specified on the client level.
+They may not be specified on the database, collection, session, transaction
+or individual operation level.
+
+
+Development Configuration
+=========================
+
+The driver's default configuration is suitable for production deployments.
+In development, some settings can be adjusted to provide a better developer
+experience.
+
+- ``:server_selection_timeout``: set this to a low value (e.g., ``1``)
+ if your MongoDB server is running locally and you start it manually. A low
+ server selection timeout will cause the driver to fail quickly when there is
+ no server running.
+
+
+Production Configuration
+========================
+
+Please consider the following when deploying an application using the Ruby
+driver in production:
+
+- As of driver version 2.11, the ``:min_pool_size`` client option is completely
+ respected - the driver will create that many connections to each server
+ identified as a standalone, primary or secondary. In previous driver versions
+ the driver created connections on demand. Applications using ``:min_pool_size``
+ will see an increase in the number of idle connections to all servers as of
+ driver version 2.11, and especially to secondaries in replica set deployments
+ and to nodes in sharded clusters.
+- If the application is reverse proxied to by another web server or a load
+ balancer, ``server_selection_timeout`` should generally be set to a lower
+ value than the reverse proxy's read timeout. For example, `Heroku request timeout
+ `_ is 30 seconds and
+ is not configurable; if deploying a Ruby application using MongoDB to Heroku,
+ consider lowering server selection timeout to 20 or 15 seconds.
+
+
+.. _feature-flags:
+
+Feature Flags
+=============
+
+The following is a list of feature flags that the Mongo Ruby Driver provides:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 30 60
+
+ * - Flag
+ - Description
+ * - ``broken_view_aggregate``
+ - When this flag is off, an aggregation done on a view will be executed over
+ the documents included in that view, instead of all documents in the
+ collection. When this flag is on, the view filter is ignored and the
+ aggregation is applied over all of the documents in the view's
+ collection. (default: true)
+ * - ``broken_view_options``
+ - When this flag is turned off, the view options will be correctly
+ propagated to the ``aggregate``, ``count``, ``count_documents``,
+ ``distinct``, and ``estimated_document_count`` methods. When this flag is
+ switched on, the view options will be ignored in those methods.
+ (default: true)
+ * - ``validate_update_replace``
+ - Validates that there are no atomic operators (those that start with $)
+ in the root of a replacement document, and that there are only atomic
+ operators at the root of an update document. If this feature flag is on,
+ an error will be raised on an invalid update or replacement document;
+ if not, a warning will be output to the logs. (default: false)
+
+These feature flags can be set directly on the ``Mongo`` module or using
+the ``options`` method:
+
+.. code-block:: ruby
+
+ Mongo.validate_update_replace = true
+ Mongo.options = { validate_update_replace: true }
diff --git a/source/reference/crud-operations.txt b/source/reference/crud-operations.txt
new file mode 100644
index 000000000..9e638ca02
--- /dev/null
+++ b/source/reference/crud-operations.txt
@@ -0,0 +1,1008 @@
+***************
+CRUD Operations
+***************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+CRUD operations are those which deal with creating, reading, updating,
+and deleting documents.
+
+Key-value Pair Notation
+=======================
+
+Key-value pairs appear in many different contexts in the MongoDB Ruby
+driver, and there are some quirks of syntax with regard to how they can
+be notated which depend on which version of Ruby you're using.
+
+When constructing a document, the following syntax is acceptable and
+correct for Ruby version 1.9 and later:
+
+.. code-block:: ruby
+
+ document = { name: "Harriet", age: 36 }
+
+If you're using Ruby version 2.2 or greater, you can optionally enclose
+your keys in quotes.
+
+.. code-block:: ruby
+
+ document = { "name": "Harriet", "age": 36 }
+
+If you need to use any MongoDB operator which begins with ``$``,
+such as ``$set``, ``$gte``, or ``$near``, you must enclose it in
+quotes. If you're using Ruby version 2.2 or greater, you can notate
+it as follows:
+
+.. code-block:: ruby
+
+ collection.update_one({ name: "Harriet" }, { "$set": { age: 42 } })
+
+If you're using an earlier version of Ruby, use the hashrocket symbol:
+
+.. code-block:: ruby
+
+ collection.update_one({ name: "Harriet" }, { "$set" => { age: 42 } })
+
+Quoted strings and hashrockets for key-value pairs will work with any
+version of Ruby:
+
+.. code-block:: ruby
+
+ collection.update_one({ "name" => "Harriet" }, { "$set" => { age: 42 } })
+
+
+Creating Documents
+==================
+
+To insert documents into a collection, select a
+collection on the client and call ``insert_one`` or ``insert_many``.
+
+Insert operations return a ``Mongo::Operation::Result`` object which
+gives you information about the insert itself.
+
+On MongoDB 2.6 and later, if the insert fails, an exception is
+raised, because write commands are used.
+
+On MongoDB 2.4, an exception is only raised if the insert fails and the
+:manual:`write concern` is 1 or higher.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+
+ result = client[:artists].insert_one( { :name => 'FKA Twigs' } )
+ result.n # returns 1, because 1 document was inserted.
+
+ result = client[:artists].insert_many([
+ { :name => 'Flying Lotus' },
+ { :name => 'Aphex Twin' }
+ ])
+ result.inserted_count # returns 2, because 2 documents were inserted.
+
+.. _specify-decimal128:
+
+Specify a ``Decimal128`` number
+-------------------------------
+
+.. versionadded:: 3.4
+
+:manual:`Decimal128` is a
+:doc:`BSON datatype `
+that employs 128-bit decimal-based floating-point values capable
+of emulating decimal rounding with exact precision. This
+functionality is intended for applications that handle
+:manual:`monetary data `,
+such as financial and tax computations.
+
+The following example inserts a value of type ``Decimal128`` into
+the ``price`` field of a collection named ``inventory``:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+
+ price = BSON::Decimal128.new("428.79")
+ client[:inventory].insert_one({ "_id" => 1,
+ "item" => "26 inch monitor",
+ "price" => price })
+
+The above operation produces the following document:
+
+.. code-block:: javascript
+
+ { "_id" : 1, "item" : "26 inch monitor", "price" : NumberDecimal("428.79") }
+
+You can also create a ``Decimal128`` object from a Ruby ``BigDecimal``
+object, or with ``Decimal128.from_string()``.
+
+.. code-block:: ruby
+
+ big_decimal = BigDecimal("428.79")
+ price = BSON::Decimal128.new(big_decimal)
+ # => BSON::Decimal128('428.79')
+
+ price = BSON::Decimal128.from_string("428.79")
+ # => BSON::Decimal128('428.79')
+
+Query Cache
+===========
+
+The Ruby driver provides a query cache. When enabled, the query cache will
+save the results of find and aggregation queries and return those saved results
+when the same queries are performed again.
+
+To read more about the query cache, visit the
+:ref:`query cache tutorial `.
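+
+A minimal sketch of enabling the cache, assuming a driver version that
+provides the ``Mongo::QueryCache`` module (the collection and query are
+illustrative, and ``client`` is assumed to be a ``Mongo::Client``):
+
+.. code-block:: ruby
+
+ # Enable the query cache for the whole process
+ Mongo::QueryCache.enabled = true
+
+ # Or enable it only for the duration of a block
+ Mongo::QueryCache.cache do
+ client[:artists].find(name: 'Flying Lotus').to_a
+ client[:artists].find(name: 'Flying Lotus').to_a # second call served from the cache
+ end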
+
+Reading
+=======
+
+The Ruby driver provides a fluent interface for queries using the ``find``
+method on the collection. Various options are available
+to the ``find`` method.
+
+The query is lazily executed against the server only when iterating the
+results - at that point the query is dispatched and a ``Mongo::Cursor`` is
+returned.
+
+To find all documents for a given filter, call ``find`` with the
+query:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+
+ client[:artists].find(:name => 'Flying Lotus').each do |document|
+ #=> Yields a BSON::Document.
+ end
+
+To query nested documents, specify the keys in nested order using dot
+notation.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ client[:artists].find("records.releaseYear": 2008).each do |document|
+ #=> Yields a BSON::Document.
+ end
+
+
+Legacy ``$query`` Syntax
+------------------------
+
+*This usage is deprecated.*
+
+The ``find`` method allows providing the query and the options using the
+legacy ``$query`` syntax in the first parameter:
+
+.. code-block:: ruby
+
+ collection.find(:'$query' => {name: 'Mr. Smith'})
+ # Equivalent to:
+ collection.find(name: 'Mr. Smith')
+
+ collection.find(:'$query' => {name: 'Mr. Smith'}, :'$sort' => {age: 1})
+ # Equivalent to:
+ collection.find(name: 'Mr. Smith').sort(age: 1)
+
+When the query is executed against MongoDB 3.2 or newer, the driver will
+use the protocol appropriate for the server version in question, automatically
+converting the query as needed to either a find command or an OP_MSG payload.
+
+
+.. _query-options:
+
+Query Options
+-------------
+
+To add options to a query, chain the appropriate methods after the
+``find`` method. Note that the underlying object, the ``Mongo::Collection::View``,
+is immutable and a new object will be returned after each method call.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+
+ documents = client[:artists].find(:name => 'Flying Lotus').skip(10).limit(10)
+ documents.each do |document|
+ #=> Yields a BSON::Document.
+ end
+
+The following is a full list of the available options that can be added
+when querying and their corresponding methods as examples.
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``allow_disk_use``
+ - When set to true, the server can write temporary data to disk while
+ executing the find operation. This option is only available on MongoDB
+ server versions 4.4 and newer.
+ * - ``allow_partial_results``
+ - For use with sharded clusters. If a shard is down, allows the query
+ to return results from the shards that are up, potentially only getting
+ a portion of the results.
+ * - ``batch_size(Integer)``
+ - Specifies the size of each batch of documents the cursor will return on
+ each ``GETMORE`` operation.
+ * - ``comment(String)``
+ - Adds a comment to the query.
+
+ * - ``explain(**opts)``
+ - Returns the query plan for the query. Pass the :manual:`explain options
+ ` via the keyword arguments using symbol
+ keys.
+
+ .. code-block:: ruby
+
+ # All server versions - default explain behavior
+ client[:artists].find.explain
+
+ # MongoDB 3.0 and newer
+ client[:artists].find.explain(verbosity: :query_planner)
+ client[:artists].find.explain(verbosity: :execution_stats)
+ client[:artists].find.explain(verbosity: :all_plans_execution)
+
+ # Alternative syntax using camel case
+ client[:artists].find.explain(verbosity: "queryPlanner")
+ client[:artists].find.explain(verbosity: "executionStats")
+ client[:artists].find.explain(verbosity: "allPlansExecution")
+
+ # MongoDB 2.6
+ client[:artists].find.explain(verbose: true)
+
+ The explain operation supports ``:session`` and ``:read``
+ (for read preference) options. To specify these options for a single
+ explain operation, they must be given to the ``find`` method as
+ follows:
+
+ .. code-block:: ruby
+
+ client[:artists].find({}, session: session).explain
+
+ client[:artists].find({}, read: {mode: :secondary_preferred}).explain
+
+ If the read preference option is specified on the client or on the
+ collection, it will be passed to the explain operation:
+
+ .. code-block:: ruby
+
+ client[:artists, read: {mode: :secondary_preferred}].find.explain
+
+ Note that the session option is not accepted when creating a collection
+ object.
+
+ The explain command does not support passing the read concern option.
+ If the read concern is specifed on the client or collection level, or
+ if the read concern is specified as a find option, it will NOT be passed
+ by the driver to the explain command.
+
+ .. note::
+
+ The information returned by the server for the ``explain`` command
+ varies with server version and deployment topology. The driver's
+ ``explain`` method returns whatever the server provided.
+
+ **The return value of the explain method is not part of the driver's
+ public API and depends on the server version and deployment topology.**
+
+ * - ``hint(Hash)``
+ - Provides the query with an
+ :manual:`index hint` to use.
+ * - ``let(Hash)``
+ - Mapping of :manual:`variables`
+ to use in the query.
+ * - ``limit(Integer)``
+ - Limits the number of returned documents to the provided value.
+ * - ``max_scan(Integer)``
+ - Sets the maximum number of documents to scan if a full collection scan
+ would be performed. Deprecated as of MongoDB server version 4.0.
+ * - ``max_time_ms(Integer)``
+ - The maximum amount of time to allow the query to run, in milliseconds.
+ * - ``no_cursor_timeout``
+ - MongoDB automatically closes inactive cursors after a period of 10
+ minutes. Call this for cursors to remain open indefinitely on the server.
+ * - ``projection(Hash)``
+ - Specifies the fields to include or exclude from the results.
+
+ .. code-block:: ruby
+
+ client[:artists].find.projection(:name => 1)
+
+ * - ``read(Hash)``
+ - Changes the read preference for this query only.
+
+ .. code-block:: ruby
+
+ client[:artists].find.read(:mode => :secondary_preferred)
+
+ * - ``session(Session)``
+ - The session to use.
+ * - ``show_disk_loc(Boolean)``
+ - Tells the results to also include the location of the documents on disk.
+ * - ``skip(Integer)``
+ - Skip the provided number of documents in the results.
+ * - ``snapshot``
+ - Execute the query in snapshot mode. Deprecated as of MongoDB server version 4.0.
+ * - ``sort(Hash)``
+ - Specifies sort criteria for the query.
+
+ .. code-block:: ruby
+
+ client[:artists].find.sort(:name => -1)
+
+
+Additional Query Operations
+---------------------------
+
+``count_documents``
+ Get the total number of documents matching a filter, or the total number
+ of documents in a collection.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+
+ client[:artists].find(:name => 'Flying Lotus').count_documents
+
+``estimated_document_count``
+ Get an approximate number of documents in the collection.
+
+ Note that unlike ``count_documents``, ``estimated_document_count`` does not
+ accept a filter.
+
+ The ``count`` server command is used to implement ``estimated_document_count``.
+ More information can be found via `Count: Behavior `_.
+
+ Due to an oversight in MongoDB versions 5.0.0-5.0.7, the ``count`` command,
+ which ``estimated_document_count`` uses in its implementation, was not
+ included in v1 of the Stable API. Therefore, users of the Stable API with
+ ``estimated_document_count`` are recommended to upgrade their server version to
+ 5.0.8+ or set ``api_strict: false`` to avoid encountering errors.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+
+ client[:artists].estimated_document_count
+
+``count``
+ Get an approximate number of documents matching a filter, or an approximate
+ number of documents in the collection.
+
+ *Deprecated:* The ``count`` method is deprecated and does not work in
+ transactions. Please use ``count_documents`` to obtain an exact count of
+ documents potentially matching a filter or ``estimated_document_count``
+ to obtain an approximate number of documents in the collection.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+
+ client[:artists].find(:name => 'Flying Lotus').count
+
+``distinct``
+ Filters out documents with duplicate values. Equivalent to the SQL
+ ``distinct`` clause.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+
+ client[:artists].find.distinct(:name)
+
+Tailable Cursors
+----------------
+
+For capped collections you may use a :manual:`tailable cursor
+` that remains open
+after the client exhausts the results in the initial cursor. The
+following code example shows how a tailable cursor might be used:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ client[:artists].drop
+ client[:artists, capped: true, size: 512].create
+
+ result = client[:artists].insert_many([
+ { :name => 'Flying Lotus' },
+ { :name => 'Aphex Twin' }
+ ])
+
+ enum = client[:artists].find({}, cursor_type: :tailable_await).to_enum
+
+ while true
+ doc = enum.next
+ # do something
+ sleep(1)
+ end
+
+
+Read Concern
+------------
+
+Read concern can be :ref:`set on the client `
+or on the collection:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(['localhost:14420'], database: 'music',
+ read_concern: {level: :local})
+
+ client['collection'].find.to_a
+
+ collection = client['collection', read_concern: {level: :majority}]
+
+ collection.find.to_a
+
+The driver does not currently support setting read concern on an individual
+query.
+
+Read concern can be specified when :ref:`starting a transaction
+`. When a transaction is active, :manual:`any read concern
+specified on the client or on the collection is ignored
+`.
+
+When using the generic command helper, the read concern can be specified as
+part of the command:
+
+.. code-block:: ruby
+
+ client.database.command(dbStats: 1, readConcern: {level: :majority})
+
+
+.. _read-preference:
+
+Read Preference
+---------------
+
+Read preference determines the candidate :manual:`replica set`
+members to which a query or command can be sent. They consist of a **mode**
+specified as a symbol, an array of hashes known as **tag_sets**,
+the ``hedge`` option, which is a Hash specifying hedged read behavior, and two
+timing options: **local_threshold** and **server_selection_timeout**.
+
+``local_threshold``
+ Defines the upper limit in seconds of the latency window
+ between the nearest server and suitable servers to which an operation may be sent.
+ The default is 15 milliseconds, or 0.015 seconds.
+
+``server_selection_timeout``
+ Defines how long to block for server selection
+ before throwing an exception. The default is 30,000 milliseconds, or 30 seconds.
+
+.. note::
+
+ Read preference does not apply to Standalone deployments. When a client
+ is connected to a Standalone deployment, any application-specified read
+ preference is ignored.
+
+For more information on the algorithm used to select a server, please
+refer to the `Server Selection documentation, available on GitHub
+`_.
+
+Read preference can be set as an option on the client or passed as an
+option when a command is run on a database:
+
+.. code-block:: ruby
+
+ # Set read preference on a client, used for all operations
+ client = Mongo::Client.new([ '127.0.0.1:27017' ],
+ read: { mode: :secondary,
+ tag_sets: [ { 'dc' => 'nyc' } ]
+ } )
+
+ # Set read preference for a given command
+ client.database.command( { dbStats: 1 }, read: { mode: :secondary,
+ tag_sets: [ { 'dc' => 'nyc' } ] } )
+
+Read preference can also be set for specific operations on a collection
+using the ``with`` method:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ artists = client[:artists]
+ artists.with(:read => { :mode => :primary_preferred }).find.to_a
+
+Mode
+----
+
+There are five possible read preference modes: ``:primary``, ``:secondary``,
+``:primary_preferred``, ``:secondary_preferred`` and ``:nearest``.
+Please see the :manual:`read preference documentation in the MongoDB Manual
+` for an explanation of the modes.
+
+.. note::
+
+ When a client is directly connected to a server using the ``:direct_connection``
+ Ruby option or the ``directConnection`` URI option, read preference mode
+ is automatically set to ``:primary_preferred`` to permit read operations
+ against secondaries. If the application specified a ``:primary`` read
+ preference mode, the mode is automatically converted to ``:primary_preferred``.
+ If another read preference mode is specified, it is passed to the server
+ unchanged.
+
+Tag sets
+--------
+
+The ``tag_sets`` parameter is an ordered list of tag sets used to
+restrict the eligibility of servers for selection, such as for data
+center awareness. Please see the :manual:`read preference documentation in
+the MongoDB Manual ` for an explanation of tag sets.
+
+
+A read preference tag set (T) matches a server tag set (S), or
+equivalently a server tag set (S) matches a read preference tag set
+(T), if T is a subset of S.
+
+For example, the read preference tag set ``{ dc: 'ny', rack: 2 }``
+matches a secondary server with tag set ``{ dc: 'ny', rack: 2, size: 'large' }``.
+
+A tag set that is an empty document matches any server, because
+the empty tag set is a subset of any tag set. This means the default
+``tag_sets`` parameter ``[{}]`` matches all servers.
+
+Hedge
+-----
+
+The ``hedge`` parameter is a Hash that specifies whether the server should use
+hedged reads. With hedged reads, sharded clusters can route read operations to
+two replica set members and return results from the first respondent.
+
+The ``hedge`` option may only be specified on non-primary read preferences. It
+must be provided as a Hash with the key ``enabled`` set to ``true`` or ``false``.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(
+ [ '127.0.0.1:27017' ],
+ read: { mode: :secondary, hedge: { enabled: true } },
+ )
+
+See the :manual:`MongoDB Manual ` for
+more information about hedged reads.
+
+.. note::
+
+ The ``hedge`` option is only available on MongoDB server versions 4.4 and newer.
+ Attempting to use this option on older server versions will result in an error.
+
+.. _updating:
+
+Updating
+========
+
+Updating documents is possible by executing a single or
+multiple update, or by using the ``findAndModify`` command.
+
+``update_one``
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ artists = client[:artists]
+
+ result = artists.find(:name => 'Goldie').update_one("$inc" => { :plays => 1 } )
+ result.n # Returns 1.
+
+ result = artists.update_one( { :name => 'Goldie' }, { "$inc" => { :plays => 1 } } )
+ result.n # Returns 1.
+
+``update_many``
+
+.. code-block:: ruby
+
+ result = artists.find(:label => 'Hospital').update_many( "$inc" => { :plays => 1 } )
+ result.modified_count # Returns the number of documents that were updated.
+
+ result = artists.update_many( { :label => 'Hospital' }, { "$inc" => { :plays => 1 } } )
+ result.modified_count # Returns the number of documents that were updated.
+
+``replace_one``
+
+.. code-block:: ruby
+
+ result = artists.find(:name => 'Aphex Twin').replace_one(:name => 'Richard James')
+ result.modified_count # Returns 1.
+
+ result = artists.replace_one( { :name => 'Aphex Twin' }, { :name => 'Richard James' } )
+ result.modified_count # Returns 1.
+
+To update documents and return a document via ``$findAndModify``, use one of
+the three provided helpers: ``find_one_and_delete``, ``find_one_and_replace``,
+or ``find_one_and_update``. You can opt to return the document before or after
+the modification occurs.
+
+``find_one_and_delete``
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new( [ '127.0.0.1:27017' ], :database => 'music')
+ artists = client[:artists]
+
+ artists.find(:name => 'José James').find_one_and_delete # Returns the document.
+
+``find_one_and_replace``
+
+.. code-block:: ruby
+
+ doc = artists.find(:name => 'José James').find_one_and_replace(:name => 'José')
+ doc # Returns the document before the update.
+
+ doc = artists.find_one_and_replace({ :name => 'José James' }, { :name => 'José' })
+ doc # Returns the document before the update.
+
+ doc = artists.find(:name => 'José James').
+ find_one_and_replace( { :name => 'José' }, :return_document => :after )
+ doc # Returns the document after the update.
+
+``find_one_and_update``
+
+.. code-block:: ruby
+
+ doc = artists.find(:name => 'José James').
+ find_one_and_update( '$set' => { :name => 'José' } )
+ doc # Returns the document before the update.
+
+ doc = artists.find_one_and_update( { :name => 'José James' }, { '$set' => { :name => 'José' } } )
+ doc # Returns the document before the update.
+
+Update Options
+--------------
+
+To add options to an update command, specify them as key-value pairs in the options
+Hash argument.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ artists = client[:artists]
+
+ artists.indexes.create_one(name: 1)
+
+ # Force the server to use the name index to perform this operation
+ result = artists.update_one(
+ { :name => 'Goldie' },
+ { "$inc" => { :plays => 1 } },
+ { hint: { name: 1 } }
+ )
+ result.n # Returns 1.
+
+The following is a list of the options that can be added to update operations,
+including ``update_one``, ``update_many``, ``replace_one``,
+``find_one_and_delete``, ``find_one_and_update``, and ``find_one_and_replace``.
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``array_filters``
+ - An Array of filter documents that determine which array elements to modify
+ for an update operation on an array field.
+ * - ``bypass_document_validation``
+ - Whether to skip document-level validation before writing the document.
+ * - ``collation``
+ - Specifies a set of rules to use when comparing strings complying with the
+ conventions of a particular language.
+ * - ``hint``
+ - The index to use for this operation. May be specified as a Hash
+ (e.g. { _id: 1 }) or as a String (e.g. "_id_"). Supported on MongoDB
+ server versions 4.2 and newer for ``update_one``, ``update_many``, and
+ ``replace_one`` commands, and on server versions 4.4 and newer for
+ ``find_one_and_delete``, ``find_one_and_update``, and ``find_one_and_replace``
+ commands.
+ * - ``let(Hash)``
+ - Mapping of :manual:`variables`
+ to use for this operation.
+ * - ``projection``
+ - The fields to exclude or include in the operation result (only available
+ on ``find_one_and_delete``, ``find_one_and_replace``, and
+ ``find_one_and_update`` commands).
+ * - ``return_document``
+ - A symbol specifying whether to return the updated document as it was before or
+ after the update. Potential values are ``:before`` or ``:after``.
+ (Only available on ``find_one_and_update`` and ``find_one_and_replace`` commands).
+ * - ``sort``
+ - How to sort the results of a find and modify command. Specified as a Hash
+ key-value pair, where the key is the name of the field to sort by, and
+ the value is either 1 or -1, specifying a sort in ascending or descending
+ order (only available on ``find_one_and_delete``, ``find_one_and_replace``,
+ and ``find_one_and_update`` commands).
+ * - ``session``
+ - The session to use for this operation.
+ * - ``upsert``
+ - Whether to upsert if the document doesn't exist. Cannot be used with the
+ ``find_one_and_delete`` operation.
+
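+For example, the ``upsert`` option inserts a new document when no document
+matches the filter. The following is a sketch, reusing the ``artists``
+collection from the examples above; the field values are illustrative:
+
+.. code-block:: ruby
+
+  result = artists.update_one(
+    { :name => 'Aphex Twin' },
+    { "$set" => { :label => 'Warp' } },
+    { :upsert => true }
+  )
+  result.n # Returns 1.
+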
+For more information about update options, see the MongoDB server documentation
+on the following commands:
+
+- :manual:`update `
+- :manual:`findAndModify `
+
+Deleting
+========
+
+``delete_one``
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ artists = client[:artists]
+
+ result = artists.find(:name => 'Björk').delete_one
+ result.deleted_count # Returns 1.
+
+ result = artists.delete_one(:name => 'Björk')
+ result.deleted_count # Returns 1.
+
+``delete_many``
+
+.. code-block:: ruby
+
+ result = artists.find(:label => 'Mute').delete_many
+ result.deleted_count # Returns the number deleted.
+
+ result = artists.delete_many(:label => 'Mute')
+ result.deleted_count # Returns the number deleted.
+
+Delete Options
+--------------
+
+To add options to a delete command, specify them as key-value pairs in the
+options Hash argument.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ artists = client[:artists]
+
+ artists.indexes.create_one(name: 1)
+
+ # Force the server to use the name index to perform this operation
+ result = artists.find(:name => 'Björk').delete_one(hint: { name: 1 })
+ result.deleted_count # Returns 1.
+
+The following is a full list of the available options that can be added
+to ``delete_one`` and ``delete_many`` operations.
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``collation``
+ - Specifies a set of rules to use when comparing strings complying with the
+ conventions of a particular language.
+ * - ``hint``
+ - The index to use for this operation. May be specified as a Hash
+ (e.g. { _id: 1 }) or as a String (e.g. "_id_"). Supported on MongoDB
+ server versions 4.4 and newer.
+ * - ``let(Hash)``
+ - Mapping of :manual:`variables`
+ to use for this operation.
+ * - ``session``
+ - The session to use for this operation.
+
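+For example, a case-insensitive delete can be issued by supplying a
+``collation``. The following is a sketch, reusing the ``artists`` collection
+from the example above; the locale and strength values are illustrative:
+
+.. code-block:: ruby
+
+  # With a primary-strength collation, string comparisons ignore case and
+  # diacritics, so this filter also matches documents whose name is 'Björk'.
+  result = artists.delete_many(
+    { :name => 'bjork' },
+    { :collation => { :locale => 'en_US', :strength => 1 } }
+  )
+  result.deleted_count # Returns the number deleted.
+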
+For more information about delete options, see the MongoDB server documentation
+on the :manual:`delete command. `
+
+.. _write-concern:
+
+Write Concern
+=============
+
+All write operations in MongoDB are executed with a write concern, which is
+the level of acknowledgment requested from MongoDB for the particular write.
+More information about write concerns in general is available in the
+`MongoDB manual `_.
+
+The Ruby driver supports specifying write concern on client, collection,
+session (for transactions on that session), transaction, GridFS bucket
+and write stream levels, as well as when manually issuing commands via
+``Database#command``.
+
+As of driver version 2.10, all driver objects accepting write concerns do so
+through the ``:write_concern`` option, which should be given a hash with
+the write concern options. Usage of the ``:write`` option is deprecated.
+In driver versions 2.9 and below, client, collection and GridFS objects
+took write concern options in the ``:write`` option with session and
+transaction objects employing the ``:write_concern`` option.
+
+Below are some examples of passing write concerns to client and collection
+objects. The ``:write_concern`` option can be provided when constructing
+new client and collection objects, or to the ``#with`` methods.
+
+GridFS examples are provided on the :ref:`GridFS ` page.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music',
+ write_concern: {w: 2})
+ alt_client = client.with(write_concern: {w: :majority})
+
+ collection = client[:artists, write_concern: {w: 3}]
+ alt_collection = collection.with(write_concern: {w: :majority})
+
+ # Uses w: 3
+ collection.insert_one({name: 'SUN Project'})
+ # Uses w: :majority
+ alt_collection.insert_one({name: 'SUN Project'})
+
+Driver versions 2.9 and earlier accepted write concerns on client and collection
+level via the ``:write`` option. This usage continues to be supported for
+backwards compatibility, but is deprecated:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music',
+ write: {w: 2})
+ alt_client = client.with(write: {w: :majority})
+
+ collection = client[:artists, write: {w: 3}]
+ alt_collection = collection.with(write: {w: :majority})
+
+If both ``:write`` and ``:write_concern`` options are provided, their
+values must be identical or an exception will be raised:
+
+.. code-block:: ruby
+
+ # OK
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music',
+ write_concern: {w: 3}, write: {w: 3})
+
+ # Error
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music',
+ write_concern: {w: 3}, write: {w: :majority})
+
+When ``#with`` methods are used to alter the options on a client or collection,
+the last provided option wins in case of naming differences:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music',
+ write_concern: {w: 2})
+ alt_client = client.with(write: {w: 3})
+
+ alt_client.options[:write]
+ # => {"w"=>3}
+
+ alt_client.options[:write_concern]
+ # => nil
+
+When using transactions, write concern is only sent to the server in
+``commit_transaction`` and ``abort_transaction`` operations
+per the `transactions specification
+`_.
+Write concern may be set via the ``:write_concern`` option in a
+``with_transaction`` or ``start_transaction`` call, or via
+``default_transaction_options`` option on a session object.
+If neither of these is set, the write concern of the client is used; note
+that transactions ignore the write concerns of collections that are involved
+in their operations. Also note that when setting the write concern as a
+transaction option, the ``:write`` option is not recognized by any
+driver version.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music',
+ write_concern: {w: 2})
+ collection = client[:artists, write_concern: {w: :majority}]
+
+
+ session = client.start_session
+ session.with_transaction do
+ collection.insert_one({test: 1}, session: session)
+
+ # Uses w: 2 when committing
+ end
+
+
+ session = client.start_session(default_transaction_options: {
+   write_concern: {w: 3},
+ })
+ session.with_transaction do
+ collection.insert_one({test: 1}, session: session)
+
+ # Uses w: 3 when committing
+ end
+
+
+ session = client.start_session
+ session.with_transaction(write_concern: {w: 3}) do
+ collection.insert_one({test: 1}, session: session)
+
+ # Uses w: 3 when committing
+ end
+
+When write concerns are inherited, inheritance applies to the entire
+write concern hash rather than individual elements. For example, ``j: true``
+is not inherited in the following case:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music',
+ write_concern: {w: 1, j: true})
+ collection = client[:artists, write_concern: {w: 2}]
+
+ collection.write_concern.options
+ # => {:w=>2}
+
+Although CRUD operations accept an options hash, they currently do not
+recognize the ``:write_concern`` option:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music',
+ write_concern: {w: 2})
+ collection = client[:artists, write_concern: {w: :majority}]
+
+ # Still uses w: :majority
+ collection.insert_one({name: 'SUN Project'}, write_concern: {w: 1})
+
+The easiest workaround for this is to use ``#with`` to obtain a new collection
+instance with the desired write concern:
+
+.. code-block:: ruby
+
+ # Uses w: 1
+ collection.with(write_concern: {w: 1}).insert_one(name: 'SUN Project')
+
+Write concern can also be manually specified in ``Database#command``:
+
+.. code-block:: ruby
+
+ client.database.command(create: 'foo-collection', writeConcern: {w: :majority})
+
+Note that ``writeConcern`` here is part of the command document rather than
+the options, and it uses the camel-case spelling that the MongoDB server
+recognizes, not the underscore spelling that the Ruby driver uses.
+
+.. _dots-dollars-in-field-names:
+
+Field Names with Dots/Periods (.) and Dollar Signs ($)
+======================================================
+
+Starting in Ruby driver version 2.18.0, you can work with fields whose names
+begin with dollar signs ($) or contain dots/periods (.).
+In driver version 2.17.0 and earlier, any attempt to work with such
+fields would result in an ``IllegalKey`` error being raised. See the MongoDB docs
+on `Field Names with Periods (.) and Dollar Signs ($) `_
+for more information on working with these types of fields.
+
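+For example, with driver version 2.18.0 or newer, the following sketch (the
+field name ``chart.position`` is illustrative) stores and reads back a
+document containing a dotted field name:
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+  artists = client[:artists]
+
+  result = artists.insert_one(:name => 'Goldie', 'chart.position' => 1)
+  doc = artists.find(:_id => result.inserted_id).first
+  doc['chart.position'] # Returns 1.
+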
+A Note about the BSON Symbol type
+=================================
+
+Because the BSON specification deprecated the BSON symbol type, the ``bson`` gem
+will serialize Ruby symbols into BSON strings when used on its own. However, in
+order to maintain backwards compatibility with older datasets, the Ruby driver
+overrides this behavior to serialize Ruby symbols as BSON symbols. This is
+necessary to be able to specify queries for documents which contain BSON
+symbols as fields. Despite this, new documents with symbol type fields should
+*not* be stored in the database; instead, use string fields.
+
+To override default behavior and configure the driver to encode symbol values
+as strings, include the following code snippet in your project:
+
+.. code-block:: ruby
+
+ class Symbol
+ def bson_type
+ BSON::String::BSON_TYPE
+ end
+ end
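+
+With the override in place, symbol values in documents and queries are sent to
+the server as BSON strings. The following is a brief sketch; the collection and
+field values are illustrative:
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+  artists = client[:artists]
+
+  artists.insert_one(:name => 'Goldie', :genre => :drum_and_bass)
+  artists.find(:genre => 'drum_and_bass').first['genre']
+  # Returns "drum_and_bass", because the symbol was stored as a BSON string.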
diff --git a/source/reference/database-tasks.txt b/source/reference/database-tasks.txt
new file mode 100644
index 000000000..1806f2f9c
--- /dev/null
+++ b/source/reference/database-tasks.txt
@@ -0,0 +1,63 @@
+*********
+Databases
+*********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+The driver provides various helpers on database objects for executing
+commands, getting collection lists, and administrative tasks.
+
+
+List Collections
+================
+
+To get a list of collections or collection names for a database, use
+``collections`` and ``collection_names``, respectively.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music')
+ database = client.database
+
+ database.collections # Returns an array of Collection objects.
+ database.collection_names # Returns an array of collection names as strings.
+
+
+.. _arbitrary-commands:
+
+Arbitrary Commands
+==================
+
+To execute any command on the database, use the ``command`` method.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music')
+ database = client.database
+
+ result = database.command(:ping => 1)
+ result.first # Returns the BSON::Document returned from the server.
+
+.. note::
+
+ Specifying server API version as a client option and also specifying
+ any of the respective command parameters to the ``command`` method
+ (i.e. the ``apiVersion``, ``apiStrict`` and ``apiDeprecationErrors``
+ command parameters) at the same time is not allowed and will produce an error.
+
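+For example, the server API version can instead be declared as a client
+option. The following is a sketch; the database name is illustrative:
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new(
+    [ '127.0.0.1:27017' ],
+    database: 'music',
+    server_api: { version: "1" }
+  )
+
+  # The driver attaches the API version parameters to commands automatically;
+  # do not also pass apiVersion to the command method.
+  client.database.command(:ping => 1)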
+
+Drop Database
+=============
+
+To drop a database, use the ``drop`` method.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ client.database.drop
diff --git a/source/reference/driver-compatibility.txt b/source/reference/driver-compatibility.txt
new file mode 100644
index 000000000..0402c17f8
--- /dev/null
+++ b/source/reference/driver-compatibility.txt
@@ -0,0 +1,646 @@
+.. _compatibility:
+
+********************
+Driver Compatibility
+********************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+
+.. _mongodb-compatibility:
+
+MongoDB Compatibility
+=====================
+
+The following compatibility table specifies the recommended
+version(s) of the MongoDB Ruby driver for use with a specific version of
+MongoDB. Except when indicated, the specified driver versions expose or
+take advantage of the features added in the corresponding server versions.
+
+MongoDB server releases are generally backwards compatible, meaning a
+particular version of the driver will generally work with newer versions of
+the server but may not take advantage of the functionality released in the
+newer version of the server.
+
+The first column lists the driver versions. "D" in other columns means support
+for that MongoDB version is deprecated and will be removed in a future driver
+version.
+
+.. list-table::
+ :header-rows: 1
+ :stub-columns: 1
+ :class: compatibility-large no-padding
+
+ * - Ruby Driver
+ - MongoDB 7.0
+ - MongoDB 6.0
+ - MongoDB 5.0
+ - MongoDB 4.4
+ - MongoDB 4.2
+ - MongoDB 4.0
+ - MongoDB 3.6
+ - MongoDB 3.4
+ - MongoDB 3.2
+ - MongoDB 3.0
+ - MongoDB 2.6
+
+ * - 2.19
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ -
+ -
+ -
+
+ * - 2.18
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ -
+ -
+ -
+
+ * - 2.17
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ -
+ -
+ -
+
+ * - 2.16
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - D
+ - D
+ - D
+ - D
+
+ * - 2.15
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.14
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.13
+ -
+ -
+ -
+ - |checkmark| [#ocsp]_
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.12
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.11
+ -
+ -
+ -
+ -
+ - |checkmark| [#client-side-encryption]_
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.10
+ -
+ -
+ -
+ -
+ - |checkmark| [#srv-polling]_ [#client-side-encryption]_
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.9
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.8
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.7
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.6
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.5
+ -
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+
+.. [#ocsp] OCSP verification is implemented as of driver version 2.14.
+
+.. [#srv-polling] Polling of SRV records in sharded topologies is
+ implemented as of driver version 2.11.
+
+.. [#client-side-encryption] Client-side encryption is implemented as of
+ driver version 2.12.
+
+The driver does not support older versions of MongoDB.
+
+
+.. _ruby-compatibility:
+
+Ruby Compatibility
+==================
+
+The following compatibility table specifies the versions of Ruby supported
+by the various versions of the MongoDB Ruby driver.
+
+The first column lists the driver versions. "D" in a column means support
+for that Ruby version is deprecated.
+
+.. list-table::
+ :header-rows: 1
+ :stub-columns: 1
+ :class: compatibility-large no-padding
+
+ * - Ruby Driver
+ - Ruby 3.2
+ - Ruby 3.1
+ - Ruby 3.0
+ - Ruby 2.7
+ - Ruby 2.6
+ - Ruby 2.5
+ - Ruby 2.4
+ - Ruby 2.3
+ - Ruby 2.2
+ - Ruby 2.1
+ - Ruby 2.0
+ - Ruby 1.9
+ - JRuby 9.3
+ - JRuby 9.2
+ - JRuby 9.1
+
+ * - 2.19
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - D
+ - D
+ -
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ -
+
+ * - 2.18
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ -
+
+ * - 2.17
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ -
+
+ * - 2.16
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - D
+ -
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ -
+
+ * - 2.15
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - D
+ - D
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ -
+
+ * - 2.14
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - D
+ - D
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ -
+
+ * - 2.13
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ -
+
+ * - 2.12
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ -
+
+ * - 2.11
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ -
+ -
+ -
+ -
+ - |checkmark|
+ -
+
+ * - 2.10
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - D
+ - D
+ - D
+ - D
+ -
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.9
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - D
+ - D
+ - D
+ - D
+ -
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.8
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.7
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ - |checkmark|
+ - |checkmark|
+
+ * - 2.6
+ -
+ -
+ -
+ -
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ - |checkmark|
+ -
+ - |checkmark|
+ - |checkmark|
+
+The driver does not support older versions of Ruby.
+
+
+Rails/ActiveSupport Compatibility
+=================================
+
+The Ruby driver does not depend on ActiveSupport. However, when an
+application uses ActiveSupport or Ruby on Rails,
+it must load the driver's ActiveSupport
+compatibility code for behavior like time serialization to be correct:
+
+.. code-block:: ruby
+
+ require 'mongo'
+ require 'mongo/active_support'
+
+Applications using Mongoid 7.0.6 or newer do not need to explicitly load
+the driver's ActiveSupport code, since Mongoid automatically does so.
+
+
+.. _tls-compatibility:
+
+TLS/SSL Compatibility
+=====================
+
+The driver will utilize the protocols supported by the underlying Ruby
+``openssl`` extension. In turn, the ``openssl`` extension generally exposes
+the functionality that exists in the operating system's OpenSSL library.
+
+Industry best practices, and some regulations, require the use of TLS 1.1
+or newer. Some operating systems or versions may not provide an OpenSSL version
+new enough to support these TLS versions.
+
+Users of macOS older than 10.13 (High Sierra) will need to install Ruby from
+`rvm`_, `homebrew`_, `macports`_, or another similar source. See
+`installation information on ruby-lang.org`_ for more options.
+
+Users of Linux or other non-macOS Unix can check their OpenSSL version
+as follows:
+
+.. code-block:: sh
+
+ openssl version
+
+If the version number is less than 1.0.1, support for TLS 1.1 or newer is
+not available. Contact your operating system vendor for a solution or upgrade
+to a newer distribution.
+
+You can check your Ruby interpreter by executing the following command:
+
+.. code-block:: sh
+
+ ruby -e "require 'net/http'; require 'json'; puts JSON.parse(Net::HTTP.get(URI('https://www.howsmyssl.com/a/check')))['tls_version']"
+
+You should see "TLS 1.X" where X is >= 1.
+
+You can read more about TLS versions and their security implications `here
+`_.
+
+.. _rvm: https://rvm.io/
+.. _homebrew: https://brew.sh/
+.. _macports: https://www.macports.org/
+.. _installation information on ruby-lang.org: https://www.ruby-lang.org/en/documentation/installation
+
+
+Atlas Compatibility
+===================
+
+`Driver version 2.6.1 `_
+or higher is recommended when using MongoDB Atlas, as this version has
+significant performance improvements when TLS connections are used, and all
+Atlas connections use TLS.
+
+When running on JRuby and connecting to Atlas Free Tier,
+`driver version 2.6.4 `_
+or higher and Java 8 or higher are required.
+
+
+``mongo_kerberos`` Compatibility
+================================
+
+The following compatibility table specifies the version(s) of the
+:ref:`mongo_kerberos library ` to use with a specific version of
+the driver.
+
+.. list-table::
+ :header-rows: 1
+ :stub-columns: 1
+ :class: compatibility-large no-padding
+
+ * - Ruby Driver
+ - mongo_kerberos |nbsp| 2.1
+
+ * - 2.7 - 2.19
+ - |checkmark|
+
+
+JRuby and Kerberos Authentication
+=================================
+
+If the ``mongo_kerberos`` gem is used for Kerberos authentication with JRuby, the JVM system
+property ``sun.security.jgss.native`` will be set to ``true`` in order to facilitate the use of
+the system cache of TGTs (e.g. TGTs obtained with ``kinit``). Any other use of the JGSS library
+will also be affected by this setting, meaning any TGTs in the system cache will be available for
+obtaining Kerberos credentials as well.
+
+.. include:: /includes/unicode-checkmark.rst
+.. include:: /includes/unicode-nbsp.rst
+
+
+JRuby and TLS Connections
+=========================
+
+Due to JRuby limitations:
+
+- ECDSA server certificates are not supported.
+- OCSP endpoint checking is not performed.
diff --git a/source/reference/geospatial-search.txt b/source/reference/geospatial-search.txt
new file mode 100644
index 000000000..8550d64b3
--- /dev/null
+++ b/source/reference/geospatial-search.txt
@@ -0,0 +1,106 @@
+*****************
+Geospatial Search
+*****************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+MongoDB offers a number of indexes and query mechanisms to handle
+geospatial information. This section demonstrates how to create
+and use
+:manual:`geospatial indexes`
+with the Ruby driver.
+
+The examples on this page use a sample collection called
+``restaurants`` in the ``test`` database.
+A `sample dataset `_
+is available for download.
+
+The following is a sample document in the ``restaurants``
+collection:
+
+.. code-block:: javascript
+
+ {
+ "address": {
+ "building": "1007",
+ "coord": [ -73.856077, 40.848447 ],
+ "street": "Morris Park Ave",
+ "zipcode": "10462"
+ },
+ "borough": "Bronx",
+ "cuisine": "Bakery",
+ "grades": [
+ { "date": { "$date": 1393804800000 }, "grade": "A", "score": 2 },
+ { "date": { "$date": 1299715200000 }, "grade": "B", "score": 14 }
+ ],
+ "name": "Morris Park Bake Shop",
+ "restaurant_id": "30075445"
+ }
+
+The following example creates a ``2dsphere`` index on the
+``address.coord`` field:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test' )
+ client[:restaurants].indexes.create_one( { 'address.coord' => '2dsphere' })
+
+Once the index is created, you can use several operators to query
+against it, including the
+:manual:`$near`,
+:manual:`$geoWithin`, and
+:manual:`$geoIntersects`
+operators. The following example uses the ``$near`` operator to find
+all restaurants within 500 meters of the given coordinates.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new('mongodb://127.0.0.1:27017/test')
+ collection = client[:restaurants]
+
+ collection.find(
+ { 'address.coord' =>
+ { "$near" =>
+ { "$geometry" =>
+ { "type" => "Point", "coordinates" => [ -73.96, 40.78 ] },
+ "$maxDistance" => 500
+ }
+ }
+ }
+ ).each do |doc|
+
+ #=> Yields a BSON::Document.
+
+ end
+
+To find all documents with a location within the
+perimeter of a given polygon, use the ``$geoWithin``
+operator:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new('mongodb://127.0.0.1:27017/test')
+ collection = client[:restaurants]
+
+ collection.find(
+ { "address.coord" =>
+ { "$geoWithin" =>
+ { "$geometry" =>
+ { "type" => "Polygon" ,
+ "coordinates" => [ [ [ -73, 40 ], [ -74, 41 ], [ -72, 39 ], [ -73, 40 ] ] ]
+ }
+ }
+ }
+ }
+ ).each do |doc|
+
+ #=> Yields a BSON::Document.
+
+ end
+
diff --git a/source/reference/gridfs.txt b/source/reference/gridfs.txt
new file mode 100644
index 000000000..756750b18
--- /dev/null
+++ b/source/reference/gridfs.txt
@@ -0,0 +1,325 @@
+.. _gridfs:
+
+******
+GridFS
+******
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+The driver provides a clean and simple interface to work with storage of
+chunked files in the database, also known as the GridFS pattern. The API allows you to
+either work with ``Grid::File`` objects or with read and write streams.
+
+Creating a GridFS object ("Grid::FSBucket")
+===========================================
+
+You can create a GridFS object by calling ``fs`` on a database, with optional
+arguments. ``fs`` returns a ``Grid::FSBucket`` object.
+
+The options that ``Grid::FSBucket`` supports are:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``:bucket_name``
+ - The name of the GridFS Bucket. Default is ``fs``.
+ * - ``:fs_name``
+ - The name of the GridFS Bucket. Takes precedence over ``bucket_name``.
+ Default is ``fs``.
+ * - ``:chunk_size``
+ - Specifies the size of each file chunk in the database.
+ * - ``:write_concern``
+ - The write concern to use when uploading files. Please see the
+ :ref:`Write Concern ` section under CRUD operations
+ for how to work with write concerns.
+ * - ``:write``
+ - Deprecated. Same as ``:write_concern``.
+ * - ``:read``
+ - The read preference to use when downloading files.
+
+
+For example, you can create a GridFS bucket object with a particular read preference:
+
+.. code-block:: ruby
+
+ fs_bucket = database.fs( read: { mode: :secondary } )
+
+
+Working with write streams
+==========================
+
+To upload a file to GridFS using a write stream, you can either open a stream
+and write to it directly or write the entire contents of an ``IO`` object to
+GridFS all at once.
+
+To open an upload stream and write to it:
+
+.. code-block:: ruby
+
+ File.open('/path/to/my-file.txt', 'r') do |file|
+ fs_bucket.open_upload_stream('my-file.txt') do |stream|
+ stream.write(file)
+ end
+ end
+
+To upload the entire contents of an IO object in one call:
+
+.. code-block:: ruby
+
+ File.open('/path/to/my-file.txt', 'r') do |file|
+ fs_bucket.upload_from_stream('my-file.txt', file)
+ end
+
+Write streams support the following options:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``:chunk_size``
+ - Specifies the size of each file chunk in the database.
+ * - ``:write_concern``
+ - The write concern to use when uploading files. Please see the
+ :ref:`Write Concern ` section under CRUD operations
+ for how to work with write concerns.
+ * - ``:write``
+ - Deprecated. Same as ``:write_concern``.
+
+The options can be provided as the last argument to the write stream methods:
+
+.. code-block:: ruby
+
+ fs_bucket.open_upload_stream('my-file.txt', write_concern: {w: 2}) do |stream|
+ stream.write_concern
+ # => #<Mongo::WriteConcern::Acknowledged:0x... options={:w=>2}>
+
+ # ...
+ end
+
+ fs_bucket.upload_from_stream('my-file.txt', file, write_concern: {w: 2})
+
+
+Working with read streams
+=========================
+
+To download a file from GridFS using a read stream, you can either open a
+read stream and read from it directly or download the entire file all at once.
+
+To open a download stream and read from it:
+
+.. code-block:: ruby
+
+ File.open('/path/to/my-output-file.txt', 'w') do |file|
+ fs_bucket.open_download_stream(file_id) do |stream|
+ file.write(stream.read)
+ end
+ end
+
+To download the file all at once and write it to an IO object:
+
+.. code-block:: ruby
+
+ File.open('/path/to/my-output-file.txt', 'w') do |file|
+ fs_bucket.download_from_stream(file_id, file)
+ end
+
+You can also download a file specified by a name and (optionally)
+revision number. Revision numbers are used to distinguish between files
+sharing the same name, ordered by date of upload. The revision number passed to
+``open_download_stream_by_name`` can be positive or negative.
+
+.. code-block:: ruby
+
+ File.open('/path/to/my-output-file.txt', 'w') do |file|
+ fs_bucket.open_download_stream_by_name('my-file.txt', revision: -2) do |stream|
+ file.write(stream.read)
+ end
+ end
+
+To download the entire contents of the file specified by name and (optionally)
+revision number:
+
+.. code-block:: ruby
+
+ File.open('/path/to/my-output-file.txt', 'w') do |file|
+ fs_bucket.download_to_stream_by_name('my-file.txt', file, revision: -2)
+ end
+
+Read streams support the following options:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``:read``
+ - The read preference to use when downloading files.
+
+Some, but not all, of the read methods listed above pass these options to
+the underlying read streams. Please consult the API documentation for each
+method to determine whether it supports a particular option.
+
+Finding file metadata
+=====================
+
+You can retrieve documents containing metadata about files in the GridFS files collection.
+
+.. code-block:: ruby
+
+ fs_bucket.find(filename: 'my-file.txt')
+
+Deleting files
+==============
+
+You can delete a file by id.
+
+.. code-block:: ruby
+
+ fs_bucket.delete(file_id)
+
+
+Working with Grid::File objects
+===============================
+
+A ``Grid::File`` object can be used both to wrap a file that is to be inserted
+into the database using GridFS and to represent a file that has been retrieved.
+
+To create a file with raw data:
+
+.. code-block:: ruby
+
+ file = Mongo::Grid::File.new('I am a file', :filename => 'new-file.txt')
+
+To create a file from a Ruby ``File`` object:
+
+.. code-block:: ruby
+
+ file = File.open('/path/to/my-file.txt')
+ grid_file = Mongo::Grid::File.new(file.read, :filename => File.basename(file.path))
+
+To change file options such as chunk size, pass options to the constructor:
+
+.. code-block:: ruby
+
+ file = File.open('/path/to/my-file.txt')
+ grid_file = Mongo::Grid::File.new(
+ file.read,
+ :filename => File.basename(file.path),
+ :chunk_size => 1024
+ )
+
+The following is a full list of the available options that files support.
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``:chunk_size``
+ - Sets the size of each file chunk in the database.
+ * - ``:content_type``
+ - Set a content type for the file.
+ * - ``:filename`` (Required)
+ - The file name.
+ * - ``:upload_date``
+ - The date the file was uploaded (stored).
+
+
+Inserting Files
+===============
+
+Files can be inserted into the database one at a time. File chunks are inserted
+by default into the ``fs.chunks`` collection and file metadata is inserted into the
+``fs.files`` collection.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ file = Mongo::Grid::File.new('I am a file', :filename => 'new-file.txt')
+
+ client.database.fs.insert_one(file)
+
+To insert into collections with a name prefix other than ``fs``, access the
+filesystem with a ``:fs_name`` option.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ file = Mongo::Grid::File.new('I am a file', :filename => 'new-file.txt')
+
+ client.database.fs(:fs_name => 'grid').insert_one(file)
+
+When the driver is inserting the first file into a bucket, it will attempt to create the required
+indexes on the ``files`` and ``chunks`` collections. The required indexes are as follows:
+
+.. code-block:: ruby
+
+ # files collection
+ { :filename => 1, :uploadDate => 1 }
+
+ # chunks collection
+ { :files_id => 1, :n => 1 }, { :unique => true }
+
+.. note::
+
+ If the indexes cannot be created, such as due to the current user lacking the permissions to do so,
+ the file insert will be aborted. If the application does not have permissions to create indexes,
+ a database administrator must create the required indexes ahead of time.
+
+ If the bucket already has files, the driver will not attempt to create indexes, even if they are
+ missing and the current user has permissions to create them. In this case a database administrator
+ should create the needed indexes as soon as possible to ensure data integrity.
+
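+For example, an administrator could pre-create these indexes with the driver.
+The following is a sketch assuming the default ``fs`` bucket prefix and the
+``music`` database used in the examples above:
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+
+  client['fs.files'].indexes.create_one({ :filename => 1, :uploadDate => 1 })
+  client['fs.chunks'].indexes.create_one({ :files_id => 1, :n => 1 }, :unique => true)
+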
+Files can also be streamed as an alternative to a direct insert.
+
+.. code-block:: ruby
+
+ client.database.fs.open_upload_stream(filename) do |stream|
+ stream.write(file)
+ end
+
+Finding Files
+=============
+
+To retrieve a file from the database, call ``find_one`` with the appropriate filter.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ client.database.fs.find_one(:filename => 'new-file.txt') # Returns a Mongo::Grid::File
+
+Files can also be streamed as an alternative to a direct find.
+
+.. code-block:: ruby
+
+ client.database.fs.open_download_stream(file_id) do |stream|
+ io.write(stream.read)
+ end
+
+ fs.download_to_stream(file_id, io)
+
+
+Deleting Files
+==============
+
+To delete a file, pass the file object to ``delete_one``.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'music')
+ fs = client.database.fs
+ file = fs.find_one(:filename => 'new-file.txt')
+ fs.delete_one(file)
diff --git a/source/reference/in-use-encryption.txt b/source/reference/in-use-encryption.txt
new file mode 100644
index 000000000..165f399b8
--- /dev/null
+++ b/source/reference/in-use-encryption.txt
@@ -0,0 +1,17 @@
+.. _in-use-encryption:
+
+*****************
+In-Use Encryption
+*****************
+
+.. default-domain:: mongodb
+
+This section describes the different encryption methods in use by the Ruby
+driver for MongoDB.
+
+.. toctree::
+ :titlesonly:
+
+ /reference/in-use-encryption/queryable-encryption
+ /reference/in-use-encryption/client-side-encryption
+
\ No newline at end of file
diff --git a/source/reference/in-use-encryption/client-side-encryption.txt b/source/reference/in-use-encryption/client-side-encryption.txt
new file mode 100644
index 000000000..78d1ca73c
--- /dev/null
+++ b/source/reference/in-use-encryption/client-side-encryption.txt
@@ -0,0 +1,900 @@
+.. _client-side-encryption:
+
+**********************
+Client-Side Encryption
+**********************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+New in MongoDB 4.2, client-side encryption allows administrators and developers
+to encrypt specific fields in MongoDB documents before inserting them into the
+database.
+
+With client-side encryption, developers can encrypt fields client-side without
+any server-side configuration or directives. Client-side encryption supports
+workloads where applications must guarantee that unauthorized parties,
+including server administrators, cannot read the encrypted data.
+
+.. warning::
+
+ Enabling Client Side Encryption reduces the maximum write batch size and may
+ have a negative performance impact.
+
+Installation
+============
+
+Client-side encryption requires the installation of additional packages.
+
+libmongocrypt
+~~~~~~~~~~~~~
+
+Libmongocrypt is a C library used by the driver for client-side encryption.
+To use client-side encryption, you must install the libmongocrypt library
+on the machine running your Ruby program.
+
+The easiest way to install this library is to install `libmongocrypt-helper
+`_ as follows:
+
+.. code-block:: bash
+
+ gem install libmongocrypt-helper --pre
+
+The version number of libmongocrypt-helper is the version of the included
+libmongocrypt followed by the release number, e.g. 1.3.2.r1.
+Because Ruby considers any letters in the version number to indicate a
+pre-release version, the ``--pre`` flag is needed.
+
+The driver will automatically load libmongocrypt-helper - no further
+configuration is needed.
+
+.. note::
+
+ libmongocrypt-helper currently only supports Linux operating systems.
+
+Alternatively you can download a pre-built binary distribution of libmongocrypt
+and manually place the required shared object on your computer, as follows:
+
+- Download a tarball of all libmongocrypt variations `here `_.
+
+- Extract the file you downloaded. You will see a list of directories, each
+ corresponding to an operating system. Find the directory that matches your
+ operating system and open it.
+
+- Inside that folder, open the folder called "nocrypto." In either the
+ lib or lib64 folder, you will find the libmongocrypt.so or
+ libmongocrypt.dylib or libmongocrypt.dll file, depending on your OS.
+
+- Move that file to wherever you want to keep it on your machine. You may delete
+ the other files included in the tarball.
+
+To build the binary from source:
+
+- Follow the instructions in the README in the `libmongocrypt GitHub repo `_.
+
+Once you have the libmongocrypt binary on your machine, specify the path to the
+binary using the LIBMONGOCRYPT_PATH environment variable. It is recommended that
+you add this variable to your rc files. For example:
+
+.. code-block:: bash
+
+ export LIBMONGOCRYPT_PATH=/path/to/your/libmongocrypt.so
+
+.. note::
+
+ The binary referenced in this section can be a pre-release version of
+ libmongocrypt which is not recommended for production environments.
+
+Automatic Encryption Shared Library
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Automatic Encryption Shared Library is a dynamic library that enables your
+client application to perform automatic encryption. It is only required for
+automatic encryption, which is an enterprise-only feature.
+If you only intend to use explicit encryption, you may skip this step.
+The Automatic Encryption Shared Library provides the same functionality as
+mongocryptd (see below), but does not require you to spawn another process
+to perform automatic encryption.
+
+For installation instructions, see
+`the MongoDB manual `_.
+
+When automatic encryption is enabled, libmongocrypt will look for the shared
+library in the system library path, or try to load the library from a particular
+place if the ``:crypt_shared_lib_path`` option is provided when creating a client.
+If the library can be loaded, the driver will not try to spawn the mongocryptd daemon.
+The daemon will still be spawned if the shared library cannot be found.
+
+It is also possible to require use of the shared library by passing the
+``crypt_shared_lib_required: true`` option when creating a client. In this case,
+an error will be raised if the shared library cannot be loaded.
+
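+For example, the following is a sketch of passing these options through
+``:extra_options``. The library path is a placeholder, and ``kms_providers``
+and ``schema_map`` are assumed to be defined as in the automatic encryption
+example later in this tutorial:
+
+.. code-block:: ruby
+
+  client = Mongo::Client.new(
+    ['localhost:27017'],
+    database: 'encryption_db',
+    auto_encryption_options: {
+      key_vault_namespace: 'encryption.__keyVault',
+      kms_providers: kms_providers,
+      schema_map: schema_map,
+      extra_options: {
+        # Placeholder path; point this at your crypt_shared library.
+        crypt_shared_lib_path: '/path/to/mongo_crypt_v1.so',
+        crypt_shared_lib_required: true
+      }
+    }
+  )
+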
+.. note::
+ All ``Mongo::Client`` objects in the same process should use the same
+ ``:crypt_shared_lib_path`` setting, as it is an error to load more than one
+ crypt_shared dynamic library simultaneously in a single operating system process.
+
+mongocryptd
+~~~~~~~~~~~
+
+Mongocryptd is an alternative to the Automatic Encryption Shared Library.
+Mongocryptd is a daemon that tells the driver which fields to encrypt in a
+given operation. It is only required for automatic encryption, which is an
+enterprise-only feature. If you only intend to use explicit encryption, you may
+skip this step.
+
+Mongocryptd comes pre-packaged with enterprise builds of the MongoDB server
+(versions 4.2 and newer). For installation instructions, see the
+`MongoDB manual `_.
+
+In order to configure mongocryptd (for example, which port it listens on or the
+path used to spawn the daemon), it is necessary to pass different options to the
+``Mongo::Client`` performing automatic encryption. See the :ref:`:extra_options `
+section of this tutorial for more information.
+
+Automatic Encryption
+====================
+
+Automatic encryption is a feature that allows users to configure a
+``Mongo::Client`` instance to always encrypt specific document fields when
+performing database operations. Once the ``Mongo::Client`` is configured, it
+will automatically encrypt any field that requires encryption before writing
+it to the database, and it will automatically decrypt those fields when reading
+them.
+
+Client-side encryption implements envelope encryption, which is the practice of
+encrypting data with a data key, which is in turn encrypted using a master key.
+Thus, using client-side encryption with MongoDB involves three main steps:
+
+1. Create a master key
+2. Create a data key (and encrypt it using the master key)
+3. Encrypt data using the data key
+
+The example below demonstrates how to follow these steps with a local master key
+in order to perform automatic encryption.
+
+.. note::
+
+ Automatic encryption is an enterprise-only feature that only applies to
+ operations on a collection. Automatic encryption is not supported for operations
+ on a database or view, and operations that are not bypassed will result in
+ an error (see `Auto Encryption Allow-List `_
+ ). To bypass automatic encryption for all operations, set ``bypass_auto_encryption``
+ to true in ``auto_encryption_options``.
+
+.. note::
+
+ Automatic encryption requires the authenticated user to have the listCollections privilege action.
+
+.. note::
+
+ When using automatic encryption, if a ``Mongo::Client`` instance that is configured
+ with ``:auto_encryption_options`` has a limited connection pool size
+ (i.e. a non-zero ``:max_pool_size``, which is the default setting), a separate
+ internal ``Mongo::Client`` instance is created if any of the following are true:
+
+ - ``auto_encryption_options[:key_vault_client]`` is not passed.
+ - ``auto_encryption_options[:bypass_auto_encryption]`` is not passed or is false.
+
+ If an internal ``Mongo::Client`` instance is created, it is configured with
+ the same options as the parent client except ``:min_pool_size`` is set to 0
+ and ``:auto_encryption_options`` is omitted.
+
+.. code-block:: ruby
+
+ require 'mongo'
+
+ #####################################
+ # Step 1: Create a local master key #
+ #####################################
+
+ # A local master key is a 96-byte binary blob.
+ local_master_key = SecureRandom.random_bytes(96)
+ # => "\xB2\xBE\x8EN\xD4\x14\xC2\x13\xC3..."
+
+ #############################
+ # Step 2: Create a data key #
+ #############################
+
+ kms_providers = {
+ local: {
+ key: local_master_key
+ }
+ }
+
+ # The key vault client is a Mongo::Client instance connected to the collection
+ # that will store your data keys.
+ key_vault_client = Mongo::Client.new(['localhost:27017'])
+
+ # Use an instance of Mongo::ClientEncryption to create a new data key
+ client_encryption = Mongo::ClientEncryption.new(
+ key_vault_client,
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: kms_providers
+ )
+
+ data_key_id = client_encryption.create_data_key('local')
+ # =>
+
+ #######################################################
+ # Step 3: Configure Mongo::Client for auto-encryption #
+ #######################################################
+
+ # Create a schema map, which tells the Mongo::Client which fields to encrypt
+ schema_map = {
+ 'encryption_db.encryption_coll': {
+ properties: {
+ encrypted_field: {
+ encrypt: {
+ keyId: [data_key_id],
+ bsonType: "string",
+ algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
+ }
+ }
+ },
+ bsonType: "object"
+ }
+ }
+
+ # Configure the client for automatic encryption
+ client = Mongo::Client.new(
+ ['localhost:27017'],
+ auto_encryption_options: {
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: kms_providers,
+ schema_map: schema_map
+ },
+ database: 'encryption_db',
+ )
+
+ collection = client['encryption_coll']
+ collection.drop # Make sure there is no data in the collection
+
+ # The string "sensitive data" will be encrypted and stored in the database
+ # as ciphertext
+ collection.insert_one(encrypted_field: 'sensitive data')
+
+ # The data is decrypted before being returned to the user
+ collection.find(encrypted_field: 'sensitive data').first['encrypted_field']
+ # => "sensitive data"
+
+ # A client with no auto_encryption_options is unable to decrypt the data
+ client_no_encryption = Mongo::Client.new(
+ ['localhost:27017'],
+ database: 'encryption_db',
+ )
+ client_no_encryption['encryption_coll'].find.first['encrypted_field']
+ # =>
+
+The example above demonstrates using automatic encryption with a local master key.
+For more information about using other key management services to create a
+master key and create data keys, see the following sections of this tutorial:
+
+- :ref:`Creating A Master Key `
+- :ref:`Creating A Data Key `
+
+Explicit Encryption
+===================
+
+Explicit encryption is a feature that allows users to encrypt and decrypt
+individual pieces of data such as strings, integers, or symbols. Explicit
+encryption is a community feature and does not require an enterprise build
+of the MongoDB server to use. To perform all explicit encryption and decryption
+operations, use an instance of the ClientEncryption class.
+
+Client-side encryption implements envelope encryption, which is the practice of
+encrypting data with a data key, which is in turn encrypted using a master key.
+Thus, using client-side encryption with MongoDB involves three main steps:
+
+1. Create a master key
+2. Create a data key (and encrypt it using the master key)
+3. Encrypt data using the data key
+
+The example below demonstrates how to follow these steps with a local master key
+in order to perform explicit encryption.
+
+.. code-block:: ruby
+
+ require 'mongo'
+
+ #####################################
+ # Step 1: Create a local master key #
+ #####################################
+
+ # A local master key is a 96-byte binary blob.
+ local_master_key = SecureRandom.random_bytes(96)
+ # => "\xB2\xBE\x8EN\xD4\x14\xC2\x13\xC3..."
+
+ #############################
+ # Step 2: Create a data key #
+ #############################
+
+ kms_providers = {
+ local: {
+ key: local_master_key
+ }
+ }
+
+ # The key vault client is a Mongo::Client instance connected to the collection
+ # that will store your data keys.
+ key_vault_client = Mongo::Client.new(['localhost:27017'])
+
+ # Use an instance of Mongo::ClientEncryption to create a new data key
+ client_encryption = Mongo::ClientEncryption.new(
+ key_vault_client,
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: kms_providers
+ )
+
+ data_key_id = client_encryption.create_data_key('local')
+ # =>
+
+ #####################################################
+ # Step 3: Encrypt a string with explicit encryption #
+ #####################################################
+
+ # The value to encrypt
+ value = 'sensitive data'
+
+ # Encrypt the value
+ encrypted_value = client_encryption.encrypt(
+ 'sensitive data',
+ {
+ key_id: data_key_id,
+ algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
+ }
+ )
+
+ # Create the client you will use to read and write the data to MongoDB
+ client = Mongo::Client.new(
+ ['localhost:27017'],
+ database: 'encryption_db',
+ )
+ collection = client['encryption_coll']
+ collection.drop # Make sure there is no data in the collection
+
+ # Insert the encrypted value into the collection
+ collection.insert_one(encrypted_field: encrypted_value)
+
+ # Use the client to read the encrypted value from the database, then
+ # use the ClientEncryption object to decrypt it
+ find_result = collection.find(encrypted_field: encrypted_value).first['encrypted_field']
+ # => (the find result is encrypted)
+
+ unencrypted_result = client_encryption.decrypt(find_result)
+ # => "sensitive data"
+
+The example above demonstrates using explicit encryption with a local master key.
+For more information about using other key management services to create a
+master key and create data keys, see the following sections of this tutorial:
+
+- :ref:`Creating A Master Key `
+- :ref:`Creating A Data Key `
+
+.. _creating-a-master-key:
+
+Creating a Master Key
+=====================
+Both automatic encryption and explicit encryption require an encryption master key.
+This master key is used to encrypt data keys, which are in turn used to encrypt
+user data. The master key can be generated in one of two ways: by creating a
+local key, or by creating a key in a key management service. Currently the
+Ruby driver supports AWS Key Management Service (KMS), Azure Key Vault, and
+Google Cloud Key Management (GCP KMS).
+
+.. _local-master-key:
+
+Local Master Key
+~~~~~~~~~~~~~~~~
+
+A local master key is a 96-byte binary string. It should be persisted
+on your machine as an environment variable or in a text file.
+
+.. warning::
+
+ Using a local master key is insecure and not recommended if you plan
+ to use client-side encryption in production.
+
+Run the following code to generate a local master key using Ruby:
+
+.. code-block:: ruby
+
+ local_master_key = SecureRandom.random_bytes(96)
+ # => "\xB2\xBE\x8EN\xD4\x14\xC2\x13\xC3..." (a binary blob)
+
+.. _remote-master-key:
+
+Remote Master Key
+~~~~~~~~~~~~~~~~~
+It is recommended that you use a remote Key Management Service to create and
+store your master key. To do so, follow the steps of the
+`"Set up a Remote Master Key" `_
+in the MongoDB Client-Side Encryption documentation.
+
+For more information about creating a master key, see the
+`Create a Master Key `_
+section of the MongoDB manual.
+
+.. _creating-a-data-key:
+
+Creating a Data Key
+===================
+
+Once you have created a master key, create a data key by calling the
+``#create_data_key`` method on an instance of the ``Mongo::ClientEncryption``
+class. This method generates a new data key and inserts it into the key vault
+collection, which is the MongoDB collection in which you choose to store your
+data keys. The ``#create_data_key`` method returns the id of the newly-created
+data key in the form of a ``BSON::Binary`` object.
+
+Create a Data Key Using a Local Master Key
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you have created a local master key, you may use it to generate a new data
+key with the following code snippet:
+
+.. warning::
+
+ Using a local master key is insecure and not recommended if you plan
+ to use client-side encryption in production.
+
+.. code-block:: ruby
+
+ # A Mongo::Client instance that will be used to connect to the key vault
+ # collection. Replace the server address with the address of the MongoDB
+ # server where you would like to store your key vault collection.
+ key_vault_client = Mongo::Client.new(['localhost:27017'])
+
+ client_encryption = Mongo::ClientEncryption.new(
+ key_vault_client,
+ # Replace with the database and collection names for your key vault collection
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: {
+ local: {
+ key: local_master_key
+ }
+ }
+ )
+
+ data_key_id = client_encryption.create_data_key('local')
+ # =>
+
+See the :ref:`Local Master Key ` section for more information
+about generating a new local master key.
+
+Create a Data Key Using a Remote Master Key
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you have created an AWS KMS master key, note the access key ID and the secret access
+key of the IAM user that has permissions to use the key. Additionally, note
+the AWS region and the Amazon Resource Number (ARN) of your master key. You will
+use that information to generate a data key.
+
+If you have created an Azure master key, note the tenant id, the client id, and
+the client secret of the application that has permissions to use the key.
+Additionally, note the key name, key version (if any), and key vault endpoint
+for your master key. You will use that information to generate a data key.
+
+If you have created a GCP KMS master key, note the email and the private key
+of the service account that has permissions to use the key.
+Additionally, note the project id, location, key ring, key name, and
+key version (if any) for your master key. You will use that information to
+generate a data key.
+
+Please note that the GCP private key can be in different formats. The Ruby driver
+supports a DER-encoded RSA private key as a base64-encoded string. For MRI Ruby,
+the driver additionally supports a PEM-encoded RSA private key.
+
+If you have created a master key using a Key Management Interoperability
+Protocol (KMIP) compatible key management server, note the server host and port,
+and the key id. You will use that information to generate a data key. You may also
+need certificate authority certificate(s), as well as your client
+certificate and private key, to authenticate to the KMIP server.
+
+.. code-block:: ruby
+
+ # A Mongo::Client instance that will be used to connect to the key vault
+ # collection. Replace the server address with the address of the MongoDB
+ # server where you would like to store your key vault collection.
+ key_vault_client = Mongo::Client.new(['localhost:27017'])
+
+ client_encryption = Mongo::ClientEncryption.new(
+ key_vault_client,
+ # Replace with the database and collection names for your key vault collection
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: {
+ aws: {
+ access_key_id: 'IAM-ACCESS-KEY-ID',
+ secret_access_key: 'IAM-SECRET-ACCESS-KEY'
+ },
+ azure: {
+ tenant_id: 'AZURE-TENANT-ID',
+ client_id: 'AZURE-CLIENT-ID',
+ client_secret: 'AZURE-CLIENT-SECRET'
+ },
+ gcp: {
+ email: 'GCP-EMAIL',
+ # :private_key value should be GCP private key as base64 encoded
+ # DER RSA private key, or PEM RSA private key, if you are using MRI Ruby.
+ private_key: 'GCP-PRIVATE-KEY',
+ },
+ kmip: {
+ # KMIP server endpoint may include port.
+ endpoint: 'KMIP-SERVER-HOST'
+ },
+ # TLS options to connect to KMIP server.
+ kms_tls_options: {
+ kmip: {
+ ssl_ca_cert: 'PATH-TO-CA-FILE',
+ ssl_cert: 'PATH-TO-CLIENT-CERT-FILE',
+ ssl_key: 'PATH-TO-CLIENT-KEY-FILE'
+ }
+ }
+ }
+ )
+
+ aws_data_key_id = client_encryption.create_data_key(
+ 'aws',
+ {
+ master_key: {
+ region: 'REGION-OF-YOUR-MASTER-KEY',
+ key: 'ARN-OF-YOUR-MASTER-KEY'
+ }
+ }
+ )
+ # =>
+
+ azure_data_key_id = client_encryption.create_data_key(
+ 'azure',
+ {
+ master_key: {
+ key_vault_endpoint: 'AZURE-KEY-VAULT-ENDPOINT',
+ key_name: 'AZURE-KEY-NAME'
+ }
+ }
+ )
+ # =>
+
+ gcp_data_key_id = client_encryption.create_data_key(
+ 'gcp',
+ {
+ master_key: {
+ project_id: 'GCP-PROJECT-ID',
+ location: 'GCP-LOCATION',
+ key_ring: 'GCP-KEY-RING',
+ key_name: 'GCP-KEY-NAME',
+ }
+ }
+ )
+ # =>
+
+See the :ref:`Remote Master Key ` section of this tutorial
+for more information about generating a new remote master key and finding the
+information you need to create data keys.
+
+For more information about creating a data key, see the
+`Create a Data Encryption Key `_
+section of the MongoDB manual.
+
+For a list of possible KMS TLS options,
+see the :manual:`create client reference `.
+The ``Mongo::ClientEncryption`` constructor accepts the same ``ssl_`` options as
+``Mongo::Client``.
+
+Auto-Encryption Options
+=======================
+
+Automatic encryption can be configured on a ``Mongo::Client`` using the
+``auto_encryption_options`` option ``Hash``. This section provides an overview
+of the fields inside ``auto_encryption_options`` and explains how to choose their
+values.
+
+``:key_vault_client``
+~~~~~~~~~~~~~~~~~~~~~
+
+The key vault client is a ``Mongo::Client`` instance that will be used to connect
+to the MongoDB collection containing your encryption data keys. For example, if
+your key vault was hosted on a MongoDB instance at ``localhost:30000``:
+
+.. code-block:: ruby
+
+ key_vault_client = Mongo::Client.new(['localhost:30000'])
+
+  Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ key_vault_client: key_vault_client,
+ # ... (Fill in other options here)
+ }
+ )
+
+If your data keys are stored in the same MongoDB instance that stores your encrypted
+data, you may leave this option blank, and the top-level client will be used
+to insert and fetch data keys.
+
+``:key_vault_namespace``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The key vault namespace is a ``String`` in the format ``"database_name.collection_name"``,
+where ``database_name`` and ``collection_name`` are the name of the database and
+collection in which you would like to store your data keys. For example, if your data
+keys are stored in the ``encryption`` database in the ``__keyVault`` collection:
+
+.. code-block:: ruby
+
+  Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ key_vault_namespace: 'encryption.__keyVault',
+ # ... (Fill in other options here)
+ }
+ )
+
+There is no default key vault namespace, and this option must be provided.
+
+``:kms_providers``
+~~~~~~~~~~~~~~~~~~
+
+A Hash that contains KMS provider names as keys, and provider options as values.
+
+.. code-block:: ruby
+
+  Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: {
+ aws: {
+ access_key_id: 'IAM-ACCESS-KEY-ID',
+ secret_access_key: 'IAM-SECRET-ACCESS-KEY'
+ },
+ azure: {
+ tenant_id: 'AZURE-TENANT-ID',
+ client_id: 'AZURE-CLIENT-ID',
+ client_secret: 'AZURE-CLIENT-SECRET'
+ },
+ gcp: {
+ email: 'GCP-EMAIL',
+ # :private_key value should be GCP private key as base64 encoded
+ # DER RSA private key, or PEM RSA private key, if you are using MRI Ruby.
+ private_key: 'GCP-PRIVATE-KEY',
+ },
+ kmip: {
+ # KMIP server endpoint may include port.
+ endpoint: 'KMIP-SERVER-HOST'
+ },
+ # TLS options to connect to KMIP server.
+ kms_tls_options: {
+ kmip: {
+ ssl_ca_cert: 'PATH-TO-CA-FILE',
+ ssl_cert: 'PATH-TO-CLIENT-CERT-FILE',
+ ssl_key: 'PATH-TO-CLIENT-KEY-FILE'
+ }
+ }
+ }
+ }
+ )
+
+The client can retrieve AWS credentials from the environment or from EC2 or ECS
+metadata endpoints. To retrieve credentials automatically, specify an empty Hash
+as KMS provider options for AWS:
+
+.. code-block:: ruby
+
+ Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: {
+ aws: {}
+ }
+ }
+ )
+
+See :ref:`"Automatically Retrieving Credentials" `
+for more detailed information about the credential retrieval.
+
+The client can retrieve GCP credentials from the Google Compute Engine
+metadata endpoints. To retrieve credentials automatically, specify an empty Hash
+as KMS provider options for GCP:
+
+.. code-block:: ruby
+
+ Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: {
+ gcp: {}
+ }
+ }
+ )
+
+``:kms_tls_options``
+~~~~~~~~~~~~~~~~~~~~
+
+A Hash that contains KMS provider names as keys, and TLS options used to
+connect to the corresponding providers as values.
+
+.. code-block:: ruby
+
+  Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: {
+ kmip: {
+ endpoint: 'KMIP-SERVER-HOST'
+ }
+ },
+ kms_tls_options: {
+ kmip: {
+ ssl_ca_cert: 'PATH-TO-CA-FILE',
+ ssl_cert: 'PATH-TO-CLIENT-CERT-FILE',
+ ssl_key: 'PATH-TO-CLIENT-KEY-FILE'
+ }
+ }
+ }
+ )
+
+
+``:schema_map``
+~~~~~~~~~~~~~~~
+
+A schema map is a Hash with information about which fields to automatically
+encrypt and decrypt.
+
+The code snippet at the top of this tutorial demonstrates creating a schema
+map using a Ruby ``Hash``. While this will work, schema maps can grow quite
+large, and it could be unwieldy to include them in your Ruby code. Instead, it is
+recommended that you store them in a separate JSON (JavaScript Object Notation)
+file.
+
+Before creating the JSON file, Base64-encode the UUID of your data key.
+
+.. code-block:: ruby
+
+ Base64.encode64(data_key_id.data)
+ # => "sr6OTtQUwhPD..." (a base64-encoded string)
+
+Then, create a new JSON file containing your schema map in the format defined by
+the JSON Schema Draft 4 standard syntax. You can read more about formatting
+your schema map in the :manual:`Automatic Encryption Rules`
+section of the MongoDB manual.
+
+.. code-block:: json
+
+ {
+ "encryption_db.encryption_coll": {
+ "properties": {
+ "encrypted_field": {
+ "encrypt": {
+ "keyId": [{
+ "$binary": {
+ "base64": "YOUR-BASE64-ENCODED-DATA-KEY-ID",
+ "subType": "04"
+ }
+ }],
+ "bsonType": "string",
+ "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
+ }
+ }
+ },
+ "bsonType": "object"
+ }
+ }
+
+When you intend to use your schema map, convert it to a Ruby ``Hash`` using the
+``BSON::ExtJSON`` module in the ``bson`` Ruby gem.
+
+.. code-block:: ruby
+
+ schema_map = BSON::ExtJSON.parse(File.read('/path/to/your/file.json'))
+ # => { 'encryption_db.encryption_coll' => { ... } }
+
+  Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ schema_map: schema_map,
+ # ... (Fill in other options here)
+ }
+ )
+
+.. note::
+
+ It is also possible to supply a schema map as a validator on a MongoDB collection.
+ This is referred to as a "remote schema map," while providing the schema map as
+ an option on the ``Mongo::Client`` is called a "local schema map."
+
+ Supplying a local schema map provides more security than relying on JSON schemas
+ obtained from the server. It protects against a malicious server advertising
+ a false JSON schema, which could trick the client into sending unencrypted
+ data that should be encrypted.
+
+ See :manual:`Server-Side Field Level Encryption Enforcement`
+ in the MongoDB manual for more information about using the schema map to
+ create a JSON schema validator on your collection.
+
+.. seealso::
+
+ `Specify Encrypted Fields Using JSON Schema `_,
+ :manual:`Automatic Encryption Rules`
+
+.. _schema-map-path:
+
+``:schema_map_path``
+~~~~~~~~~~~~~~~~~~~~
+
+It is also possible to load a schema map from a file. Prepare the schema map as
+described above, save it to a file, and then pass the path to the file using the
+``:schema_map_path`` option.
+
+.. code-block:: ruby
+
+  Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ schema_map_path: '/path/to/your/file.json',
+ # ... (Fill in other options here)
+ }
+ )
+
+``:bypass_auto_encryption``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``:bypass_auto_encryption`` option is a ``Boolean`` that specifies whether the
+``Mongo::Client`` should skip encryption when writing to the database. If
+``:bypass_auto_encryption`` is ``true``, the client will still perform automatic
+decryption of any previously-encrypted data.
+
+.. code-block:: ruby
+
+  Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ bypass_auto_encryption: true,
+ # ... (Fill in other options here)
+ }
+ )
+
+.. _cse-extra-options:
+
+``:extra_options``
+~~~~~~~~~~~~~~~~~~
+
+``:extra_options`` is a ``Hash`` of options related to spawning mongocryptd.
+Every option in this ``Hash`` has a default value, so it is only necessary to
+provide the options whose defaults you want to override.
+
+- ``:mongocryptd_spawn_args`` - This is an ``Array`` containing arguments
+ for spawning mongocryptd. The Ruby driver will pass these arguments to
+ mongocryptd on spawning the daemon. Possible arguments are:
+
+ - ``"--idleShutdownTimeoutSecs"`` - The number of seconds mongocryptd must remain
+ idle before it shuts itself down. The default value is 60.
+ - ``"--port"`` - The port at which mongocryptd will listen for connections. The
+ default is 27020.
+
+- ``:mongocryptd_uri`` - The URI that the driver will use to connect to mongocryptd.
+ By default, this is ``"mongodb://localhost:27020"``.
+
+- ``:mongocryptd_spawn_path`` - The path to the mongocryptd executable. The default
+ is ``"mongocryptd"``.
+
+- ``:mongocryptd_bypass_spawn`` - A ``Boolean`` indicating whether the driver should
+ skip spawning mongocryptd.
+
+For example, if you would like to run mongocryptd on port 30000, provide
+``extra_options`` as follows:
+
+.. code-block:: ruby
+
+  Mongo::Client.new(['localhost:27017'],
+ auto_encryption_options: {
+ extra_options: {
+ mongocryptd_spawn_args: ['--port=30000'],
+ mongocryptd_uri: 'mongodb://localhost:30000',
+ }
+ # ... (Fill in other options here)
+ }
+ )
+
+.. warning::
+
+  The contents of ``:extra_options`` are subject to change in future versions
+  of the client-side encryption API.
diff --git a/source/reference/in-use-encryption/queryable-encryption.txt b/source/reference/in-use-encryption/queryable-encryption.txt
new file mode 100644
index 000000000..2be80ed5f
--- /dev/null
+++ b/source/reference/in-use-encryption/queryable-encryption.txt
@@ -0,0 +1,236 @@
+.. _queryable-encryption:
+
+**********************
+Queryable Encryption
+**********************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+Queryable encryption is a new feature in MongoDB 6.0. It also requires
+libmongocrypt version 1.5.2 or above.
+
+You can find more information about queryable encryption in the `MongoDB Manual
+`_.
+
+.. note::
+
+ The queryable encryption feature is in public technical preview.
+ Therefore, the following options should be considered experimental
+ and are subject to change:
+
+ - ``:encrypted_fields_map`` and ``:bypass_query_analysis`` in auto encryption options.
+ - ``:contention_factor`` and ``:query_type`` in client encryption options.
+
+The following examples assume you are familiar with the concepts and techniques
+described in :ref:`Client-Side Encryption `.
+
+Below is an example of automatic queryable encryption using the Ruby driver:
+
+.. code-block:: ruby
+
+ require 'mongo'
+
+ #####################################
+ # Step 1: Create a local master key #
+ #####################################
+
+ # A local master key is a 96-byte binary blob.
+ local_master_key = SecureRandom.random_bytes(96)
+ # => "\xB2\xBE\x8EN\xD4\x14\xC2\x13\xC3..."
+
+ #############################
+ # Step 2: Create a data key #
+ #############################
+
+ kms_providers = {
+ local: {
+ key: local_master_key
+ }
+ }
+
+ # The key vault client is a Mongo::Client instance
+ # that will be used to store your data keys.
+ key_vault_client = Mongo::Client.new('mongodb://localhost:27017,localhost:27018')
+
+ # Use an instance of Mongo::ClientEncryption to create a new data key
+ client_encryption = Mongo::ClientEncryption.new(
+ key_vault_client,
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: kms_providers
+ )
+
+ data_key_id = client_encryption.create_data_key('local')
+ # =>
+
+ #######################################################
+ # Step 3: Configure Mongo::Client for auto-encryption #
+ #######################################################
+
+ # Create an encrypted fields map, which tells the Mongo::Client which fields to encrypt.
+ encrypted_fields_map = {
+ 'encryption_db.encryption_coll' => {
+ fields: [
+ {
+ path: 'encrypted_field',
+ bsonType: 'string',
+ keyId: data_key_id,
+ queries: {
+ queryType: 'equality'
+ }
+ }
+ ]
+ }
+ }
+
+ # Configure the client for automatic encryption
+ client = Mongo::Client.new(
+ 'mongodb://localhost:27017,localhost:27018',
+ auto_encryption_options: {
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: kms_providers,
+ encrypted_fields_map: encrypted_fields_map,
+ },
+ database: 'encryption_db'
+ )
+
+ # Make sure there is no data in the collection.
+ client.database.drop
+
+ # Create encrypted collection explicitly.
+ collection = client['encryption_coll'].create
+
+ # The string "sensitive data" will be encrypted and stored in the database
+ # as ciphertext
+ collection.insert_one(encrypted_field: 'sensitive data')
+
+ # The data is decrypted before being returned to the user
+ collection.find(encrypted_field: 'sensitive data').first['encrypted_field']
+ # => "sensitive data"
+
+ # A client with no auto_encryption_options is unable to decrypt the data
+ client_no_encryption = Mongo::Client.new(['localhost:27017'], database: 'encryption_db')
+ client_no_encryption['encryption_coll'].find.first['encrypted_field']
+ # =>
+
+The example above demonstrates using automatic encryption with a local master key.
+For more information about using other key management services to create a
+master key and create data keys, see the following sections of the :ref:`Client-Side Encryption ` tutorial:
+
+- :ref:`Creating A Master Key `
+- :ref:`Creating A Data Key `
+
+Below is an example of explicit queryable encryption.
+
+.. code-block:: ruby
+
+ require 'mongo'
+
+ #####################################
+ # Step 1: Create a local master key #
+ #####################################
+
+ # A local master key is a 96-byte binary blob.
+ local_master_key = SecureRandom.random_bytes(96)
+ # => "\xB2\xBE\x8EN\xD4\x14\xC2\x13\xC3..."
+
+ #############################
+ # Step 2: Create a data key #
+ #############################
+
+ kms_providers = {
+ local: {
+ key: local_master_key
+ }
+ }
+
+ # The key vault client is a Mongo::Client instance
+ # that will be used to store your data keys.
+ key_vault_client = Mongo::Client.new('mongodb://localhost:27017,localhost:27018')
+
+ # Use an instance of Mongo::ClientEncryption to create a new data key
+ client_encryption = Mongo::ClientEncryption.new(
+ key_vault_client,
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: kms_providers
+ )
+
+ data_key_id = client_encryption.create_data_key('local')
+ # =>
+
+ ##########################################
+ # Step 3: Create an encrypted collection #
+ ##########################################
+ encrypted_fields = {
+ fields: [
+ {
+ path: 'encrypted_field',
+ bsonType: 'string',
+ keyId: data_key_id,
+ queries: {
+ queryType: 'equality',
+ contention: 0
+ }
+ }
+ ]
+ }
+
+ # Create the client you will use to read and write the data to MongoDB
+ # Please note that to insert or query with an "Indexed" encrypted payload,
+ # you should use a ``Mongo::Client`` that is configured with ``:auto_encryption_options``.
+ # ``auto_encryption_options[:bypass_query_analysis]`` may be true.
+  # ``auto_encryption_options[:bypass_auto_encryption]`` must not be set, or must be false.
+ client = Mongo::Client.new(
+ ['localhost:27017'],
+ auto_encryption_options: {
+ key_vault_namespace: 'encryption.__keyVault',
+ kms_providers: kms_providers,
+ bypass_query_analysis: true,
+ },
+ database: 'encryption_db',
+ )
+
+ # Make sure there is no data in the collection.
+ client['encryption_coll'].drop(encrypted_fields: encrypted_fields)
+ # Create encrypted collection explicitly.
+ client['encryption_coll'].create(encrypted_fields: encrypted_fields)
+
+ #####################################################
+ # Step 4: Encrypt a string with explicit encryption #
+ #####################################################
+
+ # The value to encrypt
+ value = 'sensitive data'
+
+ # Encrypt the value
+ insert_payload = client_encryption.encrypt(
+    value,
+ {
+ key_id: data_key_id,
+ algorithm: "Indexed",
+ contention_factor: 0
+ }
+ )
+
+ # Insert the encrypted value into the collection
+ client['encryption_coll'].insert_one(encrypted_field: insert_payload)
+
+  # To query for the encrypted value, encrypt it again with the same data
+  # key, this time specifying the query type. The resulting payload can be
+  # used in a find filter; the client configured with auto_encryption_options
+  # automatically decrypts the matched documents.
+  find_payload = client_encryption.encrypt(
+    value,
+ {
+ key_id: data_key_id,
+ algorithm: "Indexed",
+ contention_factor: 0,
+ query_type: "equality"
+ }
+ )
+
+ find_result = client['encryption_coll'].find(encrypted_field: find_payload).first['encrypted_field']
+ # => 'sensitive data'
diff --git a/source/reference/indexing.txt b/source/reference/indexing.txt
new file mode 100644
index 000000000..a536c8870
--- /dev/null
+++ b/source/reference/indexing.txt
@@ -0,0 +1,172 @@
+********
+Indexing
+********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+The driver provides the ability to create, drop and view
+:manual:`indexes` on a collection through the ``indexes`` attribute:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music')
+ client[:bands].indexes
+ # => #, @batch_size=nil, @options={}>
+
+
+Creating Indexes
+================
+
+Indexes can be created one at a time, or several can be created in a single
+operation. When creating multiple indexes on MongoDB 3.0 and later, the indexes
+are created in parallel; on earlier versions they are created sequentially.
+
+To create a single index, use ``indexes#create_one``, passing the key
+specification as the first argument and options as the second argument:
+
+.. code-block:: ruby
+
+ client[:bands].indexes.create_one(genre: 1)
+
+ client[:bands].indexes.create_one(
+ { name: 1 },
+ unique: true, expire_after: 120,
+ )
+
+To create multiple indexes, use ``indexes#create_many`` which accepts an array
+of index specifications. Unlike ``create_one``, each index specification
+is a hash with the ``key`` key mapped to the key specification and the
+options being specified on the top level.
+
+.. code-block:: ruby
+
+ client[:bands].indexes.create_many([
+ { key: { genre: 1 } },
+ { key: { name: 1 }, unique: true, expire_after: 120 },
+ ])
+
+.. _index-options:
+
+The following is a full list of the available options that can be added
+when creating indexes. These options mirror the options supported by the
+:manual:`createIndex command`.
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 80
+
+ * - Option
+ - Description
+ * - ``:background``
+ - Either ``true`` or ``false``. Tells the index to be created in the background.
+ * - ``:expire_after``
+ - Number of seconds to expire documents in the collection after.
+ * - ``:name``
+ - The name of the index.
+ * - ``:sparse``
+ - Whether the index should be sparse or not, either ``true`` or ``false``.
+ * - ``:storage_engine``
+ - The name of the storage engine for this particular index.
+ * - ``:version``
+ - The index format version to use.
+ * - ``:default_language``
+ - The default language of text indexes.
+ * - ``:language_override``
+ - The field name to use when overriding the default language.
+ * - ``:text_version``
+ - The version format for text index storage.
+ * - ``:weights``
+ - A document specifying fields and weights in text search.
+ * - ``:sphere_version``
+ - The 2d sphere index version.
+ * - ``:bits``
+ - Sets the maximum boundary for latitude and longitude in the 2d index.
+ * - ``:max``
+ - Maximum boundary for latitude and longitude in the 2d index.
+ * - ``:min``
+ - Minimum boundary for latitude and longitude in the 2d index.
+ * - ``:bucket_size``
+ - The number of units within which to group the location values in a geo haystack index.
+ * - ``:partial_filter_expression``
+ - A filter for a partial index.
+ * - ``:hidden``
+ - A Boolean specifying whether the index should be hidden; a hidden index
+ is one that exists on the collection but will not be used by the query planner.
+
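+For example, a minimal sketch combining a few of these options on the ``bands``
+collection from above (the ``active`` field and the index name are illustrative
+only):
+
+.. code-block:: ruby
+
+  client[:bands].indexes.create_one(
+    { genre: 1 },
+    name: 'active_genre_idx',
+    partial_filter_expression: { active: true }
+  )
+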
+The ``:commit_quorum`` option
+------------------------------
+
+On MongoDB server versions 4.4 and newer, the ``:commit_quorum`` option may be
+specified on index creation. This option differs from other index options in that
+it determines server behavior during index creation, rather than determining
+the behavior of an individual index.
+
+The ``:commit_quorum`` option specifies how many voting, data-bearing members
+of a replica set must complete the index build before the index is ready.
+Possible values are integers (0 to the number of voting, data-bearing members
+of the replica set), "majority", or "votingMembers".
+
+To specify ``:commit_quorum`` when creating one index, add another option
+to the second argument of the ``indexes#create_one`` method:
+
+.. code-block:: ruby
+
+ client[:bands].indexes.create_one(
+ { name: 1 },
+ unique: true, expire_after: 120, commit_quorum: 'majority'
+ )
+
+To specify create options when creating multiple indexes, add a Hash specifying
+``:commit_quorum`` as a final element to the Array of indexes passed to
+``indexes#create_many``. Note that this Hash MUST be the final element in the
+Array.
+
+.. code-block:: ruby
+
+ client[:bands].indexes.create_many([
+ { key: { genre: 1 } },
+ { key: { name: 1 }, unique: true, expire_after: 120 },
+ { commit_quorum: 'majority' },
+ ])
+
+Dropping Indexes
+================
+
+To drop an index, call ``indexes#drop_one`` or ``indexes#drop_all``.
+
+.. code-block:: ruby
+
+ # Drops the name_1 index.
+ client[:bands].indexes.drop_one( 'name_1' )
+
+ # Drops all indexes in the collection.
+ client[:bands].indexes.drop_all
+
+
+Listing Indexes
+===============
+
+To list the indexes, iterate the ``indexes`` object:
+
+.. code-block:: ruby
+
+ client[:bands].indexes.each do |index_spec|
+ p index_spec
+ # {"v"=>2, "key"=>{"_id"=>1}, "name"=>"_id_"}
+ # {"v"=>2, "key"=>{"genre"=>1}, "name"=>"genre_1"}
+ # {"v"=>2, "unique"=>true, "key"=>{"name"=>1}, "name"=>"name_1",
+ # "expireAfterSeconds"=>120}
+ end
+
+Each iteration returns an index specification as returned by the
+:manual:`listIndexes` command.
+
+.. note::
+
+ The shape and contents of the index specifications returned by this method
+ may change from one version of MongoDB to another.
diff --git a/source/reference/map-reduce.txt b/source/reference/map-reduce.txt
new file mode 100644
index 000000000..f01b64b55
--- /dev/null
+++ b/source/reference/map-reduce.txt
@@ -0,0 +1,132 @@
+**********
+Map-Reduce
+**********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 2
+ :class: singlecol
+
+:manual:`Map-Reduce ` is a data processing paradigm for
+condensing large volumes of data into aggregated results.
+
+.. note::
+
+ The map-reduce operation is deprecated.
+ The :ref:`aggregation framework ` provides better performance
+ and usability than map-reduce operations, and should be preferred for
+ new development.
+
+A map-reduce operation is issued on a collection view, as obtained from the
+``Collection#find`` method, by calling the ``map_reduce`` method on the
+view. The ``map_reduce`` method takes three arguments: the mapper, the
+reducer and map-reduce options. The mapper and the reducer must be provided
+as strings containing JavaScript functions.
+
+For example, given the following collection with values 1 through 10:
+
+.. code-block:: ruby
+
+ coll = client['foo']
+ 10.times do |i|
+ coll.insert_one(v: i)
+ end
+
+The following invocation will sum up the values less than 6:
+
+.. code-block:: ruby
+
+ coll.find(v: {'$lt' => 6}).map_reduce(
+ 'function() { emit(null, this.v) }',
+ 'function(key, values) { return Array.sum(values) }',
+ ).first['value']
+ # => 15.0
+
+The ``map_reduce`` method returns an instance of
+``Mongo::Collection::View::MapReduce`` - a map-reduce view which holds
+the parameters to be used for the operation. To execute the operation, either
+iterate the results (by using e.g. ``each``, ``first`` or ``to_a`` on the
+view object) or invoke the ``execute`` method. The ``execute`` method issues
+the map-reduce operation but does not return the result set from the server,
+and is primarily useful for when the output of the operation is directed to
+a collection as follows:
+
+.. code-block:: ruby
+
+ coll.find(...).map_reduce(...).out('destination_collection').execute
+
+Note that:
+
+- If the results of map-reduce are not directed to a collection, they are
+ said to be retrieved inline. In this case the entire result set must fit in
+ the 16 MiB BSON document size limit.
+- If the results of map-reduce are directed to a collection, and the
+ map-reduce view is iterated, the driver automatically retrieves the
+ entire collection and returns its contents as the result set. The
+ collection is retrieved without sorting. If map-reduce is performed into
+ a collection that is not empty, the driver will return the documents
+ as they exist in the collection after the map-reduce operation completes,
+ which may include the documents that were in the collection prior to the
+ map-reduce operation.
+
+.. code-block:: ruby
+
+ coll.find(...).map_reduce(...).out('destination_collection').each do |doc|
+ # ...
+ end
+
+ coll.find(...).map_reduce(...).out(replace: 'destination_collection', db: 'db_name').each do |doc|
+ # ...
+ end
+
+Given a map-reduce view, it can be configured using the following methods:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 20 80
+
+ * - Method
+ - Description
+
+ * - ``js_mode``
+ - Sets the ``jsMode`` flag for the operation.
+
+ * - ``out``
+ - Directs the output to the specified collection, instead of returning
+ the result set.
+
+ * - ``scope``
+ - Sets the scope for the operation.
+
+ * - ``verbose``
+ - Sets whether to include the timing information in the result.
+
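+For example, these methods can be chained in the same way as ``out`` above.
+The sketch below builds on the earlier sum example and assumes each setter
+returns the view when given an argument; the ``scope`` document makes a
+``factor`` variable available to the JavaScript functions:
+
+.. code-block:: ruby
+
+  view = coll.find(v: {'$lt' => 6}).map_reduce(
+    'function() { emit(null, this.v * factor) }',
+    'function(key, values) { return Array.sum(values) }',
+  )
+
+  # Provide the scope variable and request timing information, then execute.
+  view.scope(factor: 2).verbose(true).first['value']
+  # => 30.0
+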
+The following accessor methods are defined on the view object:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 20 80
+
+ * - Method
+ - Description
+
+ * - ``js_mode``
+ - Returns the current ``jsMode`` flag value.
+
+ * - ``map_function``
+ - Returns the map function as a string.
+
+ * - ``out``
+ - Returns the current output location for the operation.
+
+ * - ``reduce_function``
+ - Returns the reduce function as a string.
+
+ * - ``scope``
+ - Returns the current scope for the operation.
+
+ * - ``verbose``
+ - Returns whether to include the timing information in the result.
diff --git a/source/reference/monitoring.txt b/source/reference/monitoring.txt
new file mode 100644
index 000000000..06328456a
--- /dev/null
+++ b/source/reference/monitoring.txt
@@ -0,0 +1,469 @@
+**********
+Monitoring
+**********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+The driver allows the application to be notified when certain events happen.
+These events are organized into the following categories:
+
+- Command monitoring
+- Topology lifecycle
+- Server lifecycle
+- Server heartbeats
+- Connection pools and connections
+
+Topology and server events are part of Server Discovery and Monitoring (SDAM).
+
+
+.. _command-monitoring:
+
+Command Monitoring
+==================
+
+All user-initiated commands that are sent to the server publish events that
+can be subscribed to for fine grained information. The monitoring API
+publishes a guaranteed start event for each command, then either a succeeded
+or a failed event. A subscriber must implement 3 methods: ``started``,
+``succeeded``, and ``failed``, each of which takes a single parameter for
+the event. The following is an example logging subscriber based on a
+logging subscriber used internally by the driver:
+
+.. code-block:: ruby
+
+ class CommandLogSubscriber
+ include Mongo::Loggable
+
+ def started(event)
+ # The default inspection of a command which is a BSON document gets
+ # truncated in the middle. To get the full rendering of the command, the
+ # ``to_json`` method can be called on the document.
+ log_debug("#{prefix(event)} | STARTED | #{format_command(event.command.to_json)}")
+ end
+
+ def succeeded(event)
+ log_debug("#{prefix(event)} | SUCCEEDED | #{event.duration}s")
+ end
+
+ def failed(event)
+ log_debug("#{prefix(event)} | FAILED | #{event.message} | #{event.duration}s")
+ end
+
+ private
+
+ def logger
+ Mongo::Logger.logger
+ end
+
+ def format_command(args)
+ begin
+ args.inspect
+ rescue Exception
+ ''
+ end
+ end
+
+ def format_message(message)
+ format("COMMAND | %s".freeze, message)
+ end
+
+ def prefix(event)
+ "#{event.address.to_s} | #{event.database_name}.#{event.command_name}"
+ end
+ end
+
+To register a custom subscriber, you can do so globally for
+all clients or on a per-client basis:
+
+.. code-block:: ruby
+
+ subscriber = CommandLogSubscriber.new
+
+ Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::COMMAND, subscriber)
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test' )
+ client.subscribe( Mongo::Monitoring::COMMAND, subscriber )
+
+Sample output:
+
+.. code-block:: none
+
+ D, [2018-09-23T13:47:31.258020 #4692] DEBUG -- : COMMAND | 127.0.0.1:27027 | test.hello | STARTED | {"hello"=>1, "$readPreference"=>{"mode"=>"primary"}, "lsid"=>{"id"=>}}
+ D, [2018-09-23T13:47:31.259145 #4692] DEBUG -- : COMMAND | 127.0.0.1:27027 | test.hello | SUCCEEDED | 0.000791175s
+
+
+.. _sdam:
+
+Server Discovery And Monitoring
+===============================
+
+The Ruby driver implements the `Server Discovery And Monitoring (SDAM) specification
+`_
+and makes the following events available to the application:
+
+- Topology opening
+- Server opening
+- Server description changed
+- Topology changed
+- Server closed
+- Topology closed
+- Heartbeat events (covered below in a separate section)
+
+For all events other than the heartbeat events, the ``succeeded`` method
+will be called on each event subscriber with the event as the sole argument.
+Available data for events varies; therefore, to log the events, a separate
+class is needed for each event type. A simple SDAM logging subscriber
+can look like the following:
+
+.. code-block:: ruby
+
+ class SDAMLogSubscriber
+ include Mongo::Loggable
+
+ def succeeded(event)
+ log_debug(format_event(event))
+ end
+
+ private
+
+ def logger
+ Mongo::Logger.logger
+ end
+
+ def format_message(message)
+ format("SDAM | %s".freeze, message)
+ end
+ end
+
+ class TopologyOpeningLogSubscriber < SDAMLogSubscriber
+ private
+
+ def format_event(event)
+ "Topology type '#{event.topology.display_name}' initializing."
+ end
+ end
+
+ class ServerOpeningLogSubscriber < SDAMLogSubscriber
+ private
+
+ def format_event(event)
+ "Server #{event.address} initializing."
+ end
+ end
+
+ class ServerDescriptionChangedLogSubscriber < SDAMLogSubscriber
+ private
+
+ def format_event(event)
+ "Server description for #{event.address} changed from " +
+ "'#{event.previous_description.server_type}' to '#{event.new_description.server_type}'."
+ end
+ end
+
+ class TopologyChangedLogSubscriber < SDAMLogSubscriber
+ private
+
+ def format_event(event)
+ if event.previous_topology != event.new_topology
+ "Topology type '#{event.previous_topology.display_name}' changed to " +
+ "type '#{event.new_topology.display_name}'."
+ else
+ "There was a change in the members of the '#{event.new_topology.display_name}' " +
+ "topology."
+ end
+ end
+ end
+
+ class ServerClosedLogSubscriber < SDAMLogSubscriber
+ private
+
+ def format_event(event)
+ "Server #{event.address} connection closed."
+ end
+ end
+
+ class TopologyClosedLogSubscriber < SDAMLogSubscriber
+ private
+
+ def format_event(event)
+ "Topology type '#{event.topology.display_name}' closed."
+ end
+ end
+
+To subscribe to SDAM events globally:
+
+.. code-block:: ruby
+
+ topology_opening_subscriber = TopologyOpeningLogSubscriber.new
+ server_opening_subscriber = ServerOpeningLogSubscriber.new
+ server_description_changed_subscriber = ServerDescriptionChangedLogSubscriber.new
+ topology_changed_subscriber = TopologyChangedLogSubscriber.new
+ server_closed_subscriber = ServerClosedLogSubscriber.new
+ topology_closed_subscriber = TopologyClosedLogSubscriber.new
+
+ Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::TOPOLOGY_OPENING,
+ topology_opening_subscriber)
+ Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::SERVER_OPENING,
+ server_opening_subscriber)
+ Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::SERVER_DESCRIPTION_CHANGED,
+ server_description_changed_subscriber)
+ Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::TOPOLOGY_CHANGED,
+ topology_changed_subscriber)
+ Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::SERVER_CLOSED,
+ server_closed_subscriber)
+ Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::TOPOLOGY_CLOSED,
+ topology_closed_subscriber)
+
+Subscribing to SDAM events for a single client is a little more involved
+since the events may be published during the client's construction:
+
+.. code-block:: ruby
+
+ topology_opening_subscriber = TopologyOpeningLogSubscriber.new
+ server_opening_subscriber = ServerOpeningLogSubscriber.new
+ server_description_changed_subscriber = ServerDescriptionChangedLogSubscriber.new
+ topology_changed_subscriber = TopologyChangedLogSubscriber.new
+ server_closed_subscriber = ServerClosedLogSubscriber.new
+ topology_closed_subscriber = TopologyClosedLogSubscriber.new
+
+ sdam_proc = Proc.new do |client|
+ client.subscribe(Mongo::Monitoring::TOPOLOGY_OPENING,
+ topology_opening_subscriber)
+ client.subscribe(Mongo::Monitoring::SERVER_OPENING,
+ server_opening_subscriber)
+ client.subscribe(Mongo::Monitoring::SERVER_DESCRIPTION_CHANGED,
+ server_description_changed_subscriber)
+ client.subscribe(Mongo::Monitoring::TOPOLOGY_CHANGED,
+ topology_changed_subscriber)
+ client.subscribe(Mongo::Monitoring::SERVER_CLOSED,
+ server_closed_subscriber)
+ client.subscribe(Mongo::Monitoring::TOPOLOGY_CLOSED,
+ topology_closed_subscriber)
+ end
+
+ client = Mongo::Client.new(['127.0.0.1:27017'], database: 'test',
+ sdam_proc: sdam_proc)
+
+Sample output:
+
+.. code-block:: none
+
+ D, [2018-10-09T13:58:03.489461 #22079] DEBUG -- : SDAM | Topology type 'Unknown' initializing.
+ D, [2018-10-09T13:58:03.489699 #22079] DEBUG -- : SDAM | Server 127.0.0.1:27100 initializing.
+ D, [2018-10-09T13:58:03.491384 #22079] DEBUG -- : SDAM | Server description for 127.0.0.1:27100 changed from 'unknown' to 'unknown'.
+ D, [2018-10-09T13:58:03.491642 #22079] DEBUG -- : SDAM | Server localhost:27100 initializing.
+ D, [2018-10-09T13:58:03.493199 #22079] DEBUG -- : SDAM | Server description for localhost:27100 changed from 'unknown' to 'primary'.
+ D, [2018-10-09T13:58:03.493473 #22079] DEBUG -- : SDAM | Server localhost:27101 initializing.
+ D, [2018-10-09T13:58:03.494874 #22079] DEBUG -- : SDAM | Server description for localhost:27101 changed from 'unknown' to 'secondary'.
+ D, [2018-10-09T13:58:03.495139 #22079] DEBUG -- : SDAM | Server localhost:27102 initializing.
+ D, [2018-10-09T13:58:03.496504 #22079] DEBUG -- : SDAM | Server description for localhost:27102 changed from 'unknown' to 'secondary'.
+ D, [2018-10-09T13:58:03.496777 #22079] DEBUG -- : SDAM | Topology type 'Unknown' changed to type 'ReplicaSetNoPrimary'.
+ D, [2018-10-09T13:58:03.497306 #22079] DEBUG -- : SDAM | Server 127.0.0.1:27100 connection closed.
+ D, [2018-10-09T13:58:03.497606 #22079] DEBUG -- : SDAM | Topology type 'ReplicaSetNoPrimary' changed to type 'ReplicaSetWithPrimary'.
+
+ # client.close
+
+ D, [2018-10-09T13:58:05.342057 #22079] DEBUG -- : SDAM | Server localhost:27100 connection closed.
+ D, [2018-10-09T13:58:05.342299 #22079] DEBUG -- : SDAM | Server localhost:27101 connection closed.
+ D, [2018-10-09T13:58:05.342565 #22079] DEBUG -- : SDAM | Server localhost:27102 connection closed.
+ D, [2018-10-09T13:58:05.342693 #22079] DEBUG -- : SDAM | Topology type 'ReplicaSetWithPrimary' closed.
+
+.. note::
+
+  The ``:sdam_proc`` client option applies only to the client during whose
+ construction it is given. When certain client options are changed via the
+ ``Client#with`` call, a new cluster may be created by the driver with
+ a default set of event subscribers. If this happens, the provided
+ ``:sdam_proc`` is not called and the application may miss events.
+
+
+.. _server-heartbeats:
+
+Server Heartbeats
+=================
+
+The application can be notified of each server heartbeat by subscribing
+to the SERVER_HEARTBEAT topic. A server heartbeat listener must implement
+three methods: ``started``, ``succeeded`` and ``failed``. Each heartbeat
+invokes the ``started`` method on the listener, and then either the ``succeeded``
+or the ``failed`` method, depending on the outcome of the heartbeat.
+
+All heartbeat events contain the address of the server that the heartbeat
+was sent to. The succeeded and failed events contain the round trip time for
+the hello or legacy hello command. The failed event also contains the exception
+instance that was raised during the hello or legacy hello command execution.
+Please review the API documentation for ServerHeartbeatStarted,
+ServerHeartbeatSucceeded and ServerHeartbeatFailed for event attribute details.
+
+The following is an example logging heartbeat event subscriber:
+
+.. code-block:: ruby
+
+ class HeartbeatLogSubscriber
+ include Mongo::Loggable
+
+ def started(event)
+ log_debug("#{event.address} | STARTED")
+ end
+
+ def succeeded(event)
+ log_debug("#{event.address} | SUCCEEDED | #{event.duration}s")
+ end
+
+ def failed(event)
+ log_debug("#{event.address} | FAILED | #{event.error.class}: #{event.error.message} | #{event.duration}s")
+ end
+
+ private
+
+ def logger
+ Mongo::Logger.logger
+ end
+
+ def format_message(message)
+ format("HEARTBEAT | %s".freeze, message)
+ end
+ end
+
+Similarly to command events, the application can subscribe to heartbeat
+events globally or for a specific client:
+
+.. code-block:: ruby
+
+ subscriber = HeartbeatLogSubscriber.new
+
+ Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::SERVER_HEARTBEAT, subscriber)
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test' )
+ client.subscribe( Mongo::Monitoring::SERVER_HEARTBEAT, subscriber )
+
+Sample output:
+
+.. code-block:: none
+
+ D, [2018-09-23T13:44:10.707018 #1739] DEBUG -- : HEARTBEAT | 127.0.0.1:27027 | STARTED
+ D, [2018-09-23T13:44:10.707778 #1739] DEBUG -- : HEARTBEAT | 127.0.0.1:27027 | SUCCEEDED | 0.000772381s
+
+Heartbeat Event Intervals
+-------------------------
+
+When connected to MongoDB 4.2 and earlier servers, the Ruby driver by default
+issues heartbeats every ``:heartbeat_frequency`` (Ruby client option) seconds,
+and heartbeats are non-overlapping (the succeeded event for a heartbeat is
+guaranteed to be published before the started event for the next heartbeat is
+published). When connected to MongoDB 4.4 and later servers, the driver uses
+multiple monitoring threads and a more complex heartbeat protocol designed
+to detect changes in server state quicker; as a result, heartbeat event
+intervals can be more irregular and heartbeat events can overlap. Specifically,
+an *awaited heartbeat* can start or finish while a *non-awaited heartbeat*
+is in progress, and vice versa. Use the ``ServerHeartbeatStarted#awaited?``,
+``ServerHeartbeatSucceeded#awaited?`` and ``ServerHeartbeatFailed#awaited?``
+methods to distinguish between non-awaited and awaited heartbeats.
+
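+For example, a minimal sketch extending the ``HeartbeatLogSubscriber`` above to
+report whether each successful heartbeat was awaited:
+
+.. code-block:: ruby
+
+  class AwaitedHeartbeatLogSubscriber < HeartbeatLogSubscriber
+    def succeeded(event)
+      kind = event.awaited? ? 'awaited' : 'non-awaited'
+      log_debug("#{event.address} | SUCCEEDED | #{kind} | #{event.duration}s")
+    end
+  end
+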
+When a client is attempting to perform an operation and it does not have a
+suitable server, the deployment is scanned more frequently - each server can
+be polled up to every 500 milliseconds. It is also possible for the application
+to request a manual scan of a particular server; the driver enforces the
+500 millisecond minimum interval between scans.
+
+Connection Pool And Connection Monitoring
+=========================================
+
+Each client maintains a connection pool for each server in the deployment that
+it is aware of, and publishes events for both connection pools and individual
+connections. To subscribe to these events, define a subscriber class implementing
+the method ``published``, which takes a single parameter for the event that
+is being published. Note that future versions of the driver may introduce
+additional events published through this mechanism.
+
+The following events are currently implemented by the driver, following
+the `CMAP specification `_:
+
+- PoolCreated
+- PoolCleared
+- PoolClosed
+- ConnectionCreated
+- ConnectionReady
+- ConnectionClosed
+- ConnectionCheckOutStarted
+- ConnectionCheckOutFailed
+- ConnectionCheckOutSucceeded
+- ConnectionCheckedIn
+
+The driver provides a logging subscriber which may be used to log all
+connection pool and connection-related events. This subscriber is not enabled
+by default because it will create log entries for each operation performed
+by the application. To enable this subscriber globally or per client:
+
+.. code-block:: ruby
+
+ Mongo::Monitoring::Global.subscribe(
+ Mongo::Monitoring::CONNECTION_POOL,
+ Mongo::Monitoring::CmapLogSubscriber.new)
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test' )
+ subscriber = Mongo::Monitoring::CmapLogSubscriber.new
+ client.subscribe( Mongo::Monitoring::CONNECTION_POOL, subscriber )
+
+Sample output:
+
+.. code-block:: none
+
+ D, [2019-05-06T17:23:21.595412 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.595584 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.603549 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.603616 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.603684 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.604079 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.605759 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.605784 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.605817 #8576] DEBUG -- : MONGODB | EVENT: #
+ D, [2019-05-06T17:23:21.605852 #8576] DEBUG -- : MONGODB | EVENT: #
+
+
+Disabling Monitoring
+====================
+
+To turn off monitoring, set the client monitoring option to ``false``:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test', :monitoring => false )
+
+
+Excluded and Redacted Events
+============================
+
+The Ruby driver does not publish, and occasionally redacts, some events via the
+command monitoring mechanism:
+
+1. If the command belongs to a particular subset of redacted commands, or
+ contains keys that trigger payload redaction, an empty payload will be
+ provided for security reasons. The full payload can be accessed by setting
+ the ``MONGO_RUBY_DRIVER_UNREDACT_EVENTS`` environment variable to ``1``, ``true`` or ``yes``. The
+ following commands are redacted:
+
+ - ``authenticate``
+ - ``saslStart``
+ - ``saslContinue``
+ - ``getnonce``
+ - ``createUser``
+ - ``updateUser``
+ - ``copydbgetnonce``
+ - ``copydbsaslstart``
+ - ``copydb``
+2. If the command is a handshake command, either ``ismaster`` or ``hello``, on
+ a non-monitoring connection, no event is published at all.
+3. Commands sent over monitoring connections (such as ismaster and hello) do
+ not publish command monitoring events. Instead, every time a server is
+ checked a server heartbeat event is published. The server heartbeat events
+ do not include command or reply payloads.
+4. If the command is a handshake command and the ``speculativeAuthenticate``
+   option is set, the command will be redacted, and an empty payload will
+   be provided.
diff --git a/source/reference/projection.txt b/source/reference/projection.txt
new file mode 100644
index 000000000..bc3881690
--- /dev/null
+++ b/source/reference/projection.txt
@@ -0,0 +1,68 @@
+**********
+Projection
+**********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+By default, queries in MongoDB return all fields in matching
+documents. To limit the amount of data that MongoDB sends to
+applications, you can include a
+:manual:`projection`
+document in the query operation.
+
+Projection Document
+===================
+
+The projection document limits the fields to return for all
+matching documents. The projection document can specify the
+inclusion of fields or the exclusion of fields, and has the
+following form:
+
+.. code-block:: javascript
+
+  { 'projection': { field1: <value>, field2: <value>, ... } }
+
+``<value>`` may be ``0`` (or ``false``) to exclude the field, or
+``1`` (or ``true``) to include it. With the exception of the ``_id``
+field, you may not have both inclusions and exclusions in the same
+projection document.
+
+Examples
+========
+
+The following code example uses the ``restaurants`` sample dataset.
+
+To return only the ``name``, ``cuisine`` and ``_id`` fields for
+documents that match the query filter, explicitly include the ``name``
+and ``cuisine`` fields in the projection document. The ``_id`` field is
+included automatically unless specifically excluded.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ collection = client[:restaurants]
+
+ collection.find({}, { 'projection' =>
+ { 'name' => 1, 'cuisine' => 1 } }).limit(5).each do |doc|
+ p doc
+ end
+
+To return ``name`` and ``cuisine`` but exclude all other fields,
+including ``_id``, use the following projection document:
+
+.. code-block:: javascript
+
+ { 'projection' => { 'name' => 1, 'cuisine' => 1, '_id' => 0 } }
+
+
+To return all fields *except* the address field, use the following:
+
+.. code-block:: javascript
+
+ { 'projection' => { 'address' => 0 } }
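+
+For instance, reusing the ``collection`` from the first example, the exclusion
+projection is passed to ``find`` in the same way:
+
+.. code-block:: ruby
+
+  collection.find({}, { 'projection' => { 'address' => 0 } }).limit(5).each do |doc|
+    p doc
+  end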
diff --git a/source/reference/query-cache.txt b/source/reference/query-cache.txt
new file mode 100644
index 000000000..772d24ad2
--- /dev/null
+++ b/source/reference/query-cache.txt
@@ -0,0 +1,313 @@
+***********
+Query Cache
+***********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 2
+ :class: singlecol
+
+.. _query-cache:
+
+The MongoDB Ruby driver provides a built-in query cache. When enabled, the
+query cache saves the results of previously-executed find and aggregation
+queries. When those same queries are performed again, the driver returns
+the cached results to prevent unnecessary roundtrips to the database.
+
+Usage
+=====
+
+The query cache is disabled by default. It can be enabled on the global
+scope as well as within the context of a specific block. The driver also
+provides a :ref:`Rack middleware ` to enable the
+query cache automatically for each web request.
+
+To enable the query cache globally:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.enabled = true
+
+Similarly, to disable it globally:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.enabled = false
+
+To enable the query cache within the context of a block:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.cache do
+ Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music') do |client|
+ client['artists'].find(name: 'Flying Lotus').first
+ #=> Queries the database and caches the result
+
+ client['artists'].find(name: 'Flying Lotus').first
+ #=> Returns the previously cached result
+ end
+ end
+
+And to disable the query cache in the context of a block:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.uncached do
+ Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music') do |client|
+ client['artists'].find(name: 'Flying Lotus').first
+ #=> Sends the query to the database; does NOT cache the result
+
+ client['artists'].find(name: 'Flying Lotus').first
+ #=> Queries the database again
+ end
+ end
+
+You may check whether the query cache is enabled at any time by calling
+``Mongo::QueryCache.enabled?``, which will return ``true`` or ``false``.
+
+
+Interactions With Fibers
+========================
+
+The query cache enablement flag is stored in fiber-local storage (using
+`Thread.current `_).
+This, in principle, permits query cache state to be per fiber, although
+this is not currently tested.
+
+There are methods in the Ruby standard library, like ``Enumerable#next``,
+that `utilize fibers `_
+in their implementation. These methods would not see the query cache
+enablement flag when it is set by the application, and consequently would
+not use the query cache. For example, the following code does not utilize
+the query cache despite requesting it:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.enabled = true
+
+ client['artists'].find({}, limit: 1).to_enum.next
+ # Issues the query again.
+ client['artists'].find({}, limit: 1).to_enum.next
+
+Rewriting this code to use ``first`` instead of ``next`` would make it use
+the query cache:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.enabled = true
+
+ client['artists'].find({}, limit: 1).first
+ # Utilizes the cached result from the first query.
+ client['artists'].find({}, limit: 1).first
+
+
+.. _query-cache-matching:
+
+Query Matching
+==============
+
+A query is eligible to use cached results if it matches the original query
+that produced the cached results. Two queries are considered matching if they
+are identical in the following values:
+
+* Namespace (the database and collection on which the query was performed)
+* Selector (for aggregations, the aggregation pipeline stages)
+* Skip
+* Sort
+* Projection
+* Collation
+* Read Concern
+* Read Preference
+
+For example, if you perform one query, and then perform a mostly identical query
+with a different sort order, those queries will not be considered matching,
+and the second query will not use the cached results of the first.
+
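+For instance, the following sketch (reusing the ``client`` from the examples
+above, with an illustrative ``genre`` field) shows a sort mismatch:
+
+.. code-block:: ruby
+
+  Mongo::QueryCache.cache do
+    client['artists'].find(genre: 'Rock').sort(name: 1).to_a
+    # Queries the database and caches the result
+
+    client['artists'].find(genre: 'Rock').sort(name: -1).to_a
+    # The sort differs, so this does not match the cached query and the
+    # database is queried again
+  end
+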
+Limits
+======
+
+When performing a query with a limit, the query cache will reuse an existing
+cached query with a larger limit if one exists. For example:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.cache do
+ Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music') do |client|
+ client['artists'].find(genre: 'Rock', limit: 10)
+ #=> Queries the database and caches the result
+
+ client['artists'].find(genre: 'Rock', limit: 5)
+ #=> Returns the first 5 results from the cached query
+
+ client['artists'].find(genre: 'Rock', limit: 20)
+ #=> Queries the database again and replaces the previously cached query results
+ end
+ end
+
+Cache Invalidation
+==================
+
+The query cache is cleared in part or in full on every write operation. Most
+write operations will clear the results of any queries that were performed on the same
+collection that is being written to. Some operations will clear the entire
+query cache.
+
+The following operations will clear cached query results on the same database and
+collection (including during bulk writes):
+
+* ``insert_one``
+* ``update_one``
+* ``replace_one``
+* ``update_many``
+* ``delete_one``
+* ``delete_many``
+* ``find_one_and_delete``
+* ``find_one_and_update``
+* ``find_one_and_replace``
+
+The following operations will clear the entire query cache:
+
+* aggregation with ``$merge`` or ``$out`` pipeline stages
+* ``commit_transaction``
+* ``abort_transaction``
+
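+For example, a write to a collection clears only the cached results for that
+collection's namespace. A minimal sketch, reusing the ``client`` connected to
+the ``music`` database from the earlier examples:
+
+.. code-block:: ruby
+
+  Mongo::QueryCache.cache do
+    client['artists'].find(genre: 'Rock').to_a
+    # Queries the database and caches the result
+
+    client['artists'].insert_one(name: 'Can', genre: 'Rock')
+    # Clears the cached results for the music.artists collection
+
+    client['artists'].find(genre: 'Rock').to_a
+    # Queries the database again
+  end
+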
+Manual Cache Invalidation
+=========================
+
+You may clear the query cache at any time with the following method:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.clear
+
+This will remove all cached query results.
+
+Transactions
+============
+
+Queries are cached within the context of a transaction, but the entire
+cache will be cleared when the transaction is committed or aborted.
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.cache do
+ Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music') do |client|
+ session = client.start_session
+
+ session.with_transaction do
+ client['artists'].insert_one({ name: 'Fleet Foxes' }, session: session)
+
+ client['artists'].find({}, session: session).first
+ #=> { name: 'Fleet Foxes' }
+ #=> Queries the database and caches the result
+
+ client['artists'].find({}, session: session).first
+ #=> { name: 'Fleet Foxes' }
+ #=> Returns the previously cached result
+
+ session.abort_transaction
+ end
+
+ client['artists'].find.first
+ #=> nil
+ # The query cache was cleared on abort_transaction
+ end
+ end
+
+.. note::
+
+ Transactions are often performed with a "snapshot" read concern level. Keep
+ in mind that a query with a "snapshot" read concern cannot return cached
+ results from a query without the "snapshot" read concern, so it is possible
+ that a transaction may not use previously cached queries.
+
+ To understand when a query will use a cached result, see the
+ :ref:`Query Matching ` section.
+
+Aggregations
+============
+
+The query cache also caches the results of aggregation pipelines. For example:
+
+.. code-block:: ruby
+
+ Mongo::QueryCache.cache do
+ Mongo::Client.new([ '127.0.0.1:27017' ], database: 'music') do |client|
+ client['artists'].aggregate([ { '$match' => { name: 'Fleet Foxes' } } ]).first
+ #=> Queries the database and caches the result
+
+ client['artists'].aggregate([ { '$match' => { name: 'Fleet Foxes' } } ]).first
+ #=> Returns the previously cached result
+ end
+ end
+
+.. note::
+
+ Aggregation results are cleared from the cache during every write operation,
+ with no exceptions.
+
+System Collections
+==================
+
+MongoDB stores system information in collections that use the ``database.system.*``
+namespace pattern. These are called system collections.
+
+Data in system collections can change due to activity not triggered by the
+application (such as internal server processes) and as a result of a variety of
+database commands issued by the application. Because of the difficulty of
+determining when the cached results for system collections should be expired,
+queries on system collections bypass the query cache.
+
+You may read more about system collections in the
+:manual:`MongoDB documentation `.
+
+.. note::
+
+ Even when the query cache is enabled, query results from system collections
+ will not be cached.
+
+
+.. _query-cache-middleware:
+
+Query Cache Middleware
+======================
+
+Rack Middleware
+---------------
+
+The driver provides a Rack middleware which enables the query cache for the
+duration of each web request. Below is an example of how to enable the
+query cache middleware in a Ruby on Rails application:
+
+.. code-block:: ruby
+
+ # config/application.rb
+
+ # Add Mongo::QueryCache::Middleware at the bottom of the middleware stack
+ # or before other middleware that queries MongoDB.
+ config.middleware.use Mongo::QueryCache::Middleware
+
+Please refer to the `Rails on Rack guide
+`_
+for more information about using Rack middleware in Rails applications.
+
+
+.. _query-cache-active-job-middleware:
+
+Active Job Middleware
+---------------------
+
+The driver provides an Active Job middleware which enables the query cache for
+each job. Below is an example of how to enable the query cache Active Job
+middleware in a Ruby on Rails application:
+
+.. code-block:: ruby
+
+ # config/application.rb
+
+ ActiveSupport.on_load(:active_job) do
+ include Mongo::QueryCache::Middleware::ActiveJob
+ end
diff --git a/source/reference/schema-operations.txt b/source/reference/schema-operations.txt
new file mode 100644
index 000000000..8d67d569f
--- /dev/null
+++ b/source/reference/schema-operations.txt
@@ -0,0 +1,19 @@
+.. _schema-operations:
+
+*****************
+Schema Operations
+*****************
+
+.. default-domain:: mongodb
+
+This section describes schema-related operations that the driver provides,
+including managing databases, collections, indexes and users.
+
+.. toctree::
+ :titlesonly:
+
+ /reference/database-tasks
+ /reference/collection-tasks
+ /reference/indexing
+ /reference/search-indexes
+ /reference/collations
diff --git a/source/reference/search-indexes.txt b/source/reference/search-indexes.txt
new file mode 100644
index 000000000..91f99670f
--- /dev/null
+++ b/source/reference/search-indexes.txt
@@ -0,0 +1,129 @@
+********************
+Atlas Search Indexes
+********************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+If you are using a database hosted by MongoDB Atlas, the driver provides the
+ability to create, drop and view `Atlas search indexes `_
+on a collection through the ``search_indexes`` attribute:
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new(your_atlas_uri, database: 'music')
+ client[:bands].search_indexes
+ # => # ...>
+
+
+Creating Search Indexes
+=======================
+
+Search indexes can be created one at a time, or several can be created in
+parallel in a single operation.
+
+To create a single index, use ``search_indexes#create_one``, passing the index
+definition as the first argument, and an optional name for the index as the
+second argument.
+
+.. code-block:: ruby
+
+ client[:bands].search_indexes.create_one({ dynamic: true })
+
+ client[:bands].search_indexes.create_one(
+ {
+ dynamic: false,
+ fields: {
+ name: { type: 'string', analyzer: 'lucene.simple' }
+ }
+ },
+ 'band-name-index'
+ )
+
+To create multiple indexes, use ``search_indexes#create_many``, which accepts
+an array of index specifications. Unlike with ``create_one``, each index
+specification is a hash with at least a ``definition`` key, which
+defines the index. Each hash may also specify a ``name`` key to name
+the index.
+
+.. code-block:: ruby
+
+ client[:bands].search_indexes.create_many([
+ { definition: { dynamic: true } },
+ { name: 'band-name-index',
+ definition: {
+ dynamic: false,
+ fields: {
+ name: { type: 'string', analyzer: 'lucene.simple' }
+ }
+ }
+ },
+ ])
+
+Note that whether you call ``create_one`` or ``create_many``, the
+method will return immediately, before the indexes are created. The
+indexes are then created in the background, asynchronously.
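+
+If your application needs to wait for a newly created index to become usable,
+it can poll the index list. The following is a minimal sketch, assuming the
+``'band-name-index'`` name from the example above and that each enumeration of
+``search_indexes`` fetches the current state from the server:
+
+.. code-block:: ruby
+
+ index = nil
+ until index && index['queryable']
+   client[:bands].search_indexes.each do |index_spec|
+     # Remember the latest state of the index we are waiting for.
+     index = index_spec if index_spec['name'] == 'band-name-index'
+   end
+   sleep 1 unless index && index['queryable']
+ end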
+
+
+Update Search Indexes
+=====================
+
+You can programmatically update an Atlas search index. For example, you
+might do this to change the analyzer used, or to provide an explicit field
+mapping, instead of a dynamic one. To do this, use the ``search_indexes#update_one``
+method:
+
+.. code-block:: ruby
+
+ client[:bands].search_indexes.update_one(new_definition, id: index_id)
+
+ client[:bands].search_indexes.update_one(new_definition, name: index_name)
+
+Indexes may be identified by either id or name, but you must specify one
+or the other. The new index definition must be a complete definition: it
+replaces the existing definition in its entirety.
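+
+For example, to switch the hypothetical ``'band-name-index'`` from the creation
+example to a different analyzer, pass a full replacement definition. This is a
+minimal sketch:
+
+.. code-block:: ruby
+
+ new_definition = {
+   dynamic: false,
+   fields: {
+     name: { type: 'string', analyzer: 'lucene.standard' }
+   }
+ }
+
+ client[:bands].search_indexes.update_one(new_definition, name: 'band-name-index')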
+
+To get the id or name of an index that you wish to update, you can
+`list the search indexes <#listing-search-indexes>`_.
+
+
+Dropping Search Indexes
+=======================
+
+To drop Atlas search indexes, call ``search_indexes#drop_one`` and
+provide either the ``id`` or the ``name`` of the index you wish to
+drop.
+
+.. code-block:: ruby
+
+ client[:bands].search_indexes.drop_one(id: index_id)
+
+ client[:bands].search_indexes.drop_one(name: index_name)
+
+In either case, the method will return immediately and the index will
+be dropped in the background, asynchronously.
+
+To get the id or name of an index that you wish to drop, you can
+`list the search indexes <#listing-search-indexes>`_.
+
+
+Listing Search Indexes
+======================
+
+To list the available search indexes, iterate over the
+``search_indexes`` object:
+
+.. code-block:: ruby
+
+ client[:bands].search_indexes.each do |index_spec|
+ p index_spec['id']
+ p index_spec['name']
+ p index_spec['status']
+ p index_spec['queryable']
+ p index_spec['latestDefinition']
+ end
diff --git a/source/reference/sessions.txt b/source/reference/sessions.txt
new file mode 100644
index 000000000..64e5db692
--- /dev/null
+++ b/source/reference/sessions.txt
@@ -0,0 +1,149 @@
+.. _sessions:
+
+********
+Sessions
+********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+
+Version 3.6 of the MongoDB server introduces the concept of logical sessions for clients.
+A session is an abstract concept that represents a set of sequential operations executed
+by an application that are related in some way. A session object can be created via a ``Mongo::Client``
+and passed to operation methods that should be executed in the context of that session.
+
+Please note that session objects are not thread safe. They must only be used by one thread at a time.
+
+.. _create-session:
+
+Creating a session from a ``Mongo::Client``
+===========================================
+
+A session can be created by calling the ``start_session`` method on a client and passing it a block:
+
+.. code-block:: ruby
+
+ client.start_session do |session|
+ # work with the session
+ end
+
+When using the block form, the session will be automatically ended by the driver after the block finishes executing.
+
+It is valid to call ``start_session`` with no options set. This will result in a
+session that has no effect on the operations performed in the context of that session,
+other than to include a session ID in commands sent to the server. Please see the API docs for all supported
+session options.
+
+An error will be raised if the driver is connected to a deployment that does not support sessions and the
+``start_session`` method is called.
+
+Note that server sessions are discarded server-side if they are not used for a certain period of time.
+If the application calls ``#start_session`` on a client and then waits more than 1 minute before using
+the session, it risks getting errors due to the session going stale before it is used.
+
+
+
+Using a session
+===============
+A session object can be passed to most driver methods so that the operation can be executed in the
+context of that session. Please see the API docs for which methods support a session argument.
+
+Create a session and execute an insert, then a find using that session:
+
+.. code-block:: ruby
+
+ client.start_session do |session|
+ client[:artists].insert_one({ :name => 'FKA Twigs' }, session: session)
+ client[:artists].find({ :name => 'FKA Twigs' }, limit: 1, session: session).first
+ end
+
+If you would like to call methods on a ``Mongo::Collection::View`` in the context of a particular
+session, you can create the ``Mongo::Collection::View`` with the session and then call methods on it:
+
+.. code-block:: ruby
+
+ client.start_session(causal_consistency: true) do |session|
+ view = client[:artists].find({ :name => 'FKA Twigs' }, session: session)
+ view.count # will use the session
+ end
+
+You can also pass the session option to the methods directly. This session will override any session associated with
+the ``Mongo::Collection::View``:
+
+.. code-block:: ruby
+
+ client.start_session do |session|
+ client.start_session do |second_session|
+ view = client[:artists].find({ :name => 'FKA Twigs' }, session: session)
+ view.count(session: second_session) # will use the second_session
+ end
+ end
+
+Alternative way to create a session
+===================================
+
+A session can be created by calling the ``start_session`` method on a client:
+
+.. code-block:: ruby
+
+ session = client.start_session
+
+When ``start_session`` is used without passing a block to it, the driver does not automatically clean up the session, which can result in an accumulation of sessions on the server. Use `end_session <#end-a-session>`_ to manually end the session. The server will automatically clean up old sessions after a timeout, but the application should end sessions when they are no longer needed.
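+
+One way to make sure a manually created session is always ended is to wrap its
+use in ``begin``/``ensure``. A minimal sketch:
+
+.. code-block:: ruby
+
+ session = client.start_session
+ begin
+   client[:artists].find({ :name => 'FKA Twigs' }, session: session).first
+ ensure
+   session.end_session
+ end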
+
+Unacknowledged Writes
+=====================
+
+Unacknowledged writes are only allowed outside the session mechanism; if an explicit session is supplied for an
+unacknowledged write, the driver will not send the session id with the operation. Similarly, the driver will not use
+an implicit session for an unacknowledged write.
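+
+For illustration, the following sketch performs an unacknowledged write
+(write concern ``w: 0``) while an explicit session is supplied; the operation
+is accepted, but the driver does not send the session id with it:
+
+.. code-block:: ruby
+
+ collection = client[:artists, write_concern: { w: 0 }]
+
+ client.start_session do |session|
+   # The session id is not sent because this write is unacknowledged.
+   collection.insert_one({ :name => 'Sigur Ros' }, session: session)
+ end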
+
+Causal Consistency
+==================
+A causally consistent session will let you read your writes and guarantee monotonically increasing
+reads from secondaries.
+To create a causally consistent session, set the ``causal_consistency`` option to true:
+
+.. code-block:: ruby
+
+ session = client.start_session(causal_consistency: true)
+
+ # The update message goes to the primary.
+ collection = client[:artists]
+ collection.update_one({ '_id' => 1 }, { '$set' => { 'x' => 0 } }, session: session)
+
+ # Read your write, even when reading from a secondary!
+ collection.find({ '_id' => 1 }, session: session).first
+
+ # This query returns data at least as new as the previous query,
+ # even if it chooses a different secondary.
+ collection.find({ '_id' => 2 }, session: session).first
+
+Since unacknowledged writes don't receive a response from the server (or don't wait for a response), the driver
+has no way of keeping track of where the unacknowledged write is in logical time. Therefore, causally
+consistent reads are not causally consistent with unacknowledged writes.
+
+Note that if you set the ``causal_consistency`` option to ``nil``, as in ``causal_consistency: nil``,
+it will be interpreted as false.
+
+.. _end-session:
+
+End a session
+=============
+To end a session, call the ``end_session`` method:
+
+.. code-block:: ruby
+
+ session.end_session
+
+The Ruby driver will then add the id for the corresponding server session to a pool for reuse.
+When a client is closed, the driver will send a command to the server to end all sessions it has cached
+in its server session pool. You may see this command in your logs when a client is closed.
+
+Note that when using the `block syntax <#creating-a-session-from-a-mongo-client>`_ for ``start_session``,
+the session is automatically ended after the block finishes executing.
diff --git a/source/reference/text-search.txt b/source/reference/text-search.txt
new file mode 100644
index 000000000..de2e09837
--- /dev/null
+++ b/source/reference/text-search.txt
@@ -0,0 +1,51 @@
+***********
+Text Search
+***********
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+MongoDB provides :manual:`text indexes `
+to support text search queries on string content. Text indexes
+can include any field whose value is a string or an array of
+string elements.
+
+.. note::
+
+ MongoDB Atlas also provides
+ `Atlas Search `_
+ which is a more powerful and flexible text search solution.
+ The rest of this page discusses text indexes and not Atlas Search.
+
+To perform a text search with the Ruby driver, first create a text
+index with ``indexes.create_one()``. The following command creates a
+text index on the ``name`` field of the ``restaurants`` collection in
+the ``test`` database.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ client['restaurants'].indexes.create_one( { :name => 'text' } )
+
+Once the text index is created you can use it as part of a query. The
+following code finds all documents in the ``restaurants`` collection
+which contain the word ``garden``, without case sensitivity.
+
+.. code-block:: ruby
+
+ client = Mongo::Client.new([ '127.0.0.1:27017' ], :database => 'test')
+ client[:restaurants].find(
+ { '$text' =>
+ { '$search' => 'garden', '$caseSensitive' => false }
+ }
+ ).each do |document|
+
+ #=> Yields a BSON::Document.
+
+ end
+
diff --git a/source/reference/transactions.txt b/source/reference/transactions.txt
new file mode 100644
index 000000000..a4e857132
--- /dev/null
+++ b/source/reference/transactions.txt
@@ -0,0 +1,174 @@
+************
+Transactions
+************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+
+Version 4.0 of the MongoDB server introduces
+`multi-document transactions `_.
+(Updates to multiple fields within a single document are atomic in all
+versions of MongoDB.) Ruby driver version 2.6.0 adds support for transactions.
+
+.. _using-transactions:
+
+Using Transactions
+==================
+
+In order to start a transaction, the application must have a :ref:`session `.
+
+The recommended way to use transactions is to utilize the ``with_transaction``
+helper method:
+
+.. code-block:: ruby
+
+ session = client.start_session
+ session.with_transaction do
+ collection.insert_one({hello: 'world'}, session: session)
+ end
+
+The ``with_transaction`` helper does the following:
+
+- It starts a transaction prior to calling the supplied block, and commits
+ the transaction when the block finishes.
+- If any of the operations in the block, or the commit operation, result in
+ a transient transaction error, the block and/or the commit will be executed
+ again.
+
+The block should be idempotent, because it may be called multiple times.
+
+The block may explicitly commit or abort the transaction, by calling
+``commit_transaction`` or ``abort_transaction``; in this case ``with_transaction``
+will not attempt to commit or abort (but may still retry the block on
+transient transaction errors propagated out of the block).
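+
+For example, the block might decide to abort based on application logic. A
+minimal sketch, where ``discard_changes?`` is a hypothetical predicate:
+
+.. code-block:: ruby
+
+ session = client.start_session
+ session.with_transaction do
+   collection.insert_one({hello: 'world'}, session: session)
+
+   # After an explicit abort, with_transaction will not attempt to commit.
+   session.abort_transaction if discard_changes?
+ end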
+
+The block will also be retried if the transaction's commit result is unknown.
+This may happen, for example, if the cluster undergoes an election during the
+commit. In this case when the block is retried, the primary server of the
+topology would likely have changed.
+
+Currently ``with_transaction`` will stop retrying the block and the commit once
+120 seconds pass since the beginning of its execution. This time is not
+configurable and may change in a future driver version. Note that this
+does not guarantee that the overall runtime of ``with_transaction`` will be 120
+seconds or less - just that once 120 seconds of wall clock time have elapsed,
+further retry attempts will not be initiated.
+
+A low level API is also available if more control over transactions is desired.
+
+``with_transaction`` takes the same options as ``start_transaction`` does,
+which are read concern, write concern and read preference:
+
+.. code-block:: ruby
+
+ session = client.start_session
+ session.with_transaction(
+ read_concern: {level: :majority},
+ write_concern: {w: 3},
+ read: {mode: :primary}
+ ) do
+ collection.insert_one({hello: 'world'}, session: session)
+ end
+
+
+Low Level API
+=============
+
+A transaction can be started by calling the ``start_transaction`` method on a session:
+
+.. code-block:: ruby
+
+ session = client.start_session
+ session.start_transaction
+
+It is also possible to specify read concern, write concern and read preference
+when starting a transaction:
+
+.. code-block:: ruby
+
+ session = client.start_session
+ session.start_transaction(
+ read_concern: {level: :majority},
+ write_concern: {w: 3},
+ read: {mode: :primary})
+
+To persist changes made in a transaction to the database, the transaction
+must be explicitly committed. If a session ends with an open transaction,
+`the transaction is aborted `_.
+A transaction may also be aborted explicitly.
+
+To commit or abort a transaction, call ``commit_transaction`` or
+``abort_transaction`` on the session instance:
+
+.. code-block:: ruby
+
+ session.commit_transaction
+
+ session.abort_transaction
+
+Note: an outstanding transaction can hold locks on various objects in the
+server, such as the database. For example, the drop call in the following
+snippet will hang for `transactionLifetimeLimitSeconds
+`_
+seconds (default 60) until the server expires and aborts the transaction:
+
+.. code-block:: ruby
+
+ c1 = Mongo::Client.new(['127.0.0.1:27017']).use(:test_db)
+ session = c1.start_session
+ c1['foo'].insert_one(test: 1)
+ session.start_transaction
+ c1['foo'].insert_one({test: 2}, session: session)
+
+ c2 = Mongo::Client.new(['127.0.0.1:27017']).use(:test_db)
+ # hangs
+ c2.database.drop
+
+Since transactions are associated with server-side sessions, closing the client
+does not abort a transaction that this client initiated; the application must
+either call ``abort_transaction`` or wait for the transaction to time out on
+the server side. In addition to committing or aborting the transaction, an
+application can also end the session which will abort a transaction on this
+session if one is in progress:
+
+.. code-block:: ruby
+
+ session.end_session
+
+ c2 = Mongo::Client.new(['127.0.0.1:27017']).use(:test_db)
+ # ok
+ c2.database.drop
+
+
+Retrying Commits
+================
+
+The transaction commit `can be retried
+`_
+if it fails. Here is the Ruby code to do so:
+
+.. code-block:: ruby
+
+ begin
+ session.commit_transaction
+ rescue Mongo::Error => e
+ if e.label?('UnknownTransactionCommitResult')
+ retry
+ else
+ raise
+ end
+ end
+
+
+Transaction Nesting
+===================
+
+MongoDB does not support nesting transactions. Attempting to call
+``start_transaction`` or ``with_transaction`` when a transaction is already
+in progress will result in an error.
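+
+To illustrate, a minimal sketch; the example rescues the generic
+``Mongo::Error``, since the exact error class is not important here:
+
+.. code-block:: ruby
+
+ session = client.start_session
+ session.start_transaction
+
+ begin
+   # Starting a second transaction on the same session is an error.
+   session.start_transaction
+ rescue Mongo::Error => e
+   puts "nested transaction rejected: #{e.message}"
+ end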
diff --git a/source/reference/user-management.txt b/source/reference/user-management.txt
new file mode 100644
index 000000000..5363b9a2f
--- /dev/null
+++ b/source/reference/user-management.txt
@@ -0,0 +1,224 @@
+.. _user-management:
+
+***************
+User Management
+***************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 1
+ :class: singlecol
+
+The Mongo Ruby Driver provides a set of methods for managing users in a
+MongoDB deployment. All of these methods are defined on the
+``Mongo::Auth::User::View`` class, which defines the behavior for
+performing user-related operations on a database. You can access a database's
+user view by calling the ``users`` method on the corresponding
+``Mongo::Database`` object:
+
+.. code-block:: ruby
+
+ client.database.users
+
+Note that this will open a view on the database to which the client is already
+connected. To interact with the users defined on a different database, call
+the client's ``use`` method and pass in the name of the database with which
+you want to connect:
+
+.. code-block:: ruby
+
+ client.use(:users).database.users
+
+In this example, all operations would be performed on the ``users`` database.
+
+For more information about users and user management, see MongoDB's
+:manual:`online documentation `.
+
+
+Users and Databases
+===================
+
+When a client connects to the server, MongoDB distinguishes the database
+that the client will perform operations on from the :ref:`auth source `
+which is the database storing the user that the client is authenticating as.
+
+In many cases, the auth source is the same as the database. When they differ,
+user management operations must be done on the auth source database. For
+example, to create a user authenticating with an X.509 certificate, which must
+be defined on the ``$external`` database:
+
+.. code-block:: ruby
+
+ client.use('$external').database.users.create(
+ 'C=US,ST=New York,L=New York City,O=MongoDB,OU=x509,CN=localhost',
+ roles: [{role: 'read', db: 'admin'}],
+ )
+
+Note that the auth source is not specified when creating the user; the auth
+source is only used during the authentication process. If ``#create`` is invoked with
+a ``User`` object with ``auth_source`` set, the auth source is ignored for
+the purposes of user management.
+
+
+Creating Users
+==============
+
+There are two ways to create a new database user with the Ruby Driver.
+
+The simplest way to create a new user is to use the ``create`` method,
+passing in a username, password, and roles:
+
+.. code-block:: ruby
+
+ client.database.users.create(
+ 'alanturing',
+ password: 'enigma',
+ roles: [ Mongo::Auth::Roles::READ_WRITE ]
+ )
+
+Another way to create a user is to first create a ``Mongo::Auth::User`` object
+with all the user information and then pass that object into the ``create``
+method instead.
+
+.. code-block:: ruby
+
+ user = Mongo::Auth::User.new(
+ user: 'alanturing',
+ password: 'enigma',
+ roles: [ Mongo::Auth::Roles::READ_WRITE ]
+ )
+
+ client.database.users.create(user)
+
+Note that your new user's credentials will be stored in whatever database your
+``client`` object is currently connected to. This will be your user's
+``auth_source``, and you must be connected to that same database in order to
+update, remove, or get information about the user you just created in the future.
+
+The ``create`` method takes a ``Hash`` of options as an optional second argument.
+The ``:roles`` option allows you to grant permissions to the new user.
+For example, the ``Mongo::Auth::Roles::READ_WRITE`` role grants the user the
+ability to both read from and write to the database in which they were created.
+Each role can be specified as a ``String`` or as a ``Hash``. If you would like
+to grant permissions to a user on a database other than the one on which they
+were created, you can pass that database name in the role ``Hash``. To create
+a user ``alanturing`` with permission to read and write on the ``machines``
+database, you could execute the following code:
+
+.. code-block:: ruby
+
+ client.database.users.create(
+ 'alanturing',
+ password: 'enigma',
+ roles: [{ role: Mongo::Auth::Roles::READ_WRITE, db: 'machines' }]
+ )
+
+For more information about roles in MongoDB, see the
+:manual:`Built-in roles` documentation.
+
+In addition to the ``:roles`` option, the ``create`` method supports a
+``:session`` option, which allows you to specify a ``Mongo::Session`` object
+to use for this operation, as well as a ``:write_concern`` option,
+which specifies the write concern of this operation when performed on a
+replica set.
+
+.. seealso::
+ :manual:`Built-in roles`
+ :manual:`Write Concerns`
+ :ref:`Sessions`
+
+
+User Information
+================
+
+To view information about a user that already exists in the database, use the
+``info`` method:
+
+.. code-block:: ruby
+
+ client.database.users.info('alanturing')
+
+If the user exists, this method will return an ``Array`` object containing a
+``Hash`` with information about the user, such as their id, username, the
+database they were created on, and their roles. If the user doesn't exist,
+this method will return an empty ``Array``.
+
+The ``info`` method also takes an optional ``Hash`` of options as a second
+argument. Currently, the only supported option is ``:session``, which allows
+you to specify a ``Mongo::Session`` object to use for this operation.
+
+The Ruby Driver does not have a method that lists all of the users that
+currently exist in a database.
+
+.. seealso::
+ :ref:`Sessions `
+
+
+Updating Users
+==============
+
+To update a user that already exists in the database, you can use the
+``update`` method in one of two ways. The first way is to specify the name of
+the user you wish to update, along with a new set of options.
+
+.. warning::
+
+ You must include all user options in the options ``Hash``, even those options
+ whose values will remain the same. Omitting an option is the same as setting
+ it to an empty value.
+
+.. code-block:: ruby
+
+ client.database.users.update(
+ 'alanturing',
+ roles: [ Mongo::Auth::Roles::READ_WRITE ],
+ password: 'turing-test'
+ )
+
+The second way to update a user is to pass an updated ``Mongo::Auth::User``
+object to the ``update`` method in lieu of a username.
+
+.. code-block:: ruby
+
+ user = Mongo::Auth::User.new({
+ user: 'alanturing',
+ roles: [ Mongo::Auth::Roles::READ_WRITE ],
+ password: 'turing-test'
+ })
+
+ client.database.users.update(user)
+
+Optionally, the ``update`` method takes a ``Hash`` of options as a second
+argument. The two possible options for this method are ``:session``, which
+allows you to specify a ``Mongo::Session`` object on which to perform this
+operation, and ``:write_concern``, which sets a write concern if this operation
+is performed on a replica set.
+
+.. seealso::
+ :ref:`Sessions`
+ :manual:`Write Concerns`
+
+Removing Users
+==============
+
+To remove a user from the database, use the ``remove`` method:
+
+.. code-block:: ruby
+
+ client.database.users.remove('alanturing')
+
+You may pass a ``Hash`` of options as a second argument. The two supported
+options for the ``remove`` method are ``:session`` and ``:write_concern``.
+``:session`` allows you to specify a ``Mongo::Session`` object to use for
+this operation. ``:write_concern`` specifies the write concern
+of the operation if you are running this command against a replica set.
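+
+For example, to remove a user and specify a write concern for the operation,
+pass the options hash as the second argument (a minimal sketch):
+
+.. code-block:: ruby
+
+ client.database.users.remove(
+   'alanturing',
+   write_concern: { w: :majority }
+ )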
+
+The Ruby Driver does not provide a method for removing all users
+from a database.
+
+.. seealso::
+ :ref:`Sessions`
+ :manual:`Write Concerns`
diff --git a/source/reference/working-with-data.txt b/source/reference/working-with-data.txt
new file mode 100644
index 000000000..0f36c6be4
--- /dev/null
+++ b/source/reference/working-with-data.txt
@@ -0,0 +1,27 @@
+.. _working-with-data:
+
+*****************
+Working With Data
+*****************
+
+.. default-domain:: mongodb
+
+This section describes in detail the functionality that the Ruby driver
+implements for inserting, updating and retrieving data from MongoDB.
+
+.. toctree::
+ :titlesonly:
+
+ /reference/crud-operations
+ /reference/bulk-operations
+ /reference/projection
+ /reference/aggregation
+ /reference/map-reduce
+ /reference/text-search
+ /reference/geospatial-search
+ /reference/query-cache
+ /reference/gridfs
+ /reference/change-streams
+ /reference/sessions
+ /reference/transactions
+ /reference/in-use-encryption
diff --git a/source/release-notes.txt b/source/release-notes.txt
new file mode 100644
index 000000000..149dcfc85
--- /dev/null
+++ b/source/release-notes.txt
@@ -0,0 +1,389 @@
+.. _release-notes:
+
+*************
+Release Notes
+*************
+
+.. default-domain:: mongodb
+
+This page documents significant changes in driver releases.
+
+It is not an exhaustive list of changes and generally does not enumerate
+bug fixes; please consult the `releases page on GitHub
+`_ for a more
+comprehensive list of changes in each version of the driver and the
+`releases page in Jira
+`_
+for the complete list of changes, including those internal to the driver and
+its test suite.
+
+
+.. _release-notes-2.19:
+
+2.19
+====
+
+This release of the Ruby driver supports MongoDB version 7.0. The Ruby driver
+now supports Ruby 3.2. Ruby 2.5 and 2.6 are now deprecated.
+
+This release includes the following new features:
+
+- The driver now limits the number of connections established by a connection
+ pool simultaneously. By default the limit is 2. The limit can be configured
+ with the ``:max_connecting`` option of the ``Mongo::Client`` constructor.
+ The default should be sufficient for most applications. However, if your
+ application is using a large number of threads, you may need to increase
+ the limit.
+- Added support for automatic AWS credentials retrieval and authentication
+ with temporary credentials when AWS KMS is used for client side encryption.
+- Added support for automatic GCP credentials retrieval when Google Cloud Key
+ Management is used for client side encryption.
+- Added support for the Azure VM-assigned Managed Identity for automatic KMS
+ credentials when Azure Key Vault is used for client side encryption.
+- `Queryable Encryption `_ support is extended.
+- Added support for Queryable Encryption Range Indexes.
+- A `crypt_shared `_
+ library can be now used instead of ``mongocryptd``.
+- Added support for AWS IAM Roles for service accounts, EKS in particular.
+- AWS Credentials are now cached when possible.
+- Added support for creating and managing `Atlas search indexes `_ via the
+ Ruby driver.
+
+.. _release-notes-2.18:
+
+2.18
+====
+
+This release of the Ruby driver supports MongoDB version 5.2 and 6.0.
+
+This release includes the following new features:
+
+- Added support for `queryable encryption `_.
+- Added support for Azure Key Vault, Google Cloud Key Management, and any
+ KMIP compliant Key Management System to be used as master key storage for
+ client side encryption.
+- It is now possible to provide the :ref:`path to a schema map file `
+ instead of the entire schema map as an object.
+- The driver now implements the :ref:`feature flag ` mechanism
+ for incompatible changes and bug fixes. Changes gated behind feature flags
+ for 2.18 are passing view filter and options to ``aggregate`` and validation
+ of correct usage of ``update`` vs ``replace`` methods, as described below.
+- Added the ``validate_update_replace`` feature flag which validates the
+ parameters to update and replace operations. If this flag is turned on, an
+ error will be raised on an invalid update or replacement document.
+- Added the ``broken_view_options`` feature flag which allows the view options
+ to be correctly propagated to the ``aggregate``, ``count``, ``count_documents``,
+ ``distinct``, and ``estimated_document_count`` methods. When this flag is
+ switched on, the view options will be ignored in those methods.
+- The driver now permits :ref:`inserting documents with dollar-prefixed and
+ dotted keys `.
+- CRUD methods, methods for listing databases, collection, and indexes management
+ methods now support a new option ``:comment``. This option enables users to
+ specify an arbitrary comment to help trace the operation through the
+ database profiler, currentOp and logs.
+- The ``estimated_document_count`` method is now using the ``count`` server
+ command instead of ``$collStats`` aggregation pipeline stage, to support
+ operation on views. Applications using the Stable API should upgrade to
+ server versions 5.0.8 (if using MongoDB 5.0) or 5.3.2 (if using MongoDB
+ 5.1/5.2/5.3) or newer to use the ``count`` command when API strict is enabled,
+ or avoid setting ``api_strict: true`` when constructing ``Mongo::Client``
+ instances with server versions 5.0.0-5.0.7 and 5.1.0-5.3.1.
+- The ``DBRef`` class has been moved to ``bson-ruby``. For backwards compatibility,
+ ``BSON::DBRef`` is aliased as ``Mongo::DBRef``. The ``BSON::DBRef`` class
+ derives from ``BSON::Document``, unlike the legacy ``Mongo::DBRef`` which
+ derived from ``Object``. ``BSON::DBRef`` retains all attributes passed into
+ its constructor, unlike ``Mongo::DBRef`` which only allowed ``$ref``,
+ ``$id``, and ``$db``. ``BSON::DBRef`` also reorders the fields if
+ necessary to place ``$ref``, ``$id``, and ``$db`` first, in that order, as
+ required by the MongoDB server.
+- ``BulkWrite::Result`` class now has the ``acknowledged?`` attribute.
+- Providing an empty array of operations to the bulk write is now an error.
+- BSON serialization performance has been improved.
+- :ref:`ActiveJob middleware ` was added to
+ the query cache.
+- The ``:authorized_collections`` option is recognized when listing collections.
+- ``:wildcard_projection`` option was added to the allowed index specification.
+- Added ``:srv_max_hosts``/``srvMaxHosts`` Ruby and URI options to limit how
+ many mongos routers the driver will establish connections to.
+- Custom SRV service names are now supported with the ``:srv_service_name``
+ Ruby option and the ``srvServiceName`` URI option.
+- When 0 is given as the max connection pool size, it is now interpreted to
+ mean no limit.
+- The default maximum connection pool size has been increased to 20 from 5.
+
+This release adds support for JRuby 9.3.
+
+
+.. _release-notes-2.17:
+
+2.17
+====
+
+This release of the Ruby driver supports MongoDB version 5.1. It also increases
+the minimum required Ruby version to 2.5, and drops support for MongoDB
+versions older than 3.6.
+
+This release includes the following new features:
+
+- Added new readConcern level "snapshot" (non-speculative) for read commands outside of transactions, including on secondaries.
+- Support $merge and $out executing on secondaries.
+- Support 'let' option for aggregate and CRUD commands.
+
+The following bugs were fixed:
+
+- Push monitor thread can exit when address resolution fails.
+
+The following non-breaking changes were made:
+
+- mapReduce command is now deprecated.
+- Avoid tight looping in the push monitor.
+
+
+.. _release-notes-2.16:
+
+2.16
+====
+
+This release adds the following new feature:
+
+- Load balancer support.
+
+The following minor improvement has been made:
+
+- GridFS file retrieval no longer requires index creation privileges when
+ the indexes already exist, and is thus usable with users that have only
+ read permissions.
+
+This release of the Ruby driver increases the minimum required Ruby version
+to 2.4 and deprecates support for MongoDB server versions below 3.6.
+
+
+.. _release-notes-2.15:
+
+2.15
+====
+
+This release adds the following new features:
+
+- Ruby 3.0 support.
+- Ability to specify the :ref:`server API parameters `.
+- Support for Zstandard and Snappy :ref:`wire protocol compression `.
+- :ref:`Query cache middleware ` was moved to the
+ driver from Mongoid and is now usable in applications that do not use Mongoid.
+- It is now possible to create collections with time-series options.
+- Experimental support for `MongoDB Atlas Serverless
+ `_ when not using a
+ load balancer.
+
+The following smaller improvements have been made:
+
+- The ``OperationFailure`` exception message now contains the server error code
+ name, if provided by the server. The layout of the message was changed to
+ accommodate the error code name.
+- The generic SSL messaging has been removed from ``SocketError`` messages
+ when TLS is used. TLS connections to MongoDB are now the norm, with Atlas
+ requiring TLS, and it is more likely that a connection fails due to failed
+ certificate verification than due to the server not having TLS enabled.
+- A hook was added to permit applications to :ref:`modify the TLS context
+ ` used for TLS connections, for example to exclude
+ ciphers.
+- Heartbeat succeeded and heartbeat failed :ref:`server monitoring events
+ ` are now linked to the respective heartbeat started
+ event, to improve usability.
+- ``skip`` and ``limit`` options are now prohibited when calling
+ ``estimated_document_count``, because the server command does not accept them.
+- The driver will now omit command monitoring reply payloads when they are
+ in response to sensitive commands.
+- When the driver closes network sockets it now enforces the socket timeout.
+- ``estimated_document_count`` collection method now uses the ``$collStats``
+ aggregation pipeline stage instead of the count command on 5.0 and newer
+ servers.
+- The platform metadata sent by the driver to the server in the handshake
+ now includes the purpose of the connection being established, permitting
+ administrators to distinguish monitoring connections from application
+ connections.
+- The driver now uses monotonic clock for timeouts.
+- The driver will no longer mark servers unknown based on errors in
+ ``writeErrors`` field in the server response.
+- Server selection timeout for ``mongocryptd`` has been increased to 10 seconds.
+
+
+.. _release-notes-2.14:
+
+2.14
+====
+
+This release adds the following new features:
+
+- Queries against Atlas Data Lake are now supported.
+- The :ref:`query cache ` has been moved from Mongoid into the
+ driver. Mongoid will use the driver's query cache as of driver 2.14.
+ As part of the move, several issues with the query cache have been fixed
+ and its functionality was extended to cover aggregation pipeline queries
+ and to support result sets of any size.
+- Explain verbosity can now :ref:`be specified ` when explaining.
+- Mixed case read preference tag names are now supported.
+- The driver will perform :ref:`OCSP endpoint verification `
+ by default when TLS is enabled. Due to lack of support in Ruby's ``openssl``
+ extension, OCSP stapling is not yet implemented.
+
+The following smaller improvements have been made:
+
+- Default logger level for ``Client`` objects is now info (up from debug).
+ This reduces the amount of log output produced by the driver by default.
+- Database and collection write methods support specifying write concern for
+ the individual operations.
+- ``Client#summary`` method now shows the monitoring state of each server.
+- When objects other than hashes are attempted to be inserted (which is not
+ allowed), the driver now provides better diagnostics.
+- DNS queries for SRV URIs are now subject to configured socket timeouts.
+- When the ``Client`` object is reconnected, session pools are now cleared.
+
+Support for Ruby versions 2.3 and 2.4 has been deprecated as of this release.
+
+
+.. _release-notes-2.13:
+
+2.13
+====
+
+This release implements the necessary client-side functionality to use the
+features added in MongoDB 4.4. Specifically, the following new driver
+functionality has been added:
+
+- Support for the ``directConnection`` URI option to provide a consistent
+ cross-driver mechanism to discover deployment topology or force direct
+ connection.
+- Support for :ref:`MONGODB-AWS authentication mechanism `.
+- When SCRAM authentication is used with 4.4 and newer servers, the driver will
+ complete authentication with fewer network roundtrips.
+- The driver creates an additional monitoring connection for 4.4 and newer
+ servers, permitting the server to notify the driver when its state changes.
+ This reduces the time for the driver to discover the new primary during
+ failover events.
+- ``Client`` constructor can be given a block, in which case the client object
+ will be yielded to the block and automatically closed when the block ends.
+- ``start_session`` can be given a block, in which case the session object will
+ be yielded to the block and automatically ended when the block ends.
+- Write options can now be specified for individual CRUD operations.
+- The ``:allow_disk_use`` option was added to find operations.
+- The ``:authorized_databases`` option was added to ``list_databases``
+ method.
+- The ``list_collections`` method now passes through all options.
+- Ability to set an index :ref:`as hidden ` when creating it.
+- Ability to specify commit quorum when creating indexes.
+- ``:wrapping_libraries`` :ref:`client option `, to be used
+ by libraries like Mongoid which wrap the driver to report their version to
+ the server for troubleshooting/statistics aggregation purposes.
+
+The following smaller improvements have been made:
+
+- ``count_documents`` can now be invoked with no arguments.
+- The default TCP keep-alive time has been reduced to make the driver
+ correctly detect dropped connections on Microsoft Azure.
+- ``CursorNotFound`` is now a resumable change stream error.
+- The number of backtrace lines in exceptions handled by background threads
+ can now be configured.
+
+
+.. _release-notes-2.12:
+
+2.12
+====
+
+This release adds the following new features:
+
+- :ref:`Client-side encryption `.
+- ``list_collections`` method now accepts the ``:filter`` option.
+
+The following smaller improvements have been made:
+
+- Authentication exceptions now include server information to aid in
+ troubleshooting.
+
+
+.. _release-notes-2.11:
+
+2.11
+====
+
+This release adds the following new features:
+
+- If a minimum connection pool size is specified, the pool for each server
+ will create a background thread to eagerly establish connections up to
+ the specified minimum pool size.
+- If the driver connects to the deployment using a SRV URI and the deployment
+ is a sharded cluster, the driver will poll the SRV DNS records to
+ automatically discover new and removed mongos servers and adjust the
+ set of known servers accordingly.
+
+The following smaller improvements have been made:
+
+- The driver now permits unencoded subdelimiters in usernames and passwords in
+ MongoDB URIs.
+- User management helpers now accept the write concern option.
+- The :ref:`command monitoring ` logger provided with the
+ driver will now log connection ids used for each command.
+- When legacy read retries are used, the driver now retries on the same set of
+ server errors that the modern retries would have retried on.
+- The ``distinct(nil)`` call is prohibited because it is rejected by MongoDB
+ 4.4 and newer servers.
+
+This release of the Ruby driver increases the minimum required Ruby version
+to 2.3, as well as minimum supported JRuby version to 9.2.
+
+
+.. _release-notes-2.10:
+
+2.10
+====
+
+This release implements the necessary client-side functionality to use the
+features added in MongoDB 4.2. Specifically, the following new driver
+functionality has been added:
+
+- Support for sharded transactions.
+- Applications can set the ``:max_time_ms`` option in ``commit_transaction``
+ method.
+- Support for database-level aggregation.
+- Support for ``$merge`` aggregation pipeline stage.
+- The update operations now accept an aggregation pipeline as an array.
+- TLS renegotiation is now disabled when possible.
+- Change streams now handle post-batch resume tokens provided by the server.
+
+The following smaller improvements have been made:
+
+- All methods now accept ``:write_concern`` option for the write concern,
+ including those that previously accepted the ``:write`` option.
+- The query string in a MongoDB URI can now start with ``&``.
+
+Support for Ruby versions less than 2.3 is deprecated in this release.
+
+
+.. _release-notes-2.9:
+
+2.9
+===
+
+This release adds the following new features:
+
+- A rewrite of the connection pool code with improved monitoring,
+ compliant with the CMAP specification.
+- A modern retryable reads implementation compliant with the cross-driver
+ retryable reads specification, enabled by default.
+- Modern retryable writes are now enabled by default.
+- Legacy retryable writes can be disabled in most cases.
+- The driver now supports certificate chains being provided as client
+ certificates for TLS connections.
+- Ability to specify multiple CA certificates when creating a ``Client``.
+- Ability to pass the private key and certificate via URI options.
+
+The following smaller improvements have been made:
+
+- Support for the ``startAfter`` option in the ``$changeStream``
+ aggregation pipeline stage.
+- Field order of BSON documents sent to the server changed for better logging.
+- Certificate paths with unescaped slashes can now be specified in
+ MongoDB URIs.
+
+This release deprecates support for Ruby versions less than 2.3.
diff --git a/source/support.txt b/source/support.txt
new file mode 100644
index 000000000..b06675889
--- /dev/null
+++ b/source/support.txt
@@ -0,0 +1,15 @@
+*******
+Support
+*******
+
+.. default-domain:: mongodb
+
+Commercial support for the Ruby driver is available through the
+`MongoDB Support Portal `_.
+
+For questions, discussions or general technical support, please visit the
+`MongoDB Community Forum
+`_.
+
+Please see :manual:`Technical Support ` page
+in the documentation for other support resources.
diff --git a/source/tutorials.txt b/source/tutorials.txt
new file mode 100644
index 000000000..0972fba7f
--- /dev/null
+++ b/source/tutorials.txt
@@ -0,0 +1,19 @@
+.. _tutorials:
+
+*********
+Tutorials
+*********
+
+.. default-domain:: mongodb
+
+The tutorials in this section provide examples of some frequently used
+operations. This section is not meant to be an exhaustive list of all
+operations available in the Ruby driver.
+
+.. toctree::
+ :titlesonly:
+
+ tutorials/quick-start
+ tutorials/common-errors
+ tutorials/bson
+
diff --git a/source/tutorials/bson.txt b/source/tutorials/bson.txt
new file mode 100644
index 000000000..15d7d1d63
--- /dev/null
+++ b/source/tutorials/bson.txt
@@ -0,0 +1,957 @@
+.. https://www.mongodb.com/docs/ecosystem/tutorial/ruby-bson-tutorial/
+
+.. _ruby-bson-tutorial:
+
+*************
+BSON Tutorial
+*************
+
+.. default-domain:: mongodb
+
+.. contents:: On this page
+ :local:
+ :backlinks: none
+ :depth: 2
+ :class: twocols
+
+This tutorial discusses using the Ruby BSON library.
+
+Installation
+============
+
+The BSON library can be installed from `Rubygems `_
+manually or with bundler.
+
+To install the gem manually:
+
+.. code-block:: sh
+
+ gem install bson
+
+To install the gem with bundler, include the following in your ``Gemfile``:
+
+.. code-block:: ruby
+
+ gem 'bson'
+
+The BSON library is compatible with MRI >= 2.5 and JRuby >= 9.2.
+
+Use With ActiveSupport
+======================
+
+Serialization for ActiveSupport-defined classes, such as TimeWithZone, is
+not loaded by default to avoid a hard dependency of BSON on ActiveSupport.
+When using BSON in an application that also uses ActiveSupport, the
+ActiveSupport-related code must be explicitly required:
+
+.. code-block:: ruby
+
+ require 'bson'
+ require 'bson/active_support'
+
+BSON Serialization
+==================
+
+Getting a Ruby object's raw BSON representation is done by calling ``to_bson``
+on the Ruby object, which will return a ``BSON::ByteBuffer``. For example:
+
+.. code-block:: ruby
+
+ "Shall I compare thee to a summer's day".to_bson
+ 1024.to_bson
+
+Generating an object from BSON is done via calling ``from_bson`` on the class
+you wish to instantiate and passing it a ``BSON::ByteBuffer`` instance.
+
+.. code-block:: ruby
+
+ String.from_bson(byte_buffer)
+ BSON::Int32.from_bson(byte_buffer)
+
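+A quick round trip shows the two halves together. This is a minimal sketch that
+serializes a ``Hash`` and deserializes it again:
+
+.. code-block:: ruby
+
+ buffer = { name: 'Fleet Foxes' }.to_bson   # => BSON::ByteBuffer
+ bytes = buffer.to_s                        # raw BSON byte string
+
+ Hash.from_bson(BSON::ByteBuffer.new(bytes))
+ # => {"name"=>"Fleet Foxes"}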
+
+Byte Buffers
+============
+
+BSON library 4.0 introduces the use of native byte buffers in MRI and JRuby
+instead of using ``StringIO``, for improved performance.
+
+Writing
+-------
+
+To create a ``ByteBuffer`` for writing (i.e. serializing to BSON),
+instantiate ``BSON::ByteBuffer`` with no arguments:
+
+.. code-block:: ruby
+
+ buffer = BSON::ByteBuffer.new
+
+To write raw bytes to the byte buffer with no transformations, use
+``put_byte`` and ``put_bytes`` methods. They take a byte string as the argument
+and copy this string into the buffer. ``put_byte`` enforces that the argument
+is a string of length 1; ``put_bytes`` accepts any length strings.
+The strings can contain null bytes.
+
+.. code-block:: ruby
+
+ buffer.put_byte("\x00")
+
+ buffer.put_bytes("\xff\xfe\x00\xfd")
+
+.. note::
+
+ ``put_byte`` and ``put_bytes`` do not write a BSON type byte prior to
+ writing the argument to the byte buffer.
+
+Subsequent write methods write objects of particular types in the
+`BSON spec `_. Note that the type indicated
+by the method name takes precedence over the type of the argument -
+for example, if a floating-point value is given to ``put_int32``, it is
+coerced into an integer and the resulting integer is written to the byte
+buffer.
+
+To write a UTF-8 string (BSON type 0x02) to the byte buffer, use ``put_string``:
+
+.. code-block:: ruby
+
+ buffer.put_string("hello, world")
+
+Note that BSON strings are always encoded in UTF-8. Therefore, the
+argument must be either in UTF-8 or in an encoding convertible to UTF-8
+(i.e. not binary). If the argument is in an encoding other than UTF-8,
+the string is first converted to UTF-8 and the UTF-8 encoded version is
+written to the buffer. The string must be valid in its claimed encoding,
+including being valid UTF-8 if the encoding is UTF-8.
+The string may contain null bytes.
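+
+For example, a string in another encoding is converted to UTF-8 before being
+written. A minimal sketch:
+
+.. code-block:: ruby
+
+ buffer = BSON::ByteBuffer.new
+
+ # "café" in ISO-8859-1; put_string converts it to UTF-8 before writing.
+ str = String.new("caf\xE9", encoding: 'ISO-8859-1')
+ buffer.put_string(str)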
+
+The BSON specification also defines a CString type, which is used for
+example for document keys. To write CStrings to the buffer, use ``put_cstring``:
+
+.. code-block:: ruby
+
+ buffer.put_cstring("hello, world")
+
+As with regular strings, CStrings in BSON must be UTF-8 encoded. If the
+argument is not in UTF-8, it is converted to UTF-8 and the resulting string
+is written to the buffer. Unlike ``put_string``, the UTF-8 encoding of
+the argument given to ``put_cstring`` cannot have any null bytes, since the
+CString serialization format in BSON is null terminated.
+
+Unlike ``put_string``, ``put_cstring`` also accepts symbols and integers.
+In all cases the argument is stringified prior to being written:
+
+.. code-block:: ruby
+
+ buffer.put_cstring(:hello)
+ buffer.put_cstring(42)
+
+To write a 32-bit or a 64-bit integer to the byte buffer, use
+``put_int32`` and ``put_int64`` methods respectively. Note that Ruby
+integers can be arbitrarily large; if the value being written exceeds the
+range of a 32-bit or a 64-bit integer, ``put_int32`` and ``put_int64``
+raise ``RangeError``.
+
+.. code-block:: ruby
+
+ buffer.put_int32(12345)
+ buffer.put_int64(123456789012345)
+
+.. note::
+
+ If ``put_int32`` or ``put_int64`` are given floating point arguments,
+ the arguments are first coerced into integers and the integers are
+ written to the byte buffer.
+
+To write a 64-bit floating point value to the byte buffer, use ``put_double``:
+
+.. code-block:: ruby
+
+ buffer.put_double(3.14159)
+
+To obtain the serialized data as a byte string (for example, to send the data
+over a socket), call ``to_s`` on the buffer:
+
+.. code-block:: ruby
+
+ buffer = BSON::ByteBuffer.new
+ buffer.put_string('testing')
+ socket.write(buffer.to_s)
+
+.. note::
+
+ ``ByteBuffer`` keeps track of read and write positions separately.
+ There is no way to rewind the buffer for writing; ``rewind`` only affects
+ the read position.
+
+
+Reading
+-------
+
+To create a ``ByteBuffer`` for reading (i.e. deserializing from BSON),
+instantiate ``BSON::ByteBuffer`` with a byte string as the argument:
+
+.. code-block:: ruby
+
+ buffer = BSON::ByteBuffer.new(string) # a read mode buffer.
+
+Reading from the buffer is done via the following API:
+
+.. code-block:: ruby
+
+ buffer.get_byte # Pulls a single byte from the buffer.
+ buffer.get_bytes(value) # Pulls the given number of bytes from the buffer.
+ buffer.get_cstring # Pulls a null-terminated string from the buffer.
+ buffer.get_double # Pulls a 64-bit floating point from the buffer.
+ buffer.get_int32 # Pulls a 32-bit integer (4 bytes) from the buffer.
+ buffer.get_int64 # Pulls a 64-bit integer (8 bytes) from the buffer.
+ buffer.get_string # Pulls a UTF-8 string from the buffer.
+
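+For example, values written to one buffer can be read back, in the order they
+were written, from a new read-mode buffer created from its bytes. A minimal
+sketch:
+
+.. code-block:: ruby
+
+ write_buffer = BSON::ByteBuffer.new
+ write_buffer.put_int32(42)
+ write_buffer.put_string('hello, world')
+
+ # Create a read-mode buffer from the serialized bytes and read the
+ # values back in the same order they were written.
+ read_buffer = BSON::ByteBuffer.new(write_buffer.to_s)
+ read_buffer.get_int32   # => 42
+ read_buffer.get_string  # => "hello, world"
+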
+To restart reading from the beginning of a buffer, use ``rewind``:
+
+.. code-block:: ruby
+
+ buffer.rewind
+
+.. note::
+
+ ``ByteBuffer`` keeps track of read and write positions separately.
+ ``rewind`` only affects the read position.
+
+
+Supported Classes
+=================
+
+Core Ruby classes that have representations in the BSON specification and
+will have a ``to_bson`` method defined for them are: ``Object``, ``Array``,
+``FalseClass``, ``Float``, ``Hash``, ``Integer``, ``BigDecimal``, ``NilClass``,
+``Regexp``, ``String``, ``Symbol`` (deprecated), ``Time``, ``TrueClass``.
+
+In addition to the core Ruby objects, BSON also provides some special types
+specific to the specification:
+
+
+``BSON::Binary``
+----------------
+
+Use ``BSON::Binary`` objects to store arbitrary binary data. The ``Binary``
+objects can be constructed from binary strings as follows:
+
+.. code-block:: ruby
+
+ BSON::Binary.new("binary_string")
+ # =>
+
+By default, ``Binary`` objects are created with BSON binary subtype 0
+(``:generic``). The subtype can be explicitly specified to indicate that
+the bytes encode a particular type of data:
+
+.. code-block:: ruby
+
+ BSON::Binary.new("binary_string", :user)
+ # =>
+
+Valid subtypes are ``:generic``, ``:function``, ``:old``, ``:uuid_old``,
+``:uuid``, ``:md5`` and ``:user``.
+
+The data and the subtype can be retrieved from ``Binary`` instances using
+``data`` and ``type`` attributes, as follows:
+
+.. code-block:: ruby
+
+ binary = BSON::Binary.new("binary_string", :user)
+ binary.data
+ => "binary_string"
+ binary.type
+ => :user
+
+.. note::
+
+ ``BSON::Binary`` objects always store the data in ``BINARY`` encoding,
+ regardless of the encoding that the string passed to the constructor
+ was in:
+
+ .. code-block:: ruby
+
+ str = "binary_string"
+ str.encoding
+ # => #
+ binary = BSON::Binary.new(str)
+ binary.data
+ # => "binary_string"
+ binary.data.encoding
+ # => #
+
+UUID Methods
+````````````
+
+To create a UUID BSON::Binary (binary subtype 4) from its RFC 4122-compliant
+string representation, use the ``from_uuid`` method:
+
+.. code-block:: ruby
+
+ uuid_str = "00112233-4455-6677-8899-aabbccddeeff"
+ BSON::Binary.from_uuid(uuid_str)
+ # =>
+
+To stringify a UUID BSON::Binary to an RFC 4122-compliant representation,
+use the ``to_uuid`` method:
+
+.. code-block:: ruby
+
+ binary = BSON::Binary.new("\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xAA\xBB\xCC\xDD\xEE\xFF".force_encoding('BINARY'), :uuid)
+ =>
+ binary.to_uuid
+ => "00112233-4455-6677-8899aabbccddeeff"
+
+The standard representation may be explicitly specified when invoking both
+``from_uuid`` and ``to_uuid`` methods:
+
+.. code-block:: ruby
+
+ binary = BSON::Binary.from_uuid(uuid_str, :standard)
+ binary.to_uuid(:standard)
+
+Note that the ``:standard`` representation can only be used with a Binary
+of subtype ``:uuid`` (not ``:uuid_old``).
+
+Legacy UUIDs
+````````````
+
+Data stored in BSON::Binary objects of subtype 3 (``:uuid_old``) may be
+persisted in one of three different byte orders depending on which driver
+created the data. The byte orders are CSharp legacy, Java legacy and Python
+legacy. The Python legacy byte order is the same as the standard RFC 4122
+byte order; CSharp legacy and Java legacy byte orders have some of the bytes
+swapped.
+
+The Binary object containing a legacy UUID does not encode *which* format
+the UUID is stored in. Therefore, methods that convert to and from the legacy
+UUID format take the desired format, or representation, as their argument.
+An application may copy legacy UUID Binary objects without knowing which byte
+order they store their data in.
+
+The following methods for working with legacy UUIDs are provided for
+interoperability with existing deployments storing data in legacy UUID formats.
+It is recommended that new applications use the ``:uuid`` (subtype 4) format
+only, which is compliant with RFC 4122.
+
+To stringify a legacy UUID BSON::Binary, use the ``to_uuid`` method specifying
+the desired representation. Accepted representations are ``:csharp_legacy``,
+``:java_legacy`` and ``:python_legacy``. Note that a legacy UUID BSON::Binary
+cannot be stringified without specifying a representation.
+
+.. code-block:: ruby
+
+ binary = BSON::Binary.new("\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xAA\xBB\xCC\xDD\xEE\xFF".force_encoding('BINARY'), :uuid_old)
+ =>
+
+ binary.to_uuid
+ # => ArgumentError (Representation must be specified for BSON::Binary objects of type :uuid_old)
+
+ binary.to_uuid(:csharp_legacy)
+ # => "33221100-5544-7766-8899aabbccddeeff"
+
+ binary.to_uuid(:java_legacy)
+ # => "77665544-3322-1100-ffeeddccbbaa9988"
+
+ binary.to_uuid(:python_legacy)
+ # => "00112233-4455-6677-8899aabbccddeeff"
+
+To create a legacy UUID BSON::Binary from the string representation of the
+UUID, use the ``from_uuid`` method specifying the desired representation:
+
+.. code-block:: ruby
+
+ uuid_str = "00112233-4455-6677-8899-aabbccddeeff"
+
+ BSON::Binary.from_uuid(uuid_str, :csharp_legacy)
+ # =>
+
+ BSON::Binary.from_uuid(uuid_str, :java_legacy)
+ # =>
+
+ BSON::Binary.from_uuid(uuid_str, :python_legacy)
+ # =>
+
+These methods can be used to convert from one representation to another:
+
+.. code-block:: ruby
+
+ BSON::Binary.from_uuid('77665544-3322-1100-ffeeddccbbaa9988',:java_legacy).to_uuid(:csharp_legacy)
+ # => "33221100-5544-7766-8899aabbccddeeff"
+
+
+``BSON::Code``
+--------------
+
+Represents a string of JavaScript code.
+
+.. code-block:: ruby
+
+ BSON::Code.new("this.value = 5;")
+
+``BSON::CodeWithScope``
+-----------------------
+
+.. note::
+
+ The ``CodeWithScope`` type is deprecated as of MongoDB 4.2.1. Starting
+ with MongoDB 4.4, support for ``CodeWithScope`` is being removed from
+ various server commands and operators such as ``$where``. Please use
+ other BSON types and operators when working with MongoDB 4.4 and newer.
+
+Represents a string of JavaScript code with a hash of values.
+
+.. code-block:: ruby
+
+ BSON::CodeWithScope.new("this.value = age;", age: 5)
+
+
+``BSON::DBRef``
+---------------
+
+This is a subclass of ``BSON::Document`` that provides accessors for the
+collection, id, and database of the DBRef.
+
+.. code-block:: ruby
+
+ BSON::DBRef.new({"$ref" => "collection", "$id" => "id"})
+ BSON::DBRef.new({"$ref" => "collection", "$id" => "id", "database" => "db"})
+
+.. note::
+
+ The BSON::DBRef constructor will validate the given hash and will raise an ArgumentError
+ if it is not a valid DBRef. ``BSON::ExtJSON.parse_obj`` and ``Hash.from_bson`` will not
+ raise an error if given an invalid DBRef, and will parse a Hash or deserialize a
+ BSON::Document instead.
+
+.. note::
+
+ All BSON documents are deserialized into instances of BSON::DBRef if they are
+ valid DBRefs, otherwise they are deserialized into instances of BSON::Document.
+ This is true even when the invocation is made from the ``Hash`` class:
+
+ .. code-block:: ruby
+
+ bson = {"$ref" => "collection", "$id" => "id"}.to_bson.to_s
+ loaded = Hash.from_bson(BSON::ByteBuffer.new(bson))
+ => {"$ref"=>"collection", "$id"=>"id"}
+ loaded.class
+ => BSON::DBRef
+
+For backwards compatibility with MongoDB Ruby driver versions 2.17 and
+earlier, ``BSON::DBRef`` can also be constructed using the legacy driver API.
+This API is deprecated and will be removed in a future version of ``bson-ruby``:
+
+.. code-block:: ruby
+
+   BSON::DBRef.new("collection", BSON::ObjectId.from_string('61eeb760a15d5d0f9f1e401d'))
+   BSON::DBRef.new("collection", BSON::ObjectId.from_string('61eeb760a15d5d0f9f1e401d'), "db")
+
+
+``BSON::Document``
+------------------
+
+This is a subclass of ``Hash`` that stores all keys as strings, but allows
+access to them with symbol keys.
+
+.. code-block:: ruby
+
+ BSON::Document[:key, "value"]
+ BSON::Document.new
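+
+A small sketch of the behavior described above: keys are stored as strings,
+but values can be read back using either string or symbol keys:
+
+.. code-block:: ruby
+
+   doc = BSON::Document.new
+   doc[:title] = "Ruby"
+
+   doc.keys
+   # => ["title"]
+
+   doc["title"]
+   # => "Ruby"
+
+   doc[:title]
+   # => "Ruby"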
+
+.. note::
+
+ All BSON documents are deserialized into instances of BSON::Document
+ (or BSON::DBRef, if they happen to be a valid DBRef), even when the
+ invocation is made from the ``Hash`` class:
+
+ .. code-block:: ruby
+
+ bson = {test: 1}.to_bson.to_s
+ loaded = Hash.from_bson(BSON::ByteBuffer.new(bson))
+ => {"test"=>1}
+ loaded.class
+ => BSON::Document
+
+
+``BSON::MaxKey``
+----------------
+
+Represents a value in BSON that will always compare higher than any other
+value.
+
+.. code-block:: ruby
+
+ BSON::MaxKey.new
+
+``BSON::MinKey``
+----------------
+
+Represents a value in BSON that will always compare lower than any other
+value.
+
+.. code-block:: ruby
+
+ BSON::MinKey.new
+
+``BSON::ObjectId``
+------------------
+
+Represents a 12-byte unique identifier for an object on a given machine.
+
+.. code-block:: ruby
+
+ BSON::ObjectId.new
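+
+A short sketch of common ``BSON::ObjectId`` operations (the id string below is
+the same illustrative value used elsewhere in this tutorial):
+
+.. code-block:: ruby
+
+   oid = BSON::ObjectId.new
+
+   oid.to_s
+   # => a 24-character hexadecimal string, e.g. "61eeb760a15d5d0f9f1e401d"
+
+   oid.generation_time
+   # => the Time at which the id was generated
+
+   BSON::ObjectId.from_string('61eeb760a15d5d0f9f1e401d')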
+
+``BSON::Timestamp``
+-------------------
+
+Represents a special internal MongoDB timestamp type, consisting of a seconds
+value and an increment value.
+
+.. code-block:: ruby
+
+ BSON::Timestamp.new(5, 30)
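+
+A small sketch showing the two components, assuming the ``seconds`` and
+``increment`` readers:
+
+.. code-block:: ruby
+
+   ts = BSON::Timestamp.new(5, 30)
+
+   ts.seconds
+   # => 5
+
+   ts.increment
+   # => 30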
+
+``BSON::Undefined``
+-------------------
+
+Represents a placeholder for a value that was not provided.
+
+.. code-block:: ruby
+
+ BSON::Undefined.new
+
+``BSON::Decimal128``
+--------------------
+
+Represents a 128-bit decimal-based floating-point value capable of emulating
+decimal rounding with exact precision.
+
+.. code-block:: ruby
+
+ # Instantiate with a String
+ BSON::Decimal128.new("1.28")
+
+ # Instantiate with a BigDecimal
+ d = BigDecimal(1.28, 3)
+ BSON::Decimal128.new(d)
+
+BSON::Decimal128 vs BigDecimal
+``````````````````````````````
+The ``BigDecimal`` ``from_bson`` and ``to_bson`` methods use the same
+``BSON::Decimal128`` methods under the hood. This leads to some limitations
+that are imposed on the ``BigDecimal`` values that can be serialized to BSON
+and those that can be deserialized from existing ``decimal128`` BSON
+values. This change was made because serializing ``BigDecimal`` instances as
+``BSON::Decimal128`` instances allows for more flexibility in terms of querying
+and aggregation in MongoDB. The limitations imposed on ``BigDecimal`` are as
+follows:
+
+- ``decimal128`` has a limited range and precision, while ``BigDecimal`` has no
+ restrictions in terms of range and precision. ``decimal128`` has a max value
+ of approximately ``10^6145`` and a min value of approximately ``-10^6145``,
+  and has a maximum of 34 significant decimal digits of precision.
+
+- ``decimal128`` is able to accept signed ``NaN`` values, while ``BigDecimal``
+ is not. All signed ``NaN`` values that are deserialized into ``BigDecimal``
+ instances will be unsigned.
+
+- ``decimal128`` maintains trailing zeroes when serializing to and
+  deserializing from BSON. ``BigDecimal``, however, does not maintain trailing
+  zeroes, and therefore using ``BigDecimal`` may result in a loss of precision,
+  as shown in the example below.
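+
+For instance, a minimal sketch of the trailing zero behavior:
+
+.. code-block:: ruby
+
+   BSON::Decimal128.new("1.280").to_s
+   # => "1.280"
+
+   BigDecimal("1.280").to_s
+   # => "0.128e1"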
+
+.. note::
+
+  In version 5.0 of the BSON library, ``decimal128`` is deserialized into
+  ``BigDecimal`` by default. In order to have ``decimal128`` values in BSON
+  documents deserialized into ``BSON::Decimal128``, the ``mode: :bson`` option
+  can be set on ``from_bson``.
+
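+For example, the following sketch (the ``price`` field name is illustrative)
+shows the effect of the ``mode: :bson`` option described in the note above:
+
+.. code-block:: ruby
+
+   bson = {price: BSON::Decimal128.new("1.28")}.to_bson.to_s
+
+   Hash.from_bson(BSON::ByteBuffer.new(bson))['price'].class
+   # => BigDecimal
+
+   Hash.from_bson(BSON::ByteBuffer.new(bson), mode: :bson)['price'].class
+   # => BSON::Decimal128
+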
+``Symbol``
+----------
+
+The BSON specification defines a symbol type which allows round-tripping
+Ruby ``Symbol`` values (i.e., a Ruby ``Symbol`` is encoded into a BSON symbol
+and a BSON symbol is decoded into a Ruby ``Symbol``). However, since most
+programming languages do not have a native symbol type, to promote
+interoperability, MongoDB deprecated the BSON symbol type and encourages
+strings to be used instead.
+
+.. note::
+
+ In BSON, hash *keys* are always strings. Non-string values will be
+ stringified when used as hash keys:
+
+ .. code-block:: ruby
+
+ Hash.from_bson({foo: 'bar'}.to_bson)
+ # => {"foo"=>"bar"}
+
+ Hash.from_bson({1 => 2}.to_bson)
+ # => {"1"=>2}
+
+By default, the BSON library encodes ``Symbol`` hash values as strings and
+decodes BSON symbols into Ruby ``Symbol`` values:
+
+.. code-block:: ruby
+
+ {foo: :bar}.to_bson.to_s
+ # => "\x12\x00\x00\x00\x02foo\x00\x04\x00\x00\x00bar\x00\x00"
+
+ # 0x02 is the string type
+ Hash.from_bson(BSON::ByteBuffer.new("\x12\x00\x00\x00\x02foo\x00\x04\x00\x00\x00bar\x00\x00".force_encoding('BINARY')))
+ # => {"foo"=>"bar"}
+
+ # 0x0E is the symbol type
+ Hash.from_bson(BSON::ByteBuffer.new("\x12\x00\x00\x00\x0Efoo\x00\x04\x00\x00\x00bar\x00\x00".force_encoding('BINARY')))
+ # => {"foo"=>:bar}
+
+To force encoding of Ruby symbols to BSON symbols, wrap the Ruby symbols in
+``BSON::Symbol::Raw``:
+
+.. code-block:: ruby
+
+ {foo: BSON::Symbol::Raw.new(:bar)}.to_bson.to_s
+ # => "\x12\x00\x00\x00\x0Efoo\x00\x04\x00\x00\x00bar\x00\x00"
+
+JSON Serialization
+==================
+
+Some BSON types have special representations in JSON, as shown in the
+following table. These values are serialized in this form automatically when
+``to_json`` is called on them.
+
+.. list-table::
+ :header-rows: 1
+ :widths: 40 105
+
+ * - Object
+ - JSON
+
+ * - ``BSON::Binary``
+ - ``{ "$binary" : "\x01", "$type" : "md5" }``
+
+ * - ``BSON::Code``
+ - ``{ "$code" : "this.v = 5" }``
+
+ * - ``BSON::CodeWithScope``
+    - ``{ "$code" : "this.v = value", "$scope" : { "v" : 5 } }``
+
+ * - ``BSON::DBRef``
+ - ``{ "$ref" : "collection", "$id" : { "$oid" : "id" }, "$db" : "database" }``
+
+ * - ``BSON::MaxKey``
+ - ``{ "$maxKey" : 1 }``
+
+ * - ``BSON::MinKey``
+ - ``{ "$minKey" : 1 }``
+
+ * - ``BSON::ObjectId``
+ - ``{ "$oid" : "4e4d66343b39b68407000001" }``
+
+ * - ``BSON::Timestamp``
+ - ``{ "t" : 5, "i" : 30 }``
+
+ * - ``Regexp``
+ - ``{ "$regex" : "[abc]", "$options" : "i" }``
+
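+For example, calling ``to_json`` directly (a minimal sketch; ``require 'json'``
+is needed, and the exact whitespace of the generated JSON may differ):
+
+.. code-block:: ruby
+
+   require 'json'
+
+   BSON::ObjectId.from_string('4e4d66343b39b68407000001').to_json
+   # => "{\"$oid\":\"4e4d66343b39b68407000001\"}"
+
+   BSON::MaxKey.new.to_json
+   # => "{\"$maxKey\":1}"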
+
+Time Instances
+==============
+
+Times in Ruby can have nanosecond precision. Times in BSON (and MongoDB)
+can only have millisecond precision. When Ruby ``Time`` instances are
+serialized to BSON or Extended JSON, the times are floored to the nearest
+millisecond.
+
+.. note::
+
+  The time is always rounded down. If the time precedes the Unix epoch
+ (January 1, 1970 00:00:00 UTC), the absolute value of the time would
+ increase:
+
+ .. code-block:: ruby
+
+ time = Time.utc(1960, 1, 1, 0, 0, 0, 999_999)
+ time.to_f
+ # => -315619199.000001
+ time.floor(3).to_f
+ # => -315619199.001
+
+.. note::
+
+  JRuby, as of version 9.2.11.0, rounds pre-Unix epoch times up rather than
+  down. bson-ruby works around this and correctly floors the times when
+  serializing on JRuby.
+
+Because of this flooring, applications are strongly recommended to perform
+all time calculations using integer math, as inexactness of floating point
+calculations may produce unexpected results.
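+
+The following sketch round-trips a ``Time`` through BSON to illustrate the
+flooring (the ``created_at`` field name is illustrative):
+
+.. code-block:: ruby
+
+   time = Time.utc(2021, 1, 1, 0, 0, 0, 123_456)   # microsecond precision
+   bson = {created_at: time}.to_bson.to_s
+   loaded = Hash.from_bson(BSON::ByteBuffer.new(bson))['created_at']
+
+   loaded == time
+   # => false, the sub-millisecond digits were dropped
+
+   loaded == time.floor(3)
+   # => true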
+
+
+DateTime Instances
+==================
+
+BSON only supports storing the time as the number of milliseconds since the
+Unix epoch. Ruby's ``DateTime`` instances can be serialized to BSON,
+but when the BSON is deserialized the times will be returned as
+``Time`` instances.
+
+The ``DateTime`` class in Ruby supports non-Gregorian calendars. When
+non-Gregorian ``DateTime`` instances are serialized, they are first converted
+to the Gregorian calendar, and the corresponding date in the Gregorian
+calendar is stored in the database.
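+
+For example (a minimal sketch; the ``created_at`` field name is illustrative):
+
+.. code-block:: ruby
+
+   dt = DateTime.new(2021, 1, 1, 12, 0, 0)
+   bson = {created_at: dt}.to_bson.to_s
+
+   Hash.from_bson(BSON::ByteBuffer.new(bson))['created_at']
+   # => 2021-01-01 12:00:00 UTC (a Time instance, not a DateTime)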
+
+
+Date Instances
+==============
+
+BSON only supports storing the time as the number of milliseconds since the
+Unix epoch. Ruby's ``Date`` instances can be serialized to BSON, but when
+the BSON is deserialized the times will be returned as ``Time`` instances.
+
+When ``Date`` instances are serialized, the time value used is midnight
+of the day that the ``Date`` refers to in UTC.
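+
+For example (a minimal sketch; the ``born_on`` field name is illustrative):
+
+.. code-block:: ruby
+
+   bson = {born_on: Date.new(2021, 1, 1)}.to_bson.to_s
+
+   Hash.from_bson(BSON::ByteBuffer.new(bson))['born_on']
+   # => 2021-01-01 00:00:00 UTC (a Time instance at midnight UTC)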
+
+
+Regular Expressions
+===================
+
+Both MongoDB and Ruby provide facilities for working with regular expressions,
+but they use different regular expression engines. The following subsections
+detail the differences between Ruby regular expressions and MongoDB regular
+expressions and describe how to work with both.
+
+Ruby vs MongoDB Regular Expressions
+-----------------------------------
+
+The MongoDB server uses Perl-compatible regular expressions implemented using
+the PCRE library, while Ruby regular expressions are implemented using the
+Onigmo regular expression engine,
+which is a fork of `Oniguruma