diff --git a/NuGet.config b/NuGet.config
index 3b6333eeb72..f951f89147f 100644
--- a/NuGet.config
+++ b/NuGet.config
@@ -1,7 +1,7 @@
-
+
-
-
+
+
\ No newline at end of file
diff --git a/docs/asciidoc/Aggregations/Bucket/Children/ChildrenAggregationMapping.doc.asciidoc b/docs/asciidoc/Aggregations/Bucket/Children/ChildrenAggregationMapping.doc.asciidoc
deleted file mode 100644
index e03f8eb564a..00000000000
--- a/docs/asciidoc/Aggregations/Bucket/Children/ChildrenAggregationMapping.doc.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-To use the child aggregation you have to make sure
-a `_parent` mapping is in place. Here we create the project
-index with two mapped types, `project` and `commitactivity`, and
-we add a `_parent` mapping from `commitactivity` to `project`
-
-[source, csharp]
-----
-var createProjectIndex = TestClient.GetClient().CreateIndex(typeof(Project), c => c
- .Mappings(map=>map
- .Map<Project>(m=>m.AutoMap())
- .Map<CommitActivity>(m=>m
-  .Parent<Project>()
- )
- )
-);
-----
diff --git a/docs/asciidoc/Aggregations/WritingAggregations.doc.asciidoc b/docs/asciidoc/Aggregations/WritingAggregations.doc.asciidoc
deleted file mode 100644
index 7f2a8739c7d..00000000000
--- a/docs/asciidoc/Aggregations/WritingAggregations.doc.asciidoc
+++ /dev/null
@@ -1,78 +0,0 @@
-Aggregations are arguably one of the most powerful features of Elasticsearch.
-NEST allows you to write your aggregations using a strict fluent DSL, a verbatim object initializer
-syntax that maps verbatim to the elasticsearch API,
-or a more terse object initializer aggregation DSL.
-
-Three different ways, yikes that's a lot to take in! Let's go over them one by one and explain when you might
-want to use which one.
-
-The fluent lambda syntax is the most terse way to write aggregations.
-It benefits from types that are carried over to sub aggregations.
-
-[source, csharp]
-----
-s => s
-.Aggregations(aggs => aggs
- .Children<CommitActivity>("name_of_child_agg", child => child
- .Aggregations(childAggs => childAggs
- .Average("average_per_child", avg => avg.Field(p => p.ConfidenceFactor))
- .Max("max_per_child", avg => avg.Field(p => p.ConfidenceFactor))
- )
- )
-)
-----
-The object initializer syntax (OIS) is a one-to-one mapping with how aggregations
-have to be represented in the Elasticsearch API. While it has the benefit of being a one-to-one
-mapping, being dictionary based in C# means it can grow in complexity rather quickly.
-
-[source, csharp]
-----
-new SearchRequest
-{
- Aggregations = new ChildrenAggregation("name_of_child_agg", typeof(CommitActivity))
- {
- Aggregations =
- new AverageAggregation("average_per_child", "confidenceFactor")
- && new MaxAggregation("max_per_child", "confidenceFactor")
- }
-}
-----
-For this reason the OIS syntax can be shortened dramatically by using the `*Agg` related family of types.
-These allow you to forego introducing intermediary Dictionaries to represent the aggregation DSL.
-It also allows you to combine multiple aggregations using the bitwise AND
-(`&&`) operator.
-
-Compare the following example with the previous vanilla OIS syntax
-
-[source, csharp]
-----
-new SearchRequest
-{
- Aggregations = new ChildrenAggregation("name_of_child_agg", typeof(CommitActivity))
- {
- Aggregations =
-  new AverageAggregation("average_per_child", Field<CommitActivity>(p => p.ConfidenceFactor))
-  && new MaxAggregation("max_per_child", Field<CommitActivity>(p => p.ConfidenceFactor))
- }
-}
-----
-An advanced scenario may involve an existing collection of aggregation functions that should be set as aggregations
-on the request. Using LINQ's `.Aggregate()` method, each function can be applied to the aggregation descriptor
-(`childAggs` below) in turn, returning the descriptor after each function application.
-
-[source, csharp]
-----
-var aggregations = new List<Func<AggregationContainerDescriptor<CommitActivity>, IAggregationContainer>>
-{
- a => a.Average("average_per_child", avg => avg.Field(p => p.ConfidenceFactor)),
- a => a.Max("max_per_child", avg => avg.Field(p => p.ConfidenceFactor))
-};
-return s => s
- .Aggregations(aggs => aggs
- .Children<CommitActivity>("name_of_child_agg", child => child
- .Aggregations(childAggs =>
- aggregations.Aggregate(childAggs, (acc, agg) => { agg(acc); return acc; })
- )
- )
- );
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/ConnectionPooling.Doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/ConnectionPooling.Doc.asciidoc
deleted file mode 100644
index 8d4ec9d1de7..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/ConnectionPooling.Doc.asciidoc
+++ /dev/null
@@ -1,161 +0,0 @@
-= Connection Pooling
-Connection pooling is the internal mechanism that takes care of registering what nodes there are in the cluster and which
-we can use to issue client calls on.
-
-
-== SingleNodeConnectionPool
-The simplest of all connection pools, this takes a single `Uri` and uses that to connect to elasticsearch for all the calls.
-It doesn't opt in to sniffing and pinging behavior, and will never mark nodes dead or alive. The one `Uri` it holds is always
-ready to go.
-
-
-[source, csharp]
-----
-var uri = new Uri("http://localhost:9201");
-var pool = new SingleNodeConnectionPool(uri);
-pool.Nodes.Should().HaveCount(1);
-var node = pool.Nodes.First();
-node.Uri.Port.Should().Be(9201);
-----
-This type of pool is hardwired to opt out of sniffing
-
-[source, csharp]
-----
-pool.SupportsReseeding.Should().BeFalse();
-----
-and pinging
-
-[source, csharp]
-----
-pool.SupportsPinging.Should().BeFalse();
-----
-When you use the low ceremony `ElasticClient` constructor that takes a single `Uri`,
-we default to this `SingleNodeConnectionPool`.
-
-[source, csharp]
-----
-var client = new ElasticClient(uri);
-----
-[source, csharp]
-----
-client.ConnectionSettings.ConnectionPool.Should().BeOfType<SingleNodeConnectionPool>();
-----
-However, we urge you to always pass your connection settings explicitly
-
-[source, csharp]
-----
-client = new ElasticClient(new ConnectionSettings(uri));
-----
-[source, csharp]
-----
-client.ConnectionSettings.ConnectionPool.Should().BeOfType<SingleNodeConnectionPool>();
-----
-or even better pass the connection pool explicitly
-
-[source, csharp]
-----
-client = new ElasticClient(new ConnectionSettings(pool));
-----
-[source, csharp]
-----
-client.ConnectionSettings.ConnectionPool.Should().BeOfType<SingleNodeConnectionPool>();
-----
-== StaticConnectionPool
-The static connection pool is great if you have a known small-sized cluster and do not want to enable
-sniffing to find out the cluster topology.
-
-
-[source, csharp]
-----
-var uris = Enumerable.Range(9200, 5).Select(p => new Uri("http://localhost:" + p));
-----
-a connection pool can be seeded using an enumerable of `Uri`s
-
-[source, csharp]
-----
-var pool = new StaticConnectionPool(uris);
-----
-Or using an enumerable of `Node`s
-
-[source, csharp]
-----
-var nodes = uris.Select(u=>new Node(u));
-----
-[source, csharp]
-----
-pool = new StaticConnectionPool(nodes);
-----
-This type of pool is hardwired to opt out of sniffing
-
-[source, csharp]
-----
-pool.SupportsReseeding.Should().BeFalse();
-----
-but supports pinging when enabled
-
-[source, csharp]
-----
-pool.SupportsPinging.Should().BeTrue();
-----
-To create a client using this static connection pool, pass
-the connection pool to the `ConnectionSettings` you pass to `ElasticClient`.
-
-[source, csharp]
-----
-var client = new ElasticClient(new ConnectionSettings(pool));
-----
-[source, csharp]
-----
-client.ConnectionSettings.ConnectionPool.Should().BeOfType<StaticConnectionPool>();
-----
-== SniffingConnectionPool
-A subclass of StaticConnectionPool that allows itself to be reseeded at run time.
-It comes with a very minor overhead of a `ReaderWriterLockSlim` to ensure thread safety.
-
-
-[source, csharp]
-----
-var uris = Enumerable.Range(9200, 5).Select(p => new Uri("http://localhost:" + p));
-----
-a connection pool can be seeded using an enumerable of `Uri`s
-
-[source, csharp]
-----
-var pool = new SniffingConnectionPool(uris);
-----
-Or using an enumerable of `Node`s.
-A major benefit here is you can include known node roles when seeding, as sketched below.
-NEST can use this information to favour sniffing on master eligible nodes first
-and take master-only nodes out of rotation for issuing client calls on.
-
-[source, csharp]
-----
-var nodes = uris.Select(u=>new Node(u));
-----
-[source, csharp]
-----
-pool = new SniffingConnectionPool(nodes);
-----
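-As a sketch of what seeding with known roles might look like (the port-based role assignment here is
-purely illustrative, not part of the test above), a `Node` can be created with its roles set up front:
-
-[source, csharp]
-----
-// hypothetical example: mark 9202-9204 as master eligible while seeding the pool
-var seededNodes = uris.Select(u => new Node(u) { MasterEligible = u.Port >= 9202 });
-var seededPool = new SniffingConnectionPool(seededNodes);
-----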
-This type of pool is hardwired to opt in to sniffing
-
-[source, csharp]
-----
-pool.SupportsReseeding.Should().BeTrue();
-----
-and pinging
-
-[source, csharp]
-----
-pool.SupportsPinging.Should().BeTrue();
-----
-To create a client using the sniffing connection pool, pass
-the connection pool to the `ConnectionSettings` you pass to `ElasticClient`.
-
-[source, csharp]
-----
-var client = new ElasticClient(new ConnectionSettings(pool));
-----
-[source, csharp]
-----
-client.ConnectionSettings.ConnectionPool.Should().BeOfType<SniffingConnectionPool>();
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/Transports.Doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/Transports.Doc.asciidoc
deleted file mode 100644
index 359ea30aefc..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/Transports.Doc.asciidoc
+++ /dev/null
@@ -1,39 +0,0 @@
-= Transports
-
-The `ITransport` interface can be seen as the motor block of the client. Its interface is deceptively simple.
-It's ultimately responsible for translating a client call into a response. If for some reason you do not agree with the way we wrote
-the internals of the client, by implementing a custom `ITransport`, you can circumvent all of it and introduce your own.
-
-
-
-`Transport` is generically typed to a type that implements `IConnectionConfigurationValues`.
-This is the minimum `ITransport` needs to report back for the client to function.
-e.g. in the low level client, transport is instantiated like this:
-
-[source, csharp]
-----
-var lowLevelTransport = new Transport<ConnectionConfiguration>(new ConnectionConfiguration());
-----
-In the high level client like this:
-
-[source, csharp]
-----
-var highlevelTransport = new Transport<ConnectionSettings>(new ConnectionSettings());
-----
-[source, csharp]
-----
-var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200"));
-var inMemoryTransport = new Transport<ConnectionSettings>(new ConnectionSettings(connectionPool, new InMemoryConnection()));
-----
-The only two methods on `ITransport` are `Request()` and `RequestAsync()`; the default `ITransport` implementation is responsible for introducing
-many of the building blocks in the client. If these do not work for you, you can swap them out for your own custom `ITransport` implementation.
-If you feel this need, please let us know as we'd love to learn why you've gone down this route!
-
-[source, csharp]
-----
-var response = inMemoryTransport.Request<SearchResponse<Project>>(HttpMethod.GET, "/_search", new { query = new { match_all = new { } } });
-----
-[source, csharp]
-----
-response = await inMemoryTransport.RequestAsync<SearchResponse<Project>>(HttpMethod.GET, "/_search", new { query = new { match_all = new { } } });
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnexpectedExceptions.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnexpectedExceptions.doc.asciidoc
deleted file mode 100644
index 1dc333afec6..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnexpectedExceptions.doc.asciidoc
+++ /dev/null
@@ -1,114 +0,0 @@
-== Unexpected exceptions
-When a client call throws an exception that the `IConnection` can not handle, this exception will bubble
-out of the client as an `UnexpectedElasticsearchClientException`, regardless of whether the client is configured to throw or not.
-An `IConnection` is in charge of knowing what exceptions it can recover from. The default `IConnection`, based on `WebRequest`, can and
-will recover from `WebException`s but others will be grounds for immediately exiting the pipeline.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.SucceedAlways())
- .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!")))
- .StaticConnectionPool()
- .Settings(s => s.DisablePing())
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { AuditEvent.HealthyResponse, 9200 },
- }
- );
-audit = await audit.TraceUnexpectedException(
- new ClientCall {
- { AuditEvent.BadResponse, 9201 },
- },
- (e) =>
- {
- e.FailureReason.Should().Be(PipelineFailure.Unexpected);
- e.InnerException.Should().NotBeNull();
- e.InnerException.Message.Should().Be("boom!");
- }
- );
-e.FailureReason.Should().Be(PipelineFailure.Unexpected);
-e.InnerException.Should().NotBeNull();
-e.InnerException.Message.Should().Be("boom!");
-----
-
-Sometimes an unexpected exception happens further down in the pipeline. This is why we
-wrap them inside an `UnexpectedElasticsearchClientException`, so that information about where
-in the pipeline the unexpected exception happened is not lost. Here a call to 9200 fails with a `WebException`.
-It then falls over to 9201 which throws a hard exception from within `IConnection`. We assert that we
-can still see the audit trail for the whole coordinated request.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
-#if DOTNETCORE
- .ClientCalls(r => r.OnPort(9200).FailAlways(new System.Net.Http.HttpRequestException("recover")))
-#else
- .ClientCalls(r => r.OnPort(9200).FailAlways(new WebException("recover")))
-#endif
- .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!")))
- .StaticConnectionPool()
- .Settings(s => s.DisablePing())
- );
-audit = await audit.TraceUnexpectedException(
- new ClientCall {
- { AuditEvent.BadResponse, 9200 },
- { AuditEvent.BadResponse, 9201 },
- },
- (e) =>
- {
- e.FailureReason.Should().Be(PipelineFailure.Unexpected);
- e.InnerException.Should().NotBeNull();
- e.InnerException.Message.Should().Be("boom!");
- }
- );
-e.FailureReason.Should().Be(PipelineFailure.Unexpected);
-e.InnerException.Should().NotBeNull();
-e.InnerException.Message.Should().Be("boom!");
-----
-
-An unexpected hard exception on ping and sniff is something we *do* try to recover from and fail over.
-Here pinging nodes on first use is enabled and 9200 throws on ping; we still fail over to 9201, whose ping succeeds.
-However the client call on 9201 throws a hard exception we can not recover from.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Ping(r => r.OnPort(9200).FailAlways(new Exception("ping exception")))
- .Ping(r => r.OnPort(9201).SucceedAlways())
- .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!")))
- .StaticConnectionPool()
- .AllDefaults()
- );
-----
-[source, csharp]
-----
-audit = await audit.TraceUnexpectedException(
- new ClientCall {
- { AuditEvent.PingFailure, 9200 },
- { AuditEvent.PingSuccess, 9201 },
- { AuditEvent.BadResponse, 9201 },
- },
- (e) =>
- {
-        e.FailureReason.Should().Be(PipelineFailure.Unexpected);
-        e.InnerException.Should().NotBeNull();
-        e.InnerException.Message.Should().Be("boom!");
-        e.SeenExceptions.Should().NotBeEmpty();
-        var pipelineException = e.SeenExceptions.First();
-        pipelineException.FailureReason.Should().Be(PipelineFailure.PingFailure);
-        pipelineException.InnerException.Message.Should().Be("ping exception");
-        var pingException = e.AuditTrail.First(a => a.Event == AuditEvent.PingFailure).Exception;
-        pingException.Should().NotBeNull();
-        pingException.Message.Should().Be("ping exception");
-
- }
-);
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnrecoverableExceptions.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnrecoverableExceptions.doc.asciidoc
deleted file mode 100644
index 7a2c0e52b85..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnrecoverableExceptions.doc.asciidoc
+++ /dev/null
@@ -1,44 +0,0 @@
-== Unrecoverable exceptions
-Unrecoverable exceptions are expected exceptions that are grounds to exit the client pipeline immediately.
-By default the client won't throw on any `ElasticsearchClientException` but return an invalid response.
-You can configure the client to throw using `ThrowExceptions()` on `ConnectionSettings`, as sketched below. The following tests
-both a client that throws and one that returns an invalid response with an `.OriginalException` exposed.
-
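-As a minimal sketch, assuming a previously created connection pool in a variable called `pool`,
-opting in to throwing looks like this:
-
-[source, csharp]
-----
-// sketch: a client configured to throw instead of returning an invalid response
-var throwingClient = new ElasticClient(new ConnectionSettings(pool).ThrowExceptions());
-----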
-
-[source, csharp]
-----
-var recoverableExceptions = new[]
- {
- new PipelineException(PipelineFailure.BadResponse),
- new PipelineException(PipelineFailure.PingFailure),
- };
-recoverableExceptions.Should().OnlyContain(e => e.Recoverable);
-var unrecoverableExceptions = new[]
- {
- new PipelineException(PipelineFailure.CouldNotStartSniffOnStartup),
- new PipelineException(PipelineFailure.SniffFailure),
- new PipelineException(PipelineFailure.Unexpected),
- new PipelineException(PipelineFailure.BadAuthentication),
- new PipelineException(PipelineFailure.MaxRetriesReached),
- new PipelineException(PipelineFailure.MaxTimeoutReached)
- };
-unrecoverableExceptions.Should().OnlyContain(e => !e.Recoverable);
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Ping(r => r.SucceedAlways())
- .ClientCalls(r => r.FailAlways(401))
- .StaticConnectionPool()
- .AllDefaults()
- );
-audit = await audit.TraceElasticsearchException(
- new ClientCall {
- { AuditEvent.PingSuccess, 9200 },
- { AuditEvent.BadResponse, 9200 },
- },
- (e) =>
- {
- e.FailureReason.Should().Be(PipelineFailure.BadAuthentication);
- }
- );
-e.FailureReason.Should().Be(PipelineFailure.BadAuthentication);
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Failover/FallingOver.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Failover/FallingOver.doc.asciidoc
deleted file mode 100644
index f83117ff4d0..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Failover/FallingOver.doc.asciidoc
+++ /dev/null
@@ -1,80 +0,0 @@
-== Fail over
-When using connection pooling and the pool has sufficient nodes, a request will be retried if
-the call to a node throws an exception or returns a 502 or 503 response.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways())
- .ClientCalls(r => r.OnPort(9201).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing())
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- { HealthyResponse, 9201 },
- }
- );
-----
-A 502 Bad Gateway response
-will be treated as an error that requires retrying.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways(502))
- .ClientCalls(r => r.OnPort(9201).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing())
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- { HealthyResponse, 9201 },
- }
- );
-----
-A 503 Service Unavailable response
-will be treated as an error that requires retrying.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways(503))
- .ClientCalls(r => r.OnPort(9201).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing())
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- { HealthyResponse, 9201 },
- }
- );
-----
-
-If a call returns a valid HTTP status code other than 502/503, the request won't be retried.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways(418))
- .ClientCalls(r => r.OnPort(9201).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing())
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- }
- );
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/MaxRetries/RespectsMaxRetry.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/MaxRetries/RespectsMaxRetry.doc.asciidoc
deleted file mode 100644
index 2aa8b032712..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/MaxRetries/RespectsMaxRetry.doc.asciidoc
+++ /dev/null
@@ -1,146 +0,0 @@
-== MaxRetries
-By default we retry as many times as we have nodes. However, retries still respect the request timeout,
-meaning that if you have a 100 node cluster and a request timeout of 20 seconds, we will retry as many times as we can
-but give up after 20 seconds.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways())
- .ClientCalls(r => r.OnPort(9209).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing())
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- { BadResponse, 9201 },
- { BadResponse, 9202 },
- { BadResponse, 9203 },
- { BadResponse, 9204 },
- { BadResponse, 9205 },
- { BadResponse, 9206 },
- { BadResponse, 9207 },
- { BadResponse, 9208 },
- { HealthyResponse, 9209 }
- }
- );
-----
-
-When you have a 100 node cluster you might want to ensure a fixed number of retries.
-Remember that the actual number of requests is the initial attempt + the set number of retries.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways())
- .ClientCalls(r => r.OnPort(9209).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing().MaximumRetries(3))
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- { BadResponse, 9201 },
- { BadResponse, 9202 },
- { BadResponse, 9203 },
- { MaxRetriesReached }
- }
- );
-----
-
-In our previous test we simulated very fast failures, but in the real world a call might take upwards of a second.
-Here we simulate a particularly heavy search that takes 10 seconds to fail, and our request timeout is set to 20 seconds.
-In this case it does not make sense to retry our 10 second query on 10 nodes. We should try it twice and give up before a third call is attempted.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(10)))
- .ClientCalls(r => r.OnPort(9209).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(20)))
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- { BadResponse, 9201 },
- { MaxTimeoutReached }
- }
- );
-----
-
-If you set smaller request timeouts you might not want them to also affect the retry timeout; therefore you can configure these separately too.
-Here we simulate calls taking 3 seconds, a request timeout of 2 seconds and an overall retry timeout of 10 seconds.
-We should see 5 attempts to perform this query, testing that our request timeout cuts the query off short and that our max retry timeout of 10 seconds
-wins over the configured request timeout.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3)))
- .ClientCalls(r => r.OnPort(9209).FailAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(2)).MaxRetryTimeout(TimeSpan.FromSeconds(10)))
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- { BadResponse, 9201 },
- { BadResponse, 9202 },
- { BadResponse, 9203 },
- { BadResponse, 9204 },
- { MaxTimeoutReached }
- }
- );
-----
-
-If your retry policy expands beyond the number of available nodes, we won't retry the same node twice.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(2)
- .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3)))
- .ClientCalls(r => r.OnPort(9209).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(2)).MaxRetryTimeout(TimeSpan.FromSeconds(10)))
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 },
- { BadResponse, 9201 },
- { MaxRetriesReached }
- }
- );
-----
-
-This makes setting any retry setting on a single node connection pool a no-op; this is by design!
-Connection pooling and connection failover are about trying to fail sanely whilst still utilizing available resources and
-not giving up on the fail fast principle. It's *NOT* a mechanism for forcing requests to succeed.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3)))
- .ClientCalls(r => r.OnPort(9209).SucceedAlways())
- .SingleNodeConnection()
- .Settings(s => s.DisablePing().MaximumRetries(10))
- );
-audit = await audit.TraceCall(
- new ClientCall {
- { BadResponse, 9200 }
- }
- );
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/FirstUsage.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/FirstUsage.doc.asciidoc
deleted file mode 100644
index 6648607066a..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/FirstUsage.doc.asciidoc
+++ /dev/null
@@ -1,128 +0,0 @@
-== Pinging
-
-Pinging is enabled by default for the Static & Sniffing connection pools.
-This means that the first time a node is used, or is resurrected, we issue a ping with a smaller (configurable) timeout, as sketched below.
-This allows us to fail and fall over to a healthy node faster.
-
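-As a sketch of what configuring that timeout might look like (assuming the `PingTimeout` setting on
-`ConnectionSettings` and a previously created pool in a variable called `pool`):
-
-[source, csharp]
-----
-// sketch: lower the timeout used for pings only, independent of the request timeout
-var settings = new ConnectionSettings(pool).PingTimeout(TimeSpan.FromMilliseconds(500));
-----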
-
-A cluster with 2 nodes where the second node fails on ping
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(2)
- .Ping(p => p.Succeeds(Always))
- .Ping(p => p.OnPort(9201).FailAlways())
- .StaticConnectionPool()
- .AllDefaults()
-);
-----
-[source, csharp]
-----
-await audit.TraceCalls(
-----
-The first call goes to 9200 which succeeds
-
-[source, csharp]
-----
-new ClientCall {
- { PingSuccess, 9200},
- { HealthyResponse, 9200},
- { pool =>
- {
- pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0);
- } }
- },
-----
-The 2nd call does a ping on 9201 because it's used for the first time.
-It fails, so we wrap over to node 9200 which we've already pinged.
-
-[source, csharp]
-----
-new ClientCall {
- { PingFailure, 9201},
- { HealthyResponse, 9200},
-----
-Finally we assert that the connection pool has one node that is marked as dead.
-
-[source, csharp]
-----
-{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) }
- }
-);
-----
-A cluster with 4 nodes where the second and third pings fail
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(4)
- .Ping(p => p.SucceedAlways())
- .Ping(p => p.OnPort(9201).FailAlways())
- .Ping(p => p.OnPort(9202).FailAlways())
- .StaticConnectionPool()
- .AllDefaults()
-);
-----
-[source, csharp]
-----
-await audit.TraceCalls(
-----
-The first call goes to 9200 which succeeds
-
-[source, csharp]
-----
-new ClientCall {
- { PingSuccess, 9200},
- { HealthyResponse, 9200},
- { pool =>
- {
- pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0);
- } }
- },
-----
-The 2nd call does a ping on 9201 because it's used for the first time.
-It fails and so we ping 9202 which also fails. We then ping 9203 because
-we haven't used it before and it succeeds.
-
-[source, csharp]
-----
-new ClientCall {
- { PingFailure, 9201},
- { PingFailure, 9202},
- { PingSuccess, 9203},
- { HealthyResponse, 9203},
-----
-Finally we assert that the connection pool has two nodes that are marked as dead.
-
-[source, csharp]
-----
-{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) }
- }
-);
-----
-A healthy cluster of 4 (min master nodes of 3 of course!)
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(4)
- .Ping(p => p.SucceedAlways())
- .StaticConnectionPool()
- .AllDefaults()
-);
-----
-[source, csharp]
-----
-await audit.TraceCalls(
- new ClientCall { { PingSuccess, 9200}, { HealthyResponse, 9200} },
- new ClientCall { { PingSuccess, 9201}, { HealthyResponse, 9201} },
- new ClientCall { { PingSuccess, 9202}, { HealthyResponse, 9202} },
- new ClientCall { { PingSuccess, 9203}, { HealthyResponse, 9203} },
- new ClientCall { { HealthyResponse, 9200} },
- new ClientCall { { HealthyResponse, 9201} },
- new ClientCall { { HealthyResponse, 9202} },
- new ClientCall { { HealthyResponse, 9203} },
- new ClientCall { { HealthyResponse, 9200} }
- );
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/Revival.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/Revival.doc.asciidoc
deleted file mode 100644
index 89c4e99284a..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/Revival.doc.asciidoc
+++ /dev/null
@@ -1,48 +0,0 @@
-== Pinging
-
-When a node is marked dead it will only be put in the dog house for a certain amount of time. Once it comes out of the dog house, or is revived, we schedule a ping
-before the actual call to make sure it's up and running. If it's still down we put it back in the dog house a little longer. For an explanation on these timeouts see: TODO LINK
-
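-As a sketch (assuming the `DeadTimeout` and `MaxDeadTimeout` settings on `ConnectionSettings` and a
-previously created pool in a variable called `pool`), the length of a stay in the dog house can be bounded:
-
-[source, csharp]
-----
-// sketch: a dead node is retried after at least 1 minute, and never benched longer than 30 minutes
-var settings = new ConnectionSettings(pool)
-    .DeadTimeout(TimeSpan.FromMinutes(1))
-    .MaxDeadTimeout(TimeSpan.FromMinutes(30));
-----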
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(3)
- .ClientCalls(r => r.SucceedAlways())
- .ClientCalls(r => r.OnPort(9202).Fails(Once))
- .Ping(p => p.SucceedAlways())
- .StaticConnectionPool()
- .AllDefaults()
- );
-audit = await audit.TraceCalls(
- new ClientCall { { PingSuccess, 9200 }, { HealthyResponse, 9200 } },
- new ClientCall { { PingSuccess, 9201 }, { HealthyResponse, 9201 } },
- new ClientCall {
- { PingSuccess, 9202},
- { BadResponse, 9202},
- { HealthyResponse, 9200},
- { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) }
- },
- new ClientCall { { HealthyResponse, 9201 } },
- new ClientCall { { HealthyResponse, 9200 } },
- new ClientCall { { HealthyResponse, 9201 } },
- new ClientCall {
- { HealthyResponse, 9200 },
- { pool => pool.Nodes.First(n=>!n.IsAlive).DeadUntil.Should().BeAfter(DateTime.UtcNow) }
- }
- );
-audit = await audit.TraceCalls(
- new ClientCall { { HealthyResponse, 9201 } },
- new ClientCall { { HealthyResponse, 9200 } },
- new ClientCall { { HealthyResponse, 9201 } }
- );
-audit.ChangeTime(d => d.AddMinutes(20));
-audit = await audit.TraceCalls(
- new ClientCall { { HealthyResponse, 9201 } },
- new ClientCall {
- { Resurrection, 9202 },
- { PingSuccess, 9202 },
- { HealthyResponse, 9202 }
- }
- );
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/RequestOverrides/RespectsMaxRetryOverrides.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/RequestOverrides/RespectsMaxRetryOverrides.doc.asciidoc
deleted file mode 100644
index ce2f5afac55..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/RequestOverrides/RespectsMaxRetryOverrides.doc.asciidoc
+++ /dev/null
@@ -1,68 +0,0 @@
-== MaxRetries
-By default we retry as many times as we have nodes. However, retries still respect the request timeout,
-meaning that if you have a 100 node cluster and a request timeout of 20 seconds, we will retry as many times as we can
-but give up after 20 seconds.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways())
- .ClientCalls(r => r.OnPort(9209).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing())
- );
-audit = await audit.TraceCall(
- new ClientCall(r => r.MaxRetries(2)) {
- { BadResponse, 9200 },
- { BadResponse, 9201 },
- { BadResponse, 9202 },
- { MaxRetriesReached }
- }
- );
-----
-
-When you have a 100 node cluster you might want to ensure a fixed number of retries.
-Remember that the actual number of requests is the initial attempt + the set number of retries.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways())
- .ClientCalls(r => r.OnPort(9209).SucceedAlways())
- .StaticConnectionPool()
- .Settings(s => s.DisablePing().MaximumRetries(5))
- );
-audit = await audit.TraceCall(
- new ClientCall(r => r.MaxRetries(2)) {
- { BadResponse, 9200 },
- { BadResponse, 9201 },
- { BadResponse, 9202 },
- { MaxRetriesReached }
- }
- );
-----
-
-This makes setting any retry setting on a single node connection pool a no-op; this is by design!
-Connection pooling and connection failover are about trying to fail sanely whilst still utilizing available resources and
-not giving up on the fail fast principle. It's *NOT* a mechanism for forcing requests to succeed.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3)))
- .ClientCalls(r => r.OnPort(9209).SucceedAlways())
- .SingleNodeConnection()
- .Settings(s => s.DisablePing().MaximumRetries(10))
- );
-audit = await audit.TraceCall(
- new ClientCall(r => r.MaxRetries(10)) {
- { BadResponse, 9200 }
- }
- );
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnConnectionFailure.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnConnectionFailure.doc.asciidoc
deleted file mode 100644
index ef2b68cd4cc..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnConnectionFailure.doc.asciidoc
+++ /dev/null
@@ -1,175 +0,0 @@
-== Sniffing on connection failure
-Sniffing on connection failure is enabled by default when using a connection pool that allows reseeding.
-The only `IConnectionPool` we ship that allows this is the `SniffingConnectionPool`.
-
-This can be very handy to force a refresh of the pool's known healthy nodes by inspecting elasticsearch itself.
-A sniff tries to get the nodes by asking each currently known node until one responds.
-
-
-Here we seed our connection with 5 known nodes 9200-9204, of which we think
-9202, 9203 and 9204 are master eligible nodes. Our virtualized cluster will throw once when doing
-a search on 9201. This should cause a sniff to be kicked off.
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(5)
- .MasterEligible(9202, 9203, 9204)
- .ClientCalls(r => r.SucceedAlways())
- .ClientCalls(r => r.OnPort(9201).Fails(Once))
-----
-When the call fails on 9201 the sniff succeeds and returns a new cluster of healthy nodes.
-This cluster only has 3 nodes and the known masters are 9200 and 9202, but a search on 9201
-still fails once.
-
-[source, csharp]
-----
-.Sniff(p => p.SucceedAlways(Framework.Cluster
- .Nodes(3)
- .MasterEligible(9200, 9202)
- .ClientCalls(r => r.OnPort(9201).Fails(Once))
-----
-After this second failure on 9201 another sniff will return a cluster that no
-longer fails but looks completely different (9210-9212); we should be able to handle this.
-
-[source, csharp]
-----
-.Sniff(s => s.SucceedAlways(Framework.Cluster
- .Nodes(3, 9210)
- .MasterEligible(9210, 9212)
- .ClientCalls(r => r.SucceedAlways())
- .Sniff(r => r.SucceedAlways())
- ))
- ))
- .SniffingConnectionPool()
- .Settings(s => s.DisablePing().SniffOnStartup(false))
-);
-----
-[source, csharp]
-----
-audit = await audit.TraceCalls(
-----
-
-
-[source, csharp]
-----
-new ClientCall {
- { HealthyResponse, 9200 },
- { pool => pool.Nodes.Count.Should().Be(5) }
- },
- new ClientCall {
- { BadResponse, 9201},
-----
-We assert we do a sniff on our first known master node 9202
-
-[source, csharp]
-----
-{ SniffOnFail },
- { SniffSuccess, 9202},
- { HealthyResponse, 9200},
-----
-Our pool should now have three nodes
-
-[source, csharp]
-----
-{ pool => pool.Nodes.Count.Should().Be(3) }
- },
- new ClientCall {
- { BadResponse, 9201},
-----
-We assert we do a sniff on the first master node in our updated cluster
-
-[source, csharp]
-----
-{ SniffOnFail },
- { SniffSuccess, 9200},
- { HealthyResponse, 9210},
- { pool => pool.Nodes.Count.Should().Be(3) }
- },
- new ClientCall { { HealthyResponse, 9211 } },
- new ClientCall { { HealthyResponse, 9212 } },
- new ClientCall { { HealthyResponse, 9210 } },
- new ClientCall { { HealthyResponse, 9211 } },
- new ClientCall { { HealthyResponse, 9212 } },
- new ClientCall { { HealthyResponse, 9210 } },
- new ClientCall { { HealthyResponse, 9211 } },
- new ClientCall { { HealthyResponse, 9212 } },
- new ClientCall { { HealthyResponse, 9210 } }
-);
-----
-Here we set up our cluster exactly the same as the previous setup,
-only we enable pinging (the default is true) and make the ping fail.
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(5)
- .MasterEligible(9202, 9203, 9204)
- .Ping(r => r.OnPort(9201).Fails(Once))
- .Sniff(p => p.SucceedAlways(Framework.Cluster
- .Nodes(3)
- .MasterEligible(9200, 9202)
- .Ping(r => r.OnPort(9201).Fails(Once))
- .Sniff(s => s.SucceedAlways(Framework.Cluster
- .Nodes(3, 9210)
- .MasterEligible(9210, 9211)
- .Ping(r => r.SucceedAlways())
- .Sniff(r => r.SucceedAlways())
- ))
- ))
- .SniffingConnectionPool()
- .Settings(s => s.SniffOnStartup(false))
-);
-----
-[source, csharp]
-----
-audit = await audit.TraceCalls(
- new ClientCall {
- { PingSuccess, 9200 },
- { HealthyResponse, 9200 },
- { pool => pool.Nodes.Count.Should().Be(5) }
- },
- new ClientCall {
- { PingFailure, 9201},
-----
-We assert we do a sniff on our first known master node 9202
-
-[source, csharp]
-----
-{ SniffOnFail },
- { SniffSuccess, 9202},
- { PingSuccess, 9200},
- { HealthyResponse, 9200},
-----
-Our pool should now have three nodes
-
-[source, csharp]
-----
-{ pool => pool.Nodes.Count.Should().Be(3) }
- },
- new ClientCall {
- { PingFailure, 9201},
-----
-We assert we do a sniff on the first master node in our updated cluster
-
-[source, csharp]
-----
-{ SniffOnFail },
- { SniffSuccess, 9200},
- { PingSuccess, 9210},
- { HealthyResponse, 9210},
- { pool => pool.Nodes.Count.Should().Be(3) }
- },
- new ClientCall { { PingSuccess, 9211 }, { HealthyResponse, 9211 } },
- new ClientCall { { PingSuccess, 9212 }, { HealthyResponse, 9212 } },
-----
-9210 was already pinged after the sniff returned the new nodes
-
-[source, csharp]
-----
-new ClientCall { { HealthyResponse, 9210 } },
- new ClientCall { { HealthyResponse, 9211 } },
- new ClientCall { { HealthyResponse, 9212 } },
- new ClientCall { { HealthyResponse, 9210 } }
-);
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStaleClusterState.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStaleClusterState.doc.asciidoc
deleted file mode 100644
index d20b01abde4..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStaleClusterState.doc.asciidoc
+++ /dev/null
@@ -1,97 +0,0 @@
-== Sniffing periodically
-
-Connection pools that return true for `SupportsReseeding` can be configured to sniff periodically.
-In addition to sniffing on startup and sniffing on failures, sniffing periodically can benefit scenarios where
-clusters are often scaled horizontally during peak hours. An application might have a healthy view of a subset of the nodes,
-but without sniffing periodically it will never find the nodes that have been added to help out with load.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .MasterEligible(9202, 9203, 9204)
- .ClientCalls(r => r.SucceedAlways())
- .Sniff(s => s.SucceedAlways(Framework.Cluster
- .Nodes(100)
- .MasterEligible(9202, 9203, 9204)
- .ClientCalls(r => r.SucceedAlways())
- .Sniff(ss => ss.SucceedAlways(Framework.Cluster
- .Nodes(10)
- .MasterEligible(9202, 9203, 9204)
- .ClientCalls(r => r.SucceedAlways())
- ))
- ))
- .SniffingConnectionPool()
- .Settings(s => s
- .DisablePing()
- .SniffOnConnectionFault(false)
- .SniffOnStartup(false)
- .SniffLifeSpan(TimeSpan.FromMinutes(30))
- )
- );
-----
-In a healthy cluster, all nodes return healthy responses.
-
-[source, csharp]
-----
-audit = await audit.TraceCalls(
- new ClientCall { { HealthyResponse, 9200 } },
- new ClientCall { { HealthyResponse, 9201 } },
- new ClientCall { { HealthyResponse, 9202 } },
- new ClientCall { { HealthyResponse, 9203 } },
- new ClientCall { { HealthyResponse, 9204 } },
- new ClientCall { { HealthyResponse, 9205 } },
- new ClientCall { { HealthyResponse, 9206 } },
- new ClientCall { { HealthyResponse, 9207 } },
- new ClientCall { { HealthyResponse, 9208 } },
- new ClientCall { { HealthyResponse, 9209 } },
- new ClientCall {
- { HealthyResponse, 9200 },
- { pool => pool.Nodes.Count.Should().Be(10) }
- }
-);
-----
-Now let's forward the clock 31 minutes. Our sniff lifespan should now be stale
-and the first call should do a sniff, which discovers we scaled up to 100 nodes!
-
-[source, csharp]
-----
-audit.ChangeTime(d => d.AddMinutes(31));
-----
-[source, csharp]
-----
-audit = await audit.TraceCalls(
- new ClientCall {
-----
-a sniff is done first and it prefers the first master eligible node
-
-[source, csharp]
-----
-{ SniffOnStaleCluster },
- { SniffSuccess, 9202 },
- { HealthyResponse, 9201 },
- { pool => pool.Nodes.Count.Should().Be(100) }
- }
-);
-----
-[source, csharp]
-----
-audit.ChangeTime(d => d.AddMinutes(31));
-----
-[source, csharp]
-----
-audit = await audit.TraceCalls(
- new ClientCall {
-----
-a sniff is done first and it prefers the first master eligible node
-
-[source, csharp]
-----
-{ SniffOnStaleCluster },
- { SniffSuccess, 9202 },
- { HealthyResponse, 9200 },
- { pool => pool.Nodes.Count.Should().Be(10) }
- }
-);
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStartup.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStartup.doc.asciidoc
deleted file mode 100644
index 1f27d68c313..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStartup.doc.asciidoc
+++ /dev/null
@@ -1,119 +0,0 @@
-== Sniffing on startup
-
-Connection pools that return true for `SupportsReseeding` by default sniff on startup.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Sniff(s => s.Fails(Always))
- .Sniff(s => s.OnPort(9202).Succeeds(Always))
- .SniffingConnectionPool()
- .AllDefaults()
- );
-await audit.TraceCall(new ClientCall
- {
- { SniffOnStartup},
- { SniffFailure, 9200},
- { SniffFailure, 9201},
- { SniffSuccess, 9202},
- { PingSuccess , 9200},
- { HealthyResponse, 9200}
- });
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Sniff(s => s.Fails(Always))
- .Sniff(s => s.OnPort(9202).Succeeds(Always))
- .SniffingConnectionPool()
- .AllDefaults()
- );
-await audit.TraceCalls(
- new ClientCall
- {
- { SniffOnStartup},
- { SniffFailure, 9200},
- { SniffFailure, 9201},
- { SniffSuccess, 9202},
- { PingSuccess , 9200},
- { HealthyResponse, 9200}
- },
- new ClientCall
- {
- { PingSuccess, 9201},
- { HealthyResponse, 9201}
- }
- );
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Sniff(s => s.Fails(Always))
- .Sniff(s => s.OnPort(9202).Succeeds(Always, Framework.Cluster.Nodes(8, startFrom: 9204)))
- .SniffingConnectionPool()
- .AllDefaults()
- );
-await audit.TraceCall(new ClientCall {
- { SniffOnStartup},
- { SniffFailure, 9200},
- { SniffFailure, 9201},
- { SniffSuccess, 9202},
- { PingSuccess, 9204},
- { HealthyResponse, 9204}
- });
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Sniff(s => s.Fails(Always))
- .Sniff(s => s.OnPort(9209).Succeeds(Always))
- .SniffingConnectionPool()
- .AllDefaults()
- );
-await audit.TraceCall(new ClientCall {
- { SniffOnStartup},
- { SniffFailure, 9200},
- { SniffFailure, 9201},
- { SniffFailure, 9202},
- { SniffFailure, 9203},
- { SniffFailure, 9204},
- { SniffFailure, 9205},
- { SniffFailure, 9206},
- { SniffFailure, 9207},
- { SniffFailure, 9208},
- { SniffSuccess, 9209},
- { PingSuccess, 9200},
- { HealthyResponse, 9200}
- });
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(new[] {
- new Node(new Uri("http://localhost:9200")) { MasterEligible = false },
- new Node(new Uri("http://localhost:9201")) { MasterEligible = false },
- new Node(new Uri("http://localhost:9202")) { MasterEligible = true },
- })
- .Sniff(s => s.Succeeds(Always))
- .SniffingConnectionPool()
- .AllDefaults()
- );
-await audit.TraceCall(new ClientCall {
- { SniffOnStartup},
- { SniffSuccess, 9202},
- { PingSuccess, 9200},
- { HealthyResponse, 9200}
- });
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(new[] {
- new Node(new Uri("http://localhost:9200")) { MasterEligible = true },
- new Node(new Uri("http://localhost:9201")) { MasterEligible = true },
- new Node(new Uri("http://localhost:9202")) { MasterEligible = false },
- })
- .Sniff(s => s.Fails(Always))
- .Sniff(s => s.OnPort(9202).Succeeds(Always))
- .SniffingConnectionPool()
- .AllDefaults()
- );
-await audit.TraceCall(new ClientCall {
- { SniffOnStartup},
- { SniffFailure, 9200},
- { SniffFailure, 9201},
- { SniffSuccess, 9202},
- { PingSuccess, 9200},
- { HealthyResponse, 9200}
- });
-----
diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/RoleDetection.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/RoleDetection.doc.asciidoc
deleted file mode 100644
index 576d9500d74..00000000000
--- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/RoleDetection.doc.asciidoc
+++ /dev/null
@@ -1,121 +0,0 @@
-== Sniffing role detection
-
-When we sniff the cluster state we detect the role of each node, i.e. whether it's master eligible and whether it holds data.
-We use this information when selecting a node to perform an API call on.
-
-
-[source, csharp]
-----
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Sniff(s => s.Fails(Always))
- .Sniff(s => s.OnPort(9202)
- .Succeeds(Always, Framework.Cluster.Nodes(8).MasterEligible(9200, 9201, 9202))
- )
- .SniffingConnectionPool()
- .AllDefaults()
- )
- {
- AssertPoolBeforeCall = (pool) =>
- {
- pool.Should().NotBeNull();
- pool.Nodes.Should().HaveCount(10);
- pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(10);
- },
- AssertPoolAfterCall = (pool) =>
- {
- pool.Should().NotBeNull();
- pool.Nodes.Should().HaveCount(8);
- pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(3);
- }
- };
-pool.Should().NotBeNull();
-pool.Nodes.Should().HaveCount(10);
-pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(10);
-pool.Should().NotBeNull();
-pool.Nodes.Should().HaveCount(8);
-pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(3);
-await audit.TraceStartup();
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Sniff(s => s.Fails(Always))
- .Sniff(s => s.OnPort(9202)
- .Succeeds(Always, Framework.Cluster.Nodes(8).StoresNoData(9200, 9201, 9202))
- )
- .SniffingConnectionPool()
- .AllDefaults()
- )
- {
- AssertPoolBeforeCall = (pool) =>
- {
- pool.Should().NotBeNull();
- pool.Nodes.Should().HaveCount(10);
- pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10);
- },
-
- AssertPoolAfterCall = (pool) =>
- {
- pool.Should().NotBeNull();
- pool.Nodes.Should().HaveCount(8);
- pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5);
- }
- };
-pool.Should().NotBeNull();
-pool.Nodes.Should().HaveCount(10);
-pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10);
-pool.Should().NotBeNull();
-pool.Nodes.Should().HaveCount(8);
-pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5);
-await audit.TraceStartup();
-var audit = new Auditor(() => Framework.Cluster
- .Nodes(10)
- .Sniff(s => s.SucceedAlways()
- .Succeeds(Always, Framework.Cluster.Nodes(8).StoresNoData(9200, 9201, 9202).SniffShouldReturnFqdn())
- )
- .SniffingConnectionPool()
- .AllDefaults()
- )
- {
- AssertPoolBeforeCall = (pool) =>
- {
- pool.Should().NotBeNull();
- pool.Nodes.Should().HaveCount(10);
- pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10);
- pool.Nodes.Should().OnlyContain(n => n.Uri.Host == "localhost");
- },
-
- AssertPoolAfterCall = (pool) =>
- {
- pool.Should().NotBeNull();
- pool.Nodes.Should().HaveCount(8);
- pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5);
- pool.Nodes.Should().OnlyContain(n => n.Uri.Host.StartsWith("fqdn") && !n.Uri.Host.Contains("/"));
- }
- };
-pool.Should().NotBeNull();
-pool.Nodes.Should().HaveCount(10);
-pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10);
-pool.Nodes.Should().OnlyContain(n => n.Uri.Host == "localhost");
-pool.Should().NotBeNull();
-pool.Nodes.Should().HaveCount(8);
-pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5);
-pool.Nodes.Should().OnlyContain(n => n.Uri.Host.StartsWith("fqdn") && !n.Uri.Host.Contains("/"));
-await audit.TraceStartup();
-var node = SniffAndReturnNode();
-node.MasterEligible.Should().BeTrue();
-node.HoldsData.Should().BeFalse();
-node = await SniffAndReturnNodeAsync();
-node.MasterEligible.Should().BeTrue();
-node.HoldsData.Should().BeFalse();
-var pipeline = CreatePipeline();
-pipeline.Sniff();
-var pipeline = CreatePipeline();
-await pipeline.SniffAsync();
-this._settings =
- this._cluster.Client(u => new SniffingConnectionPool(new[] {u}), c => c.PrettyJson()).ConnectionSettings;
-var pipeline = new RequestPipeline(this._settings, DateTimeProvider.Default, new MemoryStreamFactory(),
- new SearchRequestParameters());
-var nodes = this._settings.ConnectionPool.Nodes;
-nodes.Should().NotBeEmpty().And.HaveCount(1);
-var node = nodes.First();
-----
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths/DocumentPaths.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths/DocumentPaths.doc.asciidoc
deleted file mode 100644
index 22a2862b8ef..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths/DocumentPaths.doc.asciidoc
+++ /dev/null
@@ -1,104 +0,0 @@
-= DocumentPaths
-Many APIs in elasticsearch describe a path to a document. In NEST, besides generating a constructor that takes
-an Index, Type and Id separately, we also generate a constructor taking a `DocumentPath<T>` that allows you to describe the path
-to your document more succinctly.
-
-Manually newing
-
-Here we create a new document path based on Project with the id 1.
-
-[source, csharp]
-----
-IDocumentPath path = new DocumentPath<Project>(1);
-----
-[source, csharp]
-----
-Expect("project").WhenSerializing(path.Index);
-Expect("project").WhenSerializing(path.Type);
-Expect(1).WhenSerializing(path.Id);
-----
-You can still override the inferred index and type name
-
-[source, csharp]
-----
-path = new DocumentPath<Project>(1).Type("project1");
-----
-[source, csharp]
-----
-Expect("project1").WhenSerializing(path.Type);
-path = new DocumentPath<Project>(1).Index("project1");
-Expect("project1").WhenSerializing(path.Index);
-----
-there is also a static way to describe such paths
-
-[source, csharp]
-----
-path = DocumentPath<Project>.Id(1);
-----
-[source, csharp]
-----
-Expect("project").WhenSerializing(path.Index);
-Expect("project").WhenSerializing(path.Type);
-Expect(1).WhenSerializing(path.Id);
-var project = new Project { Name = "hello-world" };
-----
-here we create a new document path based on a Project
-
-[source, csharp]
-----
-IDocumentPath path = new DocumentPath<Project>(project);
-----
-[source, csharp]
-----
-Expect("project").WhenSerializing(path.Index);
-Expect("project").WhenSerializing(path.Type);
-Expect("hello-world").WhenSerializing(path.Id);
-----
-You can still override the inferred index and type name
-
-[source, csharp]
-----
-path = new DocumentPath<Project>(project).Type("project1");
-----
-[source, csharp]
-----
-Expect("project1").WhenSerializing(path.Type);
-path = new DocumentPath<Project>(project).Index("project1");
-Expect("project1").WhenSerializing(path.Index);
-----
-there is also a static way to describe such paths
-
-[source, csharp]
-----
-path = DocumentPath<Project>.Id(project);
-----
-[source, csharp]
-----
-Expect("project").WhenSerializing(path.Index);
-Expect("project").WhenSerializing(path.Type);
-Expect("hello-world").WhenSerializing(path.Id);
-DocumentPath<Project> p = project;
-var project = new Project { Name = "hello-world" };
-----
-Here we can see an example of how `DocumentPath<T>` helps you describe your requests more tersely.
-
-[source, csharp]
-----
-var request = new IndexRequest<Project>(2) { Document = project };
-----
-[source, csharp]
-----
-request = new IndexRequest<Project>(project) { };
-----
-When comparing with the full blown constructor and passing the document manually,
-`DocumentPath<T>`'s benefits become apparent.
-
-[source, csharp]
-----
-request = new IndexRequest<Project>(IndexName.From<Project>(), TypeName.From<Project>(), 2)
-{
- Document = project
-};
-----
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldInference.doc.asciidoc
deleted file mode 100644
index 8701d88bf1d..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldInference.doc.asciidoc
+++ /dev/null
@@ -1,429 +0,0 @@
-= Strongly typed field access
-
-Several places in the elasticsearch API expect the path to a field from your original source document as a string.
-NEST allows you to use C# expressions to strongly type these field path strings.
-
-These expressions are assigned to a type called `Field` and there are several ways to create an instance of that type.
-
-
-Using the constructor directly is possible but rather involved
-
-[source, csharp]
-----
-var fieldString = new Field { Name = "name" };
-----
-especially when using C# expressions since these can not be simply new'ed
-
-[source, csharp]
-----
-Expression<Func<Project, object>> expression = p => p.Name;
-----
-[source, csharp]
-----
-var fieldExpression = Field.Create(expression);
-Expect("name")
- .WhenSerializing(fieldExpression)
- .WhenSerializing(fieldString);
-----
-Therefore you can also implicitly convert strings and expressions to `Field`s
-
-[source, csharp]
-----
-Field fieldString = "name";
-----
-but for expressions this is still rather involved
-
-[source, csharp]
-----
-Expression<Func<Project, object>> expression = p => p.Name;
-----
-[source, csharp]
-----
-Field fieldExpression = expression;
-Expect("name")
- .WhenSerializing(fieldExpression)
- .WhenSerializing(fieldString);
-----
-To ease creating `Field`s from expressions there is a static `Infer` class you can use
-
-[source, csharp]
-----
-Field fieldString = "name";
-----
-but for expressions this is still rather involved
-
-[source, csharp]
-----
-var fieldExpression = Infer.Field<Project>(p => p.Name);
-----
-Using static imports in C# 6 this can be shortened even further:
-`using static Nest.Static;`
-
-[source, csharp]
-----
-fieldExpression = Field<Project>(p => p.Name);
-----
-Now this is much, much terser than our first example using the constructor!
-
-[source, csharp]
-----
-Expect("name")
- .WhenSerializing(fieldString)
- .WhenSerializing(fieldExpression);
-----
-By default NEST will camelCase all the field names to be more javascripty.
-
-Using `DefaultFieldNameInferrer()` on `ConnectionSettings` you can change this behavior.
-
-[source, csharp]
-----
-var setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p.ToUpper()));
-----
-[source, csharp]
-----
-setup.Expect("NAME").WhenSerializing(Field<Project>(p => p.Name));
-----
-However strings are *always* passed along verbatim.
-
-[source, csharp]
-----
-setup.Expect("NaMe").WhenSerializing("NaMe");
-----
-If you want the same behavior for expressions, simply do nothing in the default inferrer.
-
-[source, csharp]
-----
-setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p));
-----
-[source, csharp]
-----
-setup.Expect("Name").WhenSerializing(Field<Project>(p => p.Name));
-----
-Complex field name expressions
-
-You can follow your property expression to any depth; here we are traversing to the LeadDeveloper's (a `Person`) FirstName.
-
-[source, csharp]
-----
-Expect("leadDeveloper.firstName").WhenSerializing(Field<Project>(p => p.LeadDeveloper.FirstName));
-----
-When dealing with collections, index access is ignored, allowing you to traverse into properties of collections.
-
-[source, csharp]
-----
-Expect("curatedTags").WhenSerializing(Field<Project>(p => p.CuratedTags[0]));
-----
-Similarly `.First()` also works; remember these are expressions and not actual code that will be executed.
-
-[source, csharp]
-----
-Expect("curatedTags").WhenSerializing(Field<Project>(p => p.CuratedTags.First()));
-----
-[source, csharp]
-----
-Expect("curatedTags.added").WhenSerializing(Field<Project>(p => p.CuratedTags[0].Added));
-Expect("curatedTags.name").WhenSerializing(Field<Project>(p => p.CuratedTags.First().Name));
-----
-When we see an indexer on a dictionary we assume they describe property names
-
-[source, csharp]
-----
-Expect("metadata.hardcoded").WhenSerializing(Field<Project>(p => p.Metadata["hardcoded"]));
-----
-[source, csharp]
-----
-Expect("metadata.hardcoded.created").WhenSerializing(Field<Project>(p => p.Metadata["hardcoded"].Created));
-----
-A cool feature here is that we'll evaluate variables passed to these indexers
-
-[source, csharp]
-----
-var variable = "var";
-----
-[source, csharp]
-----
-Expect("metadata.var").WhenSerializing(Field<Project>(p => p.Metadata[variable]));
-Expect("metadata.var.created").WhenSerializing(Field<Project>(p => p.Metadata[variable].Created));
-----
-If you are using elasticsearch's multi field mapping (you really should!), these "virtual" sub fields
-do not always map back onto your POCO. By calling `.Suffix()` you describe the sub fields that do not live in your C# objects.
-
-[source, csharp]
-----
-Expect("leadDeveloper.firstName.raw").WhenSerializing(Field<Project>(p => p.LeadDeveloper.FirstName.Suffix("raw")));
-----
-[source, csharp]
-----
-Expect("curatedTags.raw").WhenSerializing(Field<Project>(p => p.CuratedTags[0].Suffix("raw")));
-Expect("curatedTags.raw").WhenSerializing(Field<Project>(p => p.CuratedTags.First().Suffix("raw")));
-Expect("curatedTags.added.raw").WhenSerializing(Field<Project>(p => p.CuratedTags[0].Added.Suffix("raw")));
-Expect("metadata.hardcoded.raw").WhenSerializing(Field<Project>(p => p.Metadata["hardcoded"].Suffix("raw")));
-Expect("metadata.hardcoded.created.raw").WhenSerializing(Field<Project>(p => p.Metadata["hardcoded"].Created.Suffix("raw")));
-----
-You can even chain them to any depth!
-
-[source, csharp]
-----
-Expect("curatedTags.name.raw.evendeeper").WhenSerializing(Field(p => p.CuratedTags.First().Name.Suffix("raw").Suffix("evendeeper")));
-----
-Variables passed to `.Suffix()` will be evaluated as well
-
-[source, csharp]
-----
-var suffix = "unanalyzed";
-----
-[source, csharp]
-----
-Expect("metadata.var.unanalyzed").WhenSerializing(Field(p => p.Metadata[variable].Suffix(suffix)));
-Expect("metadata.var.created.unanalyzed").WhenSerializing(Field(p => p.Metadata[variable].Created.Suffix(suffix)));
-----
-
-Suffixes can be appended to expressions. This is useful in cases where you want to apply the same suffix
-to a list of fields
-
-
-
-
-[source, csharp]
-----
-var expressions = new List<Expression<Func<Project, object>>>
-{
- p => p.Name,
- p => p.Description,
- p => p.CuratedTags.First().Name,
- p => p.LeadDeveloper.FirstName
-};
-----
-append the suffix "raw" to each expression
-
-[source, csharp]
-----
-var fieldExpressions =
-    expressions.Select<Expression<Func<Project, object>>, Field>(e => e.AppendSuffix("raw")).ToList();
-----
-[source, csharp]
-----
-Expect("name.raw").WhenSerializing(fieldExpressions[0]);
-Expect("description.raw").WhenSerializing(fieldExpressions[1]);
-Expect("curatedTags.name.raw").WhenSerializing(fieldExpressions[2]);
-Expect("leadDeveloper.firstName.raw").WhenSerializing(fieldExpressions[3]);
-----
-Annotations
-
-When using NEST's property attributes you can specify a new name for the properties
-
-[source, csharp]
-----
-public class BuiltIn
-{
- [String(Name = "naam")]
- public string Name { get; set; }
-}
-----
-[source, csharp]
-----
-Expect("naam").WhenSerializing(Field(p => p.Name));
-----
-
-Starting with NEST 2.x we also ask the serializer if it can resolve the property to a name.
-Here we ask the default JsonNetSerializer and it takes JsonProperty into account
-
-[source, csharp]
-----
-public class SerializerSpecific
-{
- [JsonProperty("nameInJson")]
- public string Name { get; set; }
-}
-----
-[source, csharp]
-----
-Expect("nameInJson").WhenSerializing(Field(p => p.Name));
-----
-
-If both are specified, the NEST attribute takes precedence
-
-[source, csharp]
-----
-public class Both
-{
- [String(Name = "naam")]
- [JsonProperty("nameInJson")]
- public string Name { get; set; }
-}
-----
-[source, csharp]
-----
-Expect("naam").WhenSerializing(Field(p => p.Name));
-Expect(new
- {
- naam = "Martijn Laarman"
- }).WhenSerializing(new Both { Name = "Martijn Laarman" });
-----
-[source, csharp]
-----
-class A { public C C { get; set; } }
-----
-[source, csharp]
-----
-class B { public C C { get; set; } }
-----
-[source, csharp]
-----
-class C
-{
- public string Name { get; set; }
-}
-----
-
-Resolving field names is cached, but this cache is per connection settings
-
-
-[source, csharp]
-----
-var connectionSettings = TestClient.CreateSettings(forceInMemory: true);
-var client = new ElasticClient(connectionSettings);
-var fieldNameOnA = client.Infer.Field(Field<A>(p => p.C.Name));
-var fieldNameOnB = client.Infer.Field(Field<B>(p => p.C.Name));
-----
-Here we have two similarly shaped expressions, one coming from A and one from B,
-that will resolve to the same field name, as expected
-
-[source, csharp]
-----
-fieldNameOnA.Should().Be("c.name");
-----
-[source, csharp]
-----
-fieldNameOnB.Should().Be("c.name");
-----
-Now we create a new ConnectionSettings with a rename of C on class A to `d`;
-now when we resolve the field path for A, it will be different
-
-[source, csharp]
-----
-var newConnectionSettings = TestClient.CreateSettings(forceInMemory: true, modifySettings: s => s
-    .InferMappingFor<A>(m => m
- .Rename(p => p.C, "d")
- )
-);
-----
-[source, csharp]
-----
-var newClient = new ElasticClient(newConnectionSettings);
-fieldNameOnA = newClient.Infer.Field(Field<A>(p => p.C.Name));
-fieldNameOnB = newClient.Infer.Field(Field<B>(p => p.C.Name));
-fieldNameOnA.Should().Be("d.name");
-fieldNameOnB.Should().Be("c.name");
-----
-However, we didn't break inference on the first client instance using its separate ConnectionSettings
-
-[source, csharp]
-----
-fieldNameOnA = client.Infer.Field(Field<A>(p => p.C.Name));
-----
-[source, csharp]
-----
-fieldNameOnB = client.Infer.Field(Field<B>(p => p.C.Name));
-fieldNameOnA.Should().Be("c.name");
-fieldNameOnB.Should().Be("c.name");
-----
-To wrap up, let's showcase the order of precedence in which field names are inferred:
-1. A hard rename of the property on connection settings using Rename()
-2. A NEST property mapping
-3. Ask the serializer if the property has a verbatim value, e.g. it has an explicit JsonProperty attribute
-4. Pass the MemberInfo's Name to the DefaultFieldNameInferrer, which by default camelCases
-In the following example we have a class where each case wins
-
-[source, csharp]
-----
-class Precedence
-{
-----
-Even though this property has a NEST property mapping and a JsonProperty attribute,
-we are going to provide a hard rename for it on ConnectionSettings later that should win.
-
-[source, csharp]
-----
-[String(Name = "renamedIgnoresNest")]
- [JsonProperty("renamedIgnoresJsonProperty")]
- public string RenamedOnConnectionSettings { get; set; }
-----
-This property has both a NEST attribute and a JsonProperty, NEST should win.
-
-[source, csharp]
-----
-[String(Name = "nestAtt")]
- [JsonProperty("jsonProp")]
- public string NestAttribute { get; set; }
-----
-We should take the json property into account by itself
-
-[source, csharp]
-----
-[JsonProperty("jsonProp")]
- public string JsonProperty { get; set; }
-----
-This property we are going to special case in our custom serializer to resolve to `ask`
-
-[source, csharp]
-----
-[JsonProperty("dontaskme")]
- public string AskSerializer { get; set; }
-----
-We are going to register a DefaultFieldNameInferrer on ConnectionSettings
-that will uppercase all properties.
-
-[source, csharp]
-----
-public string DefaultFieldNameInferrer { get; set; }
-
-}
-----
-[source, csharp]
-----
-var usingSettings = WithConnectionSettings(s => s
-----
-Here we provide an explicit rename of a property on ConnectionSettings
-
-[source, csharp]
-----
-.InferMappingFor(m => m
- .Rename(p => p.RenamedOnConnectionSettings, "renamed")
- )
-----
-All properties that are not mapped verbatim should be uppercased
-
-[source, csharp]
-----
-.DefaultFieldNameInferrer(p => p.ToUpperInvariant())
-).WithSerializer(s => new CustomSerializer(s));
-----
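-The `CustomSerializer` registered above special-cases the `AskSerializer` property. A minimal sketch of what it
-could look like, assuming it derives from the default `JsonNetSerializer` and overrides its `CreatePropertyMapping()`
-hook (the exact members used here are assumptions, for illustration only):
-
-[source, csharp]
-----
-public class CustomSerializer : JsonNetSerializer
-{
-    public CustomSerializer(IConnectionSettingsValues settings) : base(settings) { }
-
-    // assumed override: resolve AskSerializer to "ask", defer to the default behaviour otherwise
-    public override IPropertyMapping CreatePropertyMapping(MemberInfo memberInfo) =>
-        memberInfo.Name == nameof(Precedence.AskSerializer)
-            ? new PropertyMapping { Name = "ask" }
-            : base.CreatePropertyMapping(memberInfo);
-}
-----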
-[source, csharp]
-----
-usingSettings.Expect("renamed").ForField(Field(p => p.RenamedOnConnectionSettings));
-usingSettings.Expect("nestAtt").ForField(Field(p => p.NestAttribute));
-usingSettings.Expect("jsonProp").ForField(Field(p => p.JsonProperty));
-usingSettings.Expect("ask").ForField(Field(p => p.AskSerializer));
-usingSettings.Expect("DEFAULTFIELDNAMEINFERRER").ForField(Field(p => p.DefaultFieldNameInferrer));
-----
-The same rules apply when indexing an object
-
-[source, csharp]
-----
-usingSettings.Expect(new []
-{
- "ask",
- "DEFAULTFIELDNAMEINFERRER",
- "jsonProp",
- "nestAtt",
- "renamed"
-}).AsPropertiesOf(new Precedence
-{
- RenamedOnConnectionSettings = "renamed on connection settings",
- NestAttribute = "using a nest attribute",
- JsonProperty = "the default serializer resolves json property attributes",
- AskSerializer = "serializer fiddled with this one",
- DefaultFieldNameInferrer = "shouting much?"
-});
-----
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldNames/FieldInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldNames/FieldInference.doc.asciidoc
deleted file mode 100644
index 82b58a192ec..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldNames/FieldInference.doc.asciidoc
+++ /dev/null
@@ -1,313 +0,0 @@
-# Strongly typed field access
-
-Several places in the elasticsearch API expect the path to a field from your original source document as a string.
-NEST allows you to use C# expressions to strongly type these field path strings.
-These expressions are assigned to a type called `Field` and there are several ways to create an instance of that type
-
-Using the constructor directly is possible but rather involved
-
-[source, csharp]
-----
-var fieldString = new Field { Name = "name" };
-----
-especially when using C# expressions, since these cannot simply be new'ed
-
-[source, csharp]
-----
-Expression<Func<Project, object>> expression = p => p.Name;
-----
-[source, csharp]
-----
-var fieldExpression = Field.Create(expression);
-Expect("name")
- .WhenSerializing(fieldExpression)
- .WhenSerializing(fieldString);
-----
-Therefore you can also implicitly convert strings and expressions to `Field`s
-
-[source, csharp]
-----
-Field fieldString = "name";
-----
-but for expressions this is still rather involved
-
-[source, csharp]
-----
-Expression<Func<Project, object>> expression = p => p.Name;
-----
-[source, csharp]
-----
-Field fieldExpression = expression;
-Expect("name")
- .WhenSerializing(fieldExpression)
- .WhenSerializing(fieldString);
-----
-To ease creating `Field`s from expressions there is a static `Infer` class you can use
-
-[source, csharp]
-----
-Field fieldString = "name";
-----
-but for expressions this is still rather involved
-
-[source, csharp]
-----
-var fieldExpression = Field(p => p.Name);
-----
-Using static imports in C# 6 this can be shortened even further:
-using static Nest.Static;
-
-[source, csharp]
-----
-fieldExpression = Field(p => p.Name);
-----
-Now this is much, much terser than our first example using the constructor!
-
-[source, csharp]
-----
-Expect("name")
- .WhenSerializing(fieldString)
- .WhenSerializing(fieldExpression);
-----
-By default NEST will camelCase all the field names to be more javascripty
-
-Using `DefaultFieldNameInferrer()` on ConnectionSettings you can change this behavior
-
-[source, csharp]
-----
-var setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p.ToUpper()));
-----
-[source, csharp]
-----
-setup.Expect("NAME").WhenSerializing(Field(p => p.Name));
-----
-However, strings are *always* passed along verbatim
-
-[source, csharp]
-----
-setup.Expect("NaMe").WhenSerializing("NaMe");
-----
-If you want the same behavior for expressions, simply do nothing in the default inferrer
-
-[source, csharp]
-----
-setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p));
-----
-[source, csharp]
-----
-setup.Expect("Name").WhenSerializing(Field(p => p.Name));
-----
-Complex field name expressions
-
-You can follow your property expression to any depth, here we are traversing to the LeadDeveloper's (Person) FirstName
-
-[source, csharp]
-----
-Expect("leadDeveloper.firstName").WhenSerializing(Field(p => p.LeadDeveloper.FirstName));
-----
-When dealing with collections, index access is ignored, allowing you to traverse into properties of collections
-
-[source, csharp]
-----
-Expect("curatedTags").WhenSerializing(Field(p => p.CuratedTags[0]));
-----
-Similarly, `.First()` also works; remember, these are expressions and not actual code that will be executed
-
-[source, csharp]
-----
-Expect("curatedTags").WhenSerializing(Field(p => p.CuratedTags.First()));
-----
-[source, csharp]
-----
-Expect("curatedTags.added").WhenSerializing(Field(p => p.CuratedTags[0].Added));
-Expect("curatedTags.name").WhenSerializing(Field(p => p.CuratedTags.First().Name));
-----
-When we see an indexer on a dictionary we assume it describes property names
-
-[source, csharp]
-----
-Expect("metadata.hardcoded").WhenSerializing(Field(p => p.Metadata["hardcoded"]));
-----
-[source, csharp]
-----
-Expect("metadata.hardcoded.created").WhenSerializing(Field(p => p.Metadata["hardcoded"].Created));
-----
-A cool feature here is that we'll evaluate variables passed to these indexers
-
-[source, csharp]
-----
-var variable = "var";
-----
-[source, csharp]
-----
-Expect("metadata.var").WhenSerializing(Field(p => p.Metadata[variable]));
-Expect("metadata.var.created").WhenSerializing(Field(p => p.Metadata[variable].Created));
-----
-If you are using Elasticsearch's multifield mapping (you really should!) these "virtual" sub fields
-do not always map back onto your POCO. By calling `.Suffix()` you describe the sub fields that do not live in your C# objects
-
-[source, csharp]
-----
-Expect("leadDeveloper.firstName.raw").WhenSerializing(Field(p => p.LeadDeveloper.FirstName.Suffix("raw")));
-----
-[source, csharp]
-----
-Expect("curatedTags.raw").WhenSerializing(Field(p => p.CuratedTags[0].Suffix("raw")));
-Expect("curatedTags.raw").WhenSerializing(Field(p => p.CuratedTags.First().Suffix("raw")));
-Expect("curatedTags.added.raw").WhenSerializing(Field(p => p.CuratedTags[0].Added.Suffix("raw")));
-Expect("metadata.hardcoded.raw").WhenSerializing(Field(p => p.Metadata["hardcoded"].Suffix("raw")));
-Expect("metadata.hardcoded.created.raw").WhenSerializing(Field(p => p.Metadata["hardcoded"].Created.Suffix("raw")));
-----
-You can even chain them to any depth!
-
-[source, csharp]
-----
-Expect("curatedTags.name.raw.evendeeper").WhenSerializing(Field(p => p.CuratedTags.First().Name.Suffix("raw").Suffix("evendeeper")));
-----
-Variables passed to `.Suffix()` will be evaluated as well
-
-[source, csharp]
-----
-var suffix = "unanalyzed";
-----
-[source, csharp]
-----
-Expect("metadata.var.unanalyzed").WhenSerializing(Field(p => p.Metadata[variable].Suffix(suffix)));
-Expect("metadata.var.created.unanalyzed").WhenSerializing(Field(p => p.Metadata[variable].Created.Suffix(suffix)));
-----
-Annotations
-
-When using NEST's property attributes you can specify a new name for the properties
-
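-Here the `BuiltIn` class renames its `Name` property to `naam`:
-
-[source, csharp]
-----
-public class BuiltIn
-{
-    [String(Name = "naam")]
-    public string Name { get; set; }
-}
-----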
-[source, csharp]
-----
-Expect("naam").WhenSerializing(Field(p => p.Name));
-----
-
-Starting with NEST 2.x we also ask the serializer if it can resolve the property to a name.
-Here we ask the default JsonNetSerializer and it takes JsonProperty into account
-
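-Here the `SerializerSpecific` class uses Json.NET's `JsonProperty` attribute:
-
-[source, csharp]
-----
-public class SerializerSpecific
-{
-    [JsonProperty("nameInJson")]
-    public string Name { get; set; }
-}
-----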
-[source, csharp]
-----
-Expect("nameInJson").WhenSerializing(Field(p => p.Name));
-----
-
-If both are specified, the NEST attribute takes precedence
-
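-Here the `Both` class carries a NEST `String` attribute as well as a `JsonProperty` attribute:
-
-[source, csharp]
-----
-public class Both
-{
-    [String(Name = "naam")]
-    [JsonProperty("nameInJson")]
-    public string Name { get; set; }
-}
-----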
-[source, csharp]
-----
-Expect("naam").WhenSerializing(Field(p => p.Name));
-Expect(new
- {
- naam = "Martijn Laarman"
- }).WhenSerializing(new Both { Name = "Martijn Laarman" });
-----
-Resolving field names is cached, but this cache is per connection settings
-
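-Given two classes A and B that both expose a property `C` of the same type `C`:
-
-[source, csharp]
-----
-class A { public C C { get; set; } }
-class B { public C C { get; set; } }
-class C
-{
-    public string Name { get; set; }
-}
-----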
-[source, csharp]
-----
-var connectionSettings = TestClient.CreateSettings(forceInMemory: true);
-var client = new ElasticClient(connectionSettings);
-var fieldNameOnA = client.Infer.Field(Field<A>(p => p.C.Name));
-var fieldNameOnB = client.Infer.Field(Field<B>(p => p.C.Name));
-----
-Here we have two similarly shaped expressions, one coming from A and one from B,
-that will resolve to the same field name, as expected
-
-[source, csharp]
-----
-fieldNameOnA.Should().Be("c.name");
-----
-[source, csharp]
-----
-fieldNameOnB.Should().Be("c.name");
-----
-Now we create a new ConnectionSettings with a rename of C on class A to `d`;
-now when we resolve the field path for A, it will be different
-
-[source, csharp]
-----
-var newConnectionSettings = TestClient.CreateSettings(forceInMemory: true, modifySettings: s => s
-    .InferMappingFor<A>(m => m
- .Rename(p => p.C, "d")
- )
-);
-----
-[source, csharp]
-----
-var newClient = new ElasticClient(newConnectionSettings);
-fieldNameOnA = newClient.Infer.Field(Field<A>(p => p.C.Name));
-fieldNameOnB = newClient.Infer.Field(Field<B>(p => p.C.Name));
-fieldNameOnA.Should().Be("d.name");
-fieldNameOnB.Should().Be("c.name");
-----
-However, we didn't break inference on the first client instance using its separate ConnectionSettings
-
-[source, csharp]
-----
-fieldNameOnA = client.Infer.Field(Field<A>(p => p.C.Name));
-----
-[source, csharp]
-----
-fieldNameOnB = client.Infer.Field(Field<B>(p => p.C.Name));
-fieldNameOnA.Should().Be("c.name");
-fieldNameOnB.Should().Be("c.name");
-----
-To wrap up, let's showcase the order of precedence in which field names are inferred:
-1. A hard rename of the property on connection settings using Rename()
-2. A NEST property mapping
-3. Ask the serializer if the property has a verbatim value, e.g. it has an explicit JsonProperty attribute
-4. Pass the MemberInfo's Name to the DefaultFieldNameInferrer, which by default camelCases
-In the following example we have a class where each case wins
-
-
-Here we create a custom converter that renames any property named `AskSerializer` to `ask`
-
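-A minimal sketch of such a serializer, assuming it derives from the default `JsonNetSerializer` and overrides its
-`CreatePropertyMapping()` hook (the members used here are assumptions, for illustration only):
-
-[source, csharp]
-----
-public class CustomSerializer : JsonNetSerializer
-{
-    public CustomSerializer(IConnectionSettingsValues settings) : base(settings) { }
-
-    // assumed override: resolve AskSerializer to "ask", defer to the default behaviour otherwise
-    public override IPropertyMapping CreatePropertyMapping(MemberInfo memberInfo) =>
-        memberInfo.Name == nameof(Precedence.AskSerializer)
-            ? new PropertyMapping { Name = "ask" }
-            : base.CreatePropertyMapping(memberInfo);
-}
-----
-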
-[source, csharp]
-----
-var usingSettings = WithConnectionSettings(s => s
-----
-Here we provide an explicit rename of a property on ConnectionSettings
-
-[source, csharp]
-----
-.InferMappingFor(m => m
- .Rename(p => p.RenamedOnConnectionSettings, "renamed")
- )
-----
-All properties that are not mapped verbatim should be uppercased
-
-[source, csharp]
-----
-.DefaultFieldNameInferrer(p => p.ToUpperInvariant())
-).WithSerializer(s => new CustomSerializer(s));
-----
-[source, csharp]
-----
-usingSettings.Expect("renamed").ForField(Field(p => p.RenamedOnConnectionSettings));
-usingSettings.Expect("nestAtt").ForField(Field(p => p.NestAttribute));
-usingSettings.Expect("jsonProp").ForField(Field(p => p.JsonProperty));
-usingSettings.Expect("ask").ForField(Field(p => p.AskSerializer));
-usingSettings.Expect("DEFAULTFIELDNAMEINFERRER").ForField(Field(p => p.DefaultFieldNameInferrer));
-----
-The same rules apply when indexing an object
-
-[source, csharp]
-----
-usingSettings.Expect(new []
-{
- "ask",
- "DEFAULTFIELDNAMEINFERRER",
- "jsonProp",
- "nestAtt",
- "renamed"
-}).AsPropertiesOf(new Precedence
-{
- RenamedOnConnectionSettings = "renamed on connection settings",
- NestAttribute = "using a nest attribute",
- JsonProperty = "the default serializer resolves json property attributes",
- AskSerializer = "serializer fiddled with this one",
- DefaultFieldNameInferrer = "shouting much?"
-});
-----
-
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Id/IdsInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Id/IdsInference.doc.asciidoc
deleted file mode 100644
index 6a659778cb3..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Id/IdsInference.doc.asciidoc
+++ /dev/null
@@ -1,79 +0,0 @@
-# Ids
-
-Several places in the elasticsearch API expect an Id object to be passed. This is a special box type that you can implicitly convert to and from many value types.
-
-Methods that take an Id can be passed longs, ints, strings
-and Guids, and they will be implicitly converted to Ids
-
-[source, csharp]
-----
-Nest.Id idFromInt = 1;
-Nest.Id idFromLong = 2L;
-Nest.Id idFromString = "hello-world";
-Nest.Id idFromGuid = new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E");
-Expect(1).WhenSerializing(idFromInt);
-Expect(2).WhenSerializing(idFromLong);
-Expect("hello-world").WhenSerializing(idFromString);
-Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenSerializing(idFromGuid);
-----
-Sometimes a method takes an object and we need an Id from that object to build up a path.
-There is no implicit conversion from any object to Id but we can call Id.From.
-Imagine your codebase has the following type that we want to index into elasticsearch
-
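-[source, csharp]
-----
-class MyDTO
-{
-    public Guid Id { get; set; }
-    public string Name { get; set; }
-    public string OtherName { get; set; }
-}
-----
-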
-By default NEST will try to find a property called `Id` on the class using reflection
-and create a cached fast func delegate based on the property's getter
-
-[source, csharp]
-----
-var dto = new MyDTO { Id =new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), Name = "x", OtherName = "y" };
-----
-[source, csharp]
-----
-Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenInferringIdOn(dto);
-----
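-As mentioned above, `Id.From` can be used to build an Id from such an object explicitly; a small sketch, assuming
-`Id.From` accepts the document instance:
-
-[source, csharp]
-----
-Nest.Id idFromDto = Nest.Id.From(dto);
-----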
-Using the connection settings you can specify a different property NEST should look for ids.
-Here we instruct NEST to infer the Id for MyDTO based on its Name property
-
-[source, csharp]
-----
-WithConnectionSettings(x => x
-    .InferMappingFor<MyDTO>(m => m
- .IdProperty(p => p.Name)
- )
-).Expect("x").WhenInferringIdOn(dto);
-----
-Even though we have a cache at play, the cache is per connection settings, so we can create a different config
-
-[source, csharp]
-----
-WithConnectionSettings(x => x
-    .InferMappingFor<MyDTO>(m => m
- .IdProperty(p => p.OtherName)
- )
-).Expect("y").WhenInferringIdOn(dto);
-----
-Another way is to mark the type with an ElasticsearchType attribute, using a string IdProperty
-
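-[source, csharp]
-----
-[ElasticsearchType(IdProperty = nameof(Name))]
-class MyOtherDTO
-{
-    public Guid Id { get; set; }
-    public string Name { get; set; }
-    public string OtherName { get; set; }
-}
-----
-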
-Now when we infer the id we expect it to be the Name property without doing any configuration on the ConnectionSettings
-
-[source, csharp]
-----
-var dto = new MyOtherDTO { Id =new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), Name = "x", OtherName = "y" };
-----
-[source, csharp]
-----
-Expect("x").WhenInferringIdOn(dto);
-----
-This attribute IS cached statically/globally; however, connection settings with a config for the type will
-still win over this static configuration
-
-[source, csharp]
-----
-WithConnectionSettings(x => x
-    .InferMappingFor<MyOtherDTO>(m => m
- .IdProperty(p => p.OtherName)
- )
-).Expect("y").WhenInferringIdOn(dto);
-----
-Even though we have a cache at play, the cache is per connection settings, so we can create a different config
-
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IdsInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IdsInference.doc.asciidoc
deleted file mode 100644
index b5818a65d59..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IdsInference.doc.asciidoc
+++ /dev/null
@@ -1,98 +0,0 @@
-# Ids
-
-Several places in the elasticsearch API expect an Id object to be passed. This is a special box type that you can implicitly convert to and from many value types.
-
-
-Methods that take an Id can be passed longs, ints, strings & Guids, and they will be implicitly converted to Ids
-
-[source, csharp]
-----
-Id idFromInt = 1;
-Id idFromLong = 2L;
-Id idFromString = "hello-world";
-Id idFromGuid = new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E");
-Expect(1).WhenSerializing(idFromInt);
-Expect(2).WhenSerializing(idFromLong);
-Expect("hello-world").WhenSerializing(idFromString);
-Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenSerializing(idFromGuid);
-----
-Sometimes a method takes an object and we need an Id from that object to build up a path.
-There is no implicit conversion from any object to Id but we can call Id.From.
-Imagine your codebase has the following type that we want to index into elasticsearch
-
-[source, csharp]
-----
-class MyDTO
-{
- public Guid Id { get; set; }
- public string Name { get; set; }
- public string OtherName { get; set; }
-}
-----
-By default NEST will try to find a property called `Id` on the class using reflection
-and create a cached fast func delegate based on the property's getter
-
-[source, csharp]
-----
-var dto = new MyDTO { Id =new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), Name = "x", OtherName = "y" };
-----
-[source, csharp]
-----
-Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenInferringIdOn(dto);
-----
-Using the connection settings you can specify a different property NEST should look for ids.
-Here we instruct NEST to infer the Id for MyDTO based on its Name property
-
-[source, csharp]
-----
-WithConnectionSettings(x => x
-    .InferMappingFor<MyDTO>(m => m
- .IdProperty(p => p.Name)
- )
-).Expect("x").WhenInferringIdOn(dto);
-----
-Even though we have a cache at play, the cache is per connection settings, so we can create a different config
-
-[source, csharp]
-----
-WithConnectionSettings(x => x
-    .InferMappingFor<MyDTO>(m => m
- .IdProperty(p => p.OtherName)
- )
-).Expect("y").WhenInferringIdOn(dto);
-----
-Another way is to mark the type with an ElasticsearchType attribute, using a string IdProperty
-
-[source, csharp]
-----
-[ElasticsearchType(IdProperty = nameof(Name))]
-class MyOtherDTO
-{
- public Guid Id { get; set; }
- public string Name { get; set; }
- public string OtherName { get; set; }
-}
-----
-Now when we infer the id we expect it to be the Name property without doing any configuration on the ConnectionSettings
-
-[source, csharp]
-----
-var dto = new MyOtherDTO { Id =new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), Name = "x", OtherName = "y" };
-----
-[source, csharp]
-----
-Expect("x").WhenInferringIdOn(dto);
-----
-This attribute IS cached statically/globally; however, connection settings with a config for the type will
-still win over this static configuration
-
-[source, csharp]
-----
-WithConnectionSettings(x => x
-    .InferMappingFor<MyOtherDTO>(m => m
- .IdProperty(p => p.OtherName)
- )
-).Expect("y").WhenInferringIdOn(dto);
-----
-Even though we have a cache at play, the cache is per connection settings, so we can create a different config
-
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Indices/IndicesPaths.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Indices/IndicesPaths.doc.asciidoc
deleted file mode 100644
index cc902f17fde..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Indices/IndicesPaths.doc.asciidoc
+++ /dev/null
@@ -1,34 +0,0 @@
-# Indices paths
-
-Some APIs in Elasticsearch take one or more index names, or a special "_all" marker, to send the request to all the indices.
-In NEST this is encoded using `Indices`
-
-Several types implicitly convert to `Indices`
-
-[source, csharp]
-----
-Nest.Indices singleIndexFromString = "name";
-Nest.Indices multipleIndicesFromString = "name1, name2";
-Nest.Indices allFromString = "_all";
-Nest.Indices allWithOthersFromString = "_all, name2";
-singleIndexFromString.Match(
- all => all.Should().BeNull(),
- many => many.Indices.Should().HaveCount(1).And.Contain("name")
- );
-----
-To ease creating `Indices` there are also static helpers on `Nest.Indices` you can use
-
-
-
-[source, csharp]
-----
-var all = Nest.Indices.All;
-----
-[source, csharp]
-----
-var many = Nest.Indices.Index("name1", "name2");
-var manyTyped = Nest.Indices.Index().And();
-var singleTyped = Nest.Indices.Index();
-var singleString = Nest.Indices.Index("name1");
-var invalidSingleString = Nest.Indices.Index("name1, name2");
-----
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IndicesPaths.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IndicesPaths.doc.asciidoc
deleted file mode 100644
index c6c5a78f898..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IndicesPaths.doc.asciidoc
+++ /dev/null
@@ -1,47 +0,0 @@
-# Indices paths
-
-Some APIs in Elasticsearch take one or more index names, or a special "_all" marker, to send the request to all the indices.
-In NEST this is encoded using `Indices`
-
-
-Several types implicitly convert to `Indices`
-
-[source, csharp]
-----
-Nest.Indices singleIndexFromString = "name";
-Nest.Indices multipleIndicesFromString = "name1, name2";
-Nest.Indices allFromString = "_all";
-Nest.Indices allWithOthersFromString = "_all, name2";
-singleIndexFromString.Match(
- all => all.Should().BeNull(),
- many => many.Indices.Should().HaveCount(1).And.Contain("name")
- );
-multipleIndicesFromString.Match(
- all => all.Should().BeNull(),
- many => many.Indices.Should().HaveCount(2).And.Contain("name2")
- );
-allFromString.Match(
- all => all.Should().NotBeNull(),
- many => many.Indices.Should().BeNull()
- );
-allWithOthersFromString.Match(
- all => all.Should().NotBeNull(),
- many => many.Indices.Should().BeNull()
- );
-----
-To ease creating `Indices` there are also static helpers on `Nest.Indices` you can use
-
-
-
-[source, csharp]
-----
-var all = Nest.Indices.All;
-----
-[source, csharp]
-----
-var many = Nest.Indices.Index("name1", "name2");
-var manyTyped = Nest.Indices.Index().And();
-var singleTyped = Nest.Indices.Index();
-var singleString = Nest.Indices.Index("name1");
-var invalidSingleString = Nest.Indices.Index("name1, name2");
-----
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/PropertyInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/PropertyInference.doc.asciidoc
deleted file mode 100644
index d21a723cd1b..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/PropertyInference.doc.asciidoc
+++ /dev/null
@@ -1,6 +0,0 @@
-[source, csharp]
-----
-Expression<Func<Project, object>> expression = p => p.Name.Suffix("raw");
-Expect("raw").WhenSerializing(expression);
-Assert.Throws(() => Expect("exception!").WhenSerializing("name.raw"));
-----
diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Mapping/AutoMap.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Mapping/AutoMap.doc.asciidoc
deleted file mode 100644
index 9db88699059..00000000000
--- a/docs/asciidoc/ClientConcepts/HighLevel/Mapping/AutoMap.doc.asciidoc
+++ /dev/null
@@ -1,904 +0,0 @@
-# Auto mapping properties
-
-When creating a mapping (either when creating an index or via the put mapping API),
-NEST offers a feature called AutoMap(), which will automagically infer the correct
-Elasticsearch datatypes of the POCO properties you are mapping. Alternatively, if
-you're using attributes to map your properties, then calling AutoMap() is required
-in order for your attributes to be applied. We'll look at examples of both.
-
-
-
-For these examples, we'll define two POCOs: a Company, which has a name
-and a collection of Employees, and an Employee, which has various properties of
-different types and itself has a collection of Employees.
-
-[source, csharp]
-----
-public class Company
-{
- public string Name { get; set; }
-    public List<Employee> Employees { get; set; }
-}
-----
-[source, csharp]
-----
-public class Employee
-{
- public string FirstName { get; set; }
- public string LastName { get; set; }
- public int Salary { get; set; }
- public DateTime Birthday { get; set; }
- public bool IsManager { get; set; }
-    public List<Employee> Employees { get; set; }
- public TimeSpan Hours { get; set;}
-}
-----
-## Manual mapping
-To create a mapping for our Company type, we can use the fluent API
-and map each property explicitly
-
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<Company>(m => m
- .Properties(ps => ps
- .String(s => s
- .Name(c => c.Name)
- )
-                .Object<Employee>(o => o
- .Name(c => c.Employees)
- .Properties(eps => eps
- .String(s => s
- .Name(e => e.FirstName)
- )
- .String(s => s
- .Name(e => e.LastName)
- )
- .Number(n => n
- .Name(e => e.Salary)
- .Type(NumberType.Integer)
- )
- )
- )
- )
- )
- );
-----
-Which is all fine and dandy, and useful for some use cases. However, in most cases
-this becomes too cumbersome an approach, and you simply want to map *all*
-the properties of your POCO in a single go.
-
-[source, csharp]
-----
-var expected = new
-{
- mappings = new
- {
- company = new
- {
- properties = new
- {
- name = new
- {
- type = "string"
- },
- employees = new
- {
- type = "object",
- properties = new
- {
- firstName = new
- {
- type = "string"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- type = "integer"
- }
- }
- }
- }
- }
- }
-};
-----
-[source, csharp]
-----
-Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor);
-----
-## Simple Automapping
-This is exactly where `AutoMap()` becomes useful. Instead of manually mapping each property explicitly,
-we can instead call `.AutoMap()` for each of our mappings and let NEST do all the work
-
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<Company>(m => m.AutoMap())
-        .Map<Employee>(m => m.AutoMap())
- );
-----
-Observe that NEST has inferred the Elasticsearch types based on the CLR type of our POCO properties.
-In this example,
-- Birthday was mapped as a date,
-- Hours was mapped as a long (ticks)
-- IsManager was mapped as a boolean,
-- Salary as an integer
-- Employees as an object
-and the remaining string properties as strings.
-
-[source, csharp]
-----
-var expected = new
-{
- mappings = new
- {
- company = new
- {
- properties = new
- {
- employees = new
- {
- properties = new
- {
- birthday = new
- {
- type = "date"
- },
- employees = new
- {
- properties = new { },
- type = "object"
- },
- firstName = new
- {
- type = "string"
- },
- hours = new
- {
- type = "long"
- },
- isManager = new
- {
- type = "boolean"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- type = "integer"
- }
- },
- type = "object"
- },
- name = new
- {
- type = "string"
- }
- }
- },
- employee = new
- {
- properties = new
- {
- birthday = new
- {
- type = "date"
- },
- employees = new
- {
- properties = new { },
- type = "object"
- },
- firstName = new
- {
- type = "string"
- },
- hours = new
- {
- type = "long"
- },
- isManager = new
- {
- type = "boolean"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- type = "integer"
- }
- }
- }
- }
-};
-----
-[source, csharp]
-----
-Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor);
-----
-## Automapping with overrides
-In most cases, you'll want to map more than just the vanilla datatypes and also provide
-various options on your properties (analyzer, doc_values, etc...). In that case, it's
-possible to use AutoMap() in conjunction with explicitly mapped properties.
-
-
-Here we are using AutoMap() to automatically map our company type, but then we're
-overriding our employee property and making it a `nested` type, since by default,
-AutoMap() will infer objects as `object`.
-
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<Company>(m => m
- .AutoMap()
- .Properties(ps => ps
-                .Nested<Employee>(n => n
- .Name(c => c.Employees)
- .Properties(eps => eps
- // snip
- )
- )
- )
- )
- );
-----
-[source, csharp]
-----
-var expected = new
- {
- mappings = new
- {
- company = new
- {
- properties = new
- {
- name = new
- {
- type = "string"
- },
- employees = new
- {
- type = "nested",
- properties = new {}
- }
- }
- }
- }
- };
-Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor);
-----
-## Automap with attributes
-It is also possible to define your mappings using attributes on your POCOs. When you
-use attributes, you MUST use AutoMap() in order for the attributes to be applied.
-Here we define the same two types but this time using attributes.
-
-[source, csharp]
-----
-[ElasticsearchType(Name = "company")]
-public class CompanyWithAttributes
-{
- [String(Analyzer = "keyword", NullValue = "null", Similarity = SimilarityOption.BM25)]
- public string Name { get; set; }
-
- [String]
- public TimeSpan? HeadOfficeHours { get; set; }
-
- [Object(Path = "employees", Store = false)]
-    public List<Employee> Employees { get; set; }
-}
-----
-[source, csharp]
-----
-[ElasticsearchType(Name = "employee")]
-public class EmployeeWithAttributes
-{
- [String]
- public string FirstName { get; set; }
-
- [String]
- public string LastName { get; set; }
-
- [Number(DocValues = false, IgnoreMalformed = true, Coerce = true)]
- public int Salary { get; set; }
-
- [Date(Format = "MMddyyyy", NumericResolution = NumericResolutionUnit.Seconds)]
- public DateTime Birthday { get; set; }
-
- [Boolean(NullValue = false, Store = true)]
- public bool IsManager { get; set; }
-
- [Nested(Path = "employees")]
- [JsonProperty("empl")]
-    public List<Employee> Employees { get; set; }
-}
-----
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<CompanyWithAttributes>(m => m.AutoMap())
-        .Map<EmployeeWithAttributes>(m => m.AutoMap())
- );
-var expected = new
- {
- mappings = new
- {
- company = new
- {
- properties = new
- {
- employees = new
- {
- path = "employees",
- properties = new
- {
- birthday = new
- {
- type = "date"
- },
- employees = new
- {
- properties = new { },
- type = "object"
- },
- firstName = new
- {
- type = "string"
- },
- hours = new
- {
- type = "long"
- },
- isManager = new
- {
- type = "boolean"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- type = "integer"
- }
- },
- store = false,
- type = "object"
- },
- name = new
- {
- analyzer = "keyword",
- null_value = "null",
- similarity = "BM25",
- type = "string"
- },
- headOfficeHours = new
- {
- type = "string"
- }
- }
- },
- employee = new
- {
- properties = new
- {
- birthday = new
- {
- format = "MMddyyyy",
- numeric_resolution = "seconds",
- type = "date"
- },
- empl = new
- {
- path = "employees",
- properties = new
- {
- birthday = new
- {
- type = "date"
- },
- employees = new
- {
- properties = new { },
- type = "object"
- },
- firstName = new
- {
- type = "string"
- },
- hours = new
- {
- type = "long"
- },
- isManager = new
- {
- type = "boolean"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- type = "integer"
- }
- },
- type = "nested"
- },
- firstName = new
- {
- type = "string"
- },
- isManager = new
- {
- null_value = false,
- store = true,
- type = "boolean"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- coerce = true,
- doc_values = false,
- ignore_malformed = true,
- type = "double"
- }
- }
- }
- }
- };
-Expect(expected).WhenSerializing(descriptor as ICreateIndexRequest);
-----
-
-Just as we were able to override the inferred properties in our earlier example, explicit (manual)
-mappings also take precedence over attributes. Therefore we can also override any mappings applied
-via any attributes defined on the POCO
-
-
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<CompanyWithAttributes>(m => m
- .AutoMap()
- .Properties(ps => ps
-                .Nested<Employee>(n => n
- .Name(c => c.Employees)
- )
- )
- )
-        .Map<EmployeeWithAttributes>(m => m
- .AutoMap()
- .TtlField(ttl => ttl
- .Enable()
- .Default("10m")
- )
- .Properties(ps => ps
- .String(s => s
- .Name(e => e.FirstName)
- .Fields(fs => fs
- .String(ss => ss
- .Name("firstNameRaw")
- .Index(FieldIndexOption.NotAnalyzed)
- )
- .TokenCount(t => t
- .Name("length")
- .Analyzer("standard")
- )
- )
- )
- .Number(n => n
- .Name(e => e.Salary)
- .Type(NumberType.Double)
- .IgnoreMalformed(false)
- )
- .Date(d => d
- .Name(e => e.Birthday)
- .Format("MM-dd-yy")
- )
- )
- )
- );
-var expected = new
- {
- mappings = new
- {
- company = new
- {
- properties = new
- {
- employees = new
- {
- type = "nested"
- },
- name = new
- {
- analyzer = "keyword",
- null_value = "null",
- similarity = "BM25",
- type = "string"
- },
- headOfficeHours = new
- {
- type = "string"
- }
- }
- },
- employee = new
- {
- _ttl = new
- {
- enabled = true,
- @default = "10m"
- },
- properties = new
- {
- birthday = new
- {
- format = "MM-dd-yy",
- type = "date"
- },
- empl = new
- {
- path = "employees",
- properties = new
- {
- birthday = new
- {
- type = "date"
- },
- employees = new
- {
- properties = new { },
- type = "object"
- },
- firstName = new
- {
- type = "string"
- },
- hours = new
- {
- type = "long"
- },
- isManager = new
- {
- type = "boolean"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- type = "integer"
- }
- },
- type = "nested"
- },
- firstName = new
- {
- fields = new
- {
- firstNameRaw = new
- {
- index = "not_analyzed",
- type = "string"
- },
- length = new
- {
- type = "token_count",
- analyzer = "standard"
- }
- },
- type = "string"
- },
- isManager = new
- {
- null_value = false,
- store = true,
- type = "boolean"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- ignore_malformed = false,
- type = "double"
- }
- }
- }
- }
- };
-Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor);
-----
-[source, csharp]
-----
-[ElasticsearchType(Name = "company")]
-public class CompanyWithAttributesAndPropertiesToIgnore
-{
- public string Name { get; set; }
-
- [String(Ignore = true)]
- public string PropertyToIgnore { get; set; }
-
- public string AnotherPropertyToIgnore { get; set; }
-
- [JsonIgnore]
- public string JsonIgnoredProperty { get; set; }
-}
-----
-== Ignoring Properties
-Properties on a POCO can be ignored in a few ways:
-
-
-
-- Using the `Ignore` property on a derived `ElasticsearchPropertyAttribute` type applied to the property that should be ignored on the POCO
-
-
-
-- Using the `.InferMappingFor<TDocument>(Func<ClrTypeMappingDescriptor<TDocument>, IClrTypeMapping<TDocument>> selector)` on the connection settings
-
-
-
-- Using an ignore attribute applied to the POCO property that is understood by the `IElasticsearchSerializer` used and inspected inside of `CreatePropertyMapping()` on the serializer. In the case of the default `JsonNetSerializer`, this is the Json.NET `JsonIgnoreAttribute`
-
-
-
-This example demonstrates all ways, using the attribute way to ignore the property `PropertyToIgnore`, the infer mapping way to ignore the
-property `AnotherPropertyToIgnore` and the json serializer specific attribute way to ignore the property `JsonIgnoredProperty`
-
-
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
- .Map(m => m
- .AutoMap()
- )
- );
-var expected = new
- {
- mappings = new
- {
- company = new
- {
- properties = new
- {
- name = new
- {
- type = "string"
- }
- }
- }
- }
- };
-var settings = WithConnectionSettings(s => s
-    .InferMappingFor<CompanyWithAttributesAndPropertiesToIgnore>(i => i
- .Ignore(p => p.AnotherPropertyToIgnore)
- )
- );
-settings.Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor);
-----
-If you notice in our previous Company/Employee examples, the Employee type is recursive
-in that itself contains a collection of type `Employee`. By default, `.AutoMap()` will only
-traverse a single depth when it encounters recursive instances like this. Hence, in the
-previous examples, the second level of Employee did not get any of its properties mapped.
-This is done as a safe-guard to prevent stack overflows and all the fun that comes with
-infinite recursion. Additionally, in most cases, when it comes to Elasticsearch mappings, it is
-often an edge case to have deeply nested mappings like this. However, you may still have
-the need to do this, so you can control the recursion depth of AutoMap().
-Let's introduce a very simple class A, to reduce the noise, which itself has a property
-Child of type A.
-
-[source, csharp]
-----
-public class A
-{
- public A Child { get; set; }
-}
-----
-By default, AutoMap() only goes as far as depth 1
-
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<A>(m => m.AutoMap())
- );
-----
-Thus we do not map properties on the second occurrence of our Child property
-
-[source, csharp]
-----
-var expected = new
-{
- mappings = new
- {
- a = new
- {
- properties = new
- {
- child = new
- {
- properties = new { },
- type = "object"
- }
- }
- }
- }
-};
-----
-[source, csharp]
-----
-Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor);
-----
-Now let's specify a maxRecursion of 3
-
-[source, csharp]
-----
-var withMaxRecursionDescriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<A>(m => m.AutoMap(3))
- );
-----
-AutoMap() has now mapped three levels of our Child property
-
-[source, csharp]
-----
-var expectedWithMaxRecursion = new
-{
- mappings = new
- {
- a = new
- {
- properties = new
- {
- child = new
- {
- type = "object",
- properties = new
- {
- child = new
- {
- type = "object",
- properties = new
- {
- child = new
- {
- type = "object",
- properties = new
- {
- child = new
- {
- type = "object",
- properties = new { }
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
-};
-----
-[source, csharp]
-----
-Expect(expectedWithMaxRecursion).WhenSerializing((ICreateIndexRequest) withMaxRecursionDescriptor);
-----
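-Before passing a custom visitor to AutoMap(), we need one. A minimal sketch of `DisableDocValuesPropertyVisitor`,
-assuming it derives from NEST's `NoopPropertyVisitor` and overrides the `Visit()` overloads for number and boolean
-properties (the exact signatures here are assumptions, for illustration only):
-
-[source, csharp]
-----
-public class DisableDocValuesPropertyVisitor : NoopPropertyVisitor
-{
-    // assumed overload: disable doc_values on every number property
-    public override void Visit(INumberProperty type, PropertyInfo propertyInfo, ElasticsearchPropertyAttributeBase attribute)
-    {
-        type.DocValues = false;
-    }
-
-    // assumed overload: disable doc_values on every boolean property
-    public override void Visit(IBooleanProperty type, PropertyInfo propertyInfo, ElasticsearchPropertyAttributeBase attribute)
-    {
-        type.DocValues = false;
-    }
-}
-----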
-Now we can pass an instance of our custom visitor to AutoMap()
-
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<Employee>(m => m.AutoMap(new DisableDocValuesPropertyVisitor()))
- );
-----
-and anytime it maps a property as a number (INumberProperty) or boolean (IBooleanProperty)
-it will apply the transformation defined in each Visit() respectively, which in this example
-disables doc values.
-
-[source, csharp]
-----
-var expected = new
-{
- mappings = new
- {
- employee = new
- {
- properties = new
- {
- birthday = new
- {
- type = "date"
- },
- employees = new
- {
- properties = new { },
- type = "object"
- },
- firstName = new
- {
- type = "string"
- },
- isManager = new
- {
- doc_values = false,
- type = "boolean"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- doc_values = false,
- type = "integer"
- }
- }
- }
- }
-};
-----
-[source, csharp]
-----
-var descriptor = new CreateIndexDescriptor("myindex")
- .Mappings(ms => ms
-        .Map<Employee>(m => m.AutoMap(new EverythingIsAStringPropertyVisitor()))
- );
-var expected = new
- {
- mappings = new
- {
- employee = new
- {
- properties = new
- {
- birthday = new
- {
- type = "string"
- },
- employees = new
- {
- type = "string"
- },
- firstName = new
- {
- type = "string"
- },
- isManager = new
- {
- type = "string"
- },
- lastName = new
- {
- type = "string"
- },
- salary = new
- {
- type = "string"
- }
- }
- }
- }
- };
-----
diff --git a/docs/asciidoc/ClientConcepts/LowLevel/Connecting.doc.asciidoc b/docs/asciidoc/ClientConcepts/LowLevel/Connecting.doc.asciidoc
deleted file mode 100644
index 41ac4fd0d1b..00000000000
--- a/docs/asciidoc/ClientConcepts/LowLevel/Connecting.doc.asciidoc
+++ /dev/null
@@ -1,291 +0,0 @@
-# Connecting
-Connecting to *Elasticsearch* with `Elasticsearch.Net` is quite easy but has a few toggles and options worth knowing.
-
-# Choosing the right connection strategy
-If you simply new an `ElasticLowLevelClient`, it will be a non-failover connection to `http://localhost:9200`
-
-
-[source, csharp]
-----
-var client = new ElasticLowLevelClient();
-var tokenizers = new TokenizersDescriptor();
-----
-
-If your Elasticsearch node does not live at `http://localhost:9200` but at e.g. `http://mynode.example.com:8082/apiKey`, then
-you will need to pass in some instance of `IConnectionConfigurationValues`.
-
-The easiest way to do this is:
-
-
-[source, csharp]
-----
-var node = new Uri("http://mynode.example.com:8082/apiKey");
-var config = new ConnectionConfiguration(node);
-var client = new ElasticLowLevelClient(config);
-----
-
-This however is still a non-failover connection. Meaning if that `node` goes down the operation will not be retried on any other nodes in the cluster.
-
-To get a failover connection we have to pass an `IConnectionPool` instance instead of a `Uri`.
-
-
-[source, csharp]
-----
-var node = new Uri("http://mynode.example.com:8082/apiKey");
-var connectionPool = new SniffingConnectionPool(new[] { node });
-var config = new ConnectionConfiguration(connectionPool);
-var client = new ElasticLowLevelClient(config);
-----
-
-Here instead of directly passing `node`, we pass a `SniffingConnectionPool` which will use our `node` to find out the rest of the available cluster nodes.
-Be sure to read more about [Connection Pooling and Cluster Failover here](/elasticsearch-net/cluster-failover.html)
-
-## Options
-
-Besides either passing a `Uri` or `IConnectionPool` to `ConnectionConfiguration`, you can also fluently control many more options. For instance:
-
-
-[source, csharp]
-----
-var node = new Uri("http://mynode.example.com:8082/apiKey");
-var connectionPool = new SniffingConnectionPool(new[] { node });
-var config = new ConnectionConfiguration(connectionPool)
- .DisableDirectStreaming()
- .BasicAuthentication("user", "pass")
- .RequestTimeout(TimeSpan.FromSeconds(5));
-----
-
-The following is a list of available connection configuration options:
-
-
-[source, csharp]
-----
-var client = new ElasticLowLevelClient();
-----
-[source, csharp]
-----
-var config = new ConnectionConfiguration()
-
- .DisableAutomaticProxyDetection()
-----
-Disable automatic proxy detection. Defaults to true.
-
-[source, csharp]
-----
-.EnableHttpCompression()
-----
-Enable compressed requests and responses from Elasticsearch (note that nodes need to be configured
-to allow this. See the [http module settings](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-http.html) for more info).
-
-[source, csharp]
-----
-.DisableDirectStreaming()
-----
-By default responses are deserialized off stream to the object you tell it to.
-For debugging purposes it can be very useful to keep a copy of the raw response on the result object.
-
-[source, csharp]
-----
-var result = client.Search>(new { size = 12 });
-var raw = result.ResponseBodyInBytes;
-----
-This will only have a value if the client configuration has ExposeRawResponse set
-
-[source, csharp]
-----
-var stringResult = client.Search<string>(new { });
-----
-
-Please note that this only makes sense if you need a mapped response and the raw response at the same time.
-If you need a `string` or `byte[]` response, simply call:
-
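-For example (assuming `string` and `byte[]` are valid response type parameters on the low-level client):
-
-[source, csharp]
-----
-var stringResponse = client.Search<string>(new { size = 12 });
-var bytesResponse = client.Search<byte[]>(new { size = 12 });
-----
-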
-[source, csharp]
-----
-config = config
- //endhide
- .GlobalQueryStringParameters(new NameValueCollection())
-----
-Allows you to set querystring parameters that have to be added to every request. For instance, if you use a hosted elasticsearch provider and you need to pass an `apiKey` parameter on every request.
-
-[source, csharp]
-----
-.Proxy(new Uri("http://myproxy"), "username", "pass")
-----
-Sets proxy information on the connection.
-
-[source, csharp]
-----
-.RequestTimeout(TimeSpan.FromSeconds(4))
-----
-Sets the global maximum time a connection may take.
-Please note that this is the request timeout; the built-in .NET `WebRequest` has no way to set connection timeouts
-(see http://msdn.microsoft.com/en-us/library/system.net.httpwebrequest.timeout(v=vs.110).aspx).
-
-[source, csharp]
-----
-.ThrowExceptions()
-----
-As an alternative to the C/Go-like error checking on `response.IsValid`, you can instead tell the client to throw
-exceptions.
-There are three categories of exceptions that may be thrown:
-
-1) ElasticsearchClientException: These are known exceptions, either an exception that occurred in the request pipeline
-(such as max retries or timeout reached, bad authentication, etc...) or Elasticsearch itself returned an error (could
-not parse the request, bad query, missing field, etc...). If it is an Elasticsearch error, the `ServerError` property
-on the response will contain the actual error that was returned. The inner exception will always contain the
-root causing exception.
-
-2) UnexpectedElasticsearchClientException: These are unknown exceptions, for instance a response from Elasticsearch not
-properly deserialized. These are usually bugs and should be reported. This exception also inherits from ElasticsearchClientException
-so an additional catch block isn't necessary, but can be helpful in distinguishing between the two.
-3) Development time exceptions: These are CLR exceptions like ArgumentException, ArgumentNullException etc... that are thrown
-when an API in the client is misused. These should not be handled as you want to know about them during development.
-
-[source, csharp]
-----
-.PrettyJson()
-----
-Forces all serialization to be indented and appends `pretty=true` to all the requests so that the responses are indented as well
-
-[source, csharp]
-----
-.BasicAuthentication("username", "password")
-----
-Sets the HTTP basic authentication credentials to specify with all requests.
-
-**Note:** This can alternatively be specified on the node URI directly:
-
-[source, csharp]
-----
-var uri = new Uri("http://username:password@localhost:9200");
-----
-[source, csharp]
-----
-var settings = new ConnectionConfiguration(uri);
-----
-
-...but may become tedious when using connection pooling with multiple nodes.
-
-
-
-You can pass a callback of type `Action<IApiCallDetails>` that can eavesdrop every time a response (good or bad) is created.
-If you have complex logging needs this is a good place to add that in.
-
-
-[source, csharp]
-----
-var counter = 0;
-var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200"));
-var settings = new ConnectionSettings(connectionPool, new InMemoryConnection())
- .OnRequestCompleted(r => counter++);
-var client = new ElasticClient(settings);
-client.RootNodeInfo();
-counter.Should().Be(1);
-client.RootNodeInfoAsync();
-counter.Should().Be(2);
-----
-
-An example of using `OnRequestCompleted()` for complex logging. Remember, if you would also like
-to capture the request and/or response bytes, you also need to set `.DisableDirectStreaming()`
-to `true`
-
-
-[source, csharp]
-----
-var list = new List<string>();
-var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200"));
-var settings = new ConnectionSettings(connectionPool, new InMemoryConnection())
- .DisableDirectStreaming()
- .OnRequestCompleted(response =>
- {
- // log out the request
- if (response.RequestBodyInBytes != null)
- {
- list.Add(
- $"{response.HttpMethod} {response.Uri} \n" +
- $"{Encoding.UTF8.GetString(response.RequestBodyInBytes)}");
- }
- else
- {
- list.Add($"{response.HttpMethod} {response.Uri}");
- }
-
- // log out the response
- if (response.ResponseBodyInBytes != null)
- {
- list.Add($"Status: {response.HttpStatusCode}\n" +
- $"{Encoding.UTF8.GetString(response.ResponseBodyInBytes)}\n" +
- $"{new string('-', 30)}\n");
- }
- else
- {
- list.Add($"Status: {response.HttpStatusCode}\n" +
- $"{new string('-', 30)}\n");
- }
- });
-var client = new ElasticClient(settings);
-var syncResponse = client.Search