From c8d763278290e6468299d68bdca8deac0f7725bf Mon Sep 17 00:00:00 2001 From: Laura Trotta Date: Mon, 31 Mar 2025 17:47:00 +0200 Subject: [PATCH] Low level client update (#974) * new transport * fix multibufferentity * client builder refactor * temp * temp * temp * original tests * new tests * fixing tests * flatten low level client folder structure * Move things around, remove duplicated tests * naming refactor * Add ElasticsearchTestClient that randomly chooses an implementation * regen from latest spec * test fixes * esql test config fix * porting low level client unit tests * low level client tests to junit 5 * Randomize client implementation, remove cloud-id on Rest5, add tests for overloaded methods * Refine client builder * todos and timeouts fix * checkstyle fixes * remove maven local + remove legacy test * Add insecure SSLContext * temp rebase fix * Reduce public API surface * Extract HttpRequest.Node (future-proof this class for later use) * Remove Node.boundHosts: it's mostly useless in the container era * Port RestClient's sniffer to Rest5Client * Move Node back to its original location. TransportHttpClient is self-contained * Refactor client builder to transport configuration * Allow extensions of ElasticsearchTransportConfig * Remove calls to AccessController which is deprecated for removal * Fix licenseReport * Revert "temp rebase fix" This reverts commit 48f2b57898f4a7d7709dcb7a3f0496c0ec43c769. * [codegen] update to latest spec and generator * fixing tests * regen --------- Co-authored-by: Sylvain Wallez --- .../rest_client/RestTransportClientTest.java | 7 +- java-client/build.gradle.kts | 43 +- .../ElasticsearchAsyncClient.java | 18 + .../elasticsearch/ElasticsearchClient.java | 17 + .../transport/DefaultTransportOptions.java | 6 + .../transport/ElasticsearchTransportBase.java | 30 +- .../ElasticsearchTransportConfig.java | 335 ++++++ .../elastic/clients/transport/Transport.java | 21 + .../clients/transport/TransportOptions.java | 2 + .../clients/transport/TransportUtils.java | 37 + .../transport/http/TransportHttpClient.java | 179 ++-- .../rest5_client/MultiBufferEntity.java | 126 +++ .../rest5_client/Rest5ClientHttpClient.java | 285 +++++ .../rest5_client/Rest5ClientOptions.java | 267 +++++ .../rest5_client/Rest5ClientTransport.java | 110 ++ .../low_level/BasicAsyncEntityProducer.java | 147 +++ .../low_level/BasicAsyncResponseConsumer.java | 60 ++ .../low_level/BufferedByteConsumer.java | 76 ++ .../rest5_client/low_level/Cancellable.java | 105 ++ .../rest5_client/low_level/Constants.java | 26 + .../rest5_client/low_level/DeadHostState.java | 112 ++ .../low_level/HasAttributeNodeSelector.java | 74 ++ .../HttpAsyncResponseConsumerFactory.java | 66 ++ .../low_level/HttpDeleteWithEntity.java | 41 + .../low_level/HttpGetWithEntity.java | 41 + .../low_level/LanguageRuntimeVersions.java | 137 +++ .../rest5_client/low_level/Node.java | 277 +++++ .../rest5_client/low_level/NodeSelector.java | 91 ++ .../PreferHasAttributeNodeSelector.java | 103 ++ .../rest5_client/low_level/Request.java | 188 ++++ .../rest5_client/low_level/RequestLogger.java | 180 ++++ .../low_level/RequestOptions.java | 337 ++++++ .../rest5_client/low_level/Response.java | 208 ++++ .../low_level/ResponseException.java | 78 ++ .../low_level/ResponseListener.java | 44 + .../rest5_client/low_level/Rest5Client.java | 973 ++++++++++++++++++ .../low_level/Rest5ClientBuilder.java | 383 +++++++ .../low_level/WarningFailureException.java | 58 ++ .../low_level/WarningsHandler.java | 53 + 
.../sniffer/ElasticsearchNodesSniffer.java | 313 ++++++ .../low_level/sniffer/NodesSniffer.java | 34 + .../sniffer/SniffOnFailureListener.java | 63 ++ .../low_level/sniffer/Sniffer.java | 310 ++++++ .../low_level/sniffer/SnifferBuilder.java | 91 ++ .../rest_client/RestClientHttpClient.java | 4 +- .../rest_client/RestClientOptions.java | 7 +- .../rest_client/RestClientTransport.java | 63 ++ .../documentation/DocTestsTransport.java | 6 +- .../getting_started/ConnectingTest.java | 92 +- .../getting_started/MigrateHlrcTest.java | 3 + .../ElasticsearchTestClient.java | 76 ++ .../ElasticsearchTestServer.java | 45 +- .../esql/EsqlAdapterEndToEndTest.java | 59 +- .../elasticsearch/experiments/api/Bar.java | 4 +- .../experiments/api/query2/BoolQuery.java | 20 +- .../experiments/api/query2/TermsQuery.java | 2 +- .../experiments/containers/SomeUnion.java | 8 +- .../experiments/containers/UVariantA.java | 6 +- .../experiments/containers/UVariantB.java | 6 +- .../inheritance/final_/FinalClass.java | 2 +- .../elasticsearch/model/OverloadsTest.java | 78 ++ .../spec_issues/SpecIssuesTest.java | 2 +- .../clients/testkit/MockHttpClient.java | 6 +- .../ElasticsearchTransportConfigTest.java | 135 +++ .../clients/transport/RequestOptionsTest.java | 51 +- .../clients/transport/TransportTest.java | 73 +- .../clients/transport/TransportUtilsTest.java | 32 +- .../endpoints/BinaryEndpointTest.java | 12 +- .../OpenTelemetryForElasticsearchTest.java | 27 +- .../rest5_client/MultiBufferEntityTest.java | 99 ++ .../rest5_client/RequestOptionsTest.java | 163 +++ .../rest5_client/Rest5ClientOptionsTest.java | 202 ++++ .../transport/rest5_client/TransportTest.java | 157 +++ .../rest5_client/TransportUtilsTest.java | 120 +++ .../BasicAsyncResponseConsumerTests.java | 77 ++ .../low_level/DeadHostStateTests.java | 141 +++ .../FailureTrackingResponseListenerTests.java | 111 ++ .../HasAttributeNodeSelectorTests.java | 77 ++ .../HostsTrackingFailureListener.java | 61 ++ .../low_level/NodeSelectorTests.java | 131 +++ .../rest5_client/low_level/NodeTests.java | 178 ++++ .../PreferHasAttributeNodeSelectorTests.java | 88 ++ .../low_level/RequestLoggerTests.java | 205 ++++ .../low_level/RequestOptionsTests.java | 217 ++++ .../rest5_client/low_level/RequestTests.java | 253 +++++ .../low_level/ResponseExceptionTests.java | 94 ++ .../RestClientBuilderIntegTests.java | 227 ++++ .../low_level/RestClientBuilderTests.java | 181 ++++ .../RestClientGzipCompressionTests.java | 257 +++++ .../RestClientMultipleHostsIntegTests.java | 393 +++++++ .../RestClientMultipleHostsTests.java | 338 ++++++ .../RestClientSingleHostIntegTests.java | 453 ++++++++ .../low_level/RestClientSingleHostTests.java | 692 +++++++++++++ .../low_level/RestClientTestCase.java | 174 ++++ .../low_level/RestClientTestUtil.java | 117 +++ .../low_level/RestClientTests.java | 458 +++++++++ .../RestClientDocumentation.java | 461 +++++++++ .../ElasticsearchNodesSnifferParseTests.java | 188 ++++ .../ElasticsearchNodesSnifferTests.java | 378 +++++++ .../low_level/sniffer/MockNodesSniffer.java | 36 + .../sniffer/SniffOnFailureListenerTests.java | 64 ++ .../sniffer/SnifferBuilderTests.java | 89 ++ .../low_level/sniffer/SnifferTests.java | 662 ++++++++++++ .../documentation/SnifferDocumentation.java | 135 +++ .../low_level/sniffer/2.0.0_nodes_http.json | 201 ++++ .../low_level/sniffer/5.0.0_nodes_http.json | 217 ++++ .../low_level/sniffer/6.0.0_nodes_http.json | 217 ++++ .../low_level/sniffer/7.3.0_nodes_http.json | 218 ++++ .../sniffer/create_test_nodes_info.bash | 107 ++ 
.../es6_nodes_publication_address_format.json | 30 + .../es7_nodes_publication_address_format.json | 30 + .../rest5_client/low_level/sniffer/readme.txt | 6 + .../transport/rest5_client/low_level/test.crt | 24 + .../transport/rest5_client/low_level/test.der | Bin 0 -> 1218 bytes .../low_level/test_truststore.jks | Bin 0 -> 1097 bytes .../rest5_client/low_level/testks.jks | Bin 0 -> 2381 bytes 116 files changed, 15323 insertions(+), 375 deletions(-) create mode 100644 java-client/src/main/java/co/elastic/clients/transport/ElasticsearchTransportConfig.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/MultiBufferEntity.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientHttpClient.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientOptions.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientTransport.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncEntityProducer.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncResponseConsumer.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BufferedByteConsumer.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Cancellable.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Constants.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/DeadHostState.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HasAttributeNodeSelector.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpAsyncResponseConsumerFactory.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpDeleteWithEntity.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpGetWithEntity.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/LanguageRuntimeVersions.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Node.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/NodeSelector.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/PreferHasAttributeNodeSelector.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Request.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/RequestLogger.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/RequestOptions.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Response.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/ResponseException.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/ResponseListener.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5Client.java create mode 100644 
java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5ClientBuilder.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/WarningFailureException.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/WarningsHandler.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSniffer.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/NodesSniffer.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SniffOnFailureListener.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/Sniffer.java create mode 100644 java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferBuilder.java create mode 100644 java-client/src/test/java/co/elastic/clients/elasticsearch/ElasticsearchTestClient.java create mode 100644 java-client/src/test/java/co/elastic/clients/elasticsearch/model/OverloadsTest.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/ElasticsearchTransportConfigTest.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/MultiBufferEntityTest.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/RequestOptionsTest.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/Rest5ClientOptionsTest.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/TransportTest.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/TransportUtilsTest.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncResponseConsumerTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/DeadHostStateTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/FailureTrackingResponseListenerTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/HasAttributeNodeSelectorTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/HostsTrackingFailureListener.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/NodeSelectorTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/NodeTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/PreferHasAttributeNodeSelectorTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestLoggerTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestOptionsTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/ResponseExceptionTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientBuilderIntegTests.java create mode 100644 
java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientBuilderTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientGzipCompressionTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientMultipleHostsIntegTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientMultipleHostsTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientSingleHostIntegTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientSingleHostTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTestCase.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTestUtil.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/documentation/RestClientDocumentation.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferParseTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/MockNodesSniffer.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SniffOnFailureListenerTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferBuilderTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferTests.java create mode 100644 java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/documentation/SnifferDocumentation.java create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/2.0.0_nodes_http.json create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/5.0.0_nodes_http.json create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/6.0.0_nodes_http.json create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/7.3.0_nodes_http.json create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/create_test_nodes_info.bash create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/es6_nodes_publication_address_format.json create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/es7_nodes_publication_address_format.json create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/readme.txt create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/test.crt create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/test.der create mode 100644 
java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/test_truststore.jks create mode 100644 java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/testks.jks diff --git a/example-transports/src/test/java/co/elastic/clients/transport/rest_client/RestTransportClientTest.java b/example-transports/src/test/java/co/elastic/clients/transport/rest_client/RestTransportClientTest.java index fe0866652..446965cbe 100644 --- a/example-transports/src/test/java/co/elastic/clients/transport/rest_client/RestTransportClientTest.java +++ b/example-transports/src/test/java/co/elastic/clients/transport/rest_client/RestTransportClientTest.java @@ -20,20 +20,21 @@ package co.elastic.clients.transport.rest_client; import co.elastic.clients.transport.TransportHttpClientTest; +import co.elastic.clients.transport.rest5_client.Rest5ClientHttpClient; import org.apache.http.HttpHost; import org.elasticsearch.client.RestClient; -public class RestTransportClientTest extends TransportHttpClientTest { +public class RestTransportClientTest extends TransportHttpClientTest { public RestTransportClientTest() { super(createClient()); } - private static RestClientHttpClient createClient() { + private static Rest5ClientHttpClient createClient() { RestClient restClient = RestClient.builder( new HttpHost(server.getAddress().getAddress(), server.getAddress().getPort(), "http") ).build(); - return new RestClientHttpClient(restClient); + return new Rest5ClientHttpClient(restClient); } } diff --git a/java-client/build.gradle.kts b/java-client/build.gradle.kts index cfcdb05c4..00eea5d26 100644 --- a/java-client/build.gradle.kts +++ b/java-client/build.gradle.kts @@ -38,8 +38,8 @@ checkstyle { } java { - targetCompatibility = JavaVersion.VERSION_1_8 - sourceCompatibility = JavaVersion.VERSION_1_8 + targetCompatibility = JavaVersion.VERSION_17 + sourceCompatibility = JavaVersion.VERSION_17 withJavadocJar() withSourcesJar() @@ -200,7 +200,7 @@ signing { dependencies { // Compile and test with the last 7.x version to make sure transition scenarios where // the Java API client coexists with a 7.x HLRC work fine - val elasticsearchVersion = "8.10.0" + val elasticsearchVersion = "8.17.0" val jacksonVersion = "2.17.0" val openTelemetryVersion = "1.29.0" @@ -208,6 +208,8 @@ dependencies { // https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/java-rest-low.html api("org.elasticsearch.client", "elasticsearch-rest-client", elasticsearchVersion) + api("org.apache.httpcomponents.client5","httpclient5","5.4") + // Apache 2.0 // https://search.maven.org/artifact/com.google.code.findbugs/jsr305 api("com.google.code.findbugs:jsr305:3.0.2") @@ -271,6 +273,16 @@ dependencies { // Apache-2.0 // https://github.com/awaitility/awaitility testImplementation("org.awaitility", "awaitility", "4.2.0") + + // MIT + // https://github.com/mockito/mockito + testImplementation("org.mockito","mockito-core","5.12.0") + + // Apache-2.0 + // https://github.com/elastic/mocksocket + testImplementation("org.elasticsearch","mocksocket","1.2") + + } @@ -282,15 +294,16 @@ licenseReport { class SpdxReporter(val dest: File) : ReportRenderer { // License names to their SPDX identifier val spdxIds = mapOf( - "The Apache License, Version 2.0" to "Apache-2.0", - "Apache License, Version 2.0" to "Apache-2.0", - "The Apache Software License, Version 2.0" to "Apache-2.0", - "BSD Zero Clause License" to "0BSD", - "Eclipse Public License 2.0" to "EPL-2.0", - "Eclipse Public License v. 
2.0" to "EPL-2.0", - "Eclipse Public License - v 2.0" to "EPL-2.0", - "GNU General Public License, version 2 with the GNU Classpath Exception" to "GPL-2.0 WITH Classpath-exception-2.0", - "COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0" to "CDDL-1.0" + "The Apache License, Version 2.0" to "Apache-2.0", + "Apache License, Version 2.0" to "Apache-2.0", + "The Apache Software License, Version 2.0" to "Apache-2.0", + "MIT License" to "MIT", + "BSD Zero Clause License" to "0BSD", + "Eclipse Public License 2.0" to "EPL-2.0", + "Eclipse Public License v. 2.0" to "EPL-2.0", + "Eclipse Public License - v 2.0" to "EPL-2.0", + "GNU General Public License, version 2 with the GNU Classpath Exception" to "GPL-2.0 WITH Classpath-exception-2.0", + "COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0" to "CDDL-1.0" ) private fun quote(str: String): String { @@ -311,7 +324,11 @@ class SpdxReporter(val dest: File) : ReportRenderer { val depName = dep.group + ":" + dep.name val info = LicenseDataCollector.multiModuleLicenseInfo(dep) - val depUrl = info.moduleUrls.first() + val depUrl = if (depName.startsWith("org.apache.httpcomponents")) { + "https://hc.apache.org/" + } else { + info.moduleUrls.first() + } val licenseIds = info.licenses.mapNotNull { license -> license.name?.let { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java index 88474f4c1..30f7ae8b5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java @@ -146,6 +146,7 @@ import co.elastic.clients.elasticsearch.watcher.ElasticsearchWatcherAsyncClient; import co.elastic.clients.elasticsearch.xpack.ElasticsearchXpackAsyncClient; import co.elastic.clients.transport.ElasticsearchTransport; +import co.elastic.clients.transport.ElasticsearchTransportConfig; import co.elastic.clients.transport.Endpoint; import co.elastic.clients.transport.JsonEndpoint; import co.elastic.clients.transport.Transport; @@ -179,6 +180,23 @@ */ public class ElasticsearchAsyncClient extends ApiClient { + /** + * Creates a client from a {@link ElasticsearchTransportConfig.Default}} + * configuration created with an inline lambda expression. + */ + public static ElasticsearchAsyncClient of( + Function fn) { + return new ElasticsearchAsyncClient( + fn.apply(new ElasticsearchTransportConfig.Builder()).build().buildTransport()); + } + + /** + * Creates a client from an {@link ElasticsearchTransportConfig}. 
+ */ + public ElasticsearchAsyncClient(ElasticsearchTransportConfig config) { + this(config.buildTransport()); + } + public ElasticsearchAsyncClient(ElasticsearchTransport transport) { super(transport, null); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java index 3543a59d7..271d40c24 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java @@ -147,6 +147,7 @@ import co.elastic.clients.elasticsearch.watcher.ElasticsearchWatcherClient; import co.elastic.clients.elasticsearch.xpack.ElasticsearchXpackClient; import co.elastic.clients.transport.ElasticsearchTransport; +import co.elastic.clients.transport.ElasticsearchTransportConfig; import co.elastic.clients.transport.Endpoint; import co.elastic.clients.transport.JsonEndpoint; import co.elastic.clients.transport.Transport; @@ -180,6 +181,22 @@ */ public class ElasticsearchClient extends ApiClient { + /** + * Creates a client from a {@link ElasticsearchTransportConfig.Default}} + * configuration created with an inline lambda expression. + */ + public static ElasticsearchClient of( + Function fn) { + return new ElasticsearchClient(fn.apply(new ElasticsearchTransportConfig.Builder()).build().buildTransport()); + } + + /** + * Creates a client from an {@link ElasticsearchTransportConfig}. + */ + public ElasticsearchClient(ElasticsearchTransportConfig config) { + this(config.buildTransport()); + } + public ElasticsearchClient(ElasticsearchTransport transport) { super(transport, null); } diff --git a/java-client/src/main/java/co/elastic/clients/transport/DefaultTransportOptions.java b/java-client/src/main/java/co/elastic/clients/transport/DefaultTransportOptions.java index cdc35639e..9714b863c 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/DefaultTransportOptions.java +++ b/java-client/src/main/java/co/elastic/clients/transport/DefaultTransportOptions.java @@ -100,6 +100,12 @@ public Function, Boolean> onWarnings() { return onWarnings; } + @Override + public void updateToken(String token) { + this.headers.put("Authorization", "Bearer " + token); + } + + @Override public boolean keepResponseBodyOnException() { return keepResponseBodyOnException; diff --git a/java-client/src/main/java/co/elastic/clients/transport/ElasticsearchTransportBase.java b/java-client/src/main/java/co/elastic/clients/transport/ElasticsearchTransportBase.java index 26237501e..8d561f506 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/ElasticsearchTransportBase.java +++ b/java-client/src/main/java/co/elastic/clients/transport/ElasticsearchTransportBase.java @@ -77,15 +77,9 @@ public abstract class ElasticsearchTransportBase implements ElasticsearchTranspo } } - private final TransportHttpClient httpClient; - private final Instrumentation instrumentation; - - @Override - public void close() throws IOException { - httpClient.close(); - } - - private final JsonpMapper mapper; + protected final TransportHttpClient httpClient; + protected final Instrumentation instrumentation; + protected final JsonpMapper mapper; protected final TransportOptions transportOptions; public ElasticsearchTransportBase(TransportHttpClient httpClient, TransportOptions options, @@ -113,6 +107,20 @@ public ElasticsearchTransportBase( this.instrumentation = instrumentation; } + /** INTERNAL, used only for 

tests. */ + protected ElasticsearchTransportBase cloneWith( + @Nullable TransportOptions options, + @Nullable JsonpMapper mapper, + @Nullable Instrumentation instrumentation + ) { + throw new UnsupportedOperationException(); + } + + @Override + public void close() throws IOException { + httpClient.close(); + } + @Override public final JsonpMapper jsonpMapper() { return mapper; @@ -123,6 +131,10 @@ public final TransportOptions options() { return transportOptions; } + public TransportHttpClient httpClient() { + return httpClient; + } + @Override public final ResponseT performRequest( RequestT request, diff --git a/java-client/src/main/java/co/elastic/clients/transport/ElasticsearchTransportConfig.java b/java-client/src/main/java/co/elastic/clients/transport/ElasticsearchTransportConfig.java new file mode 100644 index 000000000..2d7b9ccb3 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/ElasticsearchTransportConfig.java @@ -0,0 +1,335 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport; + +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.jackson.JacksonJsonpMapper; +import co.elastic.clients.transport.instrumentation.Instrumentation; +import co.elastic.clients.transport.rest5_client.Rest5ClientTransport; +import co.elastic.clients.transport.rest_client.RestClientTransport; + +import javax.annotation.Nullable; +import javax.net.ssl.SSLContext; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; +import java.util.function.Function; + +public abstract class ElasticsearchTransportConfig { + protected List hosts; + protected String username; + protected String password; + protected String token; + protected String apiKey; + protected boolean useCompression = false; + protected SSLContext sslContext; + protected JsonpMapper mapper; + protected TransportOptions transportOptions; + protected Instrumentation instrumentation; + + public List hosts() { + return hosts; + } + + @Nullable + public String username() { + return username; + } + + @Nullable + public String password() { + return password; + } + + @Nullable + public String token() { + return token; + } + + @Nullable + public String apiKey() { + return apiKey; + } + + public boolean useCompression() { + return useCompression; + } + + @Nullable + public SSLContext sslContext() { + return sslContext; + } + + public JsonpMapper mapper() { + return mapper; + } + + public TransportOptions transportOptions() { + return transportOptions; + } + + @Nullable + public Instrumentation instrumentation() { + return instrumentation; + } + + public abstract ElasticsearchTransport buildTransport(); + + //--------------------------------------------------------------------------------------------- + + /** + * Default configuration that can be used with any transport implementation. If no transport + * factory is defined with {@link #transportFactory}, a {@link Rest5ClientTransport} is used. + */ + public static class Default extends ElasticsearchTransportConfig { + + protected Function transportFactory; + + public Function transportFactory() { + return transportFactory; + } + + public ElasticsearchTransport buildTransport() { + return this.transportFactory.apply(this); + } + } + + /** + * Builder for {@link Default} transport configurations. + */ + public static class Builder extends ElasticsearchTransportConfig.AbstractBuilder { + private final Default dconfig; + + public Builder() { + this(new Default()); + } + + private Builder(Default dconfig) { + super(dconfig); + // Typed accessor to this.config + this.dconfig = dconfig; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Should we use the legacy http implementation based on Elasticsearch's Low Level Rest Client? + *
<p>
+ * Shortcut for {@code transportFactory(RestClientTransport::new)}. + * + * @see RestClientTransport + */ + public Builder useLegacyTransport(boolean useLegacyTransport) { + if (useLegacyTransport) { + dconfig.transportFactory = RestClientTransport::new; + } else { + dconfig.transportFactory = null; + } + return this; + } + + /** + * Defines the factory function to create a transport with this configuration. + * + * @see #buildTransport() + */ + public Builder transportFactory(Function factory) { + dconfig.transportFactory = factory; + return this; + } + + @Override + public Default build() { + Default config = (Default) super.build(); + + // Default transport implementation + if (config.transportFactory == null) { + config.transportFactory = Rest5ClientTransport::new; + } + + return config; + } + } + + //--------------------------------------------------------------------------------------------- + + public abstract static class AbstractBuilder> { + protected final ElasticsearchTransportConfig config; + + public AbstractBuilder(ElasticsearchTransportConfig config) { + this.config = config; + } + + protected abstract BuilderT self(); + + protected ElasticsearchTransportConfig build() { + + //---- Validate credentials + + if (config.username != null) { + checkNull(config.token, "token", "username/password"); + checkNull(config.apiKey, "API key", "username/password"); + if (config.password == null) { + throw new IllegalArgumentException("password required with username"); + } + } else if (config.password != null) { + throw new IllegalArgumentException("username required with password"); + } + + if (config.token != null) { + checkNull(config.apiKey, "API key", "token"); + checkNull(config.username, "username", "token"); + } + + if (config.apiKey != null) { + checkNull(config.token, "token", "API key"); + checkNull(config.username, "username", "API key"); + } + + //---- Validate other settings + + if (config.hosts() == null || config.hosts.isEmpty()) { + throw new IllegalArgumentException("hosts cannot be empty"); + } + + if (config.mapper == null) { + config.mapper = new JacksonJsonpMapper(); + } + + return config; + }; + + /** + * Elasticsearch host location + */ + public BuilderT host(String url) { + try { + config.hosts = List.of(new URI(url)); + } catch (URISyntaxException e) { + // Avoid requiring a checked exception in the builder + throw new RuntimeException(e); + } + return self(); + } + + /** + * Elasticsearch host location + */ + public BuilderT host(URI url) { + config.hosts = List.of(url); + return self(); + } + + /** + * Elasticsearch hosts locations + */ + public BuilderT hosts(List hosts) { + config.hosts = hosts; + return self(); + } + + /** + * Set the username and password to use to connect to Elasticsearch. + */ + public BuilderT usernameAndPassword(String username, String password) { + config.username = username; + config.password = password; + return self(); + } + + /** + * Set the bearer token to use to authenticate to Elasticsearch. + */ + public BuilderT token(String token) { + config.token = token; + return self(); + } + + /** + * Set the API key to use to authenticate to Elasticsearch. + */ + public BuilderT apiKey(String apiKey) { + config.apiKey = apiKey; + return self(); + } + + /** + * Should request and response body compression be used? + */ + public BuilderT useCompression(boolean useCompression) { + this.config.useCompression = useCompression; + return self(); + } + + /** + * SSL context to use for https connections. 
See {@link co.elastic.clients.transport.TransportUtils} to create it + * from a certificate file or a certificate fingerprint. + * + * @see co.elastic.clients.transport.TransportUtils + */ + public BuilderT sslContext(SSLContext sslContext) { + config.sslContext = sslContext; + return self(); + } + + /** + * The JSON mapper to use. Defaults to {@link JacksonJsonpMapper}. + */ + public BuilderT jsonMapper(JsonpMapper mapper) { + config.mapper = mapper; + return self(); + } + + /** + * Transport instrumentation to log client traffic. See + * {@link co.elastic.clients.transport.instrumentation.OpenTelemetryForElasticsearch} for OpenTelemetry integration. + */ + public BuilderT instrumentation(Instrumentation instrumentation) { + config.instrumentation = instrumentation; + return self(); + } + + /** + * Lower level transport options. + */ + public BuilderT transportOptions(TransportOptions transportOptions) { + config.transportOptions = transportOptions; + return self(); + } + + /** + * Lower level transport options. This method adds options to the ones already set, if any. + */ + public BuilderT transportOptions(Function fn) { + var builder = config.transportOptions == null ? new DefaultTransportOptions.Builder() : config.transportOptions.toBuilder(); + config.transportOptions = fn.apply(builder).build(); + return self(); + } + + protected void checkNull(Object value, String name, String other) { + if (value != null) { + throw new IllegalArgumentException("Cannot set both " + other + " and " + name + "."); + } + } + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/Transport.java b/java-client/src/main/java/co/elastic/clients/transport/Transport.java index e08263379..7a0a19e47 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/Transport.java +++ b/java-client/src/main/java/co/elastic/clients/transport/Transport.java @@ -26,6 +26,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.concurrent.CompletableFuture; +import java.util.function.Function; /** * The transport layer that allows {@link ApiClient}s to send requests. @@ -51,4 +52,24 @@ CompletableFuture performRequestAsync( * {@link #performRequestAsync(Object, Endpoint, TransportOptions)}; */ TransportOptions options(); + + /** + * Clone this transport with new options. + * + * @throws UnsupportedOperationException + */ + default Transport withOptions(@Nullable TransportOptions options) { + throw new UnsupportedOperationException(); + } + + /** + * Clone this transport with additional options. The lambda expression is provided an options builder + * initialized with the transport's current options. + * + * @param fn a lambda expression that takes the current options as input + * @throws UnsupportedOperationException + */ + default Transport withOptions(Function fn) { + return withOptions(fn.apply(options().toBuilder()).build()); + } } diff --git a/java-client/src/main/java/co/elastic/clients/transport/TransportOptions.java b/java-client/src/main/java/co/elastic/clients/transport/TransportOptions.java index 9cbbdd40d..f5603b9b3 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/TransportOptions.java +++ b/java-client/src/main/java/co/elastic/clients/transport/TransportOptions.java @@ -38,6 +38,8 @@ public interface TransportOptions { Function, Boolean> onWarnings(); + void updateToken(String token); + /** * If {@code true}, the response body in {@code TransportException.response().body()} is guaranteed to be * replayable (i.e. 
buffered), even if the original response was streamed. This allows inspecting the diff --git a/java-client/src/main/java/co/elastic/clients/transport/TransportUtils.java b/java-client/src/main/java/co/elastic/clients/transport/TransportUtils.java index 72e6832cb..add9e1157 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/TransportUtils.java +++ b/java-client/src/main/java/co/elastic/clients/transport/TransportUtils.java @@ -26,6 +26,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; +import java.security.GeneralSecurityException; import java.security.KeyManagementException; import java.security.KeyStore; import java.security.KeyStoreException; @@ -144,4 +145,40 @@ public X509Certificate[] getAcceptedIssuers() { throw new RuntimeException(e); } } + + /** + * Returns an insecure SSLContext that will accept any server certificate. + *
<p>
+ * Use with care as it allows man-in-the-middle attacks. + */ + public static SSLContext insecureSSLContext() { + SSLContext result; + + X509TrustManager trustManager = new X509TrustManager() { + @Override + public void checkClientTrusted(X509Certificate[] certs, String authType) { + // Accept anything + } + + @Override + public void checkServerTrusted(X509Certificate[] certs, String authType) { + // Accept anything + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[0]; + } + }; + + try { + result = SSLContext.getInstance("SSL"); + result.init(null, new X509TrustManager[] { trustManager }, null); + } catch (GeneralSecurityException e) { + // An exception here means SSL is not supported, which is unlikely + throw new RuntimeException(e); + } + + return result; + } } diff --git a/java-client/src/main/java/co/elastic/clients/transport/http/TransportHttpClient.java b/java-client/src/main/java/co/elastic/clients/transport/http/TransportHttpClient.java index 74ce8bd7a..4a8b3ffa3 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/http/TransportHttpClient.java +++ b/java-client/src/main/java/co/elastic/clients/transport/http/TransportHttpClient.java @@ -28,7 +28,6 @@ import java.io.IOException; import java.net.URI; import java.nio.ByteBuffer; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -90,78 +89,6 @@ default TransportOptions createOptions(@Nullable TransportOptions options) { */ void close() throws IOException; - /** - * A node/host to send requests to. - */ - class Node { - private final URI uri; - private final Set roles; - private final Map attributes; - - /** - * Create a node with its URI, roles and attributes. - *
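As a sketch (not taken from this patch), the insecureSSLContext() helper added above could be wired into the new configuration builder when talking to a local test cluster; the host and credentials are placeholders, and the insecure context should never be used in production:

    ElasticsearchTransportConfig config = new ElasticsearchTransportConfig.Builder()
        .host("https://localhost:9200")                   // placeholder endpoint
        .usernameAndPassword("elastic", "changeme")       // placeholder credentials
        .sslContext(TransportUtils.insecureSSLContext())  // accepts any certificate: tests only
        .build();

    ElasticsearchClient client = new ElasticsearchClient(config);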
<p>
- * If the URI doesn't end with a '{@code /}', then one is added. - * - * @param uri the node's URI - * @param roles the node's roles (such as "master", "ingest", etc). This can be used for routing decisions by multi-node - * implementations. - * @param attributes the node's attributes. This can be used for routing decisions by multi-node implementations. - */ - public Node(URI uri, Set roles, Map attributes) { - if (!uri.isAbsolute()) { - throw new IllegalArgumentException("Node URIs must be absolute: " + uri); - } - - if (!uri.getRawPath().endsWith("/")) { - uri = uri.resolve(uri.getRawPath() + "/"); - } - - this.uri = uri; - this.roles = roles; - this.attributes = attributes; - } - - public Node(URI uri) { - this(uri, Collections.emptySet(), Collections.emptyMap()); - } - - public Node(String uri) { - this(URI.create(uri), Collections.emptySet(), Collections.emptyMap()); - } - - /** - * The URI of this node. This is an absolute URL with a path ending with a "/". - */ - public URI uri() { - return this.uri; - } - - @Override - public String toString() { - return uri.toString(); - } - - /** - * Two nodes are considered equal if their URIs are equal. Roles and attributes are ignored. - */ - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof Node)) return false; - Node node = (Node) o; - return Objects.equals(uri, node.uri); - } - - /** - * A node's hash code is that of its URI. Roles and attributes are ignored. - */ - @Override - public int hashCode() { - return Objects.hash(uri); - } - } - /** * An http request. */ @@ -258,4 +185,110 @@ interface Response extends Closeable { */ void close() throws IOException; } + + /** + * A node/host to send requests to. + */ + class Node { + private final URI uri; + private final String name; + private final String version; + private final Set roles; + private final Map attributes; + + /** + * Create a node with its URI, roles and attributes. + *
<p>
+ * If the URI doesn't end with a '{@code /}', then one is added. + * + * @param uri the node's URI + * @param name the node name/identifier + * @param version the node's version, if known + * @param roles the node's roles, such as "master", "ingest", etc. + * @param attributes the node's attributes. This can be used for routing decisions by multi-node implementations. + */ + public Node( + URI uri, + @Nullable String name, + @Nullable String version, + @Nullable Set roles, + @Nullable Map attributes + ) { + + if (!uri.isAbsolute()) { + throw new IllegalArgumentException("Node URIs must be absolute: " + uri); + } + + if (!uri.getRawPath().endsWith("/")) { + uri = uri.resolve(uri.getRawPath() + "/"); + } + + this.uri = uri; + this.name = name; + this.version = version; + this.roles = roles; + this.attributes = attributes; + } + + public Node(URI uri) { + this(uri, null, null, null, null); + } + + public Node(String uri) { + this(URI.create(uri), null, null, null, null); + } + + /** + * The URI of this node. This is an absolute URL with a path ending with a "/". + */ + public URI uri() { + return this.uri; + } + + /** + * The node name/identifier + */ + @Nullable + public String name() { + return name; + } + + @Nullable + public String version() { + return version; + } + + @Nullable + public Set roles() { + return roles; + } + + @Nullable + public Map attributes() { + return attributes; + } + + @Override + public String toString() { + return uri.toString(); + } + + /** + * Two nodes are considered equal if their URIs are equal. Other properties are ignored. + */ + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof Node node)) return false; + return Objects.equals(uri, node.uri); + } + + /** + * A node's hash code is that of its URI. Other properties are ignored. + */ + @Override + public int hashCode() { + return Objects.hash(uri); + } + } } diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/MultiBufferEntity.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/MultiBufferEntity.java new file mode 100644 index 000000000..674e85da8 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/MultiBufferEntity.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.util.NoCopyByteArrayOutputStream; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.io.entity.AbstractHttpEntity; +import org.apache.hc.core5.http.nio.AsyncDataProducer; +import org.apache.hc.core5.http.nio.DataStreamChannel; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.WritableByteChannel; +import java.util.Iterator; + +/** + * An HTTP entity based on a sequence of byte buffers. + */ +class MultiBufferEntity extends AbstractHttpEntity implements AsyncDataProducer { + + private final Iterable buffers; + + private Iterator iterator; + private volatile ByteBuffer currentBuffer; + + MultiBufferEntity(Iterable buffers, ContentType contentType) { + super(contentType,null,true); + this.buffers = buffers; + init(); + } + + @Override + public void close() throws IOException { + // Reset state, the request may be retried + init(); + } + + private void init() { + this.iterator = buffers.iterator(); + if (this.iterator.hasNext()) { + this.currentBuffer = this.iterator.next().duplicate(); + } else { + this.currentBuffer = null; + } + } + + @Override + public boolean isRepeatable() { + return true; + } + + @Override + public long getContentLength() { + // Use chunked encoding + return -1; + } + + @Override + public boolean isStreaming() { + return false; + } + + @Override + public InputStream getContent() throws IOException, UnsupportedOperationException { + NoCopyByteArrayOutputStream baos = new NoCopyByteArrayOutputStream(); + writeTo(baos); + return baos.asInputStream(); + } + + @Override + public void writeTo(OutputStream out) throws IOException { + WritableByteChannel channel = Channels.newChannel(out); + for (ByteBuffer buffer: buffers) { + channel.write(buffer.duplicate()); + } + } + + @Override + public int available() { + return currentBuffer.remaining(); + } + + @Override + public void produce(DataStreamChannel channel) throws IOException { + if (currentBuffer == null) { + channel.endStream(); + return; + } + + channel.write(currentBuffer); + + if (!currentBuffer.hasRemaining()) { + if (iterator.hasNext()) { + currentBuffer = iterator.next().duplicate(); + } else { + currentBuffer = null; + channel.endStream(); + } + } + } + + @Override + public void releaseResources() { + init(); + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientHttpClient.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientHttpClient.java new file mode 100644 index 000000000..a8649fe65 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientHttpClient.java @@ -0,0 +1,285 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.transport.TransportOptions; +import co.elastic.clients.transport.http.HeaderMap; +import co.elastic.clients.transport.http.TransportHttpClient; +import co.elastic.clients.transport.rest5_client.low_level.Cancellable; +import co.elastic.clients.transport.rest5_client.low_level.ResponseListener; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.util.BinaryData; +import co.elastic.clients.util.NoCopyByteArrayOutputStream; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HeaderElement; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.message.BasicHeaderValueParser; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.AbstractList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; + +public class Rest5ClientHttpClient implements TransportHttpClient { + + private static final ConcurrentHashMap ContentTypeCache = new ConcurrentHashMap<>(); + + /** + * The {@code Future} implementation returned by async requests. + * It wraps the Rest5Client's cancellable and propagates cancellation. + */ + private static class RequestFuture extends CompletableFuture { + private volatile Cancellable cancellable; + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + boolean cancelled = super.cancel(mayInterruptIfRunning); + if (cancelled && cancellable != null) { + cancellable.cancel(); + } + return cancelled; + } + } + + private final Rest5Client restClient; + + public Rest5ClientHttpClient(Rest5Client restClient) { + this.restClient = restClient; + } + + /** + * Returns the underlying low level Rest Client used by this transport. 
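As a sketch (not taken from this patch), the restClient() accessor documented here, together with the httpClient() accessor added to ElasticsearchTransportBase earlier in this diff, could be used to reach the wrapped low-level client; the endpoint is a placeholder, and the casts assume the default Rest5ClientTransport factory and that Rest5ClientTransport extends ElasticsearchTransportBase, as RestClientTransport does:

    ElasticsearchTransport transport = new ElasticsearchTransportConfig.Builder()
        .host("http://localhost:9200")   // placeholder endpoint
        .build()
        .buildTransport();               // default factory is Rest5ClientTransport::new

    Rest5ClientHttpClient httpClient =
        (Rest5ClientHttpClient) ((ElasticsearchTransportBase) transport).httpClient();
    Rest5Client lowLevelClient = httpClient.restClient();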
+ */ + public Rest5Client restClient() { + return this.restClient; + } + + @Override + public Rest5ClientOptions createOptions(@Nullable TransportOptions options) { + return Rest5ClientOptions.of(options); + } + + @Override + public Response performRequest(String endpointId, @Nullable Node node, Request request, + TransportOptions options) throws IOException { + Rest5ClientOptions rcOptions = Rest5ClientOptions.of(options); + co.elastic.clients.transport.rest5_client.low_level.Request restRequest = createRestRequest(request, rcOptions); + co.elastic.clients.transport.rest5_client.low_level.Response restResponse = restClient.performRequest(restRequest); + return new RestResponse(restResponse); + } + + @Override + public CompletableFuture performRequestAsync( + String endpointId, @Nullable Node node, Request request, TransportOptions options + ) { + + RequestFuture future = new RequestFuture<>(); + co.elastic.clients.transport.rest5_client.low_level.Request restRequest; + + try { + Rest5ClientOptions rcOptions = Rest5ClientOptions.of(options); + restRequest = createRestRequest(request, rcOptions); + } catch (Throwable thr) { + // Terminate early + future.completeExceptionally(thr); + return future; + } + + future.cancellable = restClient.performRequestAsync(restRequest, new ResponseListener() { + @Override + public void onSuccess(co.elastic.clients.transport.rest5_client.low_level.Response response) { + future.complete(new RestResponse(response)); + } + + @Override + public void onFailure(Exception exception) { + future.completeExceptionally(exception); + } + }); + + return future; + } + + @Override + public void close() throws IOException { + this.restClient.close(); + } + + private co.elastic.clients.transport.rest5_client.low_level.Request createRestRequest(Request request, Rest5ClientOptions options) { + co.elastic.clients.transport.rest5_client.low_level.Request clientReq = + new co.elastic.clients.transport.rest5_client.low_level.Request(request.method(), request.path()); + + Iterable body = request.body(); + + Map requestHeaders = request.headers(); + if (!requestHeaders.isEmpty()) { + + int headerCount = requestHeaders.size(); + if ((body == null && headerCount != 3) || headerCount != 4) { + if (options == null) { + options = Rest5ClientOptions.initialOptions(); + } + + Rest5ClientOptions.Builder builder = options.toBuilder(); + for (Map.Entry header : requestHeaders.entrySet()) { + builder.setHeader(header.getKey(), header.getValue()); + } + // Original option headers have precedence + for (Map.Entry header : options.headers()) { + builder.setHeader(header.getKey(), header.getValue()); + } + options = builder.build(); + } + } + + if (options != null) { + clientReq.setOptions(options.restClientRequestOptions()); + } + + clientReq.addParameters(request.queryParams()); + + if (body != null) { + ContentType ct = null; + String ctStr; + if ((ctStr = requestHeaders.get(HeaderMap.CONTENT_TYPE)) != null) { + ct = ContentTypeCache.computeIfAbsent(ctStr, ContentType::parse); + } + clientReq.setEntity(new MultiBufferEntity(body, ct)); + } + + return clientReq; + } + + static class RestResponse implements Response { + private final co.elastic.clients.transport.rest5_client.low_level.Response restResponse; + + RestResponse(co.elastic.clients.transport.rest5_client.low_level.Response restResponse) { + this.restResponse = restResponse; + } + + @Override + public Node node() { + return new Node(restResponse.getHost().toURI()); + } + + @Override + public int statusCode() { + return 
restResponse.getStatusCode(); + } + + @Override + public String header(String name) { + return restResponse.getHeader(name); + } + + @Override + public List headers(String name) { + Header[] headers = restResponse.getHeaders(); + for (int i = 0; i < headers.length; i++) { + Header header = headers[i]; + if (header.getName().equalsIgnoreCase(name)) { + BasicHeaderValueParser elementParser = new BasicHeaderValueParser(); + HeaderElement[] elements = elementParser.parseElements(header.getValue(), null); + return new AbstractList<>() { + @Override + public String get(int index) { + return elements[index].getValue(); + } + + @Override + public int size() { + return elements.length; + } + }; + } + } + return Collections.emptyList(); + } + + @Nullable + @Override + public BinaryData body() throws IOException { + HttpEntity entity = restResponse.getEntity(); + return entity == null ? null : new HttpEntityBinaryData(restResponse.getEntity()); + } + + @Nullable + @Override + public co.elastic.clients.transport.rest5_client.low_level.Response originalResponse() { + return this.restResponse; + } + + @Override + public void close() throws IOException { + EntityUtils.consume(restResponse.getEntity()); + } + } + + private static class HttpEntityBinaryData implements BinaryData { + private final HttpEntity entity; + + HttpEntityBinaryData(HttpEntity entity) { + this.entity = entity; + } + + @Override + public String contentType() { + String h = entity.getContentType(); + return h == null ? "application/octet-stream" : h; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + entity.writeTo(out); + } + + @Override + public ByteBuffer asByteBuffer() throws IOException { + NoCopyByteArrayOutputStream out = new NoCopyByteArrayOutputStream(); + entity.writeTo(out); + return out.asByteBuffer(); + } + + @Override + public InputStream asInputStream() throws IOException { + return entity.getContent(); + } + + @Override + public boolean isRepeatable() { + return entity.isRepeatable(); + } + + @Override + public long size() { + long len = entity.getContentLength(); + return len < 0 ? -1 : entity.getContentLength(); + } + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientOptions.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientOptions.java new file mode 100644 index 000000000..0e553088a --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientOptions.java @@ -0,0 +1,267 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.transport.TransportOptions; +import co.elastic.clients.transport.Version; +import co.elastic.clients.transport.http.HeaderMap; +import co.elastic.clients.transport.rest5_client.low_level.RequestOptions; +import co.elastic.clients.transport.rest5_client.low_level.WarningsHandler; +import co.elastic.clients.util.LanguageRuntimeVersions; +import co.elastic.clients.util.VisibleForTesting; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.util.VersionInfo; + +import javax.annotation.Nullable; +import java.util.AbstractMap; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +public class Rest5ClientOptions implements TransportOptions { + + private final RequestOptions options; + + boolean keepResponseBodyOnException; + + @VisibleForTesting + static final String CLIENT_META_VALUE = getClientMeta(); + @VisibleForTesting + static final String USER_AGENT_VALUE = getUserAgent(); + + public static Rest5ClientOptions of(@Nullable TransportOptions options) { + if (options == null) { + return initialOptions(); + } + + if (options instanceof Rest5ClientOptions) { + return (Rest5ClientOptions)options; + } + + final Builder builder = new Builder(RequestOptions.DEFAULT.toBuilder()); + options.headers().forEach(h -> builder.addHeader(h.getKey(), h.getValue())); + options.queryParameters().forEach(builder::setParameter); + builder.onWarnings(options.onWarnings()); + return builder.build(); + } + + public Rest5ClientOptions(RequestOptions options, boolean keepResponseBodyOnException) { + this.keepResponseBodyOnException = keepResponseBodyOnException; + this.options = addBuiltinHeaders(options.toBuilder()).build(); + } + + /** + * Get the wrapped Rest Client request options + */ + public RequestOptions restClientRequestOptions() { + return this.options; + } + + @Override + public Collection> headers() { + return options.getHeaders().stream() + .map(h -> new AbstractMap.SimpleImmutableEntry<>(h.getName(), h.getValue())) + .collect(Collectors.toList()); + } + + @Override + public Map queryParameters() { + return options.getParameters(); + } + + /** + * Called if there are warnings to determine if those warnings should fail the request. + */ + @Override + public Function, Boolean> onWarnings() { + final WarningsHandler handler = options.getWarningsHandler(); + if (handler == null) { + return null; + } + + return warnings -> options.getWarningsHandler().warningsShouldFailRequest(warnings); + } + + @Override + public void updateToken(String token) { + options.updateToken(token); + } + + @Override + public boolean keepResponseBodyOnException() { + return this.keepResponseBodyOnException; + } + + @Override + public Builder toBuilder() { + return new Builder(options.toBuilder()); + } + + public static class Builder implements TransportOptions.Builder { + + private RequestOptions.Builder builder; + + private boolean keepResponseBodyOnException; + + public Builder(RequestOptions.Builder builder) { + this.builder = builder; + } + + /** + * Get the wrapped Rest Client request options builder. 
+ */ + public RequestOptions.Builder restClientRequestOptionsBuilder() { + return this.builder; + } + + @Override + public TransportOptions.Builder addHeader(String name, String value) { + if (name.equalsIgnoreCase(HeaderMap.CLIENT_META)) { + // Not overridable + return this; + } + if (name.equalsIgnoreCase(HeaderMap.USER_AGENT)) { + // We must remove our own user-agent from the options, or we'll end up with multiple values for the header + builder.removeHeader(HeaderMap.USER_AGENT); + } + builder.addHeader(name, value); + return this; + } + + @Override + public TransportOptions.Builder setHeader(String name, String value) { + if (name.equalsIgnoreCase(HeaderMap.CLIENT_META)) { + // Not overridable + return this; + } + builder.removeHeader(name).addHeader(name, value); + return this; + } + + @Override + public TransportOptions.Builder removeHeader(String name) { + builder.removeHeader(name); + return this; + } + + @Override + public TransportOptions.Builder setParameter(String name, String value) { + // Should be remove and add, but we can't remove. + builder.addParameter(name, value); + return this; + } + + @Override + public TransportOptions.Builder removeParameter(String name) { + throw new UnsupportedOperationException("This implementation does not support removing parameters"); + } + + /** + * Called if there are warnings to determine if those warnings should fail the request. + */ + @Override + public TransportOptions.Builder onWarnings(Function, Boolean> listener) { + if (listener == null) { + builder.setWarningsHandler(null); + } else { + builder.setWarningsHandler(w -> { + if (w != null && !w.isEmpty()) { + return listener.apply(w); + } else { + return false; + } + }); + } + + return this; + } + + @Override + public TransportOptions.Builder keepResponseBodyOnException(boolean value) { + this.keepResponseBodyOnException = value; + return this; + } + + @Override + public Rest5ClientOptions build() { + return new Rest5ClientOptions(addBuiltinHeaders(builder).build(), keepResponseBodyOnException); + } + } + + static Rest5ClientOptions initialOptions() { + return new Rest5ClientOptions(RequestOptions.DEFAULT, false); + } + + private static RequestOptions.Builder addBuiltinHeaders(RequestOptions.Builder builder) { + builder.removeHeader(HeaderMap.CLIENT_META); + builder.addHeader(HeaderMap.CLIENT_META, CLIENT_META_VALUE); + if (builder.getHeaders().stream().noneMatch(h -> h.getName().equalsIgnoreCase(HeaderMap.USER_AGENT))) { + builder.addHeader(HeaderMap.USER_AGENT, USER_AGENT_VALUE); + } + if (builder.getHeaders().stream().noneMatch(h -> h.getName().equalsIgnoreCase(HeaderMap.ACCEPT))) { + builder.addHeader(HeaderMap.ACCEPT, Rest5ClientTransport.JSON_CONTENT_TYPE); + } + + return builder; + } + + private static String getUserAgent() { + return String.format( + Locale.ROOT, + "elastic-java/%s (Java/%s)", + Version.VERSION == null ? "Unknown" : Version.VERSION.toString(), + System.getProperty("java.version") + ); + } + + private static String getClientMeta() { + VersionInfo httpClientVersion = null; + try { + httpClientVersion = VersionInfo.loadVersionInfo( + "org.apache.http.nio.client", + HttpAsyncClientBuilder.class.getClassLoader() + ); + } catch (Exception e) { + // Keep unknown + } + + // Use a single 'p' suffix for all prerelease versions (snapshot, beta, etc). + String metaVersion = Version.VERSION == null ? 
"" : Version.VERSION.toString(); + int dashPos = metaVersion.indexOf('-'); + if (dashPos > 0) { + metaVersion = metaVersion.substring(0, dashPos) + "p"; + } + + // service, language, transport, followed by additional information + return "es=" + + metaVersion + + ",jv=" + + System.getProperty("java.specification.version") + + ",t=" + + metaVersion + + ",hl=2" + + ",hc=" + + (httpClientVersion == null ? "" : httpClientVersion.getRelease()) + + LanguageRuntimeVersions.getRuntimeMetadata(); + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientTransport.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientTransport.java new file mode 100644 index 000000000..3b0ad7fe6 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/Rest5ClientTransport.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.transport.ElasticsearchTransportBase; +import co.elastic.clients.transport.Transport; +import co.elastic.clients.transport.ElasticsearchTransportConfig; +import co.elastic.clients.transport.TransportOptions; +import co.elastic.clients.transport.instrumentation.Instrumentation; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.transport.rest5_client.low_level.Rest5ClientBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; + +import javax.annotation.Nullable; +import java.util.Base64; + +public class Rest5ClientTransport extends ElasticsearchTransportBase { + + private final Rest5Client restClient; + + public Rest5ClientTransport(ElasticsearchTransportConfig config) { + this( + buildRest5Client(config), + config.mapper(), + Rest5ClientOptions.of(config.transportOptions()), + config.instrumentation() + ); + } + + public Rest5ClientTransport(Rest5Client restClient, JsonpMapper jsonpMapper) { + this(restClient, jsonpMapper, null); + } + + public Rest5ClientTransport(Rest5Client restClient, JsonpMapper jsonpMapper, Rest5ClientOptions options) { + super(new Rest5ClientHttpClient(restClient), options, jsonpMapper, null); + this.restClient = restClient; + } + + public Rest5ClientTransport(Rest5Client restClient, JsonpMapper jsonpMapper, Rest5ClientOptions options, + Instrumentation instrumentation) { + super(new Rest5ClientHttpClient(restClient), options, jsonpMapper, instrumentation); + this.restClient = restClient; + } + + private static Rest5Client buildRest5Client(ElasticsearchTransportConfig config) { + Rest5ClientBuilder restClientBuilder = Rest5Client.builder(config.hosts()); + + if (config.username() != 
null && config.password() != null) { + var cred = Base64.getEncoder().encodeToString((config.username() + ":" + config.password()).getBytes()); + restClientBuilder.setDefaultHeaders(new Header[]{ + new BasicHeader("Authorization", "Basic " + cred) + }); + } else if (config.apiKey() != null) { + restClientBuilder.setDefaultHeaders(new Header[]{ + new BasicHeader("Authorization", "ApiKey " + config.apiKey()) + }); + } else if (config.token() != null) { + restClientBuilder.setDefaultHeaders(new Header[]{ + new BasicHeader("Authorization", "Bearer " + config.token()) + }); + } + + if (config.sslContext() != null) { + restClientBuilder.setSSLContext(config.sslContext()); + } + + restClientBuilder.setCompressionEnabled(config.useCompression()); + + return restClientBuilder.build(); + } + + public Rest5Client restClient() { + return this.restClient; + } + + @Override + public Transport withOptions(@Nullable TransportOptions options) { + return new Rest5ClientTransport(restClient, mapper, Rest5ClientOptions.of(options), instrumentation); + } + + /** INTERNAL, used only for tests. */ + @Override + protected ElasticsearchTransportBase cloneWith(TransportOptions options, JsonpMapper mapper, Instrumentation instrumentation) { + return new Rest5ClientTransport( + restClient, + mapper != null ? mapper : this.mapper, + Rest5ClientOptions.of(options != null ? options : this.transportOptions), + instrumentation != null ? instrumentation : this.instrumentation + ); + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncEntityProducer.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncEntityProducer.java new file mode 100644 index 000000000..060c71fea --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncEntityProducer.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.DataStreamChannel; +import org.apache.hc.core5.util.Args; + +import java.io.IOException; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +import static co.elastic.clients.transport.rest5_client.low_level.Constants.DEFAULT_BUFFER_INITIAL_CAPACITY; + +/** + * Basic implementation of {@link AsyncEntityProducer} + */ +class BasicAsyncEntityProducer implements AsyncEntityProducer { + + private final HttpEntity entity; + private final ByteBuffer buffer; + private final AtomicReference channelRef; + private final AtomicReference exceptionRef; + private final String contentType; + private final boolean isChunked; + private boolean eof; + + /** + * Create new basic entity producer + */ + BasicAsyncEntityProducer(final HttpEntity entity, final int bufferSize) { + this.entity = Args.notNull(entity, "Http Entity"); + this.buffer = ByteBuffer.allocate(bufferSize); + this.channelRef = new AtomicReference<>(); + this.exceptionRef = new AtomicReference<>(); + this.contentType = entity.getContentType(); + this.isChunked = entity.isChunked(); + } + + /** + * Create new basic entity producer with default buffer limit of 100MB + */ + BasicAsyncEntityProducer(final HttpEntity entity) { + this(entity, DEFAULT_BUFFER_INITIAL_CAPACITY); + } + + @Override + public boolean isRepeatable() { + return entity.isRepeatable(); + } + + @Override + public String getContentType() { + return this.contentType; + } + + @Override + public long getContentLength() { + return entity.getContentLength(); + } + + @Override + public int available() { + return Integer.MAX_VALUE; + } + + @Override + public String getContentEncoding() { + return entity.getContentEncoding(); + } + + @Override + public boolean isChunked() { + return this.isChunked; + } + + @Override + public Set getTrailerNames() { + return entity.getTrailerNames(); + } + + @Override + public void produce(final DataStreamChannel dataStream) throws IOException { + ReadableByteChannel channel = channelRef.get(); + if (channel == null) { + channel = Channels.newChannel(entity.getContent()); + channelRef.getAndSet(channel); + } + if (!eof) { + final int bytesRead = channel.read(buffer); + if (bytesRead < 0) { + eof = true; + } + } + if (buffer.position() > 0) { + ((Buffer) buffer).flip(); + dataStream.write(buffer); + buffer.compact(); + } + if (eof && buffer.position() == 0) { + dataStream.endStream(); + releaseResources(); + } + } + + @Override + public void failed(final Exception cause) { + if (exceptionRef.compareAndSet(null, cause)) { + releaseResources(); + } + } + + @Override + public void releaseResources() { + eof = false; + ReadableByteChannel channel = channelRef.getAndSet(null); + if (channel != null) { + try { + channel.close(); + } catch (IOException e) { + } + } + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncResponseConsumer.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncResponseConsumer.java new file mode 100644 index 000000000..39b3d0ee8 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncResponseConsumer.java @@ -0,0 +1,60 
@@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + + +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.entity.AbstractBinAsyncEntityConsumer; +import org.apache.hc.core5.http.nio.support.AbstractAsyncResponseConsumer; +import org.apache.hc.core5.http.protocol.HttpContext; + +class BasicAsyncResponseConsumer extends AbstractAsyncResponseConsumer { + + private volatile BasicClassicHttpResponse finalResponse; + private volatile HttpResponse response; + private volatile HttpContext context; + + /** + * Creates a new instance of this consumer with the provided buffer limit + */ + BasicAsyncResponseConsumer(AbstractBinAsyncEntityConsumer consumer) { + super(consumer); + } + + @Override + public void informationResponse(HttpResponse response, HttpContext context) { + this.response = response; + this.context = context; + } + + @Override + protected BasicClassicHttpResponse buildResult(HttpResponse response, ByteArrayEntity entity, + ContentType contentType) { + finalResponse = new BasicClassicHttpResponse(response.getCode(), response.getReasonPhrase()); + finalResponse.setEntity(entity); + finalResponse.setHeaders(response.getHeaders()); + return finalResponse; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BufferedByteConsumer.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BufferedByteConsumer.java new file mode 100644 index 000000000..63cee0af9 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/BufferedByteConsumer.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.nio.entity.AbstractBinAsyncEntityConsumer; +import org.apache.hc.core5.util.ByteArrayBuffer; + +import java.nio.ByteBuffer; + +import static co.elastic.clients.transport.rest5_client.low_level.Constants.DEFAULT_BUFFER_INITIAL_CAPACITY; + +class BufferedByteConsumer extends AbstractBinAsyncEntityConsumer { + + private volatile ByteArrayBuffer buffer; + private final int limit; + private ContentType contentType; + + BufferedByteConsumer(int bufferLimit) { + super(); + if (bufferLimit <= 0) { + throw new IllegalArgumentException("Buffer limit must be greater than 0"); + } + this.limit = bufferLimit; + this.buffer = new ByteArrayBuffer(DEFAULT_BUFFER_INITIAL_CAPACITY); + } + + @Override + protected void streamStart(final ContentType contentType) { + this.contentType = contentType; + } + + @Override + protected int capacityIncrement() { + return limit; + } + + @Override + protected void data(final ByteBuffer src, final boolean endOfStream) throws ContentTooLongException { + if (src.capacity() > limit) { + throw new ContentTooLongException( + "entity content is too long [" + src.capacity() + "] for the configured buffer limit [" + limit + "]" + ); + } + buffer.append(src); + } + + @Override + protected ByteArrayEntity generateContent() { + return new ByteArrayEntity(buffer.toByteArray(), contentType); + } + + @Override + public void releaseResources() { + buffer.clear(); + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Cancellable.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Cancellable.java new file mode 100644 index 000000000..05bc7b749 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Cancellable.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.concurrent.CancellableDependency; + +import java.util.concurrent.CancellationException; + +/** + * Represents an operation that can be cancelled. + * Returned when executing async requests through + * {@link Rest5Client#performRequestAsync(Request, ResponseListener)}, so that the request + * can be cancelled if needed. Cancelling a request will result in calling + * {@link HttpUriRequestBase#abort()} on the underlying + * request object, which will in turn cancel its corresponding {@link java.util.concurrent.Future}. 
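+ * <p>
+ * A minimal usage sketch (the endpoint is illustrative; {@code restClient} and {@code listener} are
+ * assumed to be an existing {@link Rest5Client} and {@link ResponseListener}):
+ * <pre>{@code
+ * Request request = new Request("GET", "/_cluster/health");
+ * Cancellable cancellable = restClient.performRequestAsync(request, listener);
+ * // later, if the result is no longer needed:
+ * cancellable.cancel();
+ * }</pre>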
+ * Note that cancelling a request does not automatically translate to aborting its execution on the server + * side, which needs to be specifically implemented in each API. + */ +public abstract class Cancellable implements org.apache.hc.core5.concurrent.Cancellable { + + /** + * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws + * {@link CancellationException}. + */ + abstract void runIfNotCancelled(Runnable runnable); + + static final Cancellable NO_OP = new Cancellable() { + @Override + public boolean cancel() { + throw new UnsupportedOperationException(); + } + + @Override + void runIfNotCancelled(Runnable runnable) { + throw new UnsupportedOperationException(); + } + }; + + static Cancellable fromRequest(HttpUriRequestBase httpRequest) { + return new RequestCancellable(httpRequest); + } + + private static class RequestCancellable extends Cancellable { + + private final CancellableDependency httpRequest; + + private RequestCancellable(HttpUriRequestBase httpRequest) { + this.httpRequest = httpRequest; + } + + public synchronized boolean cancel() { + return this.httpRequest.cancel(); + } + + /** + * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws + * {@link CancellationException}. + * This is needed to guarantee that cancelling a request works correctly even in case + * {@link #cancel()} is called between different + * attempts of the same request. The low-level client reuses the same instance of the + * {@link HttpUriRequestBase} by + * calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} + * method can be called at anytime, + * and we need to handle the case where it gets called while there is no request being executed as + * one attempt may have failed and + * the subsequent attempt has not been started yet. + * If the request has already been cancelled we don't go ahead with the next attempt, and + * artificially raise the + * {@link CancellationException}, otherwise we run the provided {@link Runnable} which will reset + * the request and send the next + * attempt. + * Note that this method must be synchronized as well as the {@link #cancel()} method, to prevent a + * request from being cancelled + * when there is no future to cancel, which would make cancelling the request a no-op. + */ + synchronized void runIfNotCancelled(Runnable runnable) { + if (this.httpRequest.isCancelled()) { + throw newCancellationException(); + } + runnable.run(); + } + } + + static CancellationException newCancellationException() { + return new CancellationException("request was cancelled"); + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Constants.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Constants.java new file mode 100644 index 000000000..9cecb019d --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Constants.java @@ -0,0 +1,26 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +class Constants { + // default buffer limit is 100MB + public static final int DEFAULT_BUFFER_LIMIT = 100 * 1024 * 1024; + public static final int DEFAULT_BUFFER_INITIAL_CAPACITY = 8192; +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/DeadHostState.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/DeadHostState.java new file mode 100644 index 000000000..7024413f8 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/DeadHostState.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +/** + * Holds the state of a dead connection to a host. Keeps track of how many failed attempts were performed and + * when the host should be retried (based on number of previous failed attempts). + * Class is immutable, a new copy of it should be created each time the state has to be changed. + */ +final class DeadHostState implements Comparable { + + private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1); + static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); + static final Supplier DEFAULT_TIME_SUPPLIER = System::nanoTime; + + private final int failedAttempts; + private final long deadUntilNanos; + private final Supplier timeSupplier; + + /** + * Build the initial dead state of a host. Useful when a working host stops functioning + * and needs to be marked dead after its first failure. In such case the host will be retried after a minute or so. + * + * @param timeSupplier a way to supply the current time and allow for unit testing + */ + DeadHostState(Supplier timeSupplier) { + this.failedAttempts = 1; + this.deadUntilNanos = timeSupplier.get() + MIN_CONNECTION_TIMEOUT_NANOS; + this.timeSupplier = timeSupplier; + } + + /** + * Build the dead state of a host given its previous dead state. Useful when a host has been failing before, hence + * it already failed for one or more consecutive times. The more failed attempts we register the longer we wait + * to retry that same host again. 
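+ * For instance, with the backoff used below ({@code 1 minute * 2 * 2^(previousFailedAttempts * 0.5 - 1)},
+ * capped at 30 minutes), the second consecutive failure leads to a wait of roughly 1.4 minutes and the
+ * fourth to roughly 2.8 minutes.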
Minimum is 1 minute (for a node the only failed once created + * through {@link #DeadHostState(Supplier)}), maximum is 30 minutes (for a node that failed more than 10 consecutive times) + * + * @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt + */ + DeadHostState(DeadHostState previousDeadHostState) { + long timeoutNanos = (long) Math.min( + MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), + MAX_CONNECTION_TIMEOUT_NANOS + ); + this.deadUntilNanos = previousDeadHostState.timeSupplier.get() + timeoutNanos; + this.failedAttempts = previousDeadHostState.failedAttempts + 1; + this.timeSupplier = previousDeadHostState.timeSupplier; + } + + /** + * Indicates whether it's time to retry to failed host or not. + * + * @return true if the host should be retried, false otherwise + */ + boolean shallBeRetried() { + return timeSupplier.get() - deadUntilNanos > 0; + } + + /** + * Returns the timestamp (nanos) till the host is supposed to stay dead without being retried. + * After that the host should be retried. + */ + long getDeadUntilNanos() { + return deadUntilNanos; + } + + int getFailedAttempts() { + return failedAttempts; + } + + @Override + public int compareTo(DeadHostState other) { + if (timeSupplier != other.timeSupplier) { + throw new IllegalArgumentException( + "can't compare DeadHostStates holding different time suppliers as they may be based on different clocks" + ); + } + return Long.compare(deadUntilNanos, other.deadUntilNanos); + } + + @Override + public String toString() { + return "DeadHostState{" + + "failedAttempts=" + + failedAttempts + + ", deadUntilNanos=" + + deadUntilNanos + + ", timeSupplier=" + + timeSupplier + + '}'; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HasAttributeNodeSelector.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HasAttributeNodeSelector.java new file mode 100644 index 000000000..c979a3a80 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HasAttributeNodeSelector.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A {@link NodeSelector} that selects nodes that have a particular value + * for an attribute. 
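+ * <p>
+ * A minimal wiring sketch (the attribute name and value are illustrative; {@code builder} is assumed
+ * to be an existing {@link Rest5ClientBuilder}):
+ * <pre>{@code
+ * builder.setNodeSelector(new HasAttributeNodeSelector("rack_id", "rack_one"));
+ * }</pre>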
+ */ +public final class HasAttributeNodeSelector implements NodeSelector { + private final String key; + private final String value; + + public HasAttributeNodeSelector(String key, String value) { + this.key = key; + this.value = value; + } + + @Override + public void select(Iterable nodes) { + Iterator itr = nodes.iterator(); + while (itr.hasNext()) { + Map> allAttributes = itr.next().getAttributes(); + if (allAttributes == null) continue; + List values = allAttributes.get(key); + if (values == null || !values.contains(value)) { + itr.remove(); + } + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HasAttributeNodeSelector that = (HasAttributeNodeSelector) o; + return Objects.equals(key, that.key) && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(key, value); + } + + @Override + public String toString() { + return key + "=" + value; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpAsyncResponseConsumerFactory.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpAsyncResponseConsumerFactory.java new file mode 100644 index 000000000..7730c3837 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpAsyncResponseConsumerFactory.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.support.AbstractAsyncResponseConsumer; + +import static co.elastic.clients.transport.rest5_client.low_level.Constants.DEFAULT_BUFFER_LIMIT; + +/** + * Factory used to create instances of {@link AsyncResponseConsumer}. Each request retry needs its own + * instance of the + * consumer object. Users can implement this interface and pass their own instance to the specialized + * performRequest methods that accept an {@link HttpAsyncResponseConsumerFactory} instance as argument. + */ +public interface HttpAsyncResponseConsumerFactory { + + /** + * Creates the default type of {@link AsyncResponseConsumer}, + * with a buffered consumer with a limit of 100MB. + */ + HttpAsyncResponseConsumerFactory DEFAULT = + new BasicAsyncResponseConsumerFactory(DEFAULT_BUFFER_LIMIT); + + /** + * Creates the {@link AbstractAsyncResponseConsumer}, called once per request attempt. + */ + AsyncResponseConsumer createHttpAsyncResponseConsumer(); + + /** + * Default factory used to create instances of {@link AsyncResponseConsumer}. 
+ * Creates one instance of {@link BasicAsyncResponseConsumer} for each request attempt, with a + * configurable + * buffer limit which defaults to 100MB. + */ + class BasicAsyncResponseConsumerFactory implements HttpAsyncResponseConsumerFactory { + + private final int bufferLimit; + + public BasicAsyncResponseConsumerFactory(int bufferLimit) { + this.bufferLimit = bufferLimit; + } + + @Override + public AsyncResponseConsumer createHttpAsyncResponseConsumer() { + return new BasicAsyncResponseConsumer(new BufferedByteConsumer(bufferLimit)); + } + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpDeleteWithEntity.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpDeleteWithEntity.java new file mode 100644 index 000000000..ba8253219 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpDeleteWithEntity.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; + +import java.net.URI; + +/** + * Allows to send DELETE requests providing a body (not supported out of the box) + */ +final class HttpDeleteWithEntity extends HttpUriRequestBase { + + static final String METHOD_NAME = HttpDelete.METHOD_NAME; + + HttpDeleteWithEntity(URI requestUri) { + super(METHOD_NAME, requestUri); + } + + @Override + public String getMethod() { + return METHOD_NAME; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpGetWithEntity.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpGetWithEntity.java new file mode 100644 index 000000000..93f28d443 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/HttpGetWithEntity.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; + +import java.net.URI; + +/** + * Allows to send GET requests providing a body (not supported out of the box) + */ +final class HttpGetWithEntity extends HttpUriRequestBase { + + static final String METHOD_NAME = HttpGet.METHOD_NAME; + + HttpGetWithEntity(URI requestUri) { + super(METHOD_NAME, requestUri); + } + + @Override + public String getMethod() { + return METHOD_NAME; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/LanguageRuntimeVersions.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/LanguageRuntimeVersions.java new file mode 100644 index 000000000..dd2800d42 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/LanguageRuntimeVersions.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; + +// Copied verbatim from https://github.com/elastic/jvm-languages-sniffer + +class LanguageRuntimeVersions { + + /** + * Returns runtime information by looking up classes identifying non-Java JVM + * languages and appending a key with their name and their major.minor version, if available + */ + public static String getRuntimeMetadata() { + StringBuilder s = new StringBuilder(); + String version; + + version = kotlinVersion(); + if (version != null) { + s.append(",kt=").append(version); + } + + version = scalaVersion(); + if (version != null) { + s.append(",sc=").append(version); + } + + version = clojureVersion(); + if (version != null) { + s.append(",clj=").append(version); + } + + version = groovyVersion(); + if (version != null) { + s.append(",gy=").append(version); + } + + version = jRubyVersion(); + if (version != null) { + s.append(",jrb=").append(version); + } + + return s.toString(); + } + + public static String kotlinVersion() { + // KotlinVersion.CURRENT.toString() + return keepMajorMinor(getStaticField("kotlin.KotlinVersion", "CURRENT")); + } + + public static String scalaVersion() { + // scala.util.Properties.versionNumberString() + return keepMajorMinor(callStaticMethod("scala.util.Properties", "versionNumberString")); + } + + public static String clojureVersion() { + // (clojure-version) which translates to + // clojure.core$clojure_version.invokeStatic() + return keepMajorMinor(callStaticMethod("clojure.core$clojure_version", "invokeStatic")); + } + + public static String groovyVersion() { + // groovy.lang.GroovySystem.getVersion() + // There's also getShortVersion(), but only since Groovy 3.0.1 + return keepMajorMinor(callStaticMethod("groovy.lang.GroovySystem", "getVersion")); + } + + public static String jRubyVersion() { + // org.jruby.runtime.Constants.VERSION + return keepMajorMinor(getStaticField("org.jruby.runtime.Constants", "VERSION")); + } + + private static String getStaticField(String className, String fieldName) { + Class clazz; + try { + clazz = Class.forName(className); + } catch (ClassNotFoundException e) { + return null; + } + + try { + Field field = clazz.getField(fieldName); + return field.get(null).toString(); + } catch (Exception e) { + return ""; // can't get version information + } + } + + private static String callStaticMethod(String className, String methodName) { + Class clazz; + try { + clazz = Class.forName(className); + } catch (ClassNotFoundException e) { + return null; + } + + try { + Method m = clazz.getMethod(methodName); + return m.invoke(null).toString(); + } catch (Exception e) { + return ""; // can't get version information + } + } + + static String keepMajorMinor(String version) { + if (version == null) { + return null; + } + + int firstDot = version.indexOf('.'); + int secondDot = version.indexOf('.', firstDot + 1); + if (secondDot < 0) { + return version; + } else { + return version.substring(0, secondDot); + } + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Node.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Node.java new file mode 100644 index 000000000..29a0ef916 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Node.java @@ -0,0 +1,277 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpHost; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; + +/** + * Metadata about an {@link HttpHost} running Elasticsearch. + */ +public class Node { + /** + * Address that this host claims is its primary contact point. + */ + private final HttpHost host; + /** + * Addresses on which the host is listening. These are useful to have + * around because they allow you to find a host based on any address it + * is listening on. + */ + private final Set boundHosts; + /** + * Name of the node as configured by the {@code node.name} attribute. + */ + private final String name; + /** + * Version of Elasticsearch that the node is running or {@code null} + * if we don't know the version. + */ + private final String version; + /** + * Roles that the Elasticsearch process on the host has or {@code null} + * if we don't know what roles the node has. + */ + private final Roles roles; + /** + * Attributes declared on the node. + */ + private final Map> attributes; + + /** + * Create a {@linkplain Node} with metadata. All parameters except + * {@code host} are nullable and implementations of {@link NodeSelector} + * need to decide what to do in their absence. + */ + public Node(HttpHost host, Set boundHosts, String name, String version, Roles roles, Map> attributes) { + if (host == null) { + throw new IllegalArgumentException("host cannot be null"); + } + this.host = host; + this.boundHosts = boundHosts; + this.name = name; + this.version = version; + this.roles = roles; + this.attributes = attributes; + } + + /** + * Create a {@linkplain Node} without any metadata. + */ + public Node(HttpHost host) { + this(host, null, null, null, null, null); + } + + /** + * Contact information for the host. + */ + public HttpHost getHost() { + return host; + } + + /** + * Addresses on which the host is listening. These are useful to have + * around because they allow you to find a host based on any address it + * is listening on. + */ + public Set getBoundHosts() { + return boundHosts; + } + + /** + * The {@code node.name} of the node. + */ + public String getName() { + return name; + } + + /** + * Version of Elasticsearch that the node is running or {@code null} + * if we don't know the version. + */ + public String getVersion() { + return version; + } + + /** + * Roles that the Elasticsearch process on the host has or {@code null} + * if we don't know what roles the node has. + */ + public Roles getRoles() { + return roles; + } + + /** + * Attributes declared on the node. 
+ */ + public Map> getAttributes() { + return attributes; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + b.append("[host=").append(host); + if (boundHosts != null) { + b.append(", bound=").append(boundHosts); + } + if (name != null) { + b.append(", name=").append(name); + } + if (version != null) { + b.append(", version=").append(version); + } + if (roles != null) { + b.append(", roles=").append(roles); + } + if (attributes != null) { + b.append(", attributes=").append(attributes); + } + return b.append(']').toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Node other = (Node) obj; + return host.equals(other.host) + && Objects.equals(boundHosts, other.boundHosts) + && Objects.equals(name, other.name) + && Objects.equals(version, other.version) + && Objects.equals(roles, other.roles) + && Objects.equals(attributes, other.attributes); + } + + @Override + public int hashCode() { + return Objects.hash(host, boundHosts, name, version, roles, attributes); + } + + /** + * Role information about an Elasticsearch process. + */ + public static final class Roles { + + private final Set roles; + + public Roles(final Set roles) { + this.roles = new TreeSet<>(roles); + } + + /** + * Returns whether or not the node could be elected master. + */ + public boolean isMasterEligible() { + return roles.contains("master"); + } + + /** + * Returns whether or not the node stores data. + * @deprecated use {@link #hasDataRole()} or {@link #canContainData()} + */ + @Deprecated + public boolean isData() { + return roles.contains("data"); + } + + /** + * @return true if node has the "data" role + */ + public boolean hasDataRole() { + return roles.contains("data"); + } + + /** + * @return true if node has the "data_content" role + */ + public boolean hasDataContentRole() { + return roles.contains("data_content"); + } + + /** + * @return true if node has the "data_hot" role + */ + public boolean hasDataHotRole() { + return roles.contains("data_hot"); + } + + /** + * @return true if node has the "data_warm" role + */ + public boolean hasDataWarmRole() { + return roles.contains("data_warm"); + } + + /** + * @return true if node has the "data_cold" role + */ + public boolean hasDataColdRole() { + return roles.contains("data_cold"); + } + + /** + * @return true if node has the "data_frozen" role + */ + public boolean hasDataFrozenRole() { + return roles.contains("data_frozen"); + } + + /** + * @return true if node stores any type of data + */ + public boolean canContainData() { + return hasDataRole() || roles.stream().anyMatch(role -> role.startsWith("data_")); + } + + /** + * Returns whether or not the node runs ingest pipelines. 
+ */ + public boolean isIngest() { + return roles.contains("ingest"); + } + + @Override + public String toString() { + return String.join(",", roles); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Roles other = (Roles) obj; + return roles.equals(other.roles); + } + + @Override + public int hashCode() { + return roles.hashCode(); + } + + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/NodeSelector.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/NodeSelector.java new file mode 100644 index 000000000..80a2aba4e --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/NodeSelector.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import java.util.Iterator; + +/** + * Selects nodes that can receive requests. Used to keep requests away + * from master nodes or to send them to nodes with a particular attribute. + * Use with {@link Rest5ClientBuilder#setNodeSelector(NodeSelector)}. + */ +public interface NodeSelector { + /** + * Select the {@link Node}s to which to send requests. This is called with + * a mutable {@link Iterable} of {@linkplain Node}s in the order that the + * rest client would prefer to use them and implementers should remove + * nodes from the that should not receive the request. Implementers may + * iterate the nodes as many times as they need. + *
* <p>
+ * This may be called twice per request: first for "living" nodes that + * have not been blacklisted by previous errors. If the selector removes + * all nodes from the list or if there aren't any living nodes then the + * {@link Rest5Client} will call this method with a list of "dead" nodes. + *
* <p>
+ * Implementers should not rely on the ordering of the nodes. + */ + void select(Iterable nodes); + /* + * We were fairly careful with our choice of Iterable here. The caller has + * a List but reordering the list is likely to break round robin. Luckily + * Iterable doesn't allow any reordering. + */ + + /** + * Selector that matches any node. + */ + NodeSelector ANY = new NodeSelector() { + @Override + public void select(Iterable nodes) { + // Intentionally does nothing + } + + @Override + public String toString() { + return "ANY"; + } + }; + + /** + * Selector that matches any node that has metadata and doesn't + * have the {@code master} role OR it has the data {@code data} + * role. + */ + NodeSelector SKIP_DEDICATED_MASTERS = new NodeSelector() { + @Override + public void select(Iterable nodes) { + Iterator itr = nodes.iterator(); + while (itr.hasNext()) { + Node node = itr.next(); + if (node.getRoles() == null) continue; + if (node.getRoles().isMasterEligible() + && !node.getRoles().canContainData() + && !node.getRoles().isIngest()) { + itr.remove(); + } + } + } + + @Override + public String toString() { + return "SKIP_DEDICATED_MASTERS"; + } + }; +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/PreferHasAttributeNodeSelector.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/PreferHasAttributeNodeSelector.java new file mode 100644 index 000000000..6526ec071 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/PreferHasAttributeNodeSelector.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Both {@link PreferHasAttributeNodeSelector} and {@link HasAttributeNodeSelector} will work the same + * if there is a {@link Node} with particular attribute in the attributes, + * but {@link PreferHasAttributeNodeSelector} will select another {@link Node}s even if there is no + * {@link Node} + * with particular attribute in the attributes. 
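+ * <p>
+ * For example, {@code new PreferHasAttributeNodeSelector("rack_id", "rack_one")} narrows the candidates
+ * to the {@code rack_one} nodes only when at least one such node is present; otherwise it leaves the
+ * node list untouched (attribute name and value are illustrative).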
+ */ +public final class PreferHasAttributeNodeSelector implements NodeSelector { + private final String key; + private final String value; + + public PreferHasAttributeNodeSelector(String key, String value) { + this.key = key; + this.value = value; + } + + @Override + public void select(Iterable nodes) { + boolean foundAtLeastOne = false; + + for (Node node : nodes) { + Map> attributes = node.getAttributes(); + + if (attributes == null) { + continue; + } + + List values = attributes.get(key); + + if (values == null) { + continue; + } + + if (values.contains(value)) { + foundAtLeastOne = true; + break; + } + } + + if (foundAtLeastOne) { + Iterator itr = nodes.iterator(); + while (itr.hasNext()) { + Map> attributes = itr.next().getAttributes(); + if (attributes == null) { + continue; + } + List values = attributes.get(key); + + if (values == null || !values.contains(value)) { + itr.remove(); + } + } + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PreferHasAttributeNodeSelector that = (PreferHasAttributeNodeSelector) o; + return Objects.equals(key, that.key) && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(key, value); + } + + @Override + public String toString() { + return key + "=" + value; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Request.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Request.java new file mode 100644 index 000000000..e0d0855d3 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Request.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.unmodifiableMap; + +/** + * HTTP Request to Elasticsearch. + */ +public final class Request { + private final String method; + private final String endpoint; + private final Map parameters = new HashMap<>(); + + private HttpEntity entity; + private RequestOptions options = RequestOptions.DEFAULT; + + /** + * Create the {@linkplain Request}. 
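+ * Purely as an illustrative sketch (the index name and body are made up):
+ * <pre>{@code
+ * Request request = new Request("POST", "/my-index/_search");
+ * request.addParameter("pretty", "true");
+ * request.setJsonEntity("{\"query\":{\"match_all\":{}}}");
+ * }</pre>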
+ * @param method the HTTP method + * @param endpoint the path of the request (without scheme, host, port, or prefix) + */ + public Request(String method, String endpoint) { + this.method = Objects.requireNonNull(method, "method cannot be null"); + this.endpoint = Objects.requireNonNull(endpoint, "endpoint cannot be null"); + } + + /** + * The HTTP method. + */ + public String getMethod() { + return method; + } + + /** + * The path of the request (without scheme, host, port, or prefix). + */ + public String getEndpoint() { + return endpoint; + } + + /** + * Add a query string parameter. + * @param name the name of the url parameter. Must not be null. + * @param value the value of the url parameter. If {@code null} then + * the parameter is sent as {@code name} rather than {@code name=value} + * @throws IllegalArgumentException if a parameter with that name has + * already been set + */ + public void addParameter(String name, String value) { + Objects.requireNonNull(name, "url parameter name cannot be null"); + if (parameters.containsKey(name)) { + throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]"); + } else { + parameters.put(name, value); + } + } + + public void addParameters(Map paramSource) { + paramSource.forEach(this::addParameter); + } + + /** + * Query string parameters. The returned map is an unmodifiable view of the + * map in the request so calls to {@link #addParameter(String, String)} + * will change it. + */ + public Map getParameters() { + return unmodifiableMap(parameters); + } + + /** + * Set the body of the request. If not set or set to {@code null} then no + * body is sent with the request. + */ + public void setEntity(HttpEntity entity) { + this.entity = entity; + } + + /** + * Set the body of the request to a string. If not set or set to + * {@code null} then no body is sent with the request. The + * {@code Content-Type} will be sent as {@code application/json}. + * If you need a different content type then use + * {@link #setEntity(HttpEntity)}. + */ + public void setJsonEntity(String body) { + setEntity(body == null ? null : new StringEntity(body, ContentType.APPLICATION_JSON)); + } + + /** + * The body of the request. If {@code null} then no body + * is sent with the request. + */ + public HttpEntity getEntity() { + return entity; + } + + /** + * Set the portion of an HTTP request to Elasticsearch that can be + * manipulated without changing Elasticsearch's behavior. + */ + public void setOptions(RequestOptions options) { + Objects.requireNonNull(options, "options cannot be null"); + this.options = options; + } + + /** + * Set the portion of an HTTP request to Elasticsearch that can be + * manipulated without changing Elasticsearch's behavior. + */ + public void setOptions(RequestOptions.Builder options) { + Objects.requireNonNull(options, "options cannot be null"); + this.options = options.build(); + } + + /** + * Get the portion of an HTTP request to Elasticsearch that can be + * manipulated without changing Elasticsearch's behavior. 
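+ * For illustration only (the header name is a placeholder), options are usually derived from
+ * {@link RequestOptions#DEFAULT} and applied through {@link #setOptions(RequestOptions.Builder)}:
+ * <pre>{@code
+ * request.setOptions(RequestOptions.DEFAULT.toBuilder()
+ *     .addHeader("X-Example-Header", "value"));
+ * }</pre>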
+ */ + public RequestOptions getOptions() { + return options; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + b.append("Request{"); + b.append("method='").append(method).append('\''); + b.append(", endpoint='").append(endpoint).append('\''); + if (false == parameters.isEmpty()) { + b.append(", params=").append(parameters); + } + if (entity != null) { + b.append(", entity=").append(entity); + } + b.append(", options=").append(options); + return b.append('}').toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || (obj.getClass() != getClass())) { + return false; + } + if (obj == this) { + return true; + } + + Request other = (Request) obj; + return method.equals(other.method) + && endpoint.equals(other.endpoint) + && parameters.equals(other.parameters) + && Objects.equals(entity, other.entity) + && options.equals(other.options); + } + + @Override + public int hashCode() { + return Objects.hash(method, endpoint, parameters, entity, options); + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/RequestLogger.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/RequestLogger.java new file mode 100644 index 000000000..4f41421ad --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/RequestLogger.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; + +/** + * Helper class that exposes static methods to unify the way requests are logged. + * Includes trace logging to log complete requests and responses in curl format. + * Useful for debugging, manually sending logged requests via curl and checking their responses. + * Trace logging is a feature that all the language clients provide. 
+ */ +final class RequestLogger { + + private static final Log tracer = LogFactory.getLog(Request.class); + + private RequestLogger() { + } + + /** + * Logs a request that yielded a response + */ + static void logResponse(Log logger, HttpUriRequest request, HttpHost host, + ClassicHttpResponse httpResponse) { + if (logger.isDebugEnabled()) { + logger.debug( + "request [" + + request.getMethod() + + " " + + host + + getUri(request.getRequestUri()) + + "] returned [" + + httpResponse.getCode() + + "]" + ); + } + if (logger.isWarnEnabled()) { + Header[] warnings = httpResponse.getHeaders("Warning"); + if (warnings != null && warnings.length > 0) { + logger.warn(buildWarningMessage(request, host, warnings)); + } + } + if (tracer.isTraceEnabled()) { + String requestLine; + try { + requestLine = buildTraceRequest(request, host); + } catch (IOException | ParseException e) { + requestLine = ""; + tracer.trace("error while reading request for trace purposes", e); + } + String responseLine; + try { + responseLine = buildTraceResponse(httpResponse); + } catch (IOException e) { + responseLine = ""; + tracer.trace("error while reading response for trace purposes", e); + } + tracer.trace(requestLine + '\n' + responseLine); + } + } + + /** + * Logs a request that failed + */ + static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) { + if (logger.isDebugEnabled()) { + logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestUri()) + "] failed", e); + } + if (tracer.isTraceEnabled()) { + String traceRequest; + try { + traceRequest = buildTraceRequest(request, node.getHost()); + } catch (IOException | ParseException e1) { + tracer.trace("error while reading request for trace purposes", e); + traceRequest = ""; + } + tracer.trace(traceRequest); + } + } + + static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[] warnings) { + StringBuilder message = new StringBuilder("request [").append(request.getMethod()) + .append(" ") + .append(host) + .append(getUri(request.getRequestUri())) + .append("] returned ") + .append(warnings.length) + .append(" warnings: "); + for (int i = 0; i < warnings.length; i++) { + if (i > 0) { + message.append(","); + } + message.append("[").append(warnings[i].getValue()).append("]"); + } + return message.toString(); + } + + /** + * Creates curl output for given request + */ + static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException, + ParseException { + String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestUri()) + "'"; + + if (request.getEntity() != null) { + requestLine += " -d '"; + HttpEntity entity = request.getEntity(); + if (!entity.isRepeatable()) { + entity = new BufferedHttpEntity(request.getEntity()); + request.setEntity(entity); + } + requestLine += EntityUtils.toString(entity, StandardCharsets.UTF_8) + "'"; + } + return requestLine; + } + + /** + * Creates curl output for given response + */ + static String buildTraceResponse(ClassicHttpResponse httpResponse) throws IOException { + StringBuilder responseLine = new StringBuilder(); + responseLine.append("# ").append(httpResponse.getCode()); + for (Header header : httpResponse.getHeaders()) { + responseLine.append("\n# ").append(header.getName()).append(": ").append(header.getValue()); + } + responseLine.append("\n#"); + HttpEntity entity = httpResponse.getEntity(); + if (entity != null) { + if (!entity.isRepeatable()) { + entity = new 
BufferedHttpEntity(entity); + } + httpResponse.setEntity(entity); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent()))) { + String line; + while ((line = reader.readLine()) != null) { + responseLine.append("\n# ").append(line); + } + } + } + return responseLine.toString(); + } + + private static String getUri(String requestLine) { + if (requestLine.charAt(0) != '/') { + return "/" + requestLine; + } + return requestLine; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/RequestOptions.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/RequestOptions.java new file mode 100644 index 000000000..7df4dd4f1 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/RequestOptions.java @@ -0,0 +1,337 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * The portion of an HTTP request to Elasticsearch that can be + * manipulated without changing Elasticsearch's behavior. + */ +public final class RequestOptions { + /** + * Default request options. + */ + public static final RequestOptions DEFAULT = new Builder( + Collections.emptyList(), + Collections.emptyMap(), + HttpAsyncResponseConsumerFactory.DEFAULT, + null, + null + ).build(); + + private final List

headers; + private final Map parameters; + private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; + private final WarningsHandler warningsHandler; + private final RequestConfig requestConfig; + + private RequestOptions(Builder builder) { + this.headers = builder.headers; + this.parameters = Collections.unmodifiableMap(builder.parameters); + this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory; + this.warningsHandler = builder.warningsHandler; + this.requestConfig = builder.requestConfig; + } + + /** + * Create a builder that contains these options but can be modified. + */ + public Builder toBuilder() { + return new Builder(headers, parameters, httpAsyncResponseConsumerFactory, warningsHandler, + requestConfig); + } + + /** + * Headers to attach to the request. + */ + public List
getHeaders() { + return List.copyOf(headers); + } + + /** + * Return true if the options contain the given header + */ + public boolean containsHeader(String name) { + return headers.stream().anyMatch(h -> name.equalsIgnoreCase(h.getName())); + } + + /** + * Replaces an existing bearer token header + */ + public void updateToken(String token) { + headers.stream() + .filter(h -> "Bearer ".equalsIgnoreCase(h.getName())) + .findAny().ifPresent(headers::remove); + headers.add(new ReqHeader("Authorization", "Bearer " +token)); + } + + public Map getParameters() { + return parameters; + } + + /** + * The {@link HttpAsyncResponseConsumerFactory} used to create one + * {@link AsyncResponseConsumer} callback per retry. Controls how the + * response body gets streamed from a non-blocking HTTP connection on the + * client side. + */ + public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() { + return httpAsyncResponseConsumerFactory; + } + + + /** + * How this request should handle warnings. If null (the default) then + * this request will default to the behavior dictacted by + * {@link Rest5ClientBuilder#setStrictDeprecationMode}. + *

+ * This can be set to {@link WarningsHandler#PERMISSIVE} if the client + * should ignore all warnings, which is the same behavior as setting + * strictDeprecationMode to false. It can be set to + * {@link WarningsHandler#STRICT} if the client should fail if there are + * any warnings, which is the same behavior as setting + * strictDeprecationMode to true. + *

+ * It can also be set to a custom implementation of + * {@linkplain WarningsHandler} to permit only certain warnings or to + * fail the request if the warnings returned don't + * exactly match some set. + */ + public WarningsHandler getWarningsHandler() { + return warningsHandler; + } + + /** + * get RequestConfig, which can set socketTimeout, connectTimeout + * and so on by request + * + * @return RequestConfig + */ + public RequestConfig getRequestConfig() { + return requestConfig; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + b.append("RequestOptions{"); + boolean comma = false; + if (!headers.isEmpty()) { + b.append("headers="); + comma = true; + for (int h = 0; h < headers.size(); h++) { + if (h != 0) { + b.append(','); + } + b.append(headers.get(h).toString()); + } + } + if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { + if (comma) b.append(", "); + comma = true; + b.append("consumerFactory=").append(httpAsyncResponseConsumerFactory); + } + if (warningsHandler != null) { + if (comma) b.append(", "); + comma = true; + b.append("warningsHandler=").append(warningsHandler); + } + return b.append('}').toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || (obj.getClass() != getClass())) { + return false; + } + if (obj == this) { + return true; + } + + RequestOptions other = (RequestOptions) obj; + return headers.equals(other.headers) + && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory) + && Objects.equals(warningsHandler, other.warningsHandler); + } + + @Override + public int hashCode() { + return Objects.hash(headers, warningsHandler, httpAsyncResponseConsumerFactory); + } + + /** + * Builds {@link RequestOptions}. Get one by calling + * {@link RequestOptions#toBuilder} on {@link RequestOptions#DEFAULT} or + * any other {@linkplain RequestOptions}. + */ + public static class Builder { + private final List

headers; + private final Map parameters; + private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; + private WarningsHandler warningsHandler; + private RequestConfig requestConfig; + + private Builder( + List
headers, + Map parameters, + HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, + WarningsHandler warningsHandler, + RequestConfig requestConfig + ) { + this.headers = new ArrayList<>(headers); + this.parameters = new HashMap<>(parameters); + this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory; + this.warningsHandler = warningsHandler; + this.requestConfig = requestConfig; + } + + /** + * Build the {@linkplain RequestOptions}. + */ + public RequestOptions build() { + return new RequestOptions(this); + } + + /** + * Add the provided header to the request. + */ + public Builder addHeader(String name, String value) { + Objects.requireNonNull(name, "header name cannot be null"); + Objects.requireNonNull(value, "header value cannot be null"); + this.headers.add(new ReqHeader(name, value)); + return this; + } + + /** + * Remove all headers with the given name. + */ + public Builder removeHeader(String name) { + Objects.requireNonNull(name, "header name cannot be null"); + this.headers.removeIf(h -> name.equalsIgnoreCase(h.getName())); + return this; + } + + /** + * Return all headers for the request + */ + public List
getHeaders() { + return this.headers; + } + + /** + * Add the provided parameter to the request. + */ + public Builder addParameter(String key, String value) { + Objects.requireNonNull(key, "parameter key cannot be null"); + Objects.requireNonNull(value, "parameter value cannot be null"); + this.parameters.merge(key, value, (existingValue, newValue) -> String.join(",", existingValue, + newValue)); + return this; + } + + /** + * Set the {@link HttpAsyncResponseConsumerFactory} used to create one + * {@link AsyncResponseConsumer} callback per retry. Controls how the + * response body gets streamed from a non-blocking HTTP connection on the + * client side. + */ + public Builder setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { + this.httpAsyncResponseConsumerFactory = Objects.requireNonNull( + httpAsyncResponseConsumerFactory, + "httpAsyncResponseConsumerFactory cannot be null" + ); + return this; + } + + /** + * How this request should handle warnings. If null (the default) then + * this request will default to the behavior dictacted by + * {@link Rest5ClientBuilder#setStrictDeprecationMode}. + *

+ * This can be set to {@link WarningsHandler#PERMISSIVE} if the client + * should ignore all warnings, which is the same behavior as setting + * strictDeprecationMode to false. It can be set to + * {@link WarningsHandler#STRICT} if the client should fail if there are + * any warnings, which is the same behavior as setting + * strictDeprecationMode to true. + *

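+ * A custom handler can also be supplied, as described below. Purely as an illustrative
+ * sketch (the filter string is made up; the lambda assumes
+ * {@link WarningsHandler#warningsShouldFailRequest} is the handler's only abstract method,
+ * as its use elsewhere in this client suggests):
+ * <pre>{@code
+ * RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder();
+ * optionsBuilder.setWarningsHandler(warnings ->
+ *     warnings.stream().anyMatch(w -> w.contains("deprecated")));
+ * }</pre>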
+ * It can also be set to a custom implementation of + * {@linkplain WarningsHandler} to permit only certain warnings or to + * fail the request if the warnings returned don't + * exactly match some set. + */ + public Builder setWarningsHandler(WarningsHandler warningsHandler) { + this.warningsHandler = warningsHandler; + return this; + } + + /** + * set RequestConfig, which can set socketTimeout, connectTimeout + * and so on by request + * + * @param requestConfig http client RequestConfig + * @return Builder + */ + public Builder setRequestConfig(RequestConfig requestConfig) { + this.requestConfig = requestConfig; + return this; + } + } + + /** + * Custom implementation of {@link BasicHeader} that overrides equals and + * hashCode so it is easier to test equality of {@link RequestOptions}. + */ + static final class ReqHeader extends BasicHeader { + + ReqHeader(String name, String value) { + super(name, value); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof ReqHeader) { + Header otherHeader = (Header) other; + return Objects.equals(getName(), otherHeader.getName()) && Objects.equals(getValue(), + otherHeader.getValue()); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getValue()); + } + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Response.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Response.java new file mode 100644 index 000000000..ce9c9afa7 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Response.java @@ -0,0 +1,208 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + + +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Holds an elasticsearch response. It wraps the {@link BasicClassicHttpResponse} returned and associates + * it with its corresponding {@link RequestLine} and {@link HttpHost}. 
+ */ +public class Response { + + private final RequestLine requestLine; + private final HttpHost host; + private final ClassicHttpResponse response; + + Response(RequestLine requestLine, HttpHost host, ClassicHttpResponse response) { + Objects.requireNonNull(requestLine, "requestLine cannot be null"); + Objects.requireNonNull(host, "host cannot be null"); + Objects.requireNonNull(response, "response cannot be null"); + this.requestLine = requestLine; + this.host = host; + this.response = response; + } + + /** + * Returns the request line that generated this response + */ + public RequestLine getRequestLine() { + return requestLine; + } + + /** + * Returns the node that returned this response + */ + public HttpHost getHost() { + return host; + } + + public int getStatusCode() { + return response.getCode(); + } + + /** + * Returns all the response headers + */ + public Header[] getHeaders() { + return response.getHeaders(); + } + + /** + * Returns the value of the first header with a specified name of this message. + * If there is more than one matching header in the message the first element is returned. + * If there is no matching header in the message null is returned. + */ + public String getHeader(String name) { + Header header = response.getFirstHeader(name); + if (header == null) { + return null; + } + return header.getValue(); + } + + /** + * Returns the response body available, null otherwise + * + * @see HttpEntity + */ + public HttpEntity getEntity() { + return response.getEntity(); + } + + /** + * Optimized regular expression to test if a string matches the RFC 1123 date + * format (with quotes and leading space). Start/end of line characters and + * atomic groups are used to prevent backtracking. + */ + private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile("^ " + // start of line, + // leading space + // quoted RFC 1123 date format + "\"" + // opening quote + "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking + "\\d{2} " + // 2-digit day + "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent + // backtracking + "\\d{4} " + // 4-digit year + "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) + "GMT" + // GMT + "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line + + /** + * Length of RFC 1123 format (with quotes and leading space), used in + * matchWarningHeaderPatternByPrefix(String). + */ + // tag::noformat + private static final int WARNING_HEADER_DATE_LENGTH = 0 + + 1 + + 1 + + 3 + 1 + 1 + + 2 + 1 + + 3 + 1 + + 4 + 1 + + 2 + 1 + 2 + 1 + 2 + 1 + + 3 + + 1; + // end::noformat + + /** + * Tests if a string matches the RFC 7234 specification for warning headers. + * This assumes that the warn code is always 299 or 300 and the warn agent is + * always Elasticsearch. + * + * @param s the value of a warning header formatted according to RFC 7234 + * @return {@code true} if the input string matches the specification + */ + private static boolean matchWarningHeaderPatternByPrefix(final String s) { + return s.startsWith("299 Elasticsearch-") || s.startsWith("300 Elasticsearch-"); + } + + /** + * Refer to org.elasticsearch.common.logging.DeprecationLogger + */ + private static String extractWarningValueFromWarningHeader(final String s) { + String warningHeader = s; + + /* + * The following block tests for the existence of a RFC 1123 date in the warning header. 
If the + * date exists, it is removed for + * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates). + */ + if (s.length() > WARNING_HEADER_DATE_LENGTH) { + final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH); + final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString); + + if (matcher.matches()) { + warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH); + } + } + + final int firstQuote = warningHeader.indexOf('\"'); + final int lastQuote = warningHeader.length() - 1; + final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote); + return warningValue; + } + + /** + * Returns a list of all warning headers returned in the response. + */ + public List getWarnings() { + List warnings = new ArrayList<>(); + for (Header header : response.getHeaders("Warning")) { + String warning = header.getValue(); + if (matchWarningHeaderPatternByPrefix(warning)) { + warnings.add(extractWarningValueFromWarningHeader(warning)); + } else { + warnings.add(warning); + } + } + return warnings; + } + + /** + * Returns true if there is at least one warning header returned in the + * response. + */ + public boolean hasWarnings() { + Header[] warnings = response.getHeaders("Warning"); + return warnings != null && warnings.length > 0; + } + + ClassicHttpResponse getHttpResponse() { + return response; + } + + @Override + public String toString() { + return "Response{requestLine=" + requestLine + ", host=" + host + ", response=" + response.getCode() + '}'; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/ResponseException.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/ResponseException.java new file mode 100644 index 000000000..f02b9803a --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/ResponseException.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; + +import java.io.IOException; +import java.util.Locale; + +/** + * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error. + * Holds the response that was returned. 
+ */ +public final class ResponseException extends IOException { + + private final Response response; + + public ResponseException(Response response) throws IOException { + super(buildMessage(response)); + this.response = response; + } + + static String buildMessage(Response response) throws IOException { + String message = String.format( + Locale.ROOT, + "method [%s], host [%s], URI [%s], status line [%s]", + response.getRequestLine().getMethod(), + response.getHost(), + response.getRequestLine().getUri(), + response.getStatusCode() + ); + + if (response.hasWarnings()) { + message += "\nWarnings: " + response.getWarnings(); + } + + HttpEntity entity = response.getEntity(); + if (entity != null) { + if (!entity.isRepeatable()) { + entity = new BufferedHttpEntity(entity); + response.getHttpResponse().setEntity(entity); + } + try { + message += "\n" + EntityUtils.toString(entity); + } catch (ParseException e) { + throw new IOException("Could not parse headers: " + e); + } + } + return message; + } + + /** + * Returns the {@link Response} that caused this exception to be thrown. + */ + public Response getResponse() { + return response; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/ResponseListener.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/ResponseListener.java new file mode 100644 index 000000000..0ee20741f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/ResponseListener.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +/** + * Listener to be provided when calling async performRequest methods provided by {@link Rest5Client}. + * Those methods that do accept a listener will return immediately, execute asynchronously, and notify + * the listener whenever the request yielded a response, or failed with an exception. + * + *

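+ * For illustration only ({@code client} stands for an existing {@link Rest5Client} instance
+ * and the endpoint is a placeholder):
+ * <pre>{@code
+ * Cancellable cancellable = client.performRequestAsync(
+ *     new Request("GET", "/"),
+ *     new ResponseListener() {
+ *         public void onSuccess(Response response) {
+ *             // consume the response
+ *         }
+ *         public void onFailure(Exception exception) {
+ *             // log, retry, or surface the failure
+ *         }
+ *     });
+ * }</pre>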
+ * Note that it is not safe to call {@link Rest5Client#close()} from either of these + * callbacks. + */ +public interface ResponseListener { + + /** + * Method invoked if the request yielded a successful response + */ + void onSuccess(Response response); + + /** + * Method invoked if the request failed. There are two main categories of failures: connection failures (usually + * {@link java.io.IOException}s, or responses that were treated as errors based on their error response code + * ({@link ResponseException}s). + */ + void onFailure(Exception exception); +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5Client.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5Client.java new file mode 100644 index 000000000..790004e87 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5Client.java @@ -0,0 +1,973 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hc.client5.http.ClientProtocolException; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.async.HttpAsyncClient; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.entity.GzipCompressingEntity; +import org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.protocol.HttpClientContext; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.ProtocolException; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import 
org.apache.hc.core5.http.nio.support.AsyncRequestBuilder; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; + +import javax.net.ssl.SSLHandshakeException; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.net.ConnectException; +import java.net.SocketTimeoutException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonList; +import static org.apache.hc.core5.http.HttpHeaders.CONTENT_ENCODING; +import static org.apache.hc.core5.http.HttpHeaders.CONTENT_LENGTH; + +/** + * Client that connects to an Elasticsearch cluster through HTTP. + *

+ * Must be created using {@link Rest5ClientBuilder}, which allows setting all the different options or just + * relying on defaults. + * The hosts that are part of the cluster need to be provided at creation time, but they can also be replaced + * later by calling {@link #setNodes(Collection)}. + *

+ * The method {@link #performRequest(Request)} sends a request to the cluster. When + * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts + * are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), + * depending on how many times they previously failed (the more failures, the later they will be retried). + * In case of failures, all of the alive nodes (or dead nodes that deserve a retry) are retried until one + * responds or none of them does, in which case an + * {@link IOException} will be thrown. + *

+ * Requests can be either synchronous or asynchronous. The asynchronous variants all end with {@code Async}. + *

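+ * As an illustrative sketch only (the endpoint is a placeholder, and {@code build()} on
+ * {@link Rest5ClientBuilder} is assumed to follow the usual builder convention):
+ * <pre>{@code
+ * Rest5Client client = Rest5Client.builder(URI.create("http://localhost:9200")).build();
+ * Response response = client.performRequest(new Request("GET", "/"));
+ * int status = response.getStatusCode();
+ * client.close();
+ * }</pre>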
+ * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and + * responses in curl format. + */ +public class Rest5Client implements Closeable { + + public static final String IGNORE_RESPONSE_CODES_PARAM = "ignore"; + + private static final Log logger = LogFactory.getLog(Rest5Client.class); + + private final CloseableHttpAsyncClient client; + // We don't rely on default headers supported by HttpAsyncClient as those cannot be replaced. + // These are package private for tests. + final List

defaultHeaders; + private final String pathPrefix; + private final AtomicInteger lastNodeIndex = new AtomicInteger(0); + private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); + private final FailureListener failureListener; + private final NodeSelector nodeSelector; + private volatile List nodes; + private final WarningsHandler warningsHandler; + private final boolean compressionEnabled; + private final boolean metaHeaderEnabled; + + Rest5Client( + CloseableHttpAsyncClient client, + Header[] defaultHeaders, + List nodes, + String pathPrefix, + FailureListener failureListener, + NodeSelector nodeSelector, + boolean strictDeprecationMode, + boolean compressionEnabled, + boolean metaHeaderEnabled + ) { + this.client = client; + this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders)); + this.failureListener = failureListener; + this.pathPrefix = pathPrefix; + this.nodeSelector = nodeSelector; + this.warningsHandler = strictDeprecationMode ? WarningsHandler.STRICT : WarningsHandler.PERMISSIVE; + this.compressionEnabled = compressionEnabled; + this.metaHeaderEnabled = metaHeaderEnabled; + setNodes(nodes); + } + + /** + * Returns a new {@link Rest5ClientBuilder} to help with {@link Rest5Client} creation. + * Creates a new builder instance and sets the hosts that the client will send requests to. + */ + public static Rest5ClientBuilder builder(URI... uris) { + if (uris == null || uris.length == 0) { + throw new IllegalArgumentException("uris must not be null nor empty"); + } + List nodes = Arrays.stream(uris).map(u -> new Node(HttpHost.create(u))).toList(); + return new Rest5ClientBuilder(nodes); + } + + /** + * Returns a new {@link Rest5ClientBuilder} to help with {@link Rest5Client} creation. + * Creates a new builder instance and sets the hosts that the client will send requests to. + */ + public static Rest5ClientBuilder builder(List uris) { + if (uris == null || uris.isEmpty()) { + throw new IllegalArgumentException("uris must not be null nor empty"); + } + List nodes = uris.stream().map(u -> { + if (!u.isAbsolute()) { + throw new IllegalArgumentException("Expecting an absolute url: [" + u + "]"); + } + return new Node(HttpHost.create(u)); + }).toList(); + return new Rest5ClientBuilder(nodes); + } + + /** + * Returns a new {@link Rest5ClientBuilder} to help with {@link Rest5Client} creation. + * Creates a new builder instance and sets the hosts that the client will send requests to. + *

+ * Prefer this to {@link #builder(HttpHost...)} if you have metadata up front about the nodes. + * If you don't, either one is fine. + */ + public static Rest5ClientBuilder builder(Node... nodes) { + return new Rest5ClientBuilder(nodes == null ? null : Arrays.asList(nodes)); + } + + /** + * Returns a new {@link Rest5ClientBuilder} to help with {@link Rest5Client} creation. + * Creates a new builder instance and sets the nodes that the client will send requests to. + *

+ * You can use this if you do not have metadata up front about the nodes. If you do, prefer + * {@link #builder(Node...)}. + * + * @see Node#Node(HttpHost) + */ + public static Rest5ClientBuilder builder(HttpHost... hosts) { + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("hosts must not be null nor empty"); + } + List nodes = Arrays.stream(hosts).map(Node::new).collect(Collectors.toList()); + return new Rest5ClientBuilder(nodes); + } + + /** + * Get the underlying HTTP client. + */ + public HttpAsyncClient getHttpClient() { + return this.client; + } + + /** + * Replaces the nodes with which the client communicates. + */ + public synchronized void setNodes(Collection nodes) { + if (nodes == null || nodes.isEmpty()) { + throw new IllegalArgumentException("node list must not be null or empty"); + } + + Map nodesByHost = new LinkedHashMap<>(); + for (Node node : nodes) { + Objects.requireNonNull(node, "node cannot be null"); + nodesByHost.put(node.getHost(), node); + } + this.nodes = new ArrayList<>(nodesByHost.values()); + this.blacklist.clear(); + } + + /** + * Get the list of nodes that the client knows about. The list is + * unmodifiable. + */ + public List getNodes() { + return nodes; + } + + /** + * check client running status + * + * @return client running status + */ + public boolean isRunning() { + return client.getStatus() == IOReactorStatus.ACTIVE; + } + + /** + * Sends a request to the Elasticsearch cluster that the client points to. + * Blocks until the request is completed and returns its response or fails + * by throwing an exception. Selects a host out of the provided ones in a + * round-robin fashion. Failing hosts are marked dead and retried after a + * certain amount of time (minimum 1 minute, maximum 30 minutes), depending + * on how many times they previously failed (the more failures, the later + * they will be retried). In case of failures all of the alive nodes (or + * dead nodes that deserve a retry) are retried until one responds or none + * of them does, in which case an {@link IOException} will be thrown. + *

+ * This method works by performing an asynchronous call and waiting + * for the result. If the asynchronous call throws an exception we wrap + * it and rethrow it so that the stack trace attached to the exception + * contains the call site. While we attempt to preserve the original + * exception this isn't always possible and likely haven't covered all of + * the cases. You can get the original exception from + * {@link Exception#getCause()}. + * + * @param request the request to perform + * @return the response returned by Elasticsearch + * @throws IOException in case of a problem or the connection was aborted + * @throws ClientProtocolException in case of an http protocol error + * @throws ResponseException in case Elasticsearch responded with a status code that indicated an + * error + */ + public Response performRequest(Request request) throws IOException { + InternalRequest internalRequest = new InternalRequest(request); + return performRequest(nextNodes(), internalRequest, null); + } + + private Response performRequest(final Iterator nodes, final InternalRequest request, + Exception previousException) + throws IOException { + RequestContext context = request.createContextForNextAttempt(nodes.next()); + ClassicHttpResponse httpResponse; + try { + httpResponse = client.execute(context.requestProducer, + context.asyncResponseConsumer, + context.context, null).get(); + } catch (Exception e) { + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, e); + onFailure(context.node); + Exception cause = extractAndWrapCause(e); + addSuppressedException(previousException, cause); + if (isRetryableException(e) && nodes.hasNext()) { + return performRequest(nodes, request, cause); + } + if (cause instanceof IOException) { + throw (IOException) cause; + } + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } + throw new IllegalStateException("unexpected exception type: must be either RuntimeException or " + + "IOException", cause); + } + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, + httpResponse); + if (responseOrResponseException.responseException == null) { + return responseOrResponseException.response; + } + addSuppressedException(previousException, responseOrResponseException.responseException); + if (nodes.hasNext()) { + return performRequest(nodes, request, responseOrResponseException.responseException); + } + throw responseOrResponseException.responseException; + } + + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, + ClassicHttpResponse httpResponse) throws IOException { + RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); + int statusCode = httpResponse.getCode(); + + HttpEntity entity = httpResponse.getEntity(); + if (entity != null) { + Header encoding = null; + try { + encoding = httpResponse.getHeader(CONTENT_ENCODING); + } catch (ProtocolException e) { + throw new IOException("Couldn't retrieve content encoding: " + e); + } + if (encoding != null && "gzip".equals(encoding.getValue())) { + // Decompress and cleanup response headers + httpResponse.setEntity(new GzipDecompressingEntity(entity)); + httpResponse.removeHeaders(CONTENT_ENCODING); + httpResponse.removeHeaders(CONTENT_LENGTH); + } + } + + Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse); + if (isCorrectServerResponse(statusCode)) { + onResponse(node); + if 
(request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) { + throw new WarningFailureException(response); + } + return new ResponseOrResponseException(response); + } + ResponseException responseException = new ResponseException(response); + if (isRetryStatus(statusCode)) { + // mark host dead and retry against next one + onFailure(node); + return new ResponseOrResponseException(responseException); + } + // mark host alive and don't retry, as the error should be a request problem + onResponse(node); + throw responseException; + } + + /** + * Sends a request to the Elasticsearch cluster that the client points to. + * The request is executed asynchronously and the provided + * {@link ResponseListener} gets notified upon request completion or + * failure. Selects a host out of the provided ones in a round-robin + * fashion. Failing hosts are marked dead and retried after a certain + * amount of time (minimum 1 minute, maximum 30 minutes), depending on how + * many times they previously failed (the more failures, the later they + * will be retried). In case of failures all of the alive nodes (or dead + * nodes that deserve a retry) are retried until one responds or none of + * them does, in which case an {@link IOException} will be thrown. + * + * @param request the request to perform + * @param responseListener the {@link ResponseListener} to notify when the + * request is completed or fails + */ + public Cancellable performRequestAsync(Request request, ResponseListener responseListener) { + try { + FailureTrackingResponseListener failureTrackingResponseListener = + new FailureTrackingResponseListener(responseListener); + InternalRequest internalRequest = new InternalRequest(request); + performRequestAsync(nextNodes(), internalRequest, failureTrackingResponseListener); + return internalRequest.cancellable; + } catch (Exception e) { + responseListener.onFailure(e); + return Cancellable.NO_OP; + } + } + + private void performRequestAsync( + final Iterator nodes, + final InternalRequest request, + final FailureTrackingResponseListener listener + ) { + request.cancellable.runIfNotCancelled(() -> { + final RequestContext context; + context = request.createContextForNextAttempt(nodes.next()); + Future futureRef = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, + new FutureCallback() { + @Override + public void completed(ClassicHttpResponse httpResponse) { + try { + ResponseOrResponseException responseOrResponseException = convertResponse(request, + context.node, httpResponse); + if (responseOrResponseException.responseException == null) { + listener.onSuccess(responseOrResponseException.response); + } else { + if (nodes.hasNext()) { + listener.trackFailure(responseOrResponseException.responseException); + performRequestAsync(nodes, request, listener); + } else { + listener.onDefinitiveFailure(responseOrResponseException.responseException); + } + } + } catch (Exception e) { + listener.onDefinitiveFailure(e); + } + } + + @Override + public void failed(Exception failure) { + try { + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, + failure); + onFailure(context.node); + if (isRetryableException(failure) && nodes.hasNext()) { + listener.trackFailure(failure); + performRequestAsync(nodes, request, listener); + } else { + listener.onDefinitiveFailure(failure); + } + } catch (Exception e) { + listener.onDefinitiveFailure(e); + } + } + + @Override + public void cancelled() { + 
listener.onDefinitiveFailure(Cancellable.newCancellationException()); + } + }); + // needed to be able to cancel asnyc requests + if (futureRef instanceof org.apache.hc.core5.concurrent.Cancellable) { + request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) futureRef); + } + }); + } + + /** + * Returns a non-empty {@link Iterator} of nodes to be used for a request + * that match the {@link NodeSelector}. + *

+ * If there are no living nodes that match the {@link NodeSelector} + * this will return the dead node that matches the {@link NodeSelector} + * that is closest to being revived. + * + * @throws IOException if no nodes are available + */ + private Iterator nextNodes() throws IOException { + List nodes = this.nodes; + return selectNodes(nodes, blacklist, lastNodeIndex, nodeSelector).iterator(); + } + + /** + * Select nodes to try and sorts them so that the first one will be tried initially, then the following + * ones + * if the previous attempt failed and so on. Package private for testing. + */ + static Iterable selectNodes( + List nodes, + Map blacklist, + AtomicInteger lastNodeIndex, + NodeSelector nodeSelector + ) throws IOException { + /* + * Sort the nodes into living and dead lists. + */ + List livingNodes = new ArrayList<>(Math.max(0, nodes.size() - blacklist.size())); + List deadNodes = null; + if (!blacklist.isEmpty()) { + deadNodes = new ArrayList<>(blacklist.size()); + new ArrayList<>(blacklist.size()); + for (Node node : nodes) { + DeadHostState deadness = blacklist.get(node.getHost()); + if (deadness == null || deadness.shallBeRetried()) { + livingNodes.add(node); + } else { + deadNodes.add(new DeadNode(node, deadness)); + } + } + } + // happy path when no failing nodes + else { + livingNodes.addAll(nodes); + } + + if (!livingNodes.isEmpty()) { + /* + * Normal state: there is at least one living node. If the + * selector is ok with any over the living nodes then use them + * for the request. + */ + List selectedLivingNodes = new ArrayList<>(livingNodes); + nodeSelector.select(selectedLivingNodes); + if (!selectedLivingNodes.isEmpty()) { + /* + * Rotate the list using a global counter as the distance so subsequent + * requests will try the nodes in a different order. + */ + Collections.rotate(selectedLivingNodes, lastNodeIndex.getAndIncrement()); + return selectedLivingNodes; + } + } + + /* + * Last resort: there are no good nodes to use, either because + * the selector rejected all the living nodes or because there aren't + * any living ones. Either way, we want to revive a single dead node + * that the NodeSelectors are OK with. We do this by passing the dead + * nodes through the NodeSelector so it can have its say in which nodes + * are ok. If the selector is ok with any of the nodes then we will take + * the one in the list that has the lowest revival time and try it. + */ + if (deadNodes != null && !deadNodes.isEmpty()) { + final List selectedDeadNodes = new ArrayList<>(deadNodes); + /* + * We'd like NodeSelectors to remove items directly from deadNodes + * so we can find the minimum after it is filtered without having + * to compare many things. This saves us a sort on the unfiltered + * list. + */ + nodeSelector.select(() -> new DeadNodeIteratorAdapter(selectedDeadNodes.iterator())); + if (!selectedDeadNodes.isEmpty()) { + return singletonList(Collections.min(selectedDeadNodes).node); + } + } + throw new IOException("NodeSelector [" + nodeSelector + "] rejected all nodes, living: " + livingNodes + " and dead: " + deadNodes); + } + + /** + * Called after each successful request call. + * Receives as an argument the host that was used for the successful request. + */ + private void onResponse(Node node) { + DeadHostState removedHost = this.blacklist.remove(node.getHost()); + if (logger.isDebugEnabled() && removedHost != null) { + logger.debug("removed [" + node + "] from blacklist"); + } + } + + /** + * Called after each failed attempt. 
+ * Receives as an argument the host that was used for the failed attempt. + */ + private void onFailure(Node node) { + DeadHostState previousDeadHostState = blacklist.putIfAbsent( + node.getHost(), + new DeadHostState(DeadHostState.DEFAULT_TIME_SUPPLIER) + ); + if (previousDeadHostState == null) { + if (logger.isDebugEnabled()) { + logger.debug("added [" + node + "] to blacklist"); + } + } else { + blacklist.replace(node.getHost(), previousDeadHostState, + new DeadHostState(previousDeadHostState)); + if (logger.isDebugEnabled()) { + logger.debug("updated [" + node + "] already in blacklist"); + } + } + failureListener.onFailure(node); + } + + @Override + public void close() throws IOException { + client.close(); + } + + /** + * 400 or lower responses are part of the server behavior, only considering >500 for dead nodes marking + * purposes + */ + private static boolean isCorrectServerResponse(int statusCode) { + return statusCode < 500; + } + + private static boolean isRetryStatus(int statusCode) { + switch (statusCode) { + case 502: + case 503: + case 504: + return true; + } + return false; + } + + /** + * Should an exception cause retrying the request? + */ + private static boolean isRetryableException(Throwable e) { + if (e instanceof ExecutionException) { + e = e.getCause(); + } + if (e instanceof ContentTooLongException) { + return false; + } + return true; + } + + private static void addSuppressedException(Exception suppressedException, Exception currentException) { + if (suppressedException != null && suppressedException != currentException) { + currentException.addSuppressed(suppressedException); + } + } + + private static HttpUriRequestBase createHttpRequest(String method, URI uri, HttpEntity entity, + boolean compressionEnabled) { + switch (method.toUpperCase(Locale.ROOT)) { + case HttpDeleteWithEntity.METHOD_NAME: + return addRequestBody(new HttpDeleteWithEntity(uri), entity, compressionEnabled); + case HttpGetWithEntity.METHOD_NAME: + return addRequestBody(new HttpGetWithEntity(uri), entity, compressionEnabled); + case HttpHead.METHOD_NAME: + return addRequestBody(new HttpHead(uri), entity, compressionEnabled); + case HttpOptions.METHOD_NAME: + return addRequestBody(new HttpOptions(uri), entity, compressionEnabled); + case HttpPatch.METHOD_NAME: + return addRequestBody(new HttpPatch(uri), entity, compressionEnabled); + case HttpPost.METHOD_NAME: + HttpPost httpPost = new HttpPost(uri); + addRequestBody(httpPost, entity, compressionEnabled); + return httpPost; + case HttpPut.METHOD_NAME: + return addRequestBody(new HttpPut(uri), entity, compressionEnabled); + case HttpTrace.METHOD_NAME: + return addRequestBody(new HttpTrace(uri), entity, compressionEnabled); + default: + throw new UnsupportedOperationException("http method not supported: " + method); + } + } + + private static HttpUriRequestBase addRequestBody(HttpUriRequestBase httpRequest, HttpEntity entity, + boolean compressionEnabled) { + if (entity != null) { + if (canHaveBody(httpRequest)) { + if (compressionEnabled) { + entity = new ContentCompressingEntity(entity); + } + httpRequest.setEntity(entity); + } else { + throw new UnsupportedOperationException(httpRequest.getMethod() + " with body is not " + + "supported"); + } + } + return httpRequest; + } + + private static boolean canHaveBody(HttpRequest httpRequest) { + return httpRequest.getMethod().equals("PUT") || httpRequest.getMethod().equals("POST") || + httpRequest.getMethod().equals("PATCH") || + httpRequest instanceof HttpDeleteWithEntity || httpRequest 
instanceof HttpGetWithEntity; + } + + static URI buildUri(String pathPrefix, String path, Map params) { + Objects.requireNonNull(path, "path must not be null"); + try { + String fullPath; + if (pathPrefix != null && pathPrefix.isEmpty() == false) { + if (pathPrefix.endsWith("/") && path.startsWith("/")) { + fullPath = pathPrefix.substring(0, pathPrefix.length() - 1) + path; + } else if (pathPrefix.endsWith("/") || path.startsWith("/")) { + fullPath = pathPrefix + path; + } else { + fullPath = pathPrefix + "/" + path; + } + } else { + fullPath = path; + } + + URIBuilder uriBuilder = new URIBuilder(fullPath); + for (Map.Entry param : params.entrySet()) { + uriBuilder.addParameter(param.getKey(), param.getValue()); + } + return uriBuilder.build(); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + } + + /** + * Listener used in any async call to wrap the provided user listener (or SyncResponseListener in sync + * calls). + * Allows to track potential failures coming from the different retry attempts and returning to the + * original listener + * only when we got a response (successful or not to be retried) or there are no hosts to retry against. + */ + static class FailureTrackingResponseListener { + private final ResponseListener responseListener; + private volatile Exception exception; + + FailureTrackingResponseListener(ResponseListener responseListener) { + this.responseListener = responseListener; + } + + /** + * Notifies the caller of a response through the wrapped listener + */ + void onSuccess(Response response) { + responseListener.onSuccess(response); + } + + /** + * Tracks one last definitive failure and returns to the caller by notifying the wrapped listener + */ + void onDefinitiveFailure(Exception e) { + trackFailure(e); + responseListener.onFailure(this.exception); + } + + /** + * Tracks an exception, which caused a retry hence we should not return yet to the caller + */ + void trackFailure(Exception e) { + addSuppressedException(this.exception, e); + this.exception = e; + } + } + + /** + * Listener that allows to be notified whenever a failure happens. Useful when sniffing is enabled, so + * that we can sniff on failure. + * The default implementation is a no-op. + */ + public static class FailureListener { + /** + * Notifies that the node provided as argument has just failed + */ + public void onFailure(Node node) { + } + } + + /** + * Contains a reference to a blacklisted node and the time until it is + * revived. We use this so we can do a single pass over the blacklist. + */ + private static class DeadNode implements Comparable { + final Node node; + final DeadHostState deadness; + + DeadNode(Node node, DeadHostState deadness) { + this.node = node; + this.deadness = deadness; + } + + @Override + public String toString() { + return node.toString(); + } + + @Override + public int compareTo(DeadNode rhs) { + return deadness.compareTo(rhs.deadness); + } + } + + /** + * Adapts an Iterator<DeadNodeAndRevival> into an + * Iterator<Node>. 
+ */ + private static class DeadNodeIteratorAdapter implements Iterator { + private final Iterator itr; + + private DeadNodeIteratorAdapter(Iterator itr) { + this.itr = itr; + } + + @Override + public boolean hasNext() { + return itr.hasNext(); + } + + @Override + public Node next() { + return itr.next().node; + } + + @Override + public void remove() { + itr.remove(); + } + } + + private class InternalRequest { + private final Request request; + private final HttpUriRequestBase httpRequest; + private final Cancellable cancellable; + private final WarningsHandler warningsHandler; + + InternalRequest(Request request) { + this.request = request; + Map params = new HashMap<>(request.getParameters()); + params.putAll(request.getOptions().getParameters()); + URI uri = buildUri(pathPrefix, request.getEndpoint(), params); + this.httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity(), + compressionEnabled); + this.cancellable = Cancellable.fromRequest(httpRequest); + setHeaders(httpRequest, request.getOptions().getHeaders()); + setRequestConfig(httpRequest, request.getOptions().getRequestConfig()); + this.warningsHandler = request.getOptions().getWarningsHandler() == null + ? Rest5Client.this.warningsHandler + : request.getOptions().getWarningsHandler(); + } + + private void setHeaders(HttpRequest req, Collection
<Header>
requestHeaders) { + // request headers override default headers, so we don't add default headers if they exist as + // request headers + final Set requestNames = new HashSet<>(requestHeaders.size()); + for (Header requestHeader : requestHeaders) { + req.addHeader(requestHeader); + requestNames.add(requestHeader.getName()); + } + for (Header defaultHeader : defaultHeaders) { + if (!requestNames.contains(defaultHeader.getName())) { + req.addHeader(defaultHeader); + } + } + if (compressionEnabled) { + req.addHeader("Accept-Encoding", "gzip"); + } + if (metaHeaderEnabled) { + if (!req.containsHeader(Rest5ClientBuilder.META_HEADER_NAME)) { + req.setHeader(Rest5ClientBuilder.META_HEADER_NAME, Rest5ClientBuilder.META_HEADER_VALUE); + } + } else { + req.removeHeaders(Rest5ClientBuilder.META_HEADER_NAME); + } + } + + private void setRequestConfig(HttpUriRequestBase requestBase, RequestConfig requestConfig) { + if (requestConfig != null) { + requestBase.setConfig(requestConfig); + } + } + + RequestContext createContextForNextAttempt(Node node) { + this.httpRequest.reset(); + return new RequestContext(this, node); + } + } + + private static class RequestContext { + private final Node node; + private final AsyncRequestProducer requestProducer; + private final AsyncResponseConsumer asyncResponseConsumer; + private final HttpClientContext context; + + RequestContext(InternalRequest request, Node node) { + this.node = node; + // we stream the request body if the entity allows for it + AsyncRequestBuilder builder = AsyncRequestBuilder + .create(request.httpRequest.getMethod()) + .setUri(request.httpRequest.getRequestUri()) + .setHttpHost(node.getHost()) + .setHeaders(request.httpRequest.getHeaders()); + + if (request.httpRequest.getEntity() != null) { + builder.setEntity(new BasicAsyncEntityProducer(request.httpRequest.getEntity())); + } + + this.requestProducer = builder.build(); + this.asyncResponseConsumer = request.request.getOptions() + .getHttpAsyncResponseConsumerFactory() + .createHttpAsyncResponseConsumer(); + this.context = HttpClientContext.create(); + } + } + + private static class ResponseOrResponseException { + private final Response response; + private final ResponseException responseException; + + ResponseOrResponseException(Response response) { + this.response = Objects.requireNonNull(response); + this.responseException = null; + } + + ResponseOrResponseException(ResponseException responseException) { + this.responseException = Objects.requireNonNull(responseException); + this.response = null; + } + } + + /** + * Wrap the exception so the caller's signature shows up in the stack trace, taking care to copy the + * original type and message + * where possible so async and sync code don't have to check different exceptions. + */ + private static Exception extractAndWrapCause(Exception exception) { + if (exception instanceof InterruptedException) { + Thread.currentThread().interrupt(); + throw new RuntimeException("thread waiting for the response was interrupted", exception); + } + if (exception instanceof ExecutionException) { + ExecutionException executionException = (ExecutionException) exception; + Throwable t = executionException.getCause() == null ? 
executionException : + executionException.getCause(); + if (t instanceof Error) { + throw (Error) t; + } + exception = (Exception) t; + } + if (exception instanceof ConnectTimeoutException) { + ConnectTimeoutException e = new ConnectTimeoutException(exception.getMessage()); + e.initCause(exception); + return e; + } + if (exception instanceof SocketTimeoutException) { + SocketTimeoutException e = new SocketTimeoutException(exception.getMessage()); + e.initCause(exception); + return e; + } + if (exception instanceof ConnectionClosedException) { + ConnectionClosedException e = new ConnectionClosedException(exception.getMessage(), exception); + return e; + } + if (exception instanceof SSLHandshakeException) { + SSLHandshakeException e = new SSLHandshakeException(exception.getMessage()); + e.initCause(exception); + return e; + } + if (exception instanceof ConnectException) { + ConnectException e = new ConnectException(exception.getMessage()); + e.initCause(exception); + return e; + } + if (exception instanceof IOException) { + return new IOException(exception.getMessage(), exception); + } + if (exception instanceof RuntimeException) { + return new RuntimeException(exception.getMessage(), exception); + } + return new RuntimeException("error while performing request", exception); + } + + /** + * A gzip compressing entity that also implements {@code getContent()}. + */ + public static class ContentCompressingEntity extends GzipCompressingEntity { + + public ContentCompressingEntity(HttpEntity entity) { + super(entity); + } + + @Override + public InputStream getContent() throws IOException { + ByteArrayInputOutputStream out = new ByteArrayInputOutputStream(1024); + writeTo(out); + return out.asInput(); + } + } + + /** + * A ByteArrayOutputStream that can be turned into an input stream without copying the underlying buffer. + */ + private static class ByteArrayInputOutputStream extends ByteArrayOutputStream { + ByteArrayInputOutputStream(int size) { + super(size); + } + + public InputStream asInput() { + return new ByteArrayInputStream(this.buf, 0, this.count); + } + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5ClientBuilder.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5ClientBuilder.java new file mode 100644 index 000000000..4df40f375 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/Rest5ClientBuilder.java @@ -0,0 +1,383 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + + +import org.apache.hc.client5.http.config.ConnectionConfig; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.nio.ssl.BasicClientTlsStrategy; +import org.apache.hc.core5.util.Timeout; +import org.apache.hc.core5.util.VersionInfo; + +import javax.net.ssl.SSLContext; +import java.io.IOException; +import java.io.InputStream; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Properties; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static co.elastic.clients.transport.rest5_client.low_level.LanguageRuntimeVersions.getRuntimeMetadata; + +/** + * Helps creating a new {@link Rest5Client}. Allows to set the most common http client configuration options + * when internally + * creating the underlying {@link org.apache.hc.client5.http.async.HttpAsyncClient}. Also allows to provide + * an externally created + * {@link org.apache.hc.client5.http.async.HttpAsyncClient} in case additional customization is needed. + */ +public final class Rest5ClientBuilder { + public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000; + public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 30000; + public static final int DEFAULT_RESPONSE_TIMEOUT_MILLIS = 0; // meaning infinite + public static final int DEFAULT_MAX_CONN_PER_ROUTE = 10; + public static final int DEFAULT_MAX_CONN_TOTAL = 30; + + static final String THREAD_NAME_PREFIX = "elasticsearch-rest-client-"; + private static final String THREAD_NAME_FORMAT = THREAD_NAME_PREFIX + "%d-thread-%d"; + + public static final String VERSION; + static final String META_HEADER_NAME = "X-Elastic-Client-Meta"; + static final String META_HEADER_VALUE; + private static final String USER_AGENT_HEADER_VALUE; + + private static final Header[] EMPTY_HEADERS = new Header[0]; + + private final List nodes; + private CloseableHttpAsyncClient httpClient; + private Header[] defaultHeaders = EMPTY_HEADERS; + private Rest5Client.FailureListener failureListener; + private SSLContext sslContext; + private String pathPrefix; + private NodeSelector nodeSelector = NodeSelector.ANY; + private boolean strictDeprecationMode = false; + private boolean compressionEnabled = false; + private boolean metaHeaderEnabled = true; + + static { + // Never fail on unknown version, even if an environment messed up their classpath enough that we + // can't find it. + // Better have incomplete telemetry than crashing user applications. 
+ String version = null; + try (InputStream is = Rest5Client.class.getResourceAsStream("version.properties")) { + if (is != null) { + Properties versions = new Properties(); + versions.load(is); + version = versions.getProperty("elasticsearch-client"); + } + } catch (IOException e) { + // Keep version unknown + } + + if (version == null) { + version = ""; // unknown values are reported as empty strings in X-Elastic-Client-Meta + } + + VERSION = version; + + USER_AGENT_HEADER_VALUE = String.format( + Locale.ROOT, + "elasticsearch-java/%s (Java/%s)", + VERSION.isEmpty() ? "Unknown" : VERSION, + System.getProperty("java.version") + ); + + VersionInfo httpClientVersion = null; + try { + httpClientVersion = VersionInfo.loadVersionInfo( + "org.apache.hc.core5", + HttpAsyncClientBuilder.class.getClassLoader() + + ); + } catch (Exception e) { + // Keep unknown + } + + // Use a single 'p' suffix for all prerelease versions (snapshot, beta, etc). + String metaVersion = version; + int dashPos = metaVersion.indexOf('-'); + if (dashPos > 0) { + metaVersion = metaVersion.substring(0, dashPos) + "p"; + } + + // service, language, transport, followed by additional information + META_HEADER_VALUE = "es=" + + metaVersion + + ",jv=" + + System.getProperty("java.specification.version") + + ",t=" + + metaVersion + + ",hc=" + + (httpClientVersion == null ? "" : httpClientVersion.getRelease()) + + getRuntimeMetadata(); + } + + /** + * Creates a new builder instance and sets the hosts that the client will send requests to. + * + * @throws IllegalArgumentException if {@code nodes} is {@code null} or empty. + */ + Rest5ClientBuilder(List nodes) { + if (nodes == null || nodes.isEmpty()) { + throw new IllegalArgumentException("nodes must not be null or empty"); + } + for (Node node : nodes) { + if (node == null) { + throw new IllegalArgumentException("node cannot be null"); + } + } + this.nodes = nodes; + } + + /** + * Advanced setting, sets the rest client that will be used to handle requests. + * The rest client has to be fully configured, as it will be used as provided, + * also this will overwrite all builder setters. + * + * @throws NullPointerException if {@code httpAsyncClient} is {@code null}. + */ + public Rest5ClientBuilder setHttpClient(CloseableHttpAsyncClient httpAsyncClient) { + Objects.requireNonNull(httpAsyncClient, "custom rest client must not be null"); + this.httpClient = httpAsyncClient; + return this; + } + + /** + * Sets the SSL context, which will be applied to all requests. + * + * @throws NullPointerException if {@code httpAsyncClient} is {@code null}. + */ + public Rest5ClientBuilder setSSLContext(SSLContext sslContext) { + Objects.requireNonNull(sslContext, "ssl context must not be null"); + this.sslContext = sslContext; + return this; + } + + /** + * Sets the default request headers, which will be sent along with each request. + *
<p>
+ * Request-time headers will always overwrite any default headers. + * + * @throws NullPointerException if {@code defaultHeaders} or any header is {@code null}. + */ + public Rest5ClientBuilder setDefaultHeaders(Header[] defaultHeaders) { + Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null"); + for (Header defaultHeader : defaultHeaders) { + Objects.requireNonNull(defaultHeader, "default header must not be null"); + } + this.defaultHeaders = defaultHeaders; + return this; + } + + + /** + * Sets the {@link Rest5Client.FailureListener} to be notified for each request failure + * + * @throws NullPointerException if {@code failureListener} is {@code null}. + */ + public Rest5ClientBuilder setFailureListener(Rest5Client.FailureListener failureListener) { + Objects.requireNonNull(failureListener, "failureListener must not be null"); + this.failureListener = failureListener; + return this; + } + + + /** + * Sets the path's prefix for every request used by the http client. + *
<p>
+ * For example, if this is set to "/my/path", then any client request will become "/my/path/" + + * endpoint. + *
<p>
+ * In essence, every request's {@code endpoint} is prefixed by this {@code pathPrefix}. The path prefix + * is useful for when + * Elasticsearch is behind a proxy that provides a base path or a proxy that requires all paths to + * start with '/'; + * it is not intended for other purposes and it should not be supplied in other scenarios. + * + * @throws NullPointerException if {@code pathPrefix} is {@code null}. + * @throws IllegalArgumentException if {@code pathPrefix} is empty, or ends with more than one '/'. + */ + public Rest5ClientBuilder setPathPrefix(String pathPrefix) { + this.pathPrefix = cleanPathPrefix(pathPrefix); + return this; + } + + public static String cleanPathPrefix(String pathPrefix) { + Objects.requireNonNull(pathPrefix, "pathPrefix must not be null"); + + if (pathPrefix.isEmpty()) { + throw new IllegalArgumentException("pathPrefix must not be empty"); + } + + String cleanPathPrefix = pathPrefix; + if (!cleanPathPrefix.startsWith("/")) { + cleanPathPrefix = "/" + cleanPathPrefix; + } + + // best effort to ensure that it looks like "/base/path" rather than "/base/path/" + if (cleanPathPrefix.endsWith("/") && cleanPathPrefix.length() > 1) { + cleanPathPrefix = cleanPathPrefix.substring(0, cleanPathPrefix.length() - 1); + + if (cleanPathPrefix.endsWith("/")) { + throw new IllegalArgumentException("pathPrefix is malformed. too many trailing slashes: [" + pathPrefix + "]"); + } + } + return cleanPathPrefix; + } + + /** + * Sets the {@link NodeSelector} to be used for all requests. + * + * @throws NullPointerException if the provided nodeSelector is null + */ + public Rest5ClientBuilder setNodeSelector(NodeSelector nodeSelector) { + Objects.requireNonNull(nodeSelector, "nodeSelector must not be null"); + this.nodeSelector = nodeSelector; + return this; + } + + /** + * Whether the REST client should return any response containing at least + * one warning header as a failure. + */ + public Rest5ClientBuilder setStrictDeprecationMode(boolean strictDeprecationMode) { + this.strictDeprecationMode = strictDeprecationMode; + return this; + } + + /** + * Whether the REST client should compress requests using gzip content encoding and add the + * "Accept-Encoding: gzip" + * header to receive compressed responses. + */ + public Rest5ClientBuilder setCompressionEnabled(boolean compressionEnabled) { + this.compressionEnabled = compressionEnabled; + return this; + } + + /** + * Whether to send a {@code X-Elastic-Client-Meta} header that describes the runtime environment. It + * contains + * information that is similar to what could be found in {@code User-Agent}. Using a separate header + * allows + * applications to use {@code User-Agent} for their own needs, e.g. to identify application version or + * other + * environment information. Defaults to {@code true}. + */ + public Rest5ClientBuilder setMetaHeaderEnabled(boolean metadataEnabled) { + this.metaHeaderEnabled = metadataEnabled; + return this; + } + + /** + * Creates a new {@link Rest5Client} based on the provided configuration. 
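+ * <p>
+ * A minimal usage sketch (illustrative only): it assumes the {@code Rest5Client#builder(HttpHost...)}
+ * factory defined elsewhere in this patch and a hypothetical local node.
+ * <pre>{@code
+ * Rest5Client client = Rest5Client
+ *     .builder(new HttpHost("http", "localhost", 9200))
+ *     .setCompressionEnabled(true)   // gzip request bodies and accept gzip responses
+ *     .setPathPrefix("/es")          // only needed when Elasticsearch sits behind a proxy base path
+ *     .build();                      // also starts the underlying async http client
+ * // ... perform requests ...
+ * client.close();
+ * }</pre>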
+ */ + public Rest5Client build() { + if (failureListener == null) { + failureListener = new Rest5Client.FailureListener(); + } + CloseableHttpAsyncClient httpClient = this.createHttpClient(); + Rest5Client restClient = new Rest5Client( + httpClient, + defaultHeaders, + nodes, + pathPrefix, + failureListener, + nodeSelector, + strictDeprecationMode, + compressionEnabled, + metaHeaderEnabled + ); + httpClient.start(); + return restClient; + } + + /** + * Similar to {@code org.apache.http.impl.nio.reactor.AbstractMultiworkerIOReactor + * .DefaultThreadFactory} but with better thread names. + */ + private static class RestClientThreadFactory implements ThreadFactory { + private static final AtomicLong CLIENT_THREAD_POOL_ID_GENERATOR = new AtomicLong(); + + private final long clientThreadPoolId = CLIENT_THREAD_POOL_ID_GENERATOR.getAndIncrement(); // 0-based + private final AtomicLong clientThreadId = new AtomicLong(); + + @Override + public Thread newThread(Runnable runnable) { + return new Thread( + runnable, + String.format(Locale.ROOT, THREAD_NAME_FORMAT, clientThreadPoolId, + clientThreadId.incrementAndGet()) // 1-based + ); + } + } + + private CloseableHttpAsyncClient createHttpClient() { + // if the user provided a client, using that one + if (this.httpClient != null) { + return this.httpClient; + } + // otherwise, creating a default instance of CloseableHttpAsyncClient + // default timeouts are all 3 mins + RequestConfig requestConfigBuilder = RequestConfig.custom() + .setConnectionRequestTimeout(Timeout.of(DEFAULT_SOCKET_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) + .setResponseTimeout(Timeout.of(DEFAULT_RESPONSE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) + .build(); + + try { + + SSLContext sslContext = this.sslContext != null ? this.sslContext : SSLContext.getDefault(); + + ConnectionConfig connectionConfig = ConnectionConfig.custom() + .setConnectTimeout(Timeout.of(DEFAULT_CONNECT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) + .build(); + + PoolingAsyncClientConnectionManager defaultConnectionManager = + PoolingAsyncClientConnectionManagerBuilder.create() + .setDefaultConnectionConfig(connectionConfig) + .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE) + .setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL) + .setTlsStrategy(new BasicClientTlsStrategy(sslContext)) + .build(); + + HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() + .setDefaultRequestConfig(requestConfigBuilder) + .setConnectionManager(defaultConnectionManager) + .setUserAgent(USER_AGENT_HEADER_VALUE) + .setTargetAuthenticationStrategy(new DefaultAuthenticationStrategy()) + .setThreadFactory(new RestClientThreadFactory()); + + return httpClientBuilder.build(); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException("could not create the default ssl context", e); + } + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/WarningFailureException.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/WarningFailureException.java new file mode 100644 index 000000000..706b8f586 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/WarningFailureException.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import java.io.IOException; + +import static co.elastic.clients.transport.rest5_client.low_level.ResponseException.buildMessage; + +/** + * This exception is used to indicate that one or more {@link Response#getWarnings()} exist + * and is typically used when the {@link Rest5Client} is set to fail by setting + * {@link Rest5ClientBuilder#setStrictDeprecationMode(boolean)} to `true`. + */ +// This class extends RuntimeException in order to deal with wrapping that is done in FutureUtils on exception. +// if the exception is not of type ElasticsearchException or RuntimeException it will be wrapped in a UncategorizedExecutionException +public final class WarningFailureException extends RuntimeException { + + private final Response response; + + public WarningFailureException(Response response) throws IOException { + super(buildMessage(response)); + this.response = response; + } + + /** + * Wrap a {@linkplain WarningFailureException} with another one with the current + * stack trace. This is used during synchronous calls so that the caller + * ends up in the stack trace of the exception thrown. + */ + WarningFailureException(WarningFailureException e) { + super(e.getMessage(), e); + this.response = e.getResponse(); + } + + /** + * Returns the {@link Response} that caused this exception to be thrown. + */ + public Response getResponse() { + return response; + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/WarningsHandler.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/WarningsHandler.java new file mode 100644 index 000000000..d79854763 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/WarningsHandler.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import java.util.List; + +/** + * Called if there are warnings to determine if those warnings should fail the + * request. 
+ */ +public interface WarningsHandler { + boolean warningsShouldFailRequest(List warnings); + + WarningsHandler PERMISSIVE = new WarningsHandler() { + @Override + public boolean warningsShouldFailRequest(List warnings) { + return false; + } + + @Override + public String toString() { + return "permissive"; + } + }; + WarningsHandler STRICT = new WarningsHandler() { + @Override + public boolean warningsShouldFailRequest(List warnings) { + return false == warnings.isEmpty(); + } + + @Override + public String toString() { + return "strict"; + } + }; +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSniffer.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSniffer.java new file mode 100644 index 000000000..73e4185c5 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSniffer.java @@ -0,0 +1,313 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.Node.Roles; +import co.elastic.clients.transport.rest5_client.low_level.Request; +import co.elastic.clients.transport.rest5_client.low_level.Response; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; + +/** + * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. + * Compatible with elasticsearch 2.x+. 
+ */ +public final class ElasticsearchNodesSniffer implements NodesSniffer { + + private static final Log logger = LogFactory.getLog(ElasticsearchNodesSniffer.class); + + public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); + + private final Rest5Client restClient; + private final Request request; + private final Scheme scheme; + private final JsonFactory jsonFactory = new JsonFactory(); + + /** + * Creates a new instance of the Elasticsearch sniffer. It will use the provided {@link Rest5Client} to fetch the hosts, + * through the nodes info api, the default sniff request timeout value {@link #DEFAULT_SNIFF_REQUEST_TIMEOUT} and http + * as the scheme for all the hosts. + * @param restClient client used to fetch the hosts from elasticsearch through nodes info api. Usually the same instance + * that is also provided to {@link Sniffer#builder(Rest5Client)}, so that the hosts are set to the same + * client that was used to fetch them. + */ + public ElasticsearchNodesSniffer(Rest5Client restClient) { + this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchNodesSniffer.Scheme.HTTP); + } + + /** + * Creates a new instance of the Elasticsearch sniffer. It will use the provided {@link Rest5Client} to fetch the hosts + * through the nodes info api, the provided sniff request timeout value and scheme. + * @param restClient client used to fetch the hosts from elasticsearch through nodes info api. Usually the same instance + * that is also provided to {@link Sniffer#builder(Rest5Client)}, so that the hosts are set to the same + * client that was used to sniff them. + * @param sniffRequestTimeoutMillis the sniff request timeout (in milliseconds) to be passed in as a query string parameter + * to elasticsearch. Allows to halt the request without any failure, as only the nodes + * that have responded within this timeout will be returned. 
+ * @param scheme the scheme to associate sniffed nodes with (as it is not returned by elasticsearch) + */ + public ElasticsearchNodesSniffer(Rest5Client restClient, long sniffRequestTimeoutMillis, Scheme scheme) { + this.restClient = Objects.requireNonNull(restClient, "restClient cannot be null"); + if (sniffRequestTimeoutMillis < 0) { + throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0"); + } + this.request = new Request("GET", "/_nodes/http"); + request.addParameter("timeout", sniffRequestTimeoutMillis + "ms"); + this.scheme = Objects.requireNonNull(scheme, "scheme cannot be null"); + } + + /** + * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts + */ + @Override + public List sniff() throws IOException { + Response response = restClient.performRequest(request); + return readHosts(response.getEntity(), scheme, jsonFactory); + } + + static List readHosts(HttpEntity entity, Scheme scheme, JsonFactory jsonFactory) throws IOException { + try (InputStream inputStream = entity.getContent()) { + JsonParser parser = jsonFactory.createParser(inputStream); + if (parser.nextToken() != JsonToken.START_OBJECT) { + throw new IOException("expected data to start with an object"); + } + List nodes = new ArrayList<>(); + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + if ("nodes".equals(parser.getCurrentName())) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + JsonToken token = parser.nextToken(); + assert token == JsonToken.START_OBJECT; + String nodeId = parser.getCurrentName(); + Node node = readNode(nodeId, parser, scheme); + if (node != null) { + nodes.add(node); + } + } + } else { + parser.skipChildren(); + } + } + } + return nodes; + } + } + + private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) throws IOException { + HttpHost publishedHost = null; + /* + * We sniff the bound hosts so we can look up the node based on any + * address on which it is listening. This is useful in Elasticsearch's + * test framework where we sometimes publish ipv6 addresses but the + * tests contact the node on ipv4. + */ + Set boundHosts = new HashSet<>(); + String name = null; + String version = null; + /* + * Multi-valued attributes come with key = `real_key.index` and we + * unflip them after reading them because we can't rely on the order + * that they arive. 
+ */ + final Map protoAttributes = new HashMap(); + + boolean sawRoles = false; + final Set roles = new TreeSet<>(); + + String fieldName = null; + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { + fieldName = parser.getCurrentName(); + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + if ("http".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) { + String address = parser.getValueAsString(); + String host; + URI publishAddressAsURI; + + // ES7 cname/ip:port format + if (address.contains("/")) { + String[] cnameAndURI = address.split("/", 2); + publishAddressAsURI = URI.create(scheme + "://" + cnameAndURI[1]); + host = cnameAndURI[0]; + } else { + publishAddressAsURI = URI.create(scheme + "://" + address); + host = publishAddressAsURI.getHost(); + } + publishedHost = new HttpHost(publishAddressAsURI.getScheme(), host, publishAddressAsURI.getPort()); + } else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) { + while (parser.nextToken() != JsonToken.END_ARRAY) { + URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + boundHosts.add( + new HttpHost(boundAddressAsURI.getScheme(), boundAddressAsURI.getHost(), boundAddressAsURI.getPort()) + ); + } + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + parser.skipChildren(); + } + } + } else if ("attributes".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING) { + String oldValue = protoAttributes.put(parser.getCurrentName(), parser.getValueAsString()); + if (oldValue != null) { + throw new IOException("repeated attribute key [" + parser.getCurrentName() + "]"); + } + } else { + parser.skipChildren(); + } + } + } else { + parser.skipChildren(); + } + } else if (parser.currentToken() == JsonToken.START_ARRAY) { + if ("roles".equals(fieldName)) { + sawRoles = true; + while (parser.nextToken() != JsonToken.END_ARRAY) { + roles.add(parser.getText()); + } + } else { + parser.skipChildren(); + } + } else if (parser.currentToken().isScalarValue()) { + if ("version".equals(fieldName)) { + version = parser.getText(); + } else if ("name".equals(fieldName)) { + name = parser.getText(); + } + } + } + // http section is not present if http is not enabled on the node, ignore such nodes + if (publishedHost == null) { + logger.debug("skipping node [" + nodeId + "] with http disabled"); + return null; + } + + Map> realAttributes = new HashMap<>(protoAttributes.size()); + List keys = new ArrayList<>(protoAttributes.keySet()); + for (String key : keys) { + if (key.endsWith(".0")) { + String realKey = key.substring(0, key.length() - 2); + List values = new ArrayList<>(); + int i = 0; + while (true) { + String value = protoAttributes.remove(realKey + "." + i); + if (value == null) { + break; + } + values.add(value); + i++; + } + realAttributes.put(realKey, unmodifiableList(values)); + } + } + for (Map.Entry entry : protoAttributes.entrySet()) { + realAttributes.put(entry.getKey(), singletonList(entry.getValue())); + } + + if (version.startsWith("2.")) { + /* + * 2.x doesn't send roles, instead we try to read them from + * attributes. 
+ */ + boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false); + Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null); + Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null); + if ((masterAttribute == null && false == clientAttribute) || masterAttribute) { + roles.add("master"); + } + if ((dataAttribute == null && false == clientAttribute) || dataAttribute) { + roles.add("data"); + } + } else { + assert sawRoles : "didn't see roles for [" + nodeId + "]"; + } + assert boundHosts.contains(publishedHost) : "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts"; + logger.trace("adding node [" + nodeId + "]"); + return new Node(publishedHost, boundHosts, name, version, new Roles(roles), unmodifiableMap(realAttributes)); + } + + /** + * Returns {@code defaultValue} if the attribute didn't come back, + * {@code true} or {@code false} if it did come back as + * either of those, or throws an IOException if the attribute + * came back in a strange way. + */ + private static Boolean v2RoleAttributeValue(Map> attributes, String name, Boolean defaultValue) + throws IOException { + List valueList = attributes.remove(name); + if (valueList == null) { + return defaultValue; + } + if (valueList.size() != 1) { + throw new IOException("expected only a single attribute value for [" + name + "] but got " + valueList); + } + switch (valueList.get(0)) { + case "true": + return true; + case "false": + return false; + default: + throw new IOException("expected [" + name + "] to be either [true] or [false] but was [" + valueList.get(0) + "]"); + } + } + + public enum Scheme { + HTTP("http"), + HTTPS("https"); + + private final String name; + + Scheme(String name) { + this.name = name; + } + + @Override + public String toString() { + return name; + } + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/NodesSniffer.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/NodesSniffer.java new file mode 100644 index 000000000..552d6317c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/NodesSniffer.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Node; + +import java.io.IOException; +import java.util.List; + +/** + * Responsible for sniffing the http hosts + */ +public interface NodesSniffer { + /** + * Returns the sniffed Elasticsearch nodes. 
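+ * <p>
+ * Implementations may source nodes from anywhere. As a rough sketch (it assumes {@code Node}
+ * keeps the single-argument {@code HttpHost} constructor of the legacy low-level client),
+ * a fixed-list sniffer could be written as a lambda:
+ * <pre>{@code
+ * NodesSniffer fixed = () -> Arrays.asList(
+ *     new Node(new HttpHost("http", "10.0.0.1", 9200)),
+ *     new Node(new HttpHost("http", "10.0.0.2", 9200)));
+ * }</pre>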
+ */ + List sniff() throws IOException; +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SniffOnFailureListener.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SniffOnFailureListener.java new file mode 100644 index 000000000..0127945f9 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SniffOnFailureListener.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * {@link Rest5Client.FailureListener} implementation that allows to perform + * sniffing on failure. Gets notified whenever a failure happens and uses a {@link Sniffer} instance + * to manually reload hosts and sets them back to the {@link Rest5Client}. The {@link Sniffer} instance + * needs to be lazily set through {@link #setSniffer(Sniffer)}. + */ +public class SniffOnFailureListener extends Rest5Client.FailureListener { + + private volatile Sniffer sniffer; + private final AtomicBoolean set; + + public SniffOnFailureListener() { + this.set = new AtomicBoolean(false); + } + + /** + * Sets the {@link Sniffer} instance used to perform sniffing + * @throws IllegalStateException if the sniffer was already set, as it can only be set once + */ + public void setSniffer(Sniffer sniffer) { + Objects.requireNonNull(sniffer, "sniffer must not be null"); + if (set.compareAndSet(false, true)) { + this.sniffer = sniffer; + } else { + throw new IllegalStateException("sniffer can only be set once"); + } + } + + @Override + public void onFailure(Node node) { + if (sniffer == null) { + throw new IllegalStateException("sniffer was not set, unable to sniff on failure"); + } + sniffer.sniffOnFailure(); + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/Sniffer.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/Sniffer.java new file mode 100644 index 000000000..7ed0bcc2c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/Sniffer.java @@ -0,0 +1,310 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.transport.rest5_client.low_level.Rest5ClientBuilder; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of + * {@link Rest5Client}. Must be created via {@link SnifferBuilder}, which allows to set all of the different options or rely on defaults. + * A background task fetches the nodes through the {@link NodesSniffer} and sets them to the {@link Rest5Client} instance. + * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to + * {@link Rest5ClientBuilder#setFailureListener(Rest5Client.FailureListener)}. The Sniffer implementation needs to be lazily set to the + * previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}. + */ +public class Sniffer implements Closeable { + + private static final Log logger = LogFactory.getLog(Sniffer.class); + private static final String SNIFFER_THREAD_NAME = "es_rest_client_sniffer"; + + private final NodesSniffer nodesSniffer; + private final Rest5Client restClient; + private final long sniffIntervalMillis; + private final long sniffAfterFailureDelayMillis; + private final Scheduler scheduler; + private final AtomicBoolean initialized = new AtomicBoolean(false); + private volatile ScheduledTask nextScheduledTask; + + Sniffer(Rest5Client restClient, NodesSniffer nodesSniffer, long sniffInterval, long sniffAfterFailureDelay) { + this(restClient, nodesSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay); + } + + Sniffer(Rest5Client restClient, NodesSniffer nodesSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) { + this.nodesSniffer = nodesSniffer; + this.restClient = restClient; + this.sniffIntervalMillis = sniffInterval; + this.sniffAfterFailureDelayMillis = sniffAfterFailureDelay; + this.scheduler = scheduler; + /* + * The first sniffing round is async, so this constructor returns before nextScheduledTask is assigned to a task. 
+ * The initialized flag is a protection against NPE due to that. + */ + Task task = new Task(sniffIntervalMillis) { + @Override + public void run() { + super.run(); + initialized.compareAndSet(false, true); + } + }; + /* + * We do not keep track of the returned future as we never intend to cancel the initial sniffing round, we rather + * prevent any other operation from being executed till the sniffer is properly initialized + */ + scheduler.schedule(task, 0L); + } + + /** + * Schedule sniffing to run as soon as possible if it isn't already running. Once such sniffing round runs + * it will also schedule a new round after sniffAfterFailureDelay ms. + */ + public void sniffOnFailure() { + // sniffOnFailure does nothing until the initial sniffing round has been completed + if (initialized.get()) { + /* + * If sniffing is already running, there is no point in scheduling another round right after the current one. + * Concurrent calls may be checking the same task state, but only the first skip call on the same task returns true. + * The task may also get replaced while we check its state, in which case calling skip on it returns false. + */ + if (this.nextScheduledTask.skip()) { + /* + * We do not keep track of this future as the task will immediately run and we don't intend to cancel it + * due to concurrent sniffOnFailure runs. Effectively the previous (now cancelled or skipped) task will stay + * assigned to nextTask till this onFailure round gets run and schedules its corresponding afterFailure round. + */ + scheduler.schedule(new Task(sniffAfterFailureDelayMillis), 0L); + } + } + } + + enum TaskState { + WAITING, + SKIPPED, + STARTED + } + + class Task implements Runnable { + final long nextTaskDelay; + final AtomicReference taskState = new AtomicReference<>(TaskState.WAITING); + + Task(long nextTaskDelay) { + this.nextTaskDelay = nextTaskDelay; + } + + @Override + public void run() { + /* + * Skipped or already started tasks do nothing. In most cases tasks will be cancelled and not run, but we want to protect for + * cases where future#cancel returns true yet the task runs. We want to make sure that such tasks do nothing otherwise they will + * schedule another round at the end and so on, leaving us with multiple parallel sniffing "tracks" whish is undesirable. + */ + if (taskState.compareAndSet(TaskState.WAITING, TaskState.STARTED) == false) { + return; + } + try { + sniff(); + } catch (Exception e) { + logger.error("error while sniffing nodes", e); + } finally { + Task task = new Task(sniffIntervalMillis); + Future future = scheduler.schedule(task, nextTaskDelay); + // tasks are run by a single threaded executor, so swapping is safe with a simple volatile variable + ScheduledTask previousTask = nextScheduledTask; + nextScheduledTask = new ScheduledTask(task, future); + assert initialized.get() == false || previousTask.task.isSkipped() || previousTask.task.hasStarted() + : "task that we are replacing is neither cancelled nor has it ever started"; + } + } + + /** + * Returns true if the task has started, false in case it didn't start (yet?) or it was skipped + */ + boolean hasStarted() { + return taskState.get() == TaskState.STARTED; + } + + /** + * Sets this task to be skipped. Returns true if the task will be skipped, false if the task has already started. + */ + boolean skip() { + /* + * Threads may still get run although future#cancel returns true. We make sure that a task is either cancelled (or skipped), + * or entirely run. 
In the odd case that future#cancel returns true and the thread still runs, the task won't do anything. + * In case future#cancel returns true but the task has already started, this state change will not succeed hence this method + * returns false and the task will normally run. + */ + return taskState.compareAndSet(TaskState.WAITING, TaskState.SKIPPED); + } + + /** + * Returns true if the task was set to be skipped before it was started + */ + boolean isSkipped() { + return taskState.get() == TaskState.SKIPPED; + } + } + + static final class ScheduledTask { + final Task task; + final Future future; + + ScheduledTask(Task task, Future future) { + this.task = task; + this.future = future; + } + + /** + * Cancels this task. Returns true if the task has been successfully cancelled, meaning it won't be executed + * or if it is its execution won't have any effect. Returns false if the task cannot be cancelled (possibly it was + * already cancelled or already completed). + */ + boolean skip() { + /* + * Future#cancel should return false whenever a task cannot be cancelled, most likely as it has already started. We don't + * trust it much though so we try to cancel hoping that it will work. At the same time we always call skip too, which means + * that if the task has already started the state change will fail. We could potentially not call skip when cancel returns + * false but we prefer to stay on the safe side. + */ + future.cancel(false); + return task.skip(); + } + } + + final void sniff() throws IOException { + List sniffedNodes = nodesSniffer.sniff(); + if (logger.isDebugEnabled()) { + logger.debug("sniffed nodes: " + sniffedNodes); + } + if (sniffedNodes.isEmpty()) { + logger.warn("no nodes to set, nodes will be updated at the next sniffing round"); + } else { + restClient.setNodes(sniffedNodes); + } + } + + @Override + public void close() { + if (initialized.get()) { + nextScheduledTask.skip(); + } + this.scheduler.shutdown(); + } + + /** + * Returns a new {@link SnifferBuilder} to help with {@link Sniffer} creation. + * + * @param restClient the client that gets its hosts set (via + * {@link Rest5Client#setNodes(Collection)}) once they are fetched + * @return a new instance of {@link SnifferBuilder} + */ + public static SnifferBuilder builder(Rest5Client restClient) { + return new SnifferBuilder(restClient); + } + + /** + * The Scheduler interface allows to isolate the sniffing scheduling aspects so that we can test + * the sniffer by injecting when needed a custom scheduler that is more suited for testing. 
+ */ + interface Scheduler { + /** + * Schedules the provided {@link Runnable} to be executed in delayMillis milliseconds + */ + Future schedule(Task task, long delayMillis); + + /** + * Shuts this scheduler down + */ + void shutdown(); + } + + /** + * Default implementation of {@link Scheduler}, based on {@link ScheduledExecutorService} + */ + static final class DefaultScheduler implements Scheduler { + final ScheduledExecutorService executor; + + DefaultScheduler() { + this(initScheduledExecutorService()); + } + + DefaultScheduler(ScheduledExecutorService executor) { + this.executor = executor; + } + + private static ScheduledExecutorService initScheduledExecutorService() { + ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, new SnifferThreadFactory(SNIFFER_THREAD_NAME)); + executor.setRemoveOnCancelPolicy(true); + return executor; + } + + @Override + public Future schedule(Task task, long delayMillis) { + return executor.schedule(task, delayMillis, TimeUnit.MILLISECONDS); + } + + @Override + public void shutdown() { + executor.shutdown(); + try { + if (executor.awaitTermination(1000, TimeUnit.MILLISECONDS)) { + return; + } + executor.shutdownNow(); + } catch (InterruptedException ignore) { + Thread.currentThread().interrupt(); + } + } + } + + static class SnifferThreadFactory implements ThreadFactory { + private final AtomicInteger threadNumber = new AtomicInteger(1); + private final String namePrefix; + private final ThreadFactory originalThreadFactory; + + private SnifferThreadFactory(String namePrefix) { + this.namePrefix = namePrefix; + this.originalThreadFactory = Executors.defaultThreadFactory(); + } + + @Override + public Thread newThread(final Runnable r) { + Thread t = originalThreadFactory.newThread(r); + t.setName(namePrefix + "[T#" + threadNumber.getAndIncrement() + "]"); + t.setDaemon(true); + return t; + } + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferBuilder.java b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferBuilder.java new file mode 100644 index 000000000..a04009350 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferBuilder.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; + +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +/** + * Sniffer builder. Helps creating a new {@link Sniffer}. 
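+ * <p>A minimal usage sketch (illustrative only; it assumes an already-built {@link Rest5Client}
+ * named {@code restClient}):
+ * <pre>{@code
+ * Sniffer sniffer = Sniffer.builder(restClient)
+ *     .setSniffIntervalMillis(60000) // re-discover nodes every minute
+ *     .build();
+ * // ... and when shutting down, close the sniffer before closing the client
+ * sniffer.close();
+ * }</pre>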
+ */ +public final class SnifferBuilder { + public static final long DEFAULT_SNIFF_INTERVAL = TimeUnit.MINUTES.toMillis(5); + public static final long DEFAULT_SNIFF_AFTER_FAILURE_DELAY = TimeUnit.MINUTES.toMillis(1); + + private final Rest5Client restClient; + private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL; + private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; + private NodesSniffer nodesSniffer; + + /** + * Creates a new builder instance by providing the {@link Rest5Client} that will be used to communicate with Elasticsearch + */ + SnifferBuilder(Rest5Client restClient) { + Objects.requireNonNull(restClient, "restClient cannot be null"); + this.restClient = restClient; + } + + /** + * Sets the interval between consecutive ordinary sniff executions in milliseconds. Will be honoured when + * sniffOnFailure is disabled or when there are no failures between consecutive sniff executions. + * @throws IllegalArgumentException if sniffIntervalMillis is not greater than 0 + */ + public SnifferBuilder setSniffIntervalMillis(int sniffIntervalMillis) { + if (sniffIntervalMillis <= 0) { + throw new IllegalArgumentException("sniffIntervalMillis must be greater than 0"); + } + this.sniffIntervalMillis = sniffIntervalMillis; + return this; + } + + /** + * Sets the delay of a sniff execution scheduled after a failure (in milliseconds) + */ + public SnifferBuilder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelayMillis) { + if (sniffAfterFailureDelayMillis <= 0) { + throw new IllegalArgumentException("sniffAfterFailureDelayMillis must be greater than 0"); + } + this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis; + return this; + } + + /** + * Sets the {@link NodesSniffer} to be used to read hosts. A default instance of {@link ElasticsearchNodesSniffer} + * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchNodesSniffer}, + * or to provide a different implementation (e.g. in case hosts need to be taken from a different source). + */ + public SnifferBuilder setNodesSniffer(NodesSniffer nodesSniffer) { + Objects.requireNonNull(nodesSniffer, "nodesSniffer cannot be null"); + this.nodesSniffer = nodesSniffer; + return this; + } + + /** + * Creates the {@link Sniffer} based on the provided configuration. + */ + public Sniffer build() { + if (nodesSniffer == null) { + this.nodesSniffer = new ElasticsearchNodesSniffer(restClient); + } + return new Sniffer(restClient, nodesSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis); + } +} diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientHttpClient.java b/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientHttpClient.java index 1bcc06b05..85b92c107 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientHttpClient.java +++ b/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientHttpClient.java @@ -51,7 +51,7 @@ public class RestClientHttpClient implements TransportHttpClient { /** * The {@code Future} implementation returned by async requests. - * It wraps the RestClient's cancellable and propagates cancellation. + * It wraps the Rest5Client's cancellable and propagates cancellation.
*/ private static class RequestFuture extends CompletableFuture { private volatile Cancellable cancellable; @@ -227,7 +227,7 @@ public int size() { @Override public BinaryData body() throws IOException { HttpEntity entity = restResponse.getEntity(); - return entity == null ? null : new HttpEntityBinaryData(restResponse.getEntity()); + return entity == null ? null : new HttpEntityBinaryData(entity); } @Nullable diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientOptions.java b/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientOptions.java index 842a45c62..de1658e26 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientOptions.java +++ b/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientOptions.java @@ -49,7 +49,7 @@ public class RestClientOptions implements TransportOptions { @VisibleForTesting static final String USER_AGENT_VALUE = getUserAgent(); - static RestClientOptions of(@Nullable TransportOptions options) { + public static RestClientOptions of(@Nullable TransportOptions options) { if (options == null) { return initialOptions(); } @@ -102,6 +102,11 @@ public Function, Boolean> onWarnings() { return warnings -> options.getWarningsHandler().warningsShouldFailRequest(warnings); } + @Override + public void updateToken(String token) { + throw new UnsupportedOperationException("Operation unsupported in the legacy client, use rest5 client"); + } + @Override public boolean keepResponseBodyOnException() { return this.keepResponseBodyOnException; diff --git a/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientTransport.java b/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientTransport.java index dbadfbe1a..4ace1d0ed 100644 --- a/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientTransport.java +++ b/java-client/src/main/java/co/elastic/clients/transport/rest_client/RestClientTransport.java @@ -21,13 +21,30 @@ import co.elastic.clients.json.JsonpMapper; import co.elastic.clients.transport.ElasticsearchTransportBase; +import co.elastic.clients.transport.Transport; +import co.elastic.clients.transport.ElasticsearchTransportConfig; +import co.elastic.clients.transport.TransportOptions; import co.elastic.clients.transport.instrumentation.Instrumentation; +import org.apache.http.HttpHost; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; + +import javax.annotation.Nullable; +import java.util.Base64; public class RestClientTransport extends ElasticsearchTransportBase { private final RestClient restClient; + public RestClientTransport(ElasticsearchTransportConfig config) { + this( + buildRestClient(config), + config.mapper(), + RestClientOptions.of(config.transportOptions()), + config.instrumentation() + ); + } + public RestClientTransport(RestClient restClient, JsonpMapper jsonpMapper) { this(restClient, jsonpMapper, null); } @@ -42,7 +59,53 @@ public RestClientTransport(RestClient restClient, JsonpMapper jsonpMapper, RestC this.restClient = restClient; } + private static RestClient buildRestClient(ElasticsearchTransportConfig config) { + RestClientBuilder restClientBuilder = RestClient.builder(config.hosts().stream() + .map(h -> HttpHost.create(h.toString())).toArray(HttpHost[]::new) + ); + + if (config.username() != null && config.password() != null) { + var cred = Base64.getEncoder().encodeToString((config.username() + ":" + 
config.password()).getBytes()); + restClientBuilder.setDefaultHeaders(new org.apache.http.Header[]{ + new org.apache.http.message.BasicHeader("Authorization", "Basic " + cred) + }); + } else if (config.apiKey() != null) { + restClientBuilder.setDefaultHeaders(new org.apache.http.Header[]{ + new org.apache.http.message.BasicHeader("Authorization", "ApiKey " + config.apiKey()) + }); + } else if (config.token() != null) { + restClientBuilder.setDefaultHeaders(new org.apache.http.Header[]{ + new org.apache.http.message.BasicHeader("Authorization", "Bearer " + config.token()) + }); + } + + if (config.sslContext() != null) { + restClientBuilder.setHttpClientConfigCallback(hc -> hc.setSSLContext(config.sslContext())); + } + + restClientBuilder.setCompressionEnabled(config.useCompression()); + + return restClientBuilder.build(); + } + public RestClient restClient() { return this.restClient; } + + + @Override + public Transport withOptions(@Nullable TransportOptions options) { + return new RestClientTransport(restClient, mapper, RestClientOptions.of(options), instrumentation); + } + + /** INTERNAL, used only for tests. */ + @Override + protected ElasticsearchTransportBase cloneWith(TransportOptions options, JsonpMapper mapper, Instrumentation instrumentation) { + return new RestClientTransport( + restClient, + mapper != null ? mapper : this.mapper, + RestClientOptions.of(options != null ? options : this.transportOptions), + instrumentation != null ? instrumentation : this.instrumentation + ); + } } diff --git a/java-client/src/test/java/co/elastic/clients/documentation/DocTestsTransport.java b/java-client/src/test/java/co/elastic/clients/documentation/DocTestsTransport.java index 6d1c41eb5..01a616c04 100644 --- a/java-client/src/test/java/co/elastic/clients/documentation/DocTestsTransport.java +++ b/java-client/src/test/java/co/elastic/clients/documentation/DocTestsTransport.java @@ -37,7 +37,7 @@ import java.util.function.Function; /** - * A transport implementation that always returns the same result. Used for doc snippets where we can to check + * A transport implementation that always returns the same result. Used for doc snippets where we can check * compilation and do very simple tests. 
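+ * <p>Typical doc-test usage is a sketch like the following (where {@code cannedResponse} is a
+ * hypothetical pre-built response object):
+ * <pre>{@code
+ * DocTestsTransport transport = new DocTestsTransport();
+ * transport.setResult(cannedResponse); // every request run against this transport gets this result back
+ * ElasticsearchClient client = new ElasticsearchClient(transport);
+ * }</pre>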
*/ public class DocTestsTransport implements ElasticsearchTransport { @@ -73,6 +73,10 @@ public boolean keepResponseBodyOnException() { public Builder toBuilder() { return null; } + + @Override + public void updateToken(String token) { + } }; public void setResult(Object result) { diff --git a/java-client/src/test/java/co/elastic/clients/documentation/getting_started/ConnectingTest.java b/java-client/src/test/java/co/elastic/clients/documentation/getting_started/ConnectingTest.java index a97a2ac8e..b8fef1ecf 100644 --- a/java-client/src/test/java/co/elastic/clients/documentation/getting_started/ConnectingTest.java +++ b/java-client/src/test/java/co/elastic/clients/documentation/getting_started/ConnectingTest.java @@ -24,19 +24,10 @@ import co.elastic.clients.elasticsearch.core.SearchResponse; import co.elastic.clients.elasticsearch.core.search.Hit; import co.elastic.clients.json.jackson.JacksonJsonpMapper; -import co.elastic.clients.transport.ElasticsearchTransport; import co.elastic.clients.transport.TransportUtils; import co.elastic.clients.transport.instrumentation.OpenTelemetryForElasticsearch; -import co.elastic.clients.transport.rest_client.RestClientTransport; import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.sdk.OpenTelemetrySdk; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.message.BasicHeader; -import org.elasticsearch.client.RestClient; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -53,20 +44,12 @@ public void createClient() throws Exception { String serverUrl = "https://localhost:9200"; String apiKey = "VnVhQ2ZHY0JDZGJrU..."; - // Create the low-level client - RestClient restClient = RestClient - .builder(HttpHost.create(serverUrl)) - .setDefaultHeaders(new Header[]{ - new BasicHeader("Authorization", "ApiKey " + apiKey) - }) - .build(); - - // Create the transport with a Jackson mapper - ElasticsearchTransport transport = new RestClientTransport( - restClient, new JacksonJsonpMapper()); - - // And create the API client - ElasticsearchClient esClient = new ElasticsearchClient(transport); + ElasticsearchClient esClient = ElasticsearchClient.of(b -> b + .host(serverUrl) + .apiKey(apiKey) + // Use the Jackson mapper to deserialize JSON to application objects + .jsonMapper(new JacksonJsonpMapper()) + ); // Use the client... 
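+        // As an illustration only (hypothetical "products" index; Void.class skips mapping document sources):
+        SearchResponse<Void> response = esClient.search(s -> s
+            .index("products")
+            .query(q -> q.matchAll(m -> m)),
+            Void.class
+        );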
@@ -98,13 +81,6 @@ public void createClientWithOpenTelemetry() throws Exception { String serverUrl = "https://localhost:9200"; String apiKey = "VnVhQ2ZHY0JDZGJrU..."; - // Create the low-level client - RestClient restClient = RestClient - .builder(HttpHost.create(serverUrl)) - .setDefaultHeaders(new Header[]{ - new BasicHeader("Authorization", "ApiKey " + apiKey) - }) - .build(); // Create and configure custom OpenTelemetry instance OpenTelemetry customOtel = OpenTelemetrySdk.builder().build(); @@ -113,14 +89,12 @@ public void createClientWithOpenTelemetry() throws Exception { OpenTelemetryForElasticsearch esOtelInstrumentation = new OpenTelemetryForElasticsearch(customOtel, false); - // Create the transport with the custom Instrumentation instance - ElasticsearchTransport transport = new RestClientTransport( - restClient, new JacksonJsonpMapper(), null, esOtelInstrumentation + ElasticsearchClient esClient = ElasticsearchClient.of(b -> b + .host(serverUrl) + .apiKey(apiKey) + .instrumentation(esOtelInstrumentation) ); - // And create the API client - ElasticsearchClient esClient = new ElasticsearchClient(transport); - // Use the client... // Close the client, also closing the underlying transport object and network connections. @@ -132,9 +106,7 @@ restClient, new JacksonJsonpMapper(), null, esOtelInstrumentation @Test public void createSecureClientCert() throws Exception { - // Create the low-level client - String host = "localhost"; - int port = 9200; + String url = "https://localhost:9200"; String login = "elastic"; String password = "changeme"; @@ -144,23 +116,12 @@ public void createSecureClientCert() throws Exception { SSLContext sslContext = TransportUtils .sslContextFromHttpCaCrt(certFile); // <1> - BasicCredentialsProvider credsProv = new BasicCredentialsProvider(); // <2> - credsProv.setCredentials( - AuthScope.ANY, new UsernamePasswordCredentials(login, password) + ElasticsearchClient esClient = ElasticsearchClient.of(b -> b + .host(url) // <3> + .usernameAndPassword(login, password) // <2> + .sslContext(sslContext) // <4> ); - RestClient restClient = RestClient - .builder(new HttpHost(host, port, "https")) // <3> - .setHttpClientConfigCallback(hc -> hc - .setSSLContext(sslContext) // <4> - .setDefaultCredentialsProvider(credsProv) - ) - .build(); - - // Create the transport and the API client - ElasticsearchTransport transport = new RestClientTransport(restClient, new JacksonJsonpMapper()); - ElasticsearchClient esClient = new ElasticsearchClient(transport); - // Use the client... // Close the client, also closing the underlying transport object and network connections. 
@@ -172,9 +133,7 @@ AuthScope.ANY, new UsernamePasswordCredentials(login, password) @Test public void createSecureClientFingerPrint() throws Exception { - // Create the low-level client - String host = "localhost"; - int port = 9200; + String url = "https://localhost:9200"; String login = "elastic"; String password = "changeme"; @@ -184,23 +143,12 @@ public void createSecureClientFingerPrint() throws Exception { SSLContext sslContext = TransportUtils .sslContextFromCaFingerprint(fingerprint); // <1> - BasicCredentialsProvider credsProv = new BasicCredentialsProvider(); // <2> - credsProv.setCredentials( - AuthScope.ANY, new UsernamePasswordCredentials(login, password) + ElasticsearchClient esClient = ElasticsearchClient.of(b -> b + .host(url) // <3> + .usernameAndPassword(login, password) // <2> + .sslContext(sslContext) // <4> ); - RestClient restClient = RestClient - .builder(new HttpHost(host, port, "https")) // <3> - .setHttpClientConfigCallback(hc -> hc - .setSSLContext(sslContext) // <4> - .setDefaultCredentialsProvider(credsProv) - ) - .build(); - - // Create the transport and the API client - ElasticsearchTransport transport = new RestClientTransport(restClient, new JacksonJsonpMapper()); - ElasticsearchClient esClient = new ElasticsearchClient(transport); - // Use the client... // Close the client, also closing the underlying transport object and network connections. diff --git a/java-client/src/test/java/co/elastic/clients/documentation/getting_started/MigrateHlrcTest.java b/java-client/src/test/java/co/elastic/clients/documentation/getting_started/MigrateHlrcTest.java index e857188b7..ef903cf27 100644 --- a/java-client/src/test/java/co/elastic/clients/documentation/getting_started/MigrateHlrcTest.java +++ b/java-client/src/test/java/co/elastic/clients/documentation/getting_started/MigrateHlrcTest.java @@ -49,6 +49,9 @@ public RestHighLevelClient build() { @Test public void migrate() { + // Note: we keep the legacy client on purpose here since this is to illustrate + // the co-existence of HLRC and the Java client + //tag::migrate // Create the low-level client RestClient httpClient = RestClient.builder( diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/ElasticsearchTestClient.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/ElasticsearchTestClient.java new file mode 100644 index 000000000..87461bc93 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/ElasticsearchTestClient.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch; + +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.jsonb.JsonbJsonpMapper; +import co.elastic.clients.transport.ElasticsearchTransport; +import co.elastic.clients.transport.ElasticsearchTransportConfig; +import co.elastic.clients.transport.rest5_client.Rest5ClientTransport; +import co.elastic.clients.transport.rest_client.RestClientTransport; +import com.sun.net.httpserver.HttpServer; + +import javax.annotation.Nullable; +import javax.net.ssl.SSLContext; +import java.security.SecureRandom; +import java.util.function.Function; + +public class ElasticsearchTestClient { + + protected enum ClientImpl { + Rest4, + Rest5; + + Function transportFactory() { + return switch (this) { + case Rest4 -> RestClientTransport::new; + case Rest5 -> Rest5ClientTransport::new; + }; + } + } + + // Same value for all tests in a test run + private static final ClientImpl flavor; + static { + var flavors = ClientImpl.values(); + flavor = flavors[new SecureRandom().nextInt(flavors.length)]; + } + + private static JsonpMapper mapper(JsonpMapper mapper) { + return mapper != null ? mapper : new JsonbJsonpMapper(); + } + + public static ElasticsearchClient createClient(String url, @Nullable JsonpMapper mapper, @Nullable SSLContext sslContext) { + System.out.println("Using a " + flavor + " client"); + + return ElasticsearchClient.of(b -> b + .host(url) + .jsonMapper(mapper(mapper)) + .usernameAndPassword("elastic", "changeme") + .sslContext(sslContext) + .transportFactory(flavor.transportFactory()) + ); + } + + public static ElasticsearchClient createClient(HttpServer server, @Nullable JsonpMapper mapper) { + var address = server.getAddress(); + return createClient("http://" + address.getHostString() + ":" + address.getPort(), mapper, null); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/ElasticsearchTestServer.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/ElasticsearchTestServer.java index 9fca54c03..52c3f82e6 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/ElasticsearchTestServer.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/ElasticsearchTestServer.java @@ -22,19 +22,10 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.json.JsonData; import co.elastic.clients.json.JsonpDeserializer; -import co.elastic.clients.json.JsonpMapper; -import co.elastic.clients.json.jsonb.JsonbJsonpMapper; -import co.elastic.clients.transport.ElasticsearchTransport; import co.elastic.clients.transport.JsonEndpoint; import co.elastic.clients.transport.Version; import co.elastic.clients.transport.endpoints.DelegatingJsonEndpoint; -import co.elastic.clients.transport.rest_client.RestClientTransport; import org.apache.commons.io.FileUtils; -import org.apache.http.HttpHost; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.elasticsearch.client.RestClient; import org.testcontainers.elasticsearch.ElasticsearchContainer; import org.testcontainers.images.builder.ImageFromDockerfile; import org.testcontainers.shaded.org.apache.commons.io.IOUtils; @@ -51,14 +42,14 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.Base64; +import java.util.Random; public class ElasticsearchTestServer implements AutoCloseable { private final String[] plugins; private volatile ElasticsearchContainer 
container; - private final JsonpMapper mapper = new JsonbJsonpMapper(); - private RestClient restClient; - private ElasticsearchTransport transport; + private String url; + private SSLContext sslContext; private ElasticsearchClient client; private static ElasticsearchTestServer global; @@ -102,19 +93,13 @@ public ElasticsearchTestServer(String... plugins) { this.plugins = plugins; } + // Same value for all tests in a test run + private static final int RAND = new Random().nextInt(100); + protected void setup(String url, SSLContext sslContext) { - BasicCredentialsProvider credsProv = new BasicCredentialsProvider(); - credsProv.setCredentials( - AuthScope.ANY, new UsernamePasswordCredentials("elastic", "changeme") - ); - restClient = RestClient.builder(HttpHost.create(url)) - .setHttpClientConfigCallback(hc -> hc - .setDefaultCredentialsProvider(credsProv) - .setSSLContext(sslContext) - ) - .build(); - transport = new RestClientTransport(restClient, mapper); - client = new ElasticsearchClient(transport); + this.url = url; + this.sslContext = sslContext; + this.client = ElasticsearchTestClient.createClient(url, null, sslContext); } private Version selectLatestVersion(Version version, String info) { @@ -263,16 +248,12 @@ public ElasticsearchContainer container() { return this.container; } - public RestClient restClient() { - return restClient; - } - - public ElasticsearchTransport transport() { - return transport; + public String url() { + return url; } - public JsonpMapper mapper() { - return mapper; + public SSLContext sslContext() { + return sslContext; } public ElasticsearchClient client() { diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/_helpers/esql/EsqlAdapterEndToEndTest.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/_helpers/esql/EsqlAdapterEndToEndTest.java index eaa413e2b..c374e51bc 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/_helpers/esql/EsqlAdapterEndToEndTest.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/_helpers/esql/EsqlAdapterEndToEndTest.java @@ -21,24 +21,23 @@ import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient; import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.elasticsearch.ElasticsearchTestClient; import co.elastic.clients.elasticsearch.ElasticsearchTestServer; import co.elastic.clients.elasticsearch._helpers.esql.jdbc.ResultSetEsqlAdapter; import co.elastic.clients.elasticsearch._helpers.esql.objects.ObjectsEsqlAdapter; import co.elastic.clients.json.jackson.JacksonJsonpMapper; -import co.elastic.clients.transport.rest_client.RestClientTransport; +import co.elastic.clients.transport.ElasticsearchTransportBase; +import co.elastic.clients.transport.http.TransportHttpClient; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.PropertyNamingStrategies; import com.fasterxml.jackson.databind.annotation.JsonNaming; -import org.apache.commons.io.IOUtils; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RestClient; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.sql.ResultSet; import java.sql.Timestamp; import java.time.ZoneId; @@ -46,6 +45,7 @@ import java.util.Arrays; import 
java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; public class EsqlAdapterEndToEndTest extends Assertions { @@ -54,19 +54,32 @@ public class EsqlAdapterEndToEndTest extends Assertions { @BeforeAll public static void setup() throws Exception { - ElasticsearchClient global = ElasticsearchTestServer.global().client(); - RestClient restClient = ((RestClientTransport) global._transport()).restClient(); - esClient = new ElasticsearchClient(new RestClientTransport(restClient, new JacksonJsonpMapper())); + var server = ElasticsearchTestServer.global(); + esClient = ElasticsearchTestClient.createClient(server.url(), new JacksonJsonpMapper(), server.sslContext()); + // Make sure index is empty esClient.indices().delete(d -> d.index("employees").ignoreUnavailable(true)); - Request request = new Request("POST", "/employees/_bulk?refresh=true"); + // Upload bulk data + TransportHttpClient httpClient = ((ElasticsearchTransportBase)esClient._transport()).httpClient(); + ByteBuffer body; + try (InputStream data = EsqlAdapterTest.class.getResourceAsStream("employees.ndjson")) { + body = ByteBuffer.wrap(data.readAllBytes()); + } + + TransportHttpClient.Request request = new TransportHttpClient.Request( + "POST", "/employees/_bulk", + Map.of("refresh", "true"), + Map.of("Content-Type", "application/vnd.elasticsearch+json; compatible-with=9" /*, "Accept", "application/json"*/), + List.of(body) + ); - InputStream resourceAsStream = EsqlAdapterTest.class.getResourceAsStream("employees.ndjson"); - byte[] bytes = IOUtils.toByteArray(resourceAsStream); - request.setEntity(new ByteArrayEntity(bytes, ContentType.APPLICATION_JSON)); + var response = httpClient.performRequest("bulk", null, request, null); - restClient.performRequest(request); + if (response.statusCode() != 200) { + fail("Unexpected response code: " + response.statusCode() + " - " + + new String(response.body().asByteBuffer().array(), StandardCharsets.UTF_8)); + } } @Test @@ -74,7 +87,8 @@ public void resultSetTest() throws Exception { ResultSet rs = esClient.esql().query( ResultSetEsqlAdapter.INSTANCE, - "FROM employees | WHERE emp_no == ? or emp_no == ? | KEEP emp_no, job_positions, hire_date | SORT emp_no | LIMIT 300", + "FROM employees | WHERE emp_no == ? or emp_no == ? | KEEP emp_no, job_positions, hire_date | " + + "SORT emp_no | LIMIT 300", // Testing parameters. Note that FROM and LIMIT do not accept parameters "10042", "10002" ); @@ -116,7 +130,8 @@ public void resultSetTest() throws Exception { public void objectsTest() throws Exception { Iterable result = esClient.esql().query( ObjectsEsqlAdapter.of(EmpData.class), - "FROM employees | WHERE emp_no == ? or emp_no == ? | KEEP emp_no, job_positions, hire_date | SORT emp_no | LIMIT 300", + "FROM employees | WHERE emp_no == ? or emp_no == ? | KEEP emp_no, job_positions, hire_date | " + + "SORT emp_no | LIMIT 300", // Testing parameters. Note that FROM and LIMIT do not accept parameters "10042", "10002" ); @@ -152,12 +167,14 @@ public void objectsTest() throws Exception { @Test public void asyncObjects() throws Exception { - ElasticsearchAsyncClient asyncClient = new ElasticsearchAsyncClient(esClient._transport(), esClient._transportOptions()); + ElasticsearchAsyncClient asyncClient = new ElasticsearchAsyncClient(esClient._transport(), + esClient._transportOptions()); CompletableFuture> future = asyncClient.esql().query( ObjectsEsqlAdapter.of(EmpData.class), - "FROM employees | WHERE emp_no == ? or emp_no == ? 
| KEEP emp_no, job_positions, hire_date | SORT emp_no | LIMIT 300", + "FROM employees | WHERE emp_no == ? or emp_no == ? | KEEP emp_no, job_positions, hire_date | " + + "SORT emp_no | LIMIT 300", // Testing parameters. Note that FROM and LIMIT do not accept parameters "10042", "10002" ); @@ -169,7 +186,8 @@ public void asyncObjects() throws Exception { EmpData emp = it.next(); assertEquals("10002", emp.empNo); List jobPositions = emp.jobPositions; - // In addition to the value, this tests that single strings are correctly deserialized as a list + // In addition to the value, this tests that single strings are correctly deserialized + // as a list assertEquals(Arrays.asList("Senior Team Lead"), emp.jobPositions); } @@ -183,7 +201,8 @@ public void asyncObjects() throws Exception { assertTrue(emp.jobPositions.contains("Junior Developer")); assertEquals("1993-03-21T00:00:00Z[UTC]", - DateTimeFormatter.ISO_DATE_TIME.format(emp.hireDate.toInstant().atZone(ZoneId.of("UTC"))) + DateTimeFormatter.ISO_DATE_TIME.format(emp.hireDate.toInstant().atZone(ZoneId.of( + "UTC"))) ); } diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/Bar.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/Bar.java index e7718e037..9d63ef63c 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/Bar.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/Bar.java @@ -38,7 +38,7 @@ public String name() { return this.name; } - public Bar(Bar.Builder builder) { + public Bar(Builder builder) { this.name = builder.name; } @@ -56,7 +56,7 @@ public static Builder builder() { public static class Builder implements ObjectBuilder { private String name; - public Bar.Builder name(@Nullable String name) { + public Builder name(@Nullable String name) { this.name = name; return this; } diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/query2/BoolQuery.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/query2/BoolQuery.java index 116f8d2a0..73d47f3b3 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/query2/BoolQuery.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/query2/BoolQuery.java @@ -38,7 +38,7 @@ public class BoolQuery implements Query.Variant, JsonpSerializable { private final Collection must; private final Union2 minimumShouldMatch; - public BoolQuery(BoolQuery.Builder builder) { + public BoolQuery(Builder builder) { this.should = builder.should; this.must = builder.must; this.minimumShouldMatch = builder.minimumShouldMatch; @@ -97,49 +97,49 @@ public static class Builder implements ObjectBuilder { private Collection must; private Union2 minimumShouldMatch; - public BoolQuery.Builder should(Collection v) { + public Builder should(Collection v) { this.should = v; return this; } - public BoolQuery.Builder add_should(@Nullable Query v) { + public Builder add_should(@Nullable Query v) { if (v == null) return this; if (this.should == null) this.should = new ArrayList<>(); this.should.add(v); return this; } - public BoolQuery.Builder add_should(Function> v) { + public Builder add_should(Function> v) { return add_should(v.apply(new Query.Builder()).build()); } - public BoolQuery.Builder must(Collection v) { + public Builder must(Collection v) { this.must = v; return this; } - public BoolQuery.Builder add_must(@Nullable Query v) { + public Builder add_must(@Nullable 
Query v) { if (v == null) return this; if (this.must == null) this.must = new ArrayList<>(); this.must.add(v); return this; } - public BoolQuery.Builder add_must(Function> v) { + public Builder add_must(Function> v) { return add_must(v.apply(new Query.Builder()).build()); } // Expand all union members // TODO: check unions with nested structures and fluent builders - public BoolQuery.Builder minimumShouldMatch(int v) { + public Builder minimumShouldMatch(int v) { return minimumShouldMatch(Union2.ofA(v)); } - public BoolQuery.Builder minimumShouldMatch(String v) { + public Builder minimumShouldMatch(String v) { return minimumShouldMatch(Union2.ofB(v)); } - public BoolQuery.Builder minimumShouldMatch(Union2 v) { + public Builder minimumShouldMatch(Union2 v) { this.minimumShouldMatch = v; return this; } diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/query2/TermsQuery.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/query2/TermsQuery.java index 3037faced..c7f4afca4 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/query2/TermsQuery.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/api/query2/TermsQuery.java @@ -32,7 +32,7 @@ public class TermsQuery implements JsonpSerializable, Query.Variant { private final String field; private final String term; - public TermsQuery(TermsQuery.Builder builder) { + public TermsQuery(Builder builder) { this.field = builder.field; this.term = builder.term; } diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/SomeUnion.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/SomeUnion.java index 905c319e3..9130b960e 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/SomeUnion.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/SomeUnion.java @@ -140,12 +140,12 @@ protected SomeUnion build() { } } - public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(SomeUnion.Builder::new, - SomeUnion::setupSomeUnionDeserializer, SomeUnion.Builder::build); + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + SomeUnion::setupSomeUnionDeserializer, Builder::build); protected static void setupSomeUnionDeserializer(ObjectDeserializer op) { - op.add(SomeUnion.Builder::variantA, UVariantA._DESERIALIZER, "variant_a"); - op.add(SomeUnion.Builder::variantB, UVariantB._DESERIALIZER, "variant_b"); + op.add(Builder::variantA, UVariantA._DESERIALIZER, "variant_a"); + op.add(Builder::variantB, UVariantB._DESERIALIZER, "variant_b"); op.setTypeProperty("type", null); } diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/UVariantA.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/UVariantA.java index 61aad4942..1deb48e08 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/UVariantA.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/UVariantA.java @@ -45,7 +45,7 @@ public String name() { return this.name; } - public UVariantA(UVariantA.Builder builder) { + public UVariantA(Builder builder) { this.name = builder.name; } @@ -74,8 +74,8 @@ public UVariantA build() { public static final JsonpDeserializer _DESERIALIZER; static { - ObjectDeserializer op = new 
ObjectDeserializer<>(UVariantA.Builder::new); - op.add(UVariantA.Builder::name, JsonpDeserializer.stringDeserializer(), "name"); + ObjectDeserializer op = new ObjectDeserializer<>(Builder::new); + op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); op.ignore("type"); _DESERIALIZER = new ObjectBuilderDeserializer<>(op); } diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/UVariantB.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/UVariantB.java index 2ddae5dfd..36b91b092 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/UVariantB.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/containers/UVariantB.java @@ -45,7 +45,7 @@ public Integer number () { return this.number; } - public UVariantB(UVariantB.Builder builder) { + public UVariantB(Builder builder) { this.number = builder.number; } @@ -74,8 +74,8 @@ public UVariantB build() { public static final JsonpDeserializer _DESERIALIZER; static { - ObjectDeserializer op = new ObjectDeserializer<>(UVariantB.Builder::new); - op.add(UVariantB.Builder::number, JsonpDeserializer.integerDeserializer(), "number"); + ObjectDeserializer op = new ObjectDeserializer<>(Builder::new); + op.add(Builder::number, JsonpDeserializer.integerDeserializer(), "number"); op.ignore("type"); _DESERIALIZER = new ObjectBuilderDeserializer<>(op); } diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/inheritance/final_/FinalClass.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/inheritance/final_/FinalClass.java index 12dfd16ce..e944f8346 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/inheritance/final_/FinalClass.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/experiments/inheritance/final_/FinalClass.java @@ -54,7 +54,7 @@ protected void toJsonpInternal(JsonGenerator generator, JsonpMapper mapper) { //--------------------------------------------------------------------------------------------- public static class Builder - extends ChildClass.AbstractBuilder + extends AbstractBuilder implements ObjectBuilder { private String finalField; diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/model/OverloadsTest.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/model/OverloadsTest.java new file mode 100644 index 000000000..573593ac1 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/model/OverloadsTest.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.model; + +import co.elastic.clients.elasticsearch._types.FieldSort; +import co.elastic.clients.elasticsearch._types.FieldValue; +import co.elastic.clients.elasticsearch._types.aggregations.TopMetrics; +import co.elastic.clients.elasticsearch._types.query_dsl.MatchAllQuery; +import co.elastic.clients.elasticsearch.core.SearchRequest; +import co.elastic.clients.testkit.ModelTestCase; +import org.junit.jupiter.api.Test; + +import java.util.List; + +public class OverloadsTest extends ModelTestCase { + + @Test + public void variantOverloads() { + // A variant can be used when a union or container is expected + MatchAllQuery maq = MatchAllQuery.of(b -> b); + + SearchRequest sr = SearchRequest.of(b -> b.query(maq)); + assertEquals(maq, sr.query().matchAll()); + } + + @Test + public void fieldValueOverloads() { + FieldSort fs = FieldSort.of(b -> b.field("field").missing(42)); + + assertTrue(fs.missing().isLong()); + assertEquals(42, fs.missing().longValue()); + } + + @Test + public void dictionaryAndArrayOverloads() { + TopMetrics tm = TopMetrics.of(b -> b + .metrics("foo", 13.37) + .sort("bar") // Required property + ); + + assertTrue(tm.metrics().get("foo").isDouble()); + assertEquals(13.37, tm.metrics().get("foo").doubleValue()); + } + + @Test + public void arrayOverloads() { + TopMetrics tm = TopMetrics.of(b -> b + .metrics("foo", 0) // Required property + .sort("bar") + .sort(42) + ); + + List sort = tm.sort(); + + assertTrue(sort.get(0).isString()); + assertEquals("bar", sort.get(0).stringValue()); + + assertTrue(sort.get(1).isLong()); + assertEquals(42, sort.get(1).longValue()); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/elasticsearch/spec_issues/SpecIssuesTest.java b/java-client/src/test/java/co/elastic/clients/elasticsearch/spec_issues/SpecIssuesTest.java index 7fae2ea82..7e7d5cba9 100644 --- a/java-client/src/test/java/co/elastic/clients/elasticsearch/spec_issues/SpecIssuesTest.java +++ b/java-client/src/test/java/co/elastic/clients/elasticsearch/spec_issues/SpecIssuesTest.java @@ -302,7 +302,7 @@ public void gettingVersionFromNodes() throws Exception { .nodes().info().nodes().entrySet().forEach(node -> assertNotNull(node.getValue().version())); } - + private T loadRsrc(String res, JsonpDeserializer deser) { InputStream is = this.getClass().getResourceAsStream(res); assertNotNull(is, "Resource not found: " + res); diff --git a/java-client/src/test/java/co/elastic/clients/testkit/MockHttpClient.java b/java-client/src/test/java/co/elastic/clients/testkit/MockHttpClient.java index cc2c0e878..a54c8c876 100644 --- a/java-client/src/test/java/co/elastic/clients/testkit/MockHttpClient.java +++ b/java-client/src/test/java/co/elastic/clients/testkit/MockHttpClient.java @@ -69,7 +69,7 @@ public void close() throws IOException { @Override public Response performRequest( - String endpointId, @Nullable TransportHttpClient.Node node, Request request, TransportOptions option + String endpointId, @Nullable Node node, Request request, TransportOptions option ) throws IOException { Response response = responses.get(request.path()); @@ -82,7 +82,7 @@ public Response performRequest( @Override public CompletableFuture performRequestAsync( - String endpointId, @Nullable TransportHttpClient.Node node, Request request, TransportOptions options + String endpointId, @Nullable Node node, Request request, TransportOptions options ) { CompletableFuture result = new CompletableFuture<>(); try { @@ -98,7 +98,7 @@ public CompletableFuture 
performRequestAsync( public void close() throws IOException { } - private static class MockResponse implements TransportHttpClient.Response { + private static class MockResponse implements Response { private final int statusCode; private final BinaryData body; diff --git a/java-client/src/test/java/co/elastic/clients/transport/ElasticsearchTransportConfigTest.java b/java-client/src/test/java/co/elastic/clients/transport/ElasticsearchTransportConfigTest.java new file mode 100644 index 000000000..18e1b7852 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/ElasticsearchTransportConfigTest.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport; + +import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.transport.rest5_client.Rest5ClientOptions; +import co.elastic.clients.transport.rest5_client.Rest5ClientTransport; +import co.elastic.clients.transport.rest_client.RestClientOptions; +import co.elastic.clients.transport.rest_client.RestClientTransport; +import org.elasticsearch.client.RequestOptions; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class ElasticsearchTransportConfigTest extends Assertions { + + @Test + public void buildLegacy() { + + // create client with helper + ElasticsearchClient client = ElasticsearchClient.of(b -> b + .host("http://example.com") + .usernameAndPassword("elastic", "changeme") + .useLegacyTransport(true) + ); + + RestClientOptions options = new RestClientOptions(RequestOptions.DEFAULT, true); + + client = client.withTransportOptions(options); + + // checking options correctness + assertEquals(client._transport().getClass(), RestClientTransport.class); + assertTrue(client._transportOptions().keepResponseBodyOnException()); + assertEquals(3, client._transportOptions().headers().size()); + + // token update utility: not supported on legacy transport + ElasticsearchClient finalClient = client; + assertThrows(UnsupportedOperationException.class, () -> finalClient._transportOptions().updateToken("token")); + } + + @Test + public void buildRest5() { + + // create client with helper + ElasticsearchClient client = ElasticsearchClient.of(b -> b + .host("http://example.com") + .usernameAndPassword("elastic", "changeme") + ); + + Rest5ClientOptions options = new Rest5ClientOptions(co.elastic.clients.transport.rest5_client.low_level.RequestOptions.DEFAULT, + true); + + client = client.withTransportOptions(options); + + // checking options correctness + assertInstanceOf(Rest5ClientTransport.class, client._transport()); + assertTrue(client._transportOptions().keepResponseBodyOnException()); + assertEquals(3, client._transportOptions().headers().size()); + + // token update 
utility: supported on new transport + client._transportOptions().updateToken("token"); + assertEquals(4, client._transportOptions().headers().size()); + assertTrue(client._transportOptions().headers().stream().anyMatch(h -> h.getKey().equals( + "Authorization"))); + } + + @Test + public void credentialCombinations() { + + // Bare minimum is a host URL + new ElasticsearchTransportConfig.Builder() + .host("http://example.com") + .build(); + + assertThrows(IllegalArgumentException.class, () -> new ElasticsearchTransportConfig.Builder() + .host("http://example.com") + .usernameAndPassword("elastic", null) + .build() + ); + + assertThrows(IllegalArgumentException.class, () -> new ElasticsearchTransportConfig.Builder() + .host("http://example.com") + .usernameAndPassword(null, "password") + .build() + ); + + assertThrows(IllegalArgumentException.class, () -> new ElasticsearchTransportConfig.Builder() + .host("http://example.com") + .usernameAndPassword("elastic", "password") + .token("token") + .build() + ); + + assertThrows(IllegalArgumentException.class, () -> new ElasticsearchTransportConfig.Builder() + .host("http://example.com") + .usernameAndPassword("elastic", "password") + .apiKey("api_key") + .build() + ); + + assertThrows(IllegalArgumentException.class, () -> new ElasticsearchTransportConfig.Builder() + .host("http://example.com") + .apiKey("api_key") + .token("token") + .build() + ); + } + + @Test + public void checkDefaultConfig() throws Exception { + ElasticsearchTransportConfig.Default config = new ElasticsearchTransportConfig.Builder() + .host("http://example.com") + .build(); + + try (var transport = config.buildTransport()) { + assertInstanceOf(Rest5ClientTransport.class, transport); + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/RequestOptionsTest.java b/java-client/src/test/java/co/elastic/clients/transport/RequestOptionsTest.java index c7927f980..4ad221c1e 100644 --- a/java-client/src/test/java/co/elastic/clients/transport/RequestOptionsTest.java +++ b/java-client/src/test/java/co/elastic/clients/transport/RequestOptionsTest.java @@ -20,14 +20,10 @@ package co.elastic.clients.transport; import co.elastic.clients.elasticsearch.ElasticsearchClient; -import co.elastic.clients.json.jsonb.JsonbJsonpMapper; -import co.elastic.clients.transport.rest_client.RestClientTransport; +import co.elastic.clients.elasticsearch.ElasticsearchTestClient; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpHost; import org.apache.http.NameValuePair; import org.apache.http.client.utils.URLEncodedUtils; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.RestClient; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; @@ -43,10 +39,13 @@ import java.util.Map; import java.util.Properties; +/** + * Request options tests independent of the transport implementation. 
+ */ public class RequestOptionsTest extends Assertions { private static HttpServer httpServer; - private static RestClient restClient; + private static ElasticsearchClient client; @BeforeEach public void classSetup() throws IOException { @@ -59,9 +58,10 @@ public void classSetup() throws IOException { } // Call to info() - // Send back all request headers with a 418 that will cause an exception where we can access the LLRC response + // Send back all request headers with a non-json content type that will cause an exception where we can access the raw response + ex.getResponseHeaders().put("Content-Type", List.of("application/octet-stream")); ex.getResponseHeaders().putAll(ex.getRequestHeaders()); - ex.sendResponseHeaders(418, 0); + ex.sendResponseHeaders(200, 0); OutputStreamWriter out = new OutputStreamWriter(ex.getResponseBody(), StandardCharsets.UTF_8); for (Map.Entry> header: ex.getRequestHeaders().entrySet()) { out.write("header-"); @@ -81,29 +81,32 @@ public void classSetup() throws IOException { }); httpServer.start(); - InetSocketAddress address = httpServer.getAddress(); - restClient = RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) - .build(); + client = ElasticsearchTestClient.createClient(httpServer, null); + + // We need to buffer the response body on exception to retrieve it. + var transport = (ElasticsearchTransport) client._transport() + .withOptions(opt -> opt.keepResponseBodyOnException(true)); + client = new ElasticsearchClient(transport, null); } @AfterEach public void classTearDown() throws IOException { httpServer.stop(0); - restClient.close(); + httpServer = null; + client.close(); + client = null; } private Properties getProps(ElasticsearchClient client) throws IOException { - ResponseException ex = assertThrows(ResponseException.class, client::info); - assertEquals(418, ex.getResponse().getStatusLine().getStatusCode()); + TransportException ex = assertThrows(TransportException.class, client::info); Properties result = new Properties(); - result.load(ex.getResponse().getEntity().getContent()); + result.load(ex.response().body().asInputStream()); return result; } @Test public void testNonNullClientOptions() { - final RestClientTransport trsp = new RestClientTransport(restClient, new JsonbJsonpMapper()); - final ElasticsearchClient client = new ElasticsearchClient(trsp); + final ElasticsearchTransport trsp = client._transport(); assertNotNull(client._transportOptions()); assertSame(trsp.options(), client._transportOptions()); @@ -111,9 +114,6 @@ public void testNonNullClientOptions() { @Test public void testDefaultHeaders() throws IOException { - final RestClientTransport trsp = new RestClientTransport(restClient, new JsonbJsonpMapper()); - final ElasticsearchClient client = new ElasticsearchClient(trsp); - Properties props = getProps(client); assertTrue(props.getProperty("header-user-agent").startsWith("elastic-java/" + Version.VERSION.toString())); @@ -127,21 +127,21 @@ public void testDefaultHeaders() throws IOException { @Test public void testClientHeader() throws IOException { - final RestClientTransport trsp = new RestClientTransport(restClient, new JsonbJsonpMapper()); - final ElasticsearchClient client = new ElasticsearchClient(trsp) + final ElasticsearchClient newClient = client .withTransportOptions(b -> b .addHeader("X-Foo", "Bar") .addHeader("uSer-agEnt", "MegaClient/1.2.3") ); - Properties props = getProps(client); + Properties props = getProps(newClient); assertEquals("Bar", props.getProperty("header-x-foo")); 
assertEquals("MegaClient/1.2.3", props.getProperty("header-user-agent")); } @Test public void testQueryParameter() throws IOException { - final RestClientTransport trsp = new RestClientTransport(restClient, new JsonbJsonpMapper()); + final ElasticsearchTransport trsp = client._transport(); + final ElasticsearchClient client = new ElasticsearchClient(trsp) .withTransportOptions(trsp.options().with( b -> b.setParameter("format", "pretty") @@ -154,9 +154,6 @@ public void testQueryParameter() throws IOException { @Test public void testMissingProductHeader() { - final RestClientTransport trsp = new RestClientTransport(restClient, new JsonbJsonpMapper()); - final ElasticsearchClient client = new ElasticsearchClient(trsp); - final TransportException ex = assertThrows(TransportException.class, client::ping); assertTrue(ex.getMessage().contains("Missing [X-Elastic-Product] header")); } diff --git a/java-client/src/test/java/co/elastic/clients/transport/TransportTest.java b/java-client/src/test/java/co/elastic/clients/transport/TransportTest.java index d25466bbd..d4f40f080 100644 --- a/java-client/src/test/java/co/elastic/clients/transport/TransportTest.java +++ b/java-client/src/test/java/co/elastic/clients/transport/TransportTest.java @@ -20,16 +20,12 @@ package co.elastic.clients.transport; import co.elastic.clients.elasticsearch.ElasticsearchClient; -import co.elastic.clients.json.jackson.JacksonJsonpMapper; +import co.elastic.clients.elasticsearch.ElasticsearchTestClient; +import co.elastic.clients.json.JsonpMapper; import co.elastic.clients.transport.http.RepeatableBodyResponse; -import co.elastic.clients.transport.rest_client.RestClientOptions; -import co.elastic.clients.transport.rest_client.RestClientTransport; +import co.elastic.clients.transport.instrumentation.Instrumentation; import co.elastic.clients.util.BinaryData; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpHost; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -45,6 +41,16 @@ public class TransportTest extends Assertions { + // Make the protected method publicly visible in tests + public static ElasticsearchTransportBase cloneTransportWith( + ElasticsearchTransportBase transport, + TransportOptions options, + JsonpMapper mapper, + Instrumentation instrumentation + ) { + return transport.cloneWith(options, mapper, instrumentation); + } + @Test public void testXMLResponse() throws Exception { HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), @@ -60,14 +66,8 @@ public void testXMLResponse() throws Exception { }); httpServer.start(); - InetSocketAddress address = httpServer.getAddress(); - RestClient restClient = RestClient - .builder(new HttpHost(address.getHostString(), address.getPort(), "http")) - .build(); - - ElasticsearchClient esClient = new ElasticsearchClient(new RestClientTransport(restClient, - new JacksonJsonpMapper())); + ElasticsearchClient esClient = ElasticsearchTestClient.createClient(httpServer, null); TransportException ex = Assertions.assertThrows( TransportException.class, @@ -78,13 +78,8 @@ public void testXMLResponse() throws Exception { assertEquals(401, ex.statusCode()); assertEquals("es/cat.indices", ex.endpointId()); - - // Original response is transport-dependent - Response restClientResponse = (Response) ex.response().originalResponse(); - assertEquals(401, 
restClientResponse.getStatusLine().getStatusCode()); } - @Test public void testOriginalJsonBodyRetrievalException() throws Exception { HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), @@ -105,13 +100,7 @@ public void testOriginalJsonBodyRetrievalException() throws Exception { httpServer.start(); InetSocketAddress address = httpServer.getAddress(); - RestClient restClient = RestClient - .builder(new HttpHost(address.getHostString(), address.getPort(), "http")) - .build(); - - // no transport options, response is not RepeatableBodyResponse, original body cannot be retrieved - ElasticsearchClient esClient = new ElasticsearchClient(new RestClientTransport(restClient, - new JacksonJsonpMapper())); + var esClient = ElasticsearchTestClient.createClient(httpServer, null); TransportException ex = Assertions.assertThrows( TransportException.class, @@ -122,12 +111,8 @@ public void testOriginalJsonBodyRetrievalException() throws Exception { assertNotEquals(RepeatableBodyResponse.class, ex.response().getClass()); // setting transport option - RestClientOptions options = new RestClientOptions(RequestOptions.DEFAULT, true); - - ElasticsearchTransport transport = new RestClientTransport( - restClient, new JacksonJsonpMapper(), options); - - ElasticsearchClient esClientOptions = new ElasticsearchClient(transport); + var transport = (ElasticsearchTransport) esClient._transport().withOptions(o -> o.keepResponseBodyOnException(true)); + ElasticsearchClient esClientOptions = new ElasticsearchClient(transport, null); ex = Assertions.assertThrows( TransportException.class, @@ -137,18 +122,18 @@ public void testOriginalJsonBodyRetrievalException() throws Exception { httpServer.stop(0); assertEquals(200, ex.statusCode()); - assertEquals(RepeatableBodyResponse.class, ex.response().getClass()); - - try (RepeatableBodyResponse repeatableResponse = (RepeatableBodyResponse) ex.response()){ - BinaryData body = repeatableResponse.body(); - StringBuilder sb = new StringBuilder(); - BufferedReader br = new BufferedReader(new InputStreamReader(body.asInputStream())); - String read; - - while ((read = br.readLine()) != null) { - sb.append(read); - } - br.close(); + + try (var response = ex.response()){ + BinaryData body = response.body(); + assertTrue(body.isRepeatable()); + StringBuilder sb = new StringBuilder(); + BufferedReader br = new BufferedReader(new InputStreamReader(body.asInputStream())); + String read; + + while ((read = br.readLine()) != null) { + sb.append(read); + } + br.close(); assertEquals("definitely not json",sb.toString()); } } diff --git a/java-client/src/test/java/co/elastic/clients/transport/TransportUtilsTest.java b/java-client/src/test/java/co/elastic/clients/transport/TransportUtilsTest.java index 9b0ee3081..468cfaa7f 100644 --- a/java-client/src/test/java/co/elastic/clients/transport/TransportUtilsTest.java +++ b/java-client/src/test/java/co/elastic/clients/transport/TransportUtilsTest.java @@ -19,18 +19,10 @@ package co.elastic.clients.transport; -import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.elasticsearch.ElasticsearchTestClient; import co.elastic.clients.elasticsearch.ElasticsearchTestServer; -import co.elastic.clients.json.SimpleJsonpMapper; -import co.elastic.clients.transport.rest_client.RestClientTransport; -import org.apache.http.HttpHost; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.impl.client.BasicCredentialsProvider; 
-import org.elasticsearch.client.RestClient; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import org.testcontainers.elasticsearch.ElasticsearchContainer; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLHandshakeException; @@ -85,23 +77,13 @@ public void testCaCertificate() throws Exception { ); } - private void checkConnection(SSLContext sslContext) throws Exception { - ElasticsearchContainer container = ElasticsearchTestServer.global().container(); - - BasicCredentialsProvider credsProv = new BasicCredentialsProvider(); - credsProv.setCredentials( - AuthScope.ANY, new UsernamePasswordCredentials("elastic", "changeme") - ); - - RestClient restClient = RestClient.builder(new HttpHost("localhost", container.getMappedPort(9200), "https")) - .setHttpClientConfigCallback(c -> c - .setSSLContext(sslContext) - .setDefaultCredentialsProvider(credsProv) - ) - .build(); + @Test void testInsecureContext() throws Exception { + checkConnection(TransportUtils.insecureSSLContext()); + } - RestClientTransport transport = new RestClientTransport(restClient, SimpleJsonpMapper.INSTANCE); - ElasticsearchClient esClient = new ElasticsearchClient(transport); + private void checkConnection(SSLContext sslContext) throws Exception { + var server = ElasticsearchTestServer.global(); + var esClient = ElasticsearchTestClient.createClient(server.url(), null, sslContext); assertNotNull(esClient.info()); } diff --git a/java-client/src/test/java/co/elastic/clients/transport/endpoints/BinaryEndpointTest.java b/java-client/src/test/java/co/elastic/clients/transport/endpoints/BinaryEndpointTest.java index de4175b3c..e0adc7ed8 100644 --- a/java-client/src/test/java/co/elastic/clients/transport/endpoints/BinaryEndpointTest.java +++ b/java-client/src/test/java/co/elastic/clients/transport/endpoints/BinaryEndpointTest.java @@ -20,15 +20,12 @@ package co.elastic.clients.transport.endpoints; import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.elasticsearch.ElasticsearchTestClient; import co.elastic.clients.elasticsearch.ElasticsearchTestServer; import co.elastic.clients.elasticsearch.core.SearchRequest; -import co.elastic.clients.json.SimpleJsonpMapper; import co.elastic.clients.transport.TransportOptions; -import co.elastic.clients.transport.rest_client.RestClientTransport; import com.sun.net.httpserver.HttpServer; import org.apache.commons.io.IOUtils; -import org.apache.http.HttpHost; -import org.elasticsearch.client.RestClient; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; @@ -71,12 +68,7 @@ public static void cleanup() { @Test public void testMvtSearch() throws IOException { - RestClient llrc = RestClient.builder( - new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort(), "http") - ).build(); - - RestClientTransport transport = new RestClientTransport(llrc, new SimpleJsonpMapper()); - ElasticsearchClient esClient = new ElasticsearchClient(transport); + var esClient = ElasticsearchTestClient.createClient(httpServer, null); BinaryResponse resp = esClient.searchMvt(s -> s .index("foo") diff --git a/java-client/src/test/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearchTest.java b/java-client/src/test/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearchTest.java index f9435165d..66879470a 100644 --- 
a/java-client/src/test/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearchTest.java +++ b/java-client/src/test/java/co/elastic/clients/transport/instrumentation/OpenTelemetryForElasticsearchTest.java @@ -21,11 +21,12 @@ import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient; import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.elasticsearch.ElasticsearchTestClient; import co.elastic.clients.elasticsearch._types.query_dsl.Query; import co.elastic.clients.elasticsearch.core.SearchRequest; import co.elastic.clients.json.JsonpUtils; -import co.elastic.clients.json.jackson.JacksonJsonpMapper; -import co.elastic.clients.transport.rest_client.RestClientTransport; +import co.elastic.clients.transport.ElasticsearchTransportBase; +import co.elastic.clients.transport.TransportTest; import com.sun.net.httpserver.HttpServer; import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.common.AttributeKey; @@ -39,8 +40,6 @@ import io.opentelemetry.sdk.trace.export.SpanExporter; import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; import io.opentelemetry.semconv.trace.attributes.SemanticAttributes; -import org.apache.http.HttpHost; -import org.elasticsearch.client.RestClient; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; @@ -110,8 +109,6 @@ public class OpenTelemetryForElasticsearchTest { private static HttpServer httpServer; private static MockSpanExporter spanExporter; private static OpenTelemetry openTelemetry; - private static RestClient restClient; - private static RestClientTransport transport; private static ElasticsearchClient client; private static ElasticsearchAsyncClient asyncClient; @@ -125,16 +122,15 @@ public static void setup() throws IOException { @AfterAll public static void cleanUp() throws IOException { httpServer.stop(0); - transport.close(); + client.close(); } private static void setupClient() { - restClient = - RestClient.builder(new HttpHost(httpServer.getAddress().getAddress(), httpServer.getAddress().getPort())).build(); Instrumentation instrumentation = new OpenTelemetryForElasticsearch(openTelemetry, false); - transport = new RestClientTransport(restClient, new JacksonJsonpMapper(), null, instrumentation); + var transport = (ElasticsearchTransportBase) ElasticsearchTestClient.createClient(httpServer, null)._transport(); + transport = TransportTest.cloneTransportWith(transport, null, null, instrumentation); client = new ElasticsearchClient(transport); asyncClient = new ElasticsearchAsyncClient(transport); @@ -195,10 +191,10 @@ public void testGetRequest() throws IOException, InterruptedException { Assertions.assertEquals("GET", span.getAttributes().get(AttributeKey.stringKey(HTTP_REQUEST_METHOD))); Assertions.assertEquals("elasticsearch", span.getAttributes().get(SemanticAttributes.DB_SYSTEM)); - String url = "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort() + + String url = "http://" + httpServer.getAddress().getHostString() + ":" + httpServer.getAddress().getPort() + "/" + INDEX + "/_doc/" + DOC_ID + "?refresh=true"; Assertions.assertEquals(url, span.getAttributes().get(AttributeKey.stringKey(URL_FULL))); - Assertions.assertEquals(httpServer.getAddress().getHostName(), span.getAttributes().get(AttributeKey.stringKey(SERVER_ADDRESS))); + Assertions.assertEquals(httpServer.getAddress().getHostString(), 
span.getAttributes().get(AttributeKey.stringKey(SERVER_ADDRESS))); Assertions.assertEquals(httpServer.getAddress().getPort(), span.getAttributes().get(AttributeKey.longKey(SERVER_PORT))); // Path parts @@ -208,9 +204,10 @@ public void testGetRequest() throws IOException, InterruptedException { @Test public void testSearchRequest() throws IOException, InterruptedException { // A client that will capture requests - ElasticsearchClient client = new ElasticsearchClient(new RestClientTransport( - restClient, this.client._jsonpMapper(), null, new OpenTelemetryForElasticsearch(openTelemetry, true)) - ); + var transport = (ElasticsearchTransportBase) client._transport(); + transport = TransportTest.cloneTransportWith(transport, null, null, new OpenTelemetryForElasticsearch(openTelemetry, true)); + client = new ElasticsearchClient(transport); + SearchRequest req = SearchRequest.of(r -> r.index(INDEX).query(q -> q.term(t -> t.field("x").value("y")))); String queryAsString = JsonpUtils.toJsonString(req, client._jsonpMapper()); client.search(req, Object.class); diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/MultiBufferEntityTest.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/MultiBufferEntityTest.java new file mode 100644 index 000000000..62d4c2ce7 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/MultiBufferEntityTest.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.elasticsearch.core.BulkRequest; +import co.elastic.clients.json.jackson.JacksonJsonpMapper; +import co.elastic.clients.transport.endpoints.BinaryResponse; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import com.sun.net.httpserver.HttpServer; +import org.apache.commons.io.IOUtils; +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; + +public class MultiBufferEntityTest { + + @Test + public void testBulkRequest() throws IOException { + + HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + + httpServer.createContext("/_bulk", exchange -> { + exchange.getResponseHeaders().set("X-Elastic-Product", "Elasticsearch"); + byte[] bytes = IOUtils.toByteArray(exchange.getRequestBody()); + exchange.sendResponseHeaders(200, 0); + exchange.getResponseBody().write(bytes); + exchange.close(); + }); + + httpServer.start(); + + Rest5Client restClient = + Rest5Client.builder(new HttpHost(httpServer.getAddress().getAddress(), httpServer.getAddress().getPort())).build(); + + BulkRequest req = BulkRequest.of(_0 -> _0 + .operations(_1 -> _1 + .create(_2 -> _2 + .index("foo") + .id("abc") + .document("abc-doc") + )) + .operations(_1 -> _1 + .create(_2 -> _2 + .index("foo") + .id("def") + .document("def-doc") + )) + .operations(_1 -> _1 + .update(_2 -> _2 + .index("foo") + .id("gh") + .action(_3 -> _3 + .docAsUpsert(true) + .doc("gh-doc")) + ) + ) + ); + Rest5ClientTransport transport = new Rest5ClientTransport(restClient, new JacksonJsonpMapper()); + + BinaryResponse binaryResponse = transport.performRequest(req, BulkRequest._ENDPOINT.withBinaryResponse(), null); + + String str = IOUtils.toString(binaryResponse.content(), StandardCharsets.UTF_8); + + httpServer.stop(0); + transport.close(); + + Assertions.assertEquals( + "{\"create\":{\"_id\":\"abc\",\"_index\":\"foo\"}}\n" + + "\"abc-doc\"\n" + + "{\"create\":{\"_id\":\"def\",\"_index\":\"foo\"}}\n" + + "\"def-doc\"\n" + + "{\"update\":{\"_id\":\"gh\",\"_index\":\"foo\"}}\n" + + "{\"doc\":\"gh-doc\",\"doc_as_upsert\":true}\n", + str + ); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/RequestOptionsTest.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/RequestOptionsTest.java new file mode 100644 index 000000000..f4a1d1d1d --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/RequestOptionsTest.java @@ -0,0 +1,163 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.json.jsonb.JsonbJsonpMapper; +import co.elastic.clients.transport.TransportException; +import co.elastic.clients.transport.Version; +import co.elastic.clients.transport.rest5_client.low_level.ResponseException; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import com.sun.net.httpserver.HttpServer; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.NameValuePair; +import org.apache.hc.core5.net.URLEncodedUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Properties; + +public class RequestOptionsTest extends Assertions { + + private static HttpServer httpServer; + private static Rest5Client restClient; + + @BeforeEach + public void classSetup() throws IOException { + + httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.createContext("/", ex -> { + if (ex.getRequestMethod().equals("HEAD")) { + // Call to ping() + ex.sendResponseHeaders(200, -1); + } + + // Call to info() + // Send back all request headers with a 501 that will cause an exception where we can access the LLRC response + ex.getResponseHeaders().putAll(ex.getRequestHeaders()); + ex.sendResponseHeaders(501, 0); + OutputStreamWriter out = new OutputStreamWriter(ex.getResponseBody(), StandardCharsets.UTF_8); + for (Map.Entry> header: ex.getRequestHeaders().entrySet()) { + out.write("header-"); + out.write(header.getKey().toLowerCase(Locale.ROOT)); + out.write("="); + out.write(header.getValue().get(0)); + out.write("\n"); + } + final List params = URLEncodedUtils.parse(ex.getRequestURI(), StandardCharsets.UTF_8); + for (NameValuePair param: params) { + out.write("param-"); + out.write(param.getName()); + out.write("="); + out.write(param.getValue()); + } + out.close(); + }); + + httpServer.start(); + restClient = Rest5Client.builder(new HttpHost("http",httpServer.getAddress().getHostString(), + httpServer.getAddress().getPort())).build(); + } + + @AfterEach + public void classTearDown() throws IOException { + httpServer.stop(0); + restClient.close(); + } + + private Properties getProps(ElasticsearchClient client) throws IOException { + ResponseException ex = assertThrows(ResponseException.class, client::info); + assertEquals(501, ex.getResponse().getStatusCode()); + Properties result = new Properties(); + result.load(ex.getResponse().getEntity().getContent()); + return result; + } + + @Test + public void testNonNullClientOptions() { + final Rest5ClientTransport trsp = new Rest5ClientTransport(restClient, new JsonbJsonpMapper()); + final ElasticsearchClient client = new ElasticsearchClient(trsp); + + assertNotNull(client._transportOptions()); + assertSame(trsp.options(), client._transportOptions()); + } + + @Test + public void testDefaultHeaders() throws IOException { + final Rest5ClientTransport trsp = new Rest5ClientTransport(restClient, new JsonbJsonpMapper()); + final ElasticsearchClient 
client = new ElasticsearchClient(trsp); + + Properties props = getProps(client); + + assertTrue(props.getProperty("header-user-agent").startsWith("elastic-java/" + Version.VERSION.toString())); + assertTrue(props.getProperty("header-x-elastic-client-meta").contains("es=")); + assertTrue(props.getProperty("header-x-elastic-client-meta").contains("hl=2")); + assertEquals( + "application/vnd.elasticsearch+json; compatible-with=" + Version.VERSION.major(), + props.getProperty("header-accept") + ); + } + + @Test + public void testClientHeader() throws IOException { + final Rest5ClientTransport trsp = new Rest5ClientTransport(restClient, new JsonbJsonpMapper()); + final ElasticsearchClient client = new ElasticsearchClient(trsp) + .withTransportOptions(b -> b + .addHeader("X-Foo", "Bar") + .addHeader("uSer-agEnt", "MegaClient/1.2.3") + ); + + Properties props = getProps(client); + assertEquals("Bar", props.getProperty("header-x-foo")); + assertEquals("MegaClient/1.2.3", props.getProperty("header-user-agent")); + } + + @Test + public void testQueryParameter() throws IOException { + final Rest5ClientTransport trsp = new Rest5ClientTransport(restClient, new JsonbJsonpMapper()); + final ElasticsearchClient client = new ElasticsearchClient(trsp) + .withTransportOptions(trsp.options().with( + b -> b.setParameter("format", "pretty") + ) + ); + + Properties props = getProps(client); + assertEquals("pretty", props.getProperty("param-format")); + } + + @Test + public void testMissingProductHeader() { + final Rest5ClientTransport trsp = new Rest5ClientTransport(restClient, new JsonbJsonpMapper()); + final ElasticsearchClient client = new ElasticsearchClient(trsp); + + final TransportException ex = assertThrows(TransportException.class, client::ping); + assertTrue(ex.getMessage().contains("Missing [X-Elastic-Product] header")); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/Rest5ClientOptionsTest.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/Rest5ClientOptionsTest.java new file mode 100644 index 000000000..439acc08e --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/Rest5ClientOptionsTest.java @@ -0,0 +1,202 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.SimpleJsonpMapper; +import co.elastic.clients.transport.ElasticsearchTransport; +import co.elastic.clients.transport.Version; +import co.elastic.clients.transport.endpoints.BooleanResponse; +import co.elastic.clients.transport.rest5_client.low_level.RequestOptions; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import com.sun.net.httpserver.Headers; +import com.sun.net.httpserver.HttpServer; +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +class Rest5ClientOptionsTest extends Assertions { + + /** Collected headers by test name */ + private static Map collectedHeaders; + private static final AtomicInteger testCounter = new AtomicInteger(); + private static HttpServer httpServer; + + private static final String MIME_TYPE = "application/vnd.elasticsearch+json; compatible-with=" + Version.VERSION.major(); + + @BeforeAll + public static void setup() throws IOException { + collectedHeaders = new ConcurrentHashMap<>(); + httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + + // Register a handler on the core.exists("capture-handler/{name}") endpoint that will capture request headers. + httpServer.createContext("/capture-headers/_doc/", exchange -> { + String testName = exchange.getRequestURI().getPath().substring("/capture-headers/_doc/".length()); + System.out.println(exchange.getResponseHeaders()); + System.out.println(); + collectedHeaders.put(testName, exchange.getRequestHeaders()); + + // Reply with an empty 200 response + exchange.getResponseHeaders().set("X-Elastic-Product", "Elasticsearch"); + exchange.sendResponseHeaders(200, -1); + exchange.close(); + }); + + httpServer.start(); + } + + @AfterAll + public static void cleanup() { + httpServer.stop(0); + httpServer = null; + collectedHeaders = null; + } + + private ElasticsearchTransport newRest5ClientTransport(Rest5Client Rest5Client, JsonpMapper mapper) { + return new Rest5ClientTransport(Rest5Client, mapper, null); + } + + private ElasticsearchTransport newRest5ClientTransport(Rest5Client Rest5Client, JsonpMapper mapper, Rest5ClientOptions options) { + return new Rest5ClientTransport(Rest5Client, mapper, options); + //return new Rest5ClientMonolithTransport(Rest5Client, mapper, options); + } + + /** + * Make a server call, capture request headers and check their consistency. + * + * @return the name of the entry in collectedHeaders for further inspection. 
+ */ + private String checkHeaders(ElasticsearchClient esClient) throws IOException { + String testName = "test-" + testCounter.incrementAndGet(); + BooleanResponse exists = esClient.exists(r -> r.index("capture-headers").id(testName)); + assertTrue(exists.value()); + + Headers headers = collectedHeaders.get(testName); + assertNotNull(headers, "No headers collected for test " + testName); + + assertNotNull(headers.get("X-elastic-client-meta"), "Missing client meta header"); + assertEquals(Rest5ClientOptions.CLIENT_META_VALUE, headers.get("X-elastic-client-meta").get(0)); + assertNotNull(headers.get("Accept"), "Missing 'Accept' header"); + assertEquals(MIME_TYPE, headers.get("Accept").get(0)); + + for (Map.Entry> entry: headers.entrySet()) { + System.out.println(entry.getKey() + " " + entry.getValue()); + } + + return testName; + } + + private void checkUserAgent(String testName, String value) { + Headers headers = collectedHeaders.get(testName); + assertNotNull(headers, "No headers collected for test " + testName); + assertNotNull(headers.get("User-Agent"), "Missing 'User-Agent' header"); + assertEquals(value, headers.get("User-Agent").get(0)); + } + + @Test + void testNoRequestOptions() throws Exception { + Rest5Client llrc = Rest5Client.builder( + new HttpHost("http",httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) + ).build(); + + ElasticsearchTransport transport = newRest5ClientTransport(llrc, new SimpleJsonpMapper()); + ElasticsearchClient esClient = new ElasticsearchClient(transport); + + String id = checkHeaders(esClient); + checkUserAgent(id, Rest5ClientOptions.USER_AGENT_VALUE); + } + + @Test + void testTransportRequestOptions() throws Exception { + Rest5Client llrc = Rest5Client.builder( + new HttpHost("http",httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) + ).build(); + + ElasticsearchTransport transport = newRest5ClientTransport(llrc, new SimpleJsonpMapper(), + new Rest5ClientOptions.Builder(RequestOptions.DEFAULT.toBuilder()).build() + ); + ElasticsearchClient esClient = new ElasticsearchClient(transport); + + String id = checkHeaders(esClient); + checkUserAgent(id, Rest5ClientOptions.USER_AGENT_VALUE); + } + + @Test + void testClientRequestOptions() throws Exception { + Rest5Client llrc = Rest5Client.builder( + new HttpHost("http",httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) + ).build(); + + ElasticsearchTransport transport = newRest5ClientTransport(llrc, new SimpleJsonpMapper()); + ElasticsearchClient esClient = new ElasticsearchClient(transport).withTransportOptions( + new Rest5ClientOptions.Builder(RequestOptions.DEFAULT.toBuilder()).build() + ); + + String id = checkHeaders(esClient); + checkUserAgent(id, Rest5ClientOptions.USER_AGENT_VALUE); + } + + @Test + void testLambdaOptionsBuilder() throws Exception { + Rest5Client llrc = Rest5Client.builder( + new HttpHost("http",httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) + ).build(); + + ElasticsearchTransport transport = newRest5ClientTransport(llrc, new SimpleJsonpMapper()); + ElasticsearchClient esClient = new ElasticsearchClient(transport) + .withTransportOptions(o -> o + .addHeader("Foo", "bar") + .addHeader("x-elastic-client-meta", "foo-bar-client") + ); + + String id = checkHeaders(esClient); + checkUserAgent(id, Rest5ClientOptions.USER_AGENT_VALUE); + } + + @Test + void testRequestOptionsOverridingBuiltin() throws Exception { + RequestOptions options = RequestOptions.DEFAULT.toBuilder() + 
.addHeader("user-agent", "FooBarAgent/1.0") + .addHeader("x-elastic-client-meta", "foo-bar-client") + .build(); + + Rest5Client llrc = Rest5Client.builder( + new HttpHost("http",httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) + ).build(); + + ElasticsearchTransport transport = newRest5ClientTransport(llrc, new SimpleJsonpMapper(), new Rest5ClientOptions(options,false)); + ElasticsearchClient esClient = new ElasticsearchClient(transport); + // Should not override client meta + String id = checkHeaders(esClient); + // overriding user-agent is ok + checkUserAgent(id, "FooBarAgent/1.0"); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/TransportTest.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/TransportTest.java new file mode 100644 index 000000000..26d314e5c --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/TransportTest.java @@ -0,0 +1,157 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.json.jackson.JacksonJsonpMapper; +import co.elastic.clients.transport.ElasticsearchTransport; +import co.elastic.clients.transport.TransportException; +import co.elastic.clients.transport.http.RepeatableBodyResponse; +import co.elastic.clients.transport.http.TransportHttpClient; +import co.elastic.clients.transport.rest5_client.low_level.Response; +import co.elastic.clients.transport.rest5_client.low_level.RequestOptions; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.util.BinaryData; +import com.sun.net.httpserver.HttpServer; +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.Collections; + +import static co.elastic.clients.util.ContentType.APPLICATION_JSON; + +public class TransportTest extends Assertions { + + @Test + public void testXMLResponse() throws Exception { + HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), + 0), 0); + + httpServer.createContext("/_cat/indices", exchange -> { + exchange.sendResponseHeaders(401, 0); + OutputStream out = exchange.getResponseBody(); + out.write( + "Error".getBytes(StandardCharsets.UTF_8) + ); + out.close(); + }); + + httpServer.start(); + InetSocketAddress address = httpServer.getAddress(); + + Rest5Client restClient = Rest5Client + .builder(new HttpHost("http",httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) + .build(); + + ElasticsearchClient esClient = new ElasticsearchClient(new Rest5ClientTransport(restClient, + new JacksonJsonpMapper())); + + TransportException ex = Assertions.assertThrows( + TransportException.class, + () -> esClient.cat().indices() + ); + + httpServer.stop(0); + + assertEquals(401, ex.statusCode()); + assertEquals("es/cat.indices", ex.endpointId()); + + // Original response is transport-dependent + Response restClientResponse = (Response) ex.response().originalResponse(); + assertEquals(401, restClientResponse.getStatusCode()); + } + + + @Test + public void testOriginalJsonBodyRetrievalException() throws Exception { + HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), + 0), 0); + + httpServer.createContext("/_cat/indices", exchange -> { + exchange.getResponseHeaders().put("Content-Type", Collections.singletonList(APPLICATION_JSON)); + exchange.getResponseHeaders().put("X-Elastic-Product", Collections.singletonList("Elasticsearch" + )); + exchange.sendResponseHeaders(200, 0); + OutputStream out = exchange.getResponseBody(); + out.write( + "definitely not json".getBytes(StandardCharsets.UTF_8) + ); + out.close(); + }); + + httpServer.start(); + + Rest5Client restClient = Rest5Client + .builder(new HttpHost("http",httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) + .build(); + + // no transport options, response is not RepeatableBodyResponse, original body cannot be retrieved + ElasticsearchClient esClient = new ElasticsearchClient(new Rest5ClientTransport(restClient, + new JacksonJsonpMapper())); + + TransportException ex = Assertions.assertThrows( + TransportException.class, + () -> 
esClient.cat().indices() + ); + + assertEquals(200, ex.statusCode()); + assertNotEquals(RepeatableBodyResponse.class, ex.response().getClass()); + + // setting transport option + Rest5ClientOptions options = new Rest5ClientOptions(RequestOptions.DEFAULT, true); + + ElasticsearchTransport transport = new Rest5ClientTransport( + restClient, new JacksonJsonpMapper(), options); + + ElasticsearchClient esClientOptions = new ElasticsearchClient(transport); + + ex = Assertions.assertThrows( + TransportException.class, + () -> esClientOptions.cat().indices() + ); + + httpServer.stop(0); + + assertEquals(200, ex.statusCode()); + //TODO apparently the new byteentity is always repeatable + // no need for the whole RepeatableBodyResponse if true? + //assertEquals(RepeatableBodyResponse.class, ex.response().getClass()); + + try (TransportHttpClient.Response repeatableResponse = ex.response()){ + BinaryData body = repeatableResponse.body(); + StringBuilder sb = new StringBuilder(); + BufferedReader br = new BufferedReader(new InputStreamReader(body.asInputStream())); + String read; + + while ((read = br.readLine()) != null) { + sb.append(read); + } + br.close(); + assertEquals("definitely not json",sb.toString()); + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/TransportUtilsTest.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/TransportUtilsTest.java new file mode 100644 index 000000000..82bbb52e3 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/TransportUtilsTest.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client; + +import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.elasticsearch.ElasticsearchTestServer; +import co.elastic.clients.json.SimpleJsonpMapper; +import co.elastic.clients.transport.TransportUtils; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.message.BasicHeader; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.testcontainers.elasticsearch.ElasticsearchContainer; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.util.Base64; +import java.util.Optional; + +public class TransportUtilsTest extends Assertions { + + @Test + public void testNoSslContext() { + assertThrows( + SSLHandshakeException.class, + () -> checkConnection(null) + ); + } + + @Test + public void testCaCertificate() throws Exception { + byte[] cert = ElasticsearchTestServer.global().container().caCertAsBytes().get(); + + checkConnection( + TransportUtils.sslContextFromHttpCaCrt(new ByteArrayInputStream(cert)) + ); + } + + @Test void testCaFingerprint() throws Exception { + byte[] pemCert = ElasticsearchTestServer.global().container().caCertAsBytes().get(); + + CertificateFactory cf = CertificateFactory.getInstance("X.509"); + Certificate x509cert = cf.generateCertificate(new ByteArrayInputStream(pemCert)); + + // Compute SHA-256 fingerprint, which is what ES outputs at start time + String fingerprint = fingerprint(x509cert.getEncoded(), "SHA-256"); + + checkConnection( + TransportUtils.sslContextFromCaFingerprint(fingerprint) + ); + } + + @Test void testInvalidFingerprint() throws Exception { + // Build a dummy SHA-256 signature + String fingerprint = fingerprint("foobar".getBytes(StandardCharsets.UTF_8), "SHA-256"); + + assertThrows( + SSLHandshakeException.class, + () -> checkConnection( + TransportUtils.sslContextFromCaFingerprint(fingerprint) + ) + ); + } + + private void checkConnection(SSLContext sslContext) throws Exception { + ElasticsearchContainer container = ElasticsearchTestServer.global().container(); + + var creds = Base64.getEncoder().encodeToString("elastic:changeme".getBytes()); + + Rest5Client restClient = Rest5Client.builder(new HttpHost("https", "localhost", + container.getMappedPort(9200))) + .setSSLContext(Optional.ofNullable(sslContext).orElse(SSLContext.getDefault())) + .setDefaultHeaders(new Header[]{ + new BasicHeader("Authorization", "Basic " + creds) + }).build(); + + Rest5ClientTransport transport = new Rest5ClientTransport(restClient, SimpleJsonpMapper.INSTANCE); + ElasticsearchClient esClient = new ElasticsearchClient(transport); + + assertNotNull(esClient.info()); + } + + private String fingerprint(byte[] bytes, String algorithm) throws Exception { + byte[] fingerprint; + MessageDigest md = MessageDigest.getInstance(algorithm); + md.update(bytes); + fingerprint = md.digest(); + + StringBuilder sb = new StringBuilder(fingerprint.length * 2); + for(byte b: fingerprint) { + sb.append(String.format("%02x", b)); + } + + return sb.toString(); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncResponseConsumerTests.java 
b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncResponseConsumerTests.java new file mode 100644 index 000000000..3b4af2931 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/BasicAsyncResponseConsumerTests.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.ContentDecoder; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.junit.Assert; +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; + +import static org.mockito.Mockito.mock; + +public class BasicAsyncResponseConsumerTests extends RestClientTestCase { + + // maximum buffer that this test ends up allocating is 50MB + private static final int MAX_TEST_BUFFER_SIZE = 50 * 1024 * 1024; + + @Test + public void testResponseProcessing() throws Exception { + ContentDecoder contentDecoder = mock(ContentDecoder.class); + HttpContext httpContext = mock(HttpContext.class); + + AsyncResponseConsumer consumer = + new HttpAsyncResponseConsumerFactory.BasicAsyncResponseConsumerFactory(MAX_TEST_BUFFER_SIZE) + .createHttpAsyncResponseConsumer(); + + BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, "OK"); + ByteArrayEntity entity = new ByteArrayEntity("test".getBytes(), ContentType.APPLICATION_JSON); + httpResponse.setEntity(entity); + + // everything goes well, no exception thrown + consumer.consumeResponse(httpResponse, entity, httpContext, null); + consumer.consume(ByteBuffer.wrap("test".getBytes())); + } + + @Test + public void testBufferLimit() throws Exception { + HttpContext httpContext = mock(HttpContext.class); + + AsyncResponseConsumer consumer = + new HttpAsyncResponseConsumerFactory.BasicAsyncResponseConsumerFactory(MAX_TEST_BUFFER_SIZE) + .createHttpAsyncResponseConsumer(); + + ByteArrayEntity entity = new ByteArrayEntity("test".getBytes(), ContentType.APPLICATION_JSON); + BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, "OK"); + httpResponse.setEntity(entity); + + // should throw exception + consumer.consumeResponse(httpResponse, entity, httpContext, null); + Assert.assertThrows(ContentTooLongException.class, + () -> consumer.consume(ByteBuffer.allocate(MAX_TEST_BUFFER_SIZE + 1))); + } +} diff --git 
a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/DeadHostStateTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/DeadHostStateTests.java new file mode 100644 index 000000000..45fedefec --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/DeadHostStateTests.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.junit.jupiter.api.Test; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class DeadHostStateTests extends RestClientTestCase { + + private static long[] EXPECTED_TIMEOUTS_SECONDS = new long[]{60, 84, 120, 169, 240, 339, 480, 678, 960, + 1357, 1800}; + + @Test + public void testInitialDeadHostStateDefaultTimeSupplier() { + DeadHostState deadHostState = new DeadHostState(DeadHostState.DEFAULT_TIME_SUPPLIER); + long currentTime = System.nanoTime(); + assertThat(deadHostState.getDeadUntilNanos(), greaterThanOrEqualTo(currentTime)); + assertThat(deadHostState.getFailedAttempts(), equalTo(1)); + } + + @Test + public void testDeadHostStateFromPreviousDefaultTimeSupplier() { + DeadHostState previous = new DeadHostState(DeadHostState.DEFAULT_TIME_SUPPLIER); + int iters = randomIntBetween(5, 30); + for (int i = 0; i < iters; i++) { + DeadHostState deadHostState = new DeadHostState(previous); + assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos())); + assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1)); + previous = deadHostState; + } + } + + @Test + public void testCompareToTimeSupplier() { + int numObjects = randomIntBetween(EXPECTED_TIMEOUTS_SECONDS.length, 30); + DeadHostState[] deadHostStates = new DeadHostState[numObjects]; + final AtomicLong time = new AtomicLong(0); + for (int i = 0; i < numObjects; i++) { + if (i == 0) { + // this test requires a strictly increasing timer. This ensures that even if we call this + // time supplier in a very tight + // loop we always notice time moving forward. This does not happen for real timer + // implementations + // (e.g. on Linux clock_gettime provides microsecond resolution). 
+ deadHostStates[i] = new DeadHostState(time::incrementAndGet); + } else { + deadHostStates[i] = new DeadHostState(deadHostStates[i - 1]); + } + } + for (int k = 1; k < deadHostStates.length; k++) { + assertThat(deadHostStates[k - 1].getDeadUntilNanos(), + lessThan(deadHostStates[k].getDeadUntilNanos())); + assertThat(deadHostStates[k - 1], lessThan(deadHostStates[k])); + } + } + + @Test + public void testCompareToDifferingTimeSupplier() { + try { + new DeadHostState(DeadHostState.DEFAULT_TIME_SUPPLIER).compareTo(new DeadHostState(() -> 0L)); + fail("expected failure"); + } catch (IllegalArgumentException e) { + assertEquals( + "can't compare DeadHostStates holding different time suppliers as they may be based on " + + "different clocks", + e.getMessage() + ); + } + } + + @Test + public void testShallBeRetried() { + final AtomicLong time = new AtomicLong(0); + DeadHostState deadHostState = null; + for (int i = 0; i < EXPECTED_TIMEOUTS_SECONDS.length; i++) { + long expectedTimeoutSecond = EXPECTED_TIMEOUTS_SECONDS[i]; + if (i == 0) { + deadHostState = new DeadHostState(time::get); + } else { + deadHostState = new DeadHostState(deadHostState); + } + for (int j = 0; j < expectedTimeoutSecond; j++) { + time.addAndGet(TimeUnit.SECONDS.toNanos(1)); + assertThat(deadHostState.shallBeRetried(), is(false)); + } + int iters = randomIntBetween(5, 30); + for (int j = 0; j < iters; j++) { + time.addAndGet(TimeUnit.SECONDS.toNanos(1)); + assertThat(deadHostState.shallBeRetried(), is(true)); + } + } + } + + @Test + public void testDeadHostStateTimeouts() { + DeadHostState previous = new DeadHostState(() -> 0L); + for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) { + assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), + equalTo(expectedTimeoutsSecond)); + previous = new DeadHostState(previous); + } + // check that from here on the timeout does not increase + int iters = randomIntBetween(5, 30); + for (int i = 0; i < iters; i++) { + DeadHostState deadHostState = new DeadHostState(previous); + assertThat( + TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()), + equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1]) + ); + previous = deadHostState; + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/FailureTrackingResponseListenerTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/FailureTrackingResponseListenerTests.java new file mode 100644 index 000000000..9e3d03a6f --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/FailureTrackingResponseListenerTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; + +public class FailureTrackingResponseListenerTests extends RestClientTestCase { + + @Test + public void testOnSuccess() { + MockResponseListener responseListener = new MockResponseListener(); + Rest5Client.FailureTrackingResponseListener listener = + new Rest5Client.FailureTrackingResponseListener(responseListener); + + final Response response = mockResponse(); + listener.onSuccess(response); + assertSame(response, responseListener.lastResponse.get()); + assertNull(responseListener.lastException.get()); + } + + @Test + public void testOnFailure() { + MockResponseListener responseListener = new MockResponseListener(); + Rest5Client.FailureTrackingResponseListener listener = + new Rest5Client.FailureTrackingResponseListener(responseListener); + int numIters = randomIntBetween(1, 10); + Exception[] expectedExceptions = new Exception[numIters]; + for (int i = 0; i < numIters; i++) { + RuntimeException runtimeException = new RuntimeException("test" + i); + expectedExceptions[i] = runtimeException; + listener.trackFailure(runtimeException); + assertNull(responseListener.lastResponse.get()); + assertNull(responseListener.lastException.get()); + } + + if (randomBoolean()) { + Response response = mockResponse(); + listener.onSuccess(response); + assertSame(response, responseListener.lastResponse.get()); + assertNull(responseListener.lastException.get()); + } else { + RuntimeException runtimeException = new RuntimeException("definitive"); + listener.onDefinitiveFailure(runtimeException); + assertNull(responseListener.lastResponse.get()); + Throwable exception = responseListener.lastException.get(); + assertSame(runtimeException, exception); + + int i = numIters - 1; + do { + assertNotNull(exception.getSuppressed()); + assertEquals(1, exception.getSuppressed().length); + assertSame(expectedExceptions[i--], exception.getSuppressed()[0]); + exception = exception.getSuppressed()[0]; + } while (i >= 0); + } + } + + private static class MockResponseListener implements ResponseListener { + private final AtomicReference lastResponse = new AtomicReference<>(); + private final AtomicReference lastException = new AtomicReference<>(); + + @Override + public void onSuccess(Response response) { + if (!this.lastResponse.compareAndSet(null, response)) { + throw new IllegalStateException("onSuccess was called multiple times"); + } + } + + @Override + public void onFailure(Exception exception) { + if (!this.lastException.compareAndSet(null, exception)) { + throw new IllegalStateException("onFailure was called multiple times"); + } + } + } + + private static Response mockResponse() { + ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); + BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, "OK"); + return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse); + } +} diff --git 
a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/HasAttributeNodeSelectorTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/HasAttributeNodeSelectorTests.java new file mode 100644 index 000000000..4a011bb64 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/HasAttributeNodeSelectorTests.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.junit.Assert.assertEquals; + +public class HasAttributeNodeSelectorTests extends RestClientTestCase { + + @Test + public void testHasAttribute() { + Node hasAttributeValue = dummyNode(singletonMap("attr", singletonList("val"))); + Node hasAttributeButNotValue = dummyNode(singletonMap("attr", singletonList("notval"))); + Node hasAttributeValueInList = dummyNode(singletonMap("attr", Arrays.asList("val", "notval"))); + Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val"))); + List nodes = new ArrayList<>(); + nodes.add(hasAttributeValue); + nodes.add(hasAttributeButNotValue); + nodes.add(hasAttributeValueInList); + nodes.add(notHasAttribute); + List expected = new ArrayList<>(); + expected.add(hasAttributeValue); + expected.add(hasAttributeValueInList); + new HasAttributeNodeSelector("attr", "val").select(nodes); + assertEquals(expected, nodes); + } + + private static Node dummyNode(Map> attributes) { + final Set roles = new TreeSet<>(); + if (randomBoolean()) { + roles.add("master"); + } + if (randomBoolean()) { + roles.add("data"); + } + if (randomBoolean()) { + roles.add("ingest"); + } + return new Node( + new HttpHost("dummy"), + Collections.emptySet(), + randomAsciiAlphanumOfLength(5), + randomAsciiAlphanumOfLength(5), + new Node.Roles(roles), + attributes + ); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/HostsTrackingFailureListener.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/HostsTrackingFailureListener.java new file mode 100644 index 000000000..eb3b350e0 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/HostsTrackingFailureListener.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpHost; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; + +/** + * {@link Rest5Client.FailureListener} implementation that tracks which hosts it has been called for. + */ +class HostsTrackingFailureListener extends Rest5Client.FailureListener { + private volatile Set<HttpHost> httpHosts = new HashSet<>(); + + @Override + public void onFailure(Node node) { + httpHosts.add(node.getHost()); + } + + void assertCalled(List<Node> nodes) { + HttpHost[] hosts = new HttpHost[nodes.size()]; + for (int i = 0; i < nodes.size(); i++) { + hosts[i] = nodes.get(i).getHost(); + } + assertCalled(hosts); + } + + void assertCalled(HttpHost... hosts) { + assertEquals(hosts.length, this.httpHosts.size()); + assertThat(this.httpHosts, containsInAnyOrder(hosts)); + this.httpHosts.clear(); + } + + void assertNotCalled() { + assertEquals(0, httpHosts.size()); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/NodeSelectorTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/NodeSelectorTests.java new file mode 100644 index 000000000..34087adb8 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/NodeSelectorTests.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
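For orientation before the NodeSelectorTests that follow: every selector in these tests is driven through a single select(...) call that mutates the given node collection in place. A minimal custom selector could look like the sketch below; it is illustrative only, assumes NodeSelector is the single-method interface these calls suggest, and the "warm-" naming convention is invented for the example.

// Hypothetical selector that keeps only nodes whose name starts with "warm-".
NodeSelector warmNodesOnly = nodes -> {
    java.util.Iterator<Node> it = nodes.iterator();
    while (it.hasNext()) {
        Node node = it.next();
        if (node.getName() == null || !node.getName().startsWith("warm-")) {
            it.remove();
        }
    }
};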
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.TreeSet; + +import static org.junit.Assert.assertEquals; + +public class NodeSelectorTests extends RestClientTestCase { + + @Test + public void testAny() { + List<Node> nodes = new ArrayList<>(); + int size = randomIntBetween(2, 5); + for (int i = 0; i < size; i++) { + nodes.add(dummyNode(randomBoolean(), randomBoolean(), randomBoolean())); + } + List<Node> expected = new ArrayList<>(nodes); + NodeSelector.ANY.select(nodes); + assertEquals(expected, nodes); + } + + @Test + public void testNotMasterOnly() { + Node masterOnly = dummyNode(true, false, false); + Node all = dummyNode(true, true, true); + Node masterAndData = dummyNode(true, true, false); + Node masterAndIngest = dummyNode(true, false, true); + Node coordinatingOnly = dummyNode(false, false, false); + Node ingestOnly = dummyNode(false, false, true); + Node data = dummyNode(false, true, randomBoolean()); + Node dataContent = dummyNode(false, false, false, true, false, false, false, false); + Node dataHot = dummyNode(false, false, false, false, true, false, false, false); + Node dataWarm = dummyNode(false, false, false, false, false, true, false, false); + Node dataCold = dummyNode(false, false, false, false, false, false, true, false); + Node dataFrozen = dummyNode(false, false, false, false, false, false, false, true); + List<Node> nodes = new ArrayList<>(); + nodes.add(masterOnly); + nodes.add(all); + nodes.add(masterAndData); + nodes.add(masterAndIngest); + nodes.add(coordinatingOnly); + nodes.add(ingestOnly); + nodes.add(data); + nodes.add(dataContent); + nodes.add(dataHot); + nodes.add(dataWarm); + nodes.add(dataCold); + nodes.add(dataFrozen); + Collections.shuffle(nodes, new Random()); + List<Node> expected = new ArrayList<>(nodes); + expected.remove(masterOnly); + NodeSelector.SKIP_DEDICATED_MASTERS.select(nodes); + assertEquals(expected, nodes); + } + + private static Node dummyNode(boolean master, boolean data, boolean ingest) { + return dummyNode(master, data, ingest, false, false, false, false, false); + } + + private static Node dummyNode( + boolean master, + boolean data, + boolean ingest, + boolean dataContent, + boolean dataHot, + boolean dataWarm, + boolean dataCold, + boolean dataFrozen + ) { + final Set<String> roles = new TreeSet<>(); + if (master) { + roles.add("master"); + } + if (data) { + roles.add("data"); + } + if (dataContent) { + roles.add("data_content"); + } + if (dataHot) { + roles.add("data_hot"); + } + if (dataWarm) { + roles.add("data_warm"); + } + if (dataCold) { + roles.add("data_cold"); + } + if (dataFrozen) { + roles.add("data_frozen"); + } + if (ingest) { + roles.add("ingest"); + } + return new Node( + new HttpHost("dummy"), + Collections.emptySet(), + randomAsciiAlphanumOfLength(5), + randomAsciiAlphanumOfLength(5), + new Node.Roles(roles), + Collections.<String, List<String>>emptyMap() + ); + } + +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/NodeTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/NodeTests.java new file mode 100644 index 000000000..5cb74461d --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/NodeTests.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elasticsearch B.V.
under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.TreeSet; + +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class NodeTests extends RestClientTestCase { + + @Test + public void testToString() { + Map> attributes = new HashMap<>(); + attributes.put("foo", singletonList("bar")); + attributes.put("baz", Arrays.asList("bort", "zoom")); + assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString()); + assertEquals( + "[host=http://1, attributes={foo=[bar], baz=[bort, zoom]}]", + new Node(new HttpHost("1"), null, null, null, null, attributes).toString() + ); + assertEquals( + "[host=http://1, roles=data,ingest,master]", + new Node(new HttpHost("1"), null, null, null, new Node.Roles(new TreeSet<>(Arrays.asList("master", "data", "ingest"))), null) + .toString() + ); + assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"), null, null, "ver", null, null).toString()); + assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"), null, "nam", null, null, null).toString()); + assertEquals( + "[host=http://1, bound=[http://1, http://2]]", + new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null, null) + .toString() + ); + assertEquals( + "[host=http://1, bound=[http://1, http://2], " + + "name=nam, version=ver, roles=master, attributes={foo=[bar], baz=[bort, zoom]}]", + new Node( + new HttpHost("1"), + new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), + "nam", + "ver", + new Node.Roles(Collections.singleton("master")), + attributes + ).toString() + ); + } + + @Test + public void testEqualsAndHashCode() { + HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); + Node node = new Node( + host, + randomBoolean() ? null : singleton(host), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : new Node.Roles(new TreeSet<>(Arrays.asList("master", "data", "ingest"))), + randomBoolean() ? 
null : singletonMap("foo", singletonList("bar")) + ); + assertFalse(node.equals(null)); + assertTrue(node.equals(node)); + assertEquals(node.hashCode(), node.hashCode()); + Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles(), node.getAttributes()); + assertTrue(node.equals(copy)); + assertEquals(node.hashCode(), copy.hashCode()); + assertFalse( + node.equals( + new Node( + new HttpHost(host.toHostString() + "changed"), + node.getBoundHosts(), + node.getName(), + node.getVersion(), + node.getRoles(), + node.getAttributes() + ) + ) + ); + assertFalse( + node.equals( + new Node( + host, + new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), + node.getName(), + node.getVersion(), + node.getRoles(), + node.getAttributes() + ) + ) + ); + assertFalse( + node.equals( + new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles(), node.getAttributes()) + ) + ); + assertFalse( + node.equals( + new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles(), node.getAttributes()) + ) + ); + assertFalse( + node.equals( + new Node( + host, + node.getBoundHosts(), + node.getName(), + node.getVersion(), + new Node.Roles(Collections.emptySet()), + node.getAttributes() + ) + ) + ); + assertFalse( + node.equals( + new Node( + host, + node.getBoundHosts(), + node.getName(), + node.getVersion(), + node.getRoles(), + singletonMap("bort", singletonList("bing")) + ) + ) + ); + } + + @Test + public void testDataRole() { + Node.Roles roles = new Node.Roles(new TreeSet<>(Arrays.asList("data_hot"))); + assertTrue(roles.hasDataHotRole()); + assertTrue(roles.canContainData()); + roles = new Node.Roles(new TreeSet<>(Arrays.asList("data_warm"))); + assertTrue(roles.hasDataWarmRole()); + assertTrue(roles.canContainData()); + roles = new Node.Roles(new TreeSet<>(Arrays.asList("data_cold"))); + assertTrue(roles.hasDataColdRole()); + assertTrue(roles.canContainData()); + roles = new Node.Roles(new TreeSet<>(Arrays.asList("data_frozen"))); + assertTrue(roles.hasDataFrozenRole()); + assertTrue(roles.canContainData()); + roles = new Node.Roles(new TreeSet<>(Arrays.asList("data_content"))); + assertTrue(roles.hasDataContentRole()); + assertTrue(roles.canContainData()); + roles = new Node.Roles(new TreeSet<>(Arrays.asList("data"))); + assertTrue(roles.hasDataRole()); + assertTrue(roles.canContainData()); + roles = new Node.Roles(new TreeSet<>(Arrays.asList("data_foo"))); + assertTrue(roles.canContainData()); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/PreferHasAttributeNodeSelectorTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/PreferHasAttributeNodeSelectorTests.java new file mode 100644 index 000000000..c63bcf60d --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/PreferHasAttributeNodeSelectorTests.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.junit.Assert.assertEquals; + +public class PreferHasAttributeNodeSelectorTests extends RestClientTestCase { + + @Test + public void testFoundPreferHasAttribute() { + Node hasAttributeValue = dummyNode(singletonMap("attr", singletonList("val"))); + Node hasAttributeButNotValue = dummyNode(singletonMap("attr", singletonList("notval"))); + Node hasAttributeValueInList = dummyNode(singletonMap("attr", Arrays.asList("val", "notval"))); + Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val"))); + List nodes = new ArrayList<>(); + nodes.add(hasAttributeValue); + nodes.add(hasAttributeButNotValue); + nodes.add(hasAttributeValueInList); + nodes.add(notHasAttribute); + List expected = new ArrayList<>(); + expected.add(hasAttributeValue); + expected.add(hasAttributeValueInList); + new PreferHasAttributeNodeSelector("attr", "val").select(nodes); + assertEquals(expected, nodes); + } + + @Test + public void testNotFoundPreferHasAttribute() { + Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val"))); + List nodes = new ArrayList<>(); + nodes.add(notHasAttribute); + List expected = new ArrayList<>(); + expected.add(notHasAttribute); + new PreferHasAttributeNodeSelector("attr", "val").select(nodes); + assertEquals(expected, nodes); + } + + private static Node dummyNode(Map> attributes) { + final Set roles = new TreeSet<>(); + if (randomBoolean()) { + roles.add("master"); + } + if (randomBoolean()) { + roles.add("data"); + } + if (randomBoolean()) { + roles.add("ingest"); + } + return new Node( + new HttpHost("dummy"), + Collections.emptySet(), + randomAsciiAlphanumOfLength(5), + randomAsciiAlphanumOfLength(5), + new Node.Roles(roles), + attributes + ); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestLoggerTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestLoggerTests.java new file mode 100644 index 000000000..a9677aaae --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestLoggerTests.java @@ -0,0 +1,205 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
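As context for the RequestLoggerTests that follow: buildTraceRequest renders a request as a replayable curl command and buildTraceResponse renders the response as "#"-prefixed lines. A rough sketch of how they are invoked, using only types exercised in these tests (the host, endpoint and printed values are examples, not captured output, and the calls belong inside a method that declares the checked exceptions):

HttpHost host = new HttpHost("http", "localhost", 9200);
HttpUriRequest request = new HttpGetWithEntity(new URI("/index/type/_api"));
String traceRequest = RequestLogger.buildTraceRequest(request, host);
// e.g. curl -iX GET 'http://localhost:9200/index/type/_api'
String traceResponse = RequestLogger.buildTraceResponse(new BasicClassicHttpResponse(200, "OK"));
// e.g. "# 200", then one "# header: value" line per header, then the "#"-prefixed body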
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + + +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.BasicHeader; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.canHaveBody; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; + +public class RequestLoggerTests extends RestClientTestCase { + + @Test + public void testTraceRequest() throws IOException, URISyntaxException, ParseException { + HttpHost host = new HttpHost(randomBoolean() ? 
"http" : "https", "localhost", 9200); + String expectedEndpoint = "/index/type/_api"; + URI uri; + if (randomBoolean()) { + uri = new URI(expectedEndpoint); + } else { + uri = new URI("index/type/_api"); + } + HttpUriRequest request = randomHttpRequest(uri); + String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'"; + boolean hasBody = canHaveBody((HttpUriRequestBase) request) && randomBoolean(); + String requestBody = "{ \"field\": \"value\" }"; + if (hasBody) { + expected += " -d '" + requestBody + "'"; + HttpEntity entity; + switch (randomIntBetween(0, 4)) { + case 0: + case 2: + entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); + break; + case 1: + entity = new InputStreamEntity( + new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON + ); + break; + case 3: + entity = new ByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), + ContentType.APPLICATION_JSON); + break; + case 4: + // Evil entity without a charset + entity = new StringEntity(requestBody, ContentType.create("application/json", + (Charset) null)); + break; + default: + throw new UnsupportedOperationException(); + } + request.setEntity(entity); + } + String traceRequest = RequestLogger.buildTraceRequest(request, host); + assertThat(traceRequest, equalTo(expected)); + if (hasBody) { + // check that the body is still readable as most entities are not repeatable + String body = EntityUtils.toString(request.getEntity(), + StandardCharsets.UTF_8); + assertThat(body, equalTo(requestBody)); + } + } + + @Test + public void testTraceResponse() throws IOException, ParseException { + int statusCode = randomIntBetween(200, 599); + String reasonPhrase = "REASON"; + BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(statusCode, reasonPhrase); + String expected = "# " + statusCode; + int numHeaders = randomIntBetween(0, 3); + for (int i = 0; i < numHeaders; i++) { + httpResponse.setHeader("header" + i, "value"); + expected += "\n# header" + i + ": value"; + } + expected += "\n#"; + boolean hasBody = randomBoolean(); + String responseBody = "{\n \"field\": \"value\"\n}"; + if (hasBody) { + expected += "\n# {"; + expected += "\n# \"field\": \"value\""; + expected += "\n# }"; + HttpEntity entity; + switch (randomIntBetween(0, 2)) { + case 0: + entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON); + break; + case 1: + // test a non repeatable entity + entity = new InputStreamEntity( + new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON + ); + break; + case 2: + // Evil entity without a charset + entity = new StringEntity(responseBody, ContentType.create("application/json", + (Charset) null)); + break; + default: + throw new UnsupportedOperationException(); + } + httpResponse.setEntity(entity); + } + String traceResponse = RequestLogger.buildTraceResponse(httpResponse); + assertThat(traceResponse, equalTo(expected)); + if (hasBody) { + // check that the body is still readable as most entities are not repeatable + String body = EntityUtils.toString(httpResponse.getEntity(), StandardCharsets.UTF_8); + assertThat(body, equalTo(responseBody)); + } + } + + @Test + public void testResponseWarnings() throws Exception { + HttpHost host = new HttpHost("localhost", 9200); + HttpUriRequest request = randomHttpRequest(new URI("/index/type/_api")); + int numWarnings = randomIntBetween(1, 5); + StringBuilder expected = new StringBuilder("request 
[").append(request.getMethod()) + .append(" ") + .append(host) + .append("/index/type/_api] returned ") + .append(numWarnings) + .append(" warnings: "); + Header[] warnings = new Header[numWarnings]; + for (int i = 0; i < numWarnings; i++) { + String warning = "this is warning number " + i; + warnings[i] = new BasicHeader("Warning", warning); + if (i > 0) { + expected.append(","); + } + expected.append("[").append(warning).append("]"); + } + assertEquals(expected.toString(), RequestLogger.buildWarningMessage(request, host, warnings)); + } + + private static HttpUriRequest randomHttpRequest(URI uri) { + int requestType = randomIntBetween(0, 7); + switch (requestType) { + case 0: + return new HttpGetWithEntity(uri); + case 1: + return new HttpPost(uri); + case 2: + return new HttpPut(uri); + case 3: + return new HttpDeleteWithEntity(uri); + case 4: + return new HttpHead(uri); + case 5: + return new HttpTrace(uri); + case 6: + return new HttpOptions(uri); + case 7: + return new HttpPatch(uri); + default: + throw new UnsupportedOperationException(); + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestOptionsTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestOptionsTests.java new file mode 100644 index 000000000..a94e23436 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestOptionsTests.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.util.Timeout; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; + +public class RequestOptionsTests extends RestClientTestCase { + + @Test + public void testDefault() { + assertEquals(Collections.

emptyList(), RequestOptions.DEFAULT.getHeaders()); + assertEquals(Collections.emptyMap(), RequestOptions.DEFAULT.getParameters()); + assertEquals(HttpAsyncResponseConsumerFactory.DEFAULT, + RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory()); + assertEquals(RequestOptions.DEFAULT, RequestOptions.DEFAULT.toBuilder().build()); + } + + @Test + public void testAddHeader() { + try { + randomBuilder().addHeader(null, randomAsciiLettersOfLengthBetween(3, 10)); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("header name cannot be null", e.getMessage()); + } + + try { + randomBuilder().addHeader(randomAsciiLettersOfLengthBetween(3, 10), null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("header value cannot be null", e.getMessage()); + } + + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + int numHeaders = randomIntBetween(0, 5); + List<Header>
headers = new ArrayList<>(); + for (int i = 0; i < numHeaders; i++) { + Header header = new RequestOptions.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), + randomAsciiAlphanumOfLength(3)); + headers.add(header); + builder.addHeader(header.getName(), header.getValue()); + } + RequestOptions options = builder.build(); + assertEquals(headers, options.getHeaders()); + + try { + options.getHeaders() + .add(new RequestOptions.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), + randomAsciiAlphanumOfLength(3))); + fail("expected failure"); + } catch (UnsupportedOperationException e) { + assertNull(e.getMessage()); + } + } + + @Test + public void testSetHttpAsyncResponseConsumerFactory() { + try { + RequestOptions.DEFAULT.toBuilder().setHttpAsyncResponseConsumerFactory(null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("httpAsyncResponseConsumerFactory cannot be null", e.getMessage()); + } + + HttpAsyncResponseConsumerFactory factory = mock(HttpAsyncResponseConsumerFactory.class); + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + builder.setHttpAsyncResponseConsumerFactory(factory); + RequestOptions options = builder.build(); + assertSame(factory, options.getHttpAsyncResponseConsumerFactory()); + } + + @Test + public void testAddParameters() { + try { + randomBuilder().addParameter(null, randomAsciiLettersOfLengthBetween(3, 10)); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("parameter key cannot be null", e.getMessage()); + } + + try { + randomBuilder().addParameter(randomAsciiLettersOfLengthBetween(3, 10), null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("parameter value cannot be null", e.getMessage()); + } + + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + int numParameters = randomIntBetween(0, 5); + Map parameters = new HashMap<>(); + for (int i = 0; i < numParameters; i++) { + String key = randomAsciiAlphanumOfLengthBetween(5, 10); + String value = randomAsciiAlphanumOfLength(3); + + parameters.put(key, value); + builder.addParameter(key, value); + } + RequestOptions options = builder.build(); + assertEquals(parameters, options.getParameters()); + } + + @Test + public void testSetRequestBuilder() { + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + + RequestConfig.Builder requestConfigBuilder = RequestConfig.custom(); + int socketTimeout = 10000; + int connectTimeout = 100; + requestConfigBuilder.setConnectionRequestTimeout(Timeout.ofMilliseconds(socketTimeout)) + .setConnectTimeout(Timeout.ofMilliseconds(connectTimeout)); + RequestConfig requestConfig = requestConfigBuilder.build(); + + builder.setRequestConfig(requestConfig); + RequestOptions options = builder.build(); + assertSame(options.getRequestConfig(), requestConfig); + assertEquals(options.getRequestConfig().getConnectionRequestTimeout(), Timeout.ofMilliseconds(socketTimeout)); + assertEquals(options.getRequestConfig().getConnectTimeout(), Timeout.ofMilliseconds(connectTimeout)); + } + + @Test + public void testEqualsAndHashCode() { + RequestOptions request = randomBuilder().build(); + assertEquals(request, request); + + RequestOptions copy = copy(request); + assertEquals(request, copy); + assertEquals(copy, request); + assertEquals(request.hashCode(), copy.hashCode()); + + RequestOptions mutant = mutate(request); + assertNotEquals(request, mutant); + assertNotEquals(mutant, request); + } + + static RequestOptions.Builder randomBuilder() { + 
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + + if (randomBoolean()) { + int headerCount = randomIntBetween(1, 5); + for (int i = 0; i < headerCount; i++) { + builder.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + } + } + + if (randomBoolean()) { + builder.setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory.DEFAULT); + } + + if (randomBoolean()) { + builder.setWarningsHandler(randomBoolean() ? WarningsHandler.STRICT : WarningsHandler.PERMISSIVE); + } + + if (randomBoolean()) { + builder.setRequestConfig(RequestConfig.custom().build()); + } + + return builder; + } + + private static RequestOptions copy(RequestOptions options) { + return options.toBuilder().build(); + } + + private static RequestOptions mutate(RequestOptions options) { + RequestOptions.Builder mutant = options.toBuilder(); + int mutationType = randomIntBetween(0, 2); + switch (mutationType) { + case 0: + mutant.addHeader("extra", "m"); + return mutant.build(); + case 1: + mutant.setHttpAsyncResponseConsumerFactory(new HttpAsyncResponseConsumerFactory.BasicAsyncResponseConsumerFactory(5)); + return mutant.build(); + case 2: + mutant.setWarningsHandler(warnings -> { + fail("never called"); + return false; + }); + return mutant.build(); + default: + throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]"); + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestTests.java new file mode 100644 index 000000000..3f0f0e76a --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RequestTests.java @@ -0,0 +1,253 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
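The RequestTests below exercise the same Request surface that callers use directly; a compact usage sketch that combines it with the RequestOptions builder shown above, restricted to calls these tests already make (the endpoint, parameter and header values are placeholders):

Request request = new Request("POST", "/my-index/_search");
request.addParameter("typed_keys", "true");
request.setJsonEntity("{\"query\":{\"match_all\":{}}}");
RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
options.addHeader("X-Opaque-Id", "example-search");
request.setOptions(options);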
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +public class RequestTests extends RestClientTestCase { + + @Test + public void testConstructor() { + final String method = randomFrom(new String[] { "GET", "PUT", "POST", "HEAD", "DELETE" }); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + + try { + new Request(null, endpoint); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("method cannot be null", e.getMessage()); + } + + try { + new Request(method, null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("endpoint cannot be null", e.getMessage()); + } + + final Request request = new Request(method, endpoint); + assertEquals(method, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + } + + @Test + public void testAddParameters() { + final String method = randomFrom(new String[] { "GET", "PUT", "POST", "HEAD", "DELETE" }); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + int parametersCount = randomIntBetween(1, 3); + final Map parameters = new HashMap<>(parametersCount); + while (parameters.size() < parametersCount) { + parameters.put(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5)); + } + Request request = new Request(method, endpoint); + + try { + request.addParameter(null, "value"); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("url parameter name cannot be null", e.getMessage()); + } + + for (Map.Entry entry : parameters.entrySet()) { + request.addParameter(entry.getKey(), entry.getValue()); + } + assertEquals(parameters, request.getParameters()); + + // Test that adding parameters with a null value is ok. + request.addParameter("is_null", null); + parameters.put("is_null", null); + assertEquals(parameters, request.getParameters()); + + // Test that adding a duplicate parameter fails + String firstValue = randomBoolean() ? null : "value"; + request.addParameter("name", firstValue); + try { + request.addParameter("name", randomBoolean() ? firstValue : "second_value"); + fail("expected failure"); + } catch (IllegalArgumentException e) { + assertEquals("url parameter [name] has already been set to [" + firstValue + "]", e.getMessage()); + } + } + + @Test + public void testSetEntity() { + final String method = randomFrom(new String[] { "GET", "PUT", "POST", "HEAD", "DELETE" }); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + final HttpEntity entity = randomBoolean() + ? 
new StringEntity(randomAsciiLettersOfLengthBetween(1, 100), ContentType.TEXT_PLAIN) + : null; + + Request request = new Request(method, endpoint); + request.setEntity(entity); + assertEquals(entity, request.getEntity()); + } + + @Test + public void testSetJsonEntity() throws IOException { + final String method = randomFrom(new String[] { "GET", "PUT", "POST", "HEAD", "DELETE" }); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + + Request request = new Request(method, endpoint); + assertNull(request.getEntity()); + + final String json = randomAsciiLettersOfLengthBetween(1, 100); + request.setJsonEntity(json); + assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType()); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + request.getEntity().writeTo(os); + assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); + } + + @Test + public void testSetOptions() { + final String method = randomFrom(new String[] { "GET", "PUT", "POST", "HEAD", "DELETE" }); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + Request request = new Request(method, endpoint); + + try { + request.setOptions((RequestOptions) null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("options cannot be null", e.getMessage()); + } + + try { + request.setOptions((RequestOptions.Builder) null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("options cannot be null", e.getMessage()); + } + + RequestOptions.Builder builder = RequestOptionsTests.randomBuilder(); + request.setOptions(builder); + assertEquals(builder.build(), request.getOptions()); + + builder = RequestOptionsTests.randomBuilder(); + RequestOptions options = builder.build(); + request.setOptions(options); + assertSame(options, request.getOptions()); + } + + @Test + public void testEqualsAndHashCode() { + Request request = randomRequest(); + assertEquals(request, request); + + Request copy = copy(request); + assertEquals(request, copy); + assertEquals(copy, request); + assertEquals(request.hashCode(), copy.hashCode()); + + Request mutant = mutate(request); + assertNotEquals(request, mutant); + assertNotEquals(mutant, request); + } + + private static Request randomRequest() { + Request request = new Request( + randomFrom(new String[] { "GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS" }), + randomAsciiAlphanumOfLength(5) + ); + + int parameterCount = randomIntBetween(0, 5); + for (int i = 0; i < parameterCount; i++) { + request.addParameter(randomAsciiAlphanumOfLength(i), randomAsciiLettersOfLength(3)); + } + + if (randomBoolean()) { + if (randomBoolean()) { + request.setJsonEntity(randomAsciiAlphanumOfLength(10)); + } else { + request.setEntity( + randomFrom( + new HttpEntity[] { + new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON) } + ) + ); + } + } + + if (randomBoolean()) { + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setHttpAsyncResponseConsumerFactory(new HttpAsyncResponseConsumerFactory.BasicAsyncResponseConsumerFactory(1)); + request.setOptions(options); + } + + return request; + } + + private static Request copy(Request request) { + Request copy = new Request(request.getMethod(), request.getEndpoint()); + copyMutables(request, copy); + return copy; + } + + private static Request mutate(Request request) { + if (randomBoolean()) { + // Mutate 
request or method but keep everything else constant + Request mutant = randomBoolean() + ? new Request(request.getMethod() + "m", request.getEndpoint()) + : new Request(request.getMethod(), request.getEndpoint() + "m"); + copyMutables(request, mutant); + return mutant; + } + Request mutant = copy(request); + int mutationType = randomIntBetween(0, 2); + switch (mutationType) { + case 0: + mutant.addParameter(randomAsciiAlphanumOfLength(mutant.getParameters().size() + 4), "extra"); + return mutant; + case 1: + mutant.setJsonEntity("mutant"); // randomRequest can't produce this value + return mutant; + case 2: + RequestOptions.Builder options = mutant.getOptions().toBuilder(); + options.addHeader("extra", "m"); + mutant.setOptions(options); + return mutant; + default: + throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]"); + } + } + + private static void copyMutables(Request from, Request to) { + for (Map.Entry param : from.getParameters().entrySet()) { + to.addParameter(param.getKey(), param.getValue()); + } + to.setEntity(from.getEntity()); + to.setOptions(from.getOptions()); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/ResponseExceptionTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/ResponseExceptionTests.java new file mode 100644 index 000000000..12344c739 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/ResponseExceptionTests.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Locale; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; + +public class ResponseExceptionTests extends RestClientTestCase { + + + @Test + public void testResponseException() throws IOException, ParseException { + ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); + BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(500, "Internal Server Error"); + + String responseBody = "{\"error\":{\"root_cause\": {}}}"; + boolean hasBody = randomBoolean(); + if (hasBody) { + HttpEntity entity; + if (randomBoolean()) { + entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON); + } else { + // test a non repeatable entity + entity = new InputStreamEntity( + new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON + ); + } + httpResponse.setEntity(entity); + } + + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); + HttpHost httpHost = new HttpHost("localhost", 9200); + Response response = new Response(requestLine, httpHost, httpResponse); + ResponseException responseException = new ResponseException(response); + + assertSame(response, responseException.getResponse()); + if (hasBody) { + assertEquals(responseBody, EntityUtils.toString(responseException.getResponse().getEntity())); + } else { + assertNull(responseException.getResponse().getEntity()); + } + + String message = String.format( + Locale.ROOT, + "method [%s], host [%s], URI [%s], status line [%s]", + response.getRequestLine().getMethod(), + response.getHost(), + response.getRequestLine().getUri(), + response.getStatusCode() + ); + + if (hasBody) { + message += "\n" + responseBody; + } + assertEquals(message, responseException.getMessage()); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientBuilderIntegTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientBuilderIntegTests.java new file mode 100644 index 000000000..e1144ccec --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientBuilderIntegTests.java @@ -0,0 +1,227 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
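The ResponseExceptionTests above cover the failure shape callers typically handle around performRequest; a minimal sketch, assuming Rest5Client keeps the legacy behaviour of surfacing non-2xx responses as ResponseException (the client variable and endpoint are placeholders, and the snippet belongs in a method that declares IOException):

try {
    Response response = client.performRequest(new Request("GET", "/_cluster/health"));
    // happy path: inspect response.getStatusCode(), response.getEntity(), ...
} catch (ResponseException e) {
    // the failed Response is still available for inspection
    Response failed = e.getResponse();
    System.err.println(failed.getStatusCode() + " returned by " + failed.getHost());
}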
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpsConfigurator; +import com.sun.net.httpserver.HttpsServer; +import org.apache.hc.core5.http.HttpHost; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.TrustManagerFactory; +import java.io.IOException; +import java.io.InputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.security.KeyFactory; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.security.spec.PKCS8EncodedKeySpec; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +/** + * Integration test to validate the builder builds a client with the correct configuration + */ +public class RestClientBuilderIntegTests extends RestClientTestCase { + + private static HttpsServer httpsServer; + private static String resourcePath = "/co/elastic/clients/transport/rest5_client/low_level"; + + @BeforeAll + public static void startHttpServer() throws Exception { + httpsServer = MockHttpServer.createHttps(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0) + , 0); + httpsServer.setHttpsConfigurator(new HttpsConfigurator(getSslContext())); + httpsServer.createContext("/", new ResponseHandler()); + httpsServer.start(); + } + + private static class ResponseHandler implements HttpHandler { + @Override + public void handle(HttpExchange httpExchange) throws IOException { + httpExchange.sendResponseHeaders(200, -1); + httpExchange.close(); + } + } + + @AfterAll + public static void stopHttpServers() throws IOException { + httpsServer.stop(0); + httpsServer = null; + } + + @Test + public void testBuilderUsesDefaultSSLContext() throws Exception { + assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); + final SSLContext defaultSSLContext = SSLContext.getDefault(); + try { + try (Rest5Client client = buildRestClient()) { + try { + client.performRequest(new Request("GET", "/")); + fail("connection should have been rejected due to SSL handshake"); + } catch (Exception e) { + assertThat(e, instanceOf(SSLHandshakeException.class)); + } + } + + SSLContext.setDefault(getSslContext()); + try (Rest5Client client = buildRestClient()) { + Response 
response = client.performRequest(new Request("GET", "/")); + assertEquals(200, response.getStatusCode()); + } + } finally { + SSLContext.setDefault(defaultSSLContext); + } + } + + @Test + public void testBuilderSetsThreadName() throws Exception { + assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); + final SSLContext defaultSSLContext = SSLContext.getDefault(); + try { + SSLContext.setDefault(getSslContext()); + try (Rest5Client client = buildRestClient()) { + final CountDownLatch latch = new CountDownLatch(1); + client.performRequestAsync(new Request("GET", "/"), new ResponseListener() { + @Override + public void onSuccess(Response response) { + assertThat( + Thread.currentThread().getName(), + allOf( + startsWith(Rest5ClientBuilder.THREAD_NAME_PREFIX), + containsString("elasticsearch"), + containsString("rest-client") + ) + ); + assertEquals(200, response.getStatusCode()); + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + throw new AssertionError("unexpected", exception); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + } finally { + SSLContext.setDefault(defaultSSLContext); + } + } + + private Rest5Client buildRestClient() { + InetSocketAddress address = httpsServer.getAddress(); + return Rest5Client.builder(new HttpHost("https", address.getHostString(), address.getPort())).build(); + } + + private static SSLContext getSslContext() throws Exception { + SSLContext sslContext = SSLContext.getInstance(getProtocol()); + try ( + InputStream certFile = RestClientBuilderIntegTests.class.getResourceAsStream(resourcePath + "/test.crt"); + InputStream keyStoreFile = RestClientBuilderIntegTests.class.getResourceAsStream( + resourcePath + "/test_truststore.jks") + ) { + // Build a keystore of default type programmatically since we can't use JKS keystores to + // init a KeyManagerFactory in FIPS 140 JVMs. 
+ KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + keyStore.load(null, "password".toCharArray()); + CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); + PKCS8EncodedKeySpec privateKeySpec = new PKCS8EncodedKeySpec( + Files.readAllBytes(Paths.get(RestClientBuilderIntegTests.class.getResource(resourcePath + "/test.der").toURI())) + ); + KeyFactory keyFactory = KeyFactory.getInstance("RSA"); + keyStore.setKeyEntry( + "mykey", + keyFactory.generatePrivate(privateKeySpec), + "password".toCharArray(), + new Certificate[]{certFactory.generateCertificate(certFile)} + ); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(keyStore, "password".toCharArray()); + KeyStore trustStore = KeyStore.getInstance("JKS"); + trustStore.load(keyStoreFile, "password".toCharArray()); + TrustManagerFactory tmf = + TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(trustStore); + sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); + } + return sslContext; + } + + /** + * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to + * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK + */ + private static String getProtocol() { + String version = System.getProperty("java.version"); + String[] parts = version.split("-"); + String[] numericComponents; + if (parts.length == 1) { + numericComponents = version.split("\\."); + } else if (parts.length == 2) { + numericComponents = parts[0].split("\\."); + } else { + throw new IllegalArgumentException("Java version string [" + version + "] could not be parsed."); + } + if (numericComponents.length > 0) { + final int major = Integer.valueOf(numericComponents[0]); + if (major > 12) { + return "TLS"; + } else if (major == 12 && numericComponents.length > 2) { + final int minor = Integer.valueOf(numericComponents[1]); + if (minor > 0) { + return "TLS"; + } else { + String patch = numericComponents[2]; + final int index = patch.indexOf("_"); + if (index > -1) { + patch = patch.substring(0, index); + } + + if (Integer.valueOf(patch) >= 1) { + return "TLS"; + } + } + } + } + return "TLSv1.2"; + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientBuilderTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientBuilderTests.java new file mode 100644 index 000000000..956bb303f --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientBuilderTests.java @@ -0,0 +1,181 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
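Before the RestClientBuilderTests below, a short builder usage sketch limited to the options those tests cover (host, header and path-prefix values are placeholders; only setters exercised by the tests are used):

Rest5ClientBuilder builder = Rest5Client.builder(new HttpHost("localhost", 9200));
builder.setDefaultHeaders(new Header[] { new BasicHeader("X-Elastic-Example", "demo") });
builder.setPathPrefix("/proxy/es");
try (Rest5Client client = builder.build()) {
    // issue requests via client.performRequest(...)
}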
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClients; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.message.BasicHeader; +import org.junit.jupiter.api.Test; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; + +public class RestClientBuilderTests extends RestClientTestCase { + + @Test + public void testBuild() throws IOException { + try { + Rest5Client.builder((HttpHost[]) null); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + + try { + Rest5Client.builder(new HttpHost[]{}); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + + try { + Rest5Client.builder((Node[]) null); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + + try { + Rest5Client.builder(new Node[]{}); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + + try { + Rest5Client.builder(new Node(new HttpHost("localhost", 9200)), null); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("node cannot be null", e.getMessage()); + } + + try { + Rest5Client.builder(new HttpHost("localhost", 9200), null); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("host cannot be null", e.getMessage()); + } + + try (Rest5Client restClient = Rest5Client.builder(new HttpHost("localhost", 9200)).build()) { + assertNotNull(restClient); + } + + try { + Rest5Client.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("defaultHeaders must not be null", e.getMessage()); + } + + try { + Rest5Client.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(new Header[]{null}); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("default header must not be null", e.getMessage()); + } + + try { + Rest5Client.builder(new HttpHost("localhost", 9200)).setFailureListener(null); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("failureListener must not be null", e.getMessage()); + } + + try { + Rest5Client.builder(new HttpHost("localhost", 9200)).setHttpClient(null); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("custom rest client must not be null", e.getMessage()); + } + + try { + Rest5Client.builder(new HttpHost("localhost", 9200)).setSSLContext(null); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("ssl context must not be null", e.getMessage()); + } + + int numNodes = randomIntBetween(1, 5); + HttpHost[] hosts = new HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); + } + Rest5ClientBuilder builder = Rest5Client.builder(hosts); + CloseableHttpAsyncClient httpclient = HttpAsyncClients.custom() + .build(); + if 
(randomBoolean()) { + builder.setHttpClient(httpclient); + } + if (randomBoolean()) { + int numHeaders = randomIntBetween(1, 5); + Header[] headers = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + headers[i] = new BasicHeader("header" + i, "value"); + } + builder.setDefaultHeaders(headers); + } + if (randomBoolean()) { + String pathPrefix = (randomBoolean() ? "/" : "") + randomAsciiLettersOfLengthBetween(2, 5); + while (pathPrefix.length() < 20 && randomBoolean()) { + pathPrefix += "/" + randomAsciiLettersOfLengthBetween(3, 6); + } + builder.setPathPrefix(pathPrefix + (randomBoolean() ? "/" : "")); + } + try (Rest5Client restClient = builder.build()) { + assertNotNull(restClient); + } + } + + @Test + public void testSetPathPrefixNull() { + try { + Rest5Client.builder(new HttpHost("localhost", 9200)).setPathPrefix(null); + fail("pathPrefix set to null should fail!"); + } catch (final NullPointerException e) { + assertEquals("pathPrefix must not be null", e.getMessage()); + } + } + + @Test + public void testSetPathPrefixEmpty() { + assertSetPathPrefixThrows(""); + } + + @Test + public void testSetPathPrefixMalformed() { + assertSetPathPrefixThrows("//"); + assertSetPathPrefixThrows("base/path//"); + } + + private static void assertSetPathPrefixThrows(final String pathPrefix) { + try { + Rest5Client.builder(new HttpHost("localhost", 9200)).setPathPrefix(pathPrefix); + fail("path prefix [" + pathPrefix + "] should have failed"); + } catch (final IllegalArgumentException e) { + assertThat(e.getMessage(), containsString(pathPrefix)); + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientGzipCompressionTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientGzipCompressionTests.java new file mode 100644 index 000000000..c1bc150ba --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientGzipCompressionTests.java @@ -0,0 +1,257 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.junit.Assert; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.CompletableFuture; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +import static org.apache.hc.core5.http.HttpHeaders.CONTENT_ENCODING; +import static org.apache.hc.core5.http.HttpHeaders.CONTENT_LENGTH; + +public class RestClientGzipCompressionTests extends RestClientTestCase { + + private static HttpServer httpServer; + + @BeforeAll + public static void startHttpServer() throws Exception { + httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.createContext("/", new GzipResponseHandler()); + httpServer.start(); + } + + @AfterAll + public static void stopHttpServers() throws IOException { + httpServer.stop(0); + httpServer = null; + } + + /** + * A response handler that accepts gzip-encoded data and replies request and response encoding values + * followed by the request body. The response is compressed if "Accept-Encoding" is "gzip". + */ + private static class GzipResponseHandler implements HttpHandler { + @Override + public void handle(HttpExchange exchange) throws IOException { + + // Decode body (if any) + String contentEncoding = exchange.getRequestHeaders().getFirst("Content-Encoding"); + InputStream body = exchange.getRequestBody(); + if ("gzip".equals(contentEncoding)) { + body = new GZIPInputStream(body); + } + byte[] bytes = readAll(body); + + boolean compress = "gzip".equals(exchange.getRequestHeaders().getFirst("Accept-Encoding")); + if (compress) { + exchange.getResponseHeaders().add("Content-Encoding", "gzip"); + } + + // Encode response if needed + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + OutputStream out = bao; + if (compress) { + out = new GZIPOutputStream(out); + } + + // Outputs ## + out.write(String.valueOf(contentEncoding).getBytes(StandardCharsets.UTF_8)); + out.write('#'); + out.write((compress ? "gzip" : "null").getBytes(StandardCharsets.UTF_8)); + out.write('#'); + out.write(bytes); + out.close(); + + bytes = bao.toByteArray(); + + exchange.sendResponseHeaders(200, bytes.length); + + exchange.getResponseBody().write(bytes); + exchange.close(); + } + } + + /** + * Read all bytes of an input stream and close it. 
+ */ + private static byte[] readAll(InputStream in) throws IOException { + byte[] buffer = new byte[1024]; + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int len = 0; + while ((len = in.read(buffer)) > 0) { + bos.write(buffer, 0, len); + } + in.close(); + return bos.toByteArray(); + } + + private Rest5Client createClient(boolean enableCompression) { + InetSocketAddress address = httpServer.getAddress(); + return Rest5Client.builder(new HttpHost("http", address.getHostString(), address.getPort())) + .setCompressionEnabled(enableCompression) + .build(); + } + + @Test + public void testUncompressedSync() throws Exception { + Rest5Client restClient = createClient(false); + + // Send non-compressed request, expect non-compressed response + Request request = new Request("POST", "/"); + request.setEntity(new StringEntity("plain request, plain response", ContentType.TEXT_PLAIN)); + + Response response = restClient.performRequest(request); + + // Server sends a content-length which should be kept + Assert.assertTrue(response.getEntity().getContentLength() > 0); + checkResponse("null#null#plain request, plain response", response); + + restClient.close(); + } + + @Test + public void testGzipHeaderSync() throws Exception { + Rest5Client restClient = createClient(false); + + // Send non-compressed request, expect compressed response + Request request = new Request("POST", "/"); + request.setEntity(new StringEntity("plain request, gzip response", ContentType.TEXT_PLAIN)); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Accept-Encoding", "gzip").build()); + + Response response = restClient.performRequest(request); + + // Content-length is unknown because of ungzip. Do not just test -1 as it returns "a negative + // number if unknown" + Assert.assertTrue(response.getEntity().getContentLength() < 0); + checkResponse("null#gzip#plain request, gzip response", response); + + restClient.close(); + } + + @Test + public void testGzipHeaderAsync() throws Exception { + Rest5Client restClient = createClient(false); + + // Send non-compressed request, expect compressed response + Request request = new Request("POST", "/"); + request.setEntity(new StringEntity("plain request, gzip response", ContentType.TEXT_PLAIN)); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Accept-Encoding", "gzip").build()); + + FutureResponse futureResponse = new FutureResponse(); + restClient.performRequestAsync(request, futureResponse); + Response response = futureResponse.get(); + + Assert.assertTrue(response.getEntity().getContentLength() < 0); + checkResponse("null#gzip#plain request, gzip response", response); + + restClient.close(); + } + + @Test + public void testCompressingClientSync() throws Exception { + Rest5Client restClient = createClient(true); + + Request request = new Request("POST", "/"); + request.setEntity(new StringEntity("compressing client", ContentType.TEXT_PLAIN)); + + Response response = restClient.performRequest(request); + + Assert.assertTrue(response.getEntity().getContentLength() < 0); + checkResponse("gzip#gzip#compressing client", response); + + restClient.close(); + } + + @Test + public void testCompressingClientAsync() throws Exception { + InetSocketAddress address = httpServer.getAddress(); + Rest5Client restClient = Rest5Client.builder(new HttpHost("http", address.getHostString(), + address.getPort())) + .setCompressionEnabled(true) + .build(); + + Request request = new Request("POST", "/"); + request.setEntity(new StringEntity("compressing client", 
ContentType.TEXT_PLAIN)); + + FutureResponse futureResponse = new FutureResponse(); + restClient.performRequestAsync(request, futureResponse); + Response response = futureResponse.get(); + + // Server should report it had a compressed request and sent back a compressed response + Assert.assertTrue(response.getEntity().getContentLength() < 0); + checkResponse("gzip#gzip#compressing client", response); + + restClient.close(); + } + + public static class FutureResponse extends CompletableFuture<Response> implements ResponseListener { + @Override + public void onSuccess(Response response) { + this.complete(response); + } + + @Override + public void onFailure(Exception exception) { + this.completeExceptionally(exception); + } + } + + private static void checkResponse(String expected, Response response) throws Exception { + HttpEntity entity = response.getEntity(); + Assert.assertNotNull(entity); + + String content = new String(readAll(entity.getContent()), StandardCharsets.UTF_8); + Assert.assertEquals(expected, content); + + // Original Content-Encoding should be removed on both entity and response + Assert.assertNull(entity.getContentEncoding()); + Assert.assertNull(response.getHeader(CONTENT_ENCODING)); + + // Content-length must be consistent between entity and response + long entityContentLength = entity.getContentLength(); + String headerContentLength = response.getHeader(CONTENT_LENGTH); + + if (entityContentLength < 0) { + Assert.assertNull(headerContentLength); + } else { + Assert.assertEquals(String.valueOf(entityContentLength), headerContentLength); + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientMultipleHostsIntegTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientMultipleHostsIntegTests.java new file mode 100644 index 000000000..89e00f778 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientMultipleHostsIntegTests.java @@ -0,0 +1,393 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.hc.core5.http.HttpHost; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.io.OutputStream; +import java.net.ConnectException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.getAllStatusCodes; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomErrorNoRetryStatusCode; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Integration test to check interaction between {@link Rest5Client} and {@link org.apache.hc.client5.http.async.HttpAsyncClient}. + * Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts. + */ +public class RestClientMultipleHostsIntegTests extends RestClientTestCase { + + private static WaitForCancelHandler waitForCancelHandler; + private static HttpServer[] httpServers; + private static HttpHost[] httpHosts; + private static boolean stoppedFirstHost = false; + private static String pathPrefixWithoutLeadingSlash; + private static String pathPrefix; + private static Rest5Client restClient; + + @BeforeAll + public static void startHttpServer() throws Exception { + if (randomBoolean()) { + pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5); + pathPrefix = "/" + pathPrefixWithoutLeadingSlash; + } else { + pathPrefix = pathPrefixWithoutLeadingSlash = ""; + } + int numHttpServers = randomIntBetween(2, 4); + httpServers = new HttpServer[numHttpServers]; + httpHosts = new HttpHost[numHttpServers]; + waitForCancelHandler = new WaitForCancelHandler(); + for (int i = 0; i < numHttpServers; i++) { + HttpServer httpServer = createHttpServer(); + httpServers[i] = httpServer; + httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + } + restClient = buildRestClient(NodeSelector.ANY); + } + + private static Rest5Client buildRestClient(NodeSelector nodeSelector) { + return buildRestClient(nodeSelector, null); + } + + private static Rest5Client buildRestClient(NodeSelector nodeSelector, Rest5Client.FailureListener failureListener) { + Rest5ClientBuilder restClientBuilder = Rest5Client.builder(httpHosts); + if (pathPrefix.length() > 0) { + restClientBuilder.setPathPrefix((randomBoolean() ? 
"/" : "") + pathPrefixWithoutLeadingSlash); + } + if (failureListener != null) { + restClientBuilder.setFailureListener(failureListener); + } + restClientBuilder.setNodeSelector(nodeSelector); + return restClientBuilder.build(); + } + + private static HttpServer createHttpServer() throws Exception { + HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.start(); + // returns a different status code depending on the path + for (int statusCode : getAllStatusCodes()) { + httpServer.createContext(pathPrefix + "/" + statusCode, new ResponseHandler(statusCode)); + } + httpServer.createContext(pathPrefix + "/20bytes", new ResponseHandlerWithContent()); + httpServer.createContext(pathPrefix + "/wait", waitForCancelHandler); + return httpServer; + } + + private static WaitForCancelHandler resetWaitHandlers() { + WaitForCancelHandler handler = new WaitForCancelHandler(); + for (HttpServer httpServer : httpServers) { + httpServer.removeContext(pathPrefix + "/wait"); + httpServer.createContext(pathPrefix + "/wait", handler); + } + return handler; + } + + private static class WaitForCancelHandler implements HttpHandler { + private final CountDownLatch requestCameInLatch = new CountDownLatch(1); + private final CountDownLatch cancelHandlerLatch = new CountDownLatch(1); + + void cancelDone() { + cancelHandlerLatch.countDown(); + } + + void awaitRequest() throws InterruptedException { + requestCameInLatch.await(); + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + requestCameInLatch.countDown(); + try { + cancelHandlerLatch.await(); + } catch (InterruptedException ignore) {} finally { + exchange.sendResponseHeaders(200, 0); + exchange.close(); + } + } + } + + private static class ResponseHandler implements HttpHandler { + private final int statusCode; + + ResponseHandler(int statusCode) { + this.statusCode = statusCode; + } + + @Override + public void handle(HttpExchange httpExchange) throws IOException { + httpExchange.getRequestBody().close(); + httpExchange.sendResponseHeaders(statusCode, -1); + httpExchange.close(); + } + } + + private static class ResponseHandlerWithContent implements HttpHandler { + @Override + public void handle(HttpExchange httpExchange) throws IOException { + byte[] body = "01234567890123456789".getBytes(StandardCharsets.UTF_8); + httpExchange.sendResponseHeaders(200, body.length); + try (OutputStream out = httpExchange.getResponseBody()) { + out.write(body); + } + httpExchange.close(); + } + } + + @AfterAll + public static void stopHttpServers() throws IOException { + restClient.close(); + restClient = null; + for (HttpServer httpServer : httpServers) { + httpServer.stop(0); + } + httpServers = null; + } + + @BeforeEach + public void stopRandomHost() { + // verify that shutting down some hosts doesn't matter as long as one working host is left behind + if (httpServers.length > 1 && randomBoolean()) { + List updatedHttpServers = new ArrayList<>(httpServers.length - 1); + int nodeIndex = randomIntBetween(0, httpServers.length - 1); + if (0 == nodeIndex) { + stoppedFirstHost = true; + } + for (int i = 0; i < httpServers.length; i++) { + HttpServer httpServer = httpServers[i]; + if (i == nodeIndex) { + httpServer.stop(0); + } else { + updatedHttpServers.add(httpServer); + } + } + httpServers = updatedHttpServers.toArray(new HttpServer[0]); + } + } + + @Test + public void testSyncRequests() throws IOException { + int numRequests = randomIntBetween(5, 20); + for (int i = 0; i < 
numRequests; i++) { + final String method = RestClientTestUtil.randomHttpMethod(); + // we don't test status codes that are subject to retries as they interfere with hosts being stopped + final int statusCode = randomBoolean() ? randomOkStatusCode() : randomErrorNoRetryStatusCode(); + Response response; + try { + response = restClient.performRequest(new Request(method, "/" + statusCode)); + } catch (ResponseException responseException) { + response = responseException.getResponse(); + } + assertEquals(method, response.getRequestLine().getMethod()); + assertEquals(statusCode, response.getStatusCode()); + assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + statusCode, response.getRequestLine().getUri()); + } + } + + @Test + public void testAsyncRequests() throws Exception { + int numRequests = randomIntBetween(5, 20); + final CountDownLatch latch = new CountDownLatch(numRequests); + final List<TestResponse> responses = new CopyOnWriteArrayList<>(); + for (int i = 0; i < numRequests; i++) { + final String method = RestClientTestUtil.randomHttpMethod(); + // we don't test status codes that are subject to retries as they interfere with hosts being stopped + final int statusCode = randomBoolean() ? randomOkStatusCode() : randomErrorNoRetryStatusCode(); + restClient.performRequestAsync(new Request(method, "/" + statusCode), new ResponseListener() { + @Override + public void onSuccess(Response response) { + responses.add(new TestResponse(method, statusCode, response)); + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + responses.add(new TestResponse(method, statusCode, exception)); + latch.countDown(); + } + }); + } + assertTrue(latch.await(5, TimeUnit.SECONDS)); + + assertEquals(numRequests, responses.size()); + for (TestResponse testResponse : responses) { + Response response = testResponse.getResponse(); + assertEquals(testResponse.method, response.getRequestLine().getMethod()); + assertEquals(testResponse.statusCode, response.getStatusCode()); + assertEquals((pathPrefix.length() > 0 ? 
pathPrefix : "") + "/" + testResponse.statusCode, response.getRequestLine().getUri()); + } + } + + @Test + public void testCancelAsyncRequests() throws Exception { + int numRequests = randomIntBetween(5, 20); + final List<Response> responses = new CopyOnWriteArrayList<>(); + final List<Exception> exceptions = new CopyOnWriteArrayList<>(); + for (int i = 0; i < numRequests; i++) { + CountDownLatch latch = new CountDownLatch(1); + waitForCancelHandler = resetWaitHandlers(); + Cancellable cancellable = restClient.performRequestAsync(new Request("GET", "/wait"), new ResponseListener() { + @Override + public void onSuccess(Response response) { + responses.add(response); + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + exceptions.add(exception); + latch.countDown(); + } + }); + if (randomBoolean()) { + // we wait for the request to get to the server-side otherwise we almost always cancel + // the request artificially on the client-side before even sending it + waitForCancelHandler.awaitRequest(); + } + cancellable.cancel(); + waitForCancelHandler.cancelDone(); + assertTrue(latch.await(5, TimeUnit.SECONDS)); + } + assertEquals(0, responses.size()); + assertEquals(numRequests, exceptions.size()); + for (Exception exception : exceptions) { + assertThat(exception, instanceOf(CancellationException.class)); + } + } + + /** + * Test host selector against a real server and + * test what happens after calling + */ + @Test + public void testNodeSelector() throws Exception { + try (Rest5Client restClient = buildRestClient(firstPositionNodeSelector())) { + Request request = new Request("GET", "/200"); + int rounds = randomIntBetween(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. + */ + if (stoppedFirstHost) { + try { + RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + fail("expected to fail to connect"); + } catch (ConnectException e) { + // Windows isn't consistent here. Sometimes the message is even null! + if (false == System.getProperty("os.name").startsWith("Windows")) { + assertTrue(e.getMessage().contains("Connection refused")); + } + } + } else { + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(httpHosts[0], response.getHost()); + } + } + } + } + + @Disabled("https://github.com/elastic/elasticsearch/issues/87314") + @Test + public void testNonRetryableException() throws Exception { + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.setHttpAsyncResponseConsumerFactory( + // Limit to very short responses to trigger a ContentTooLongException + () -> new BasicAsyncResponseConsumer(new BufferedByteConsumer(10)) + ); + + AtomicInteger failureCount = new AtomicInteger(); + Rest5Client client = buildRestClient(NodeSelector.ANY, new Rest5Client.FailureListener() { + @Override + public void onFailure(Node node) { + failureCount.incrementAndGet(); + } + }); + + failureCount.set(0); + Request request = new Request("POST", "/20bytes"); + request.setOptions(options); + try { + RestClientSingleHostTests.performRequestSyncOrAsync(client, request); + fail("Request should not succeed"); + } catch (IOException e) { + assertEquals(stoppedFirstHost ? 
2 : 1, failureCount.intValue()); + } + + client.close(); + } + + private static class TestResponse { + private final String method; + private final int statusCode; + private final Object response; + + TestResponse(String method, int statusCode, Object response) { + this.method = method; + this.statusCode = statusCode; + this.response = response; + } + + Response getResponse() { + if (response instanceof Response) { + return (Response) response; + } + if (response instanceof ResponseException) { + return ((ResponseException) response).getResponse(); + } + throw new AssertionError("unexpected response " + response.getClass()); + } + } + + private NodeSelector firstPositionNodeSelector() { + return nodes -> { + for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) { + if (httpHosts[0] != itr.next().getHost()) { + itr.remove(); + } + } + }; + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientMultipleHostsTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientMultipleHostsTests.java new file mode 100644 index 000000000..71dfd014b --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientMultipleHostsTests.java @@ -0,0 +1,338 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomErrorNoRetryStatusCode; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomErrorRetryStatusCode; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomHttpMethod; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Tests for {@link Rest5Client} behaviour against multiple hosts: fail-over, blacklisting etc.
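+ * <p>For orientation only (editorial note, not part of the original change), a multi-host client of the kind exercised here would be built roughly as follows, assuming two hypothetical local nodes on ports 9200 and 9201: + * <pre>{@code + * Rest5Client client = Rest5Client.builder( + * new HttpHost("localhost", 9200), + * new HttpHost("localhost", 9201) + * ).build(); + * }</pre>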
+ * Relies on a mock http client to intercept requests and return desired responses based on request path. + */ +public class RestClientMultipleHostsTests extends RestClientTestCase { + + private ExecutorService exec = Executors.newFixedThreadPool(1); + private List<Node> nodes; + private HostsTrackingFailureListener failureListener; + + public Rest5Client createRestClient(NodeSelector nodeSelector) { + CloseableHttpAsyncClient httpClient = RestClientSingleHostTests.mockHttpClient(exec); + int numNodes = randomIntBetween(2, 5); + nodes = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + nodes.add(new Node(new HttpHost("localhost", 9200 + i))); + } + nodes = Collections.unmodifiableList(nodes); + failureListener = new HostsTrackingFailureListener(); + return new Rest5Client(httpClient, new Header[0], nodes, null, failureListener, nodeSelector, false, false, false); + } + + /** + * Shutdown the executor so we don't leak threads into other test runs. + */ + @AfterEach + public void shutdownExec() { + exec.shutdown(); + } + + @Test + public void testRoundRobinOkStatusCodes() throws Exception { + Rest5Client restClient = createRestClient(NodeSelector.ANY); + int numIters = randomIntBetween(1, 5); + for (int i = 0; i < numIters; i++) { + Set<HttpHost> hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { + int statusCode = randomOkStatusCode(); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync( + restClient, + new Request(randomHttpMethod(), "/" + statusCode) + ); + assertEquals(statusCode, response.getStatusCode()); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + failureListener.assertNotCalled(); + } + + @Test + public void testRoundRobinNoRetryErrors() throws Exception { + Rest5Client restClient = createRestClient(NodeSelector.ANY); + int numIters = randomIntBetween(1, 5); + for (int i = 0; i < numIters; i++) { + Set<HttpHost> hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { + String method = randomHttpMethod(); + int statusCode = randomErrorNoRetryStatusCode(); + try { + Response response = RestClientSingleHostTests.performRequestSyncOrAsync( + restClient, + new Request(method, "/" + statusCode) + ); + if (method.equals("HEAD") && statusCode == 404) { + // no exception gets thrown although we got a 404 + assertEquals(404, response.getStatusCode()); + assertEquals(statusCode, response.getStatusCode()); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + } else { + fail("request should have failed"); + } + } catch (ResponseException e) { + if (method.equals("HEAD") && statusCode == 404) { + throw e; + } + Response response = e.getResponse(); + assertEquals(statusCode, response.getStatusCode()); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + assertEquals(0, e.getSuppressed().length); + } + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + failureListener.assertNotCalled(); + } + + @Test + public void testRoundRobinRetryErrors() throws Exception { + Rest5Client restClient = createRestClient(NodeSelector.ANY); + String retryEndpoint = randomErrorRetryEndpoint(); + try { + RestClientSingleHostTests.performRequestSyncOrAsync(restClient, new Request(randomHttpMethod(), retryEndpoint)); + fail("request should have failed"); + } catch (ResponseException e) { 
Set hostsSet = hostsSet(); + // first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each + failureListener.assertCalled(nodes); + do { + Response response = e.getResponse(); + assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusCode()); + assertTrue( + "host [" + response.getHost() + "] not found, most likely used multiple times", + hostsSet.remove(response.getHost()) + ); + if (e.getSuppressed().length > 0) { + assertEquals(1, e.getSuppressed().length); + Throwable suppressed = e.getSuppressed()[0]; + assertThat(suppressed, instanceOf(ResponseException.class)); + e = (ResponseException) suppressed; + } else { + e = null; + } + } while (e != null); + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } catch (IOException e) { + Set hostsSet = hostsSet(); + // first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each + failureListener.assertCalled(nodes); + do { + HttpHost httpHost = HttpHost.create(e.getMessage()); + assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); + if (e.getSuppressed().length > 0) { + assertEquals(1, e.getSuppressed().length); + Throwable suppressed = e.getSuppressed()[0]; + assertThat(suppressed, instanceOf(IOException.class)); + e = (IOException) suppressed; + } else { + e = null; + } + } while (e != null); + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + + int numIters = randomIntBetween(2, 5); + for (int i = 1; i <= numIters; i++) { + // check that one different host is resurrected at each new attempt + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { + retryEndpoint = randomErrorRetryEndpoint(); + try { + RestClientSingleHostTests.performRequestSyncOrAsync( + restClient, + new Request(randomHttpMethod(), retryEndpoint) + ); + fail("request should have failed"); + } catch (ResponseException e) { + Response response = e.getResponse(); + assertThat(response.getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); + assertTrue( + "host [" + response.getHost() + "] not found, most likely used multiple times", + hostsSet.remove(response.getHost()) + ); + // after the first request, all hosts are blacklisted, a single one gets resurrected each time + failureListener.assertCalled(response.getHost()); + assertEquals(0, e.getSuppressed().length); + } catch (IOException e) { + HttpHost httpHost = HttpHost.create(e.getMessage()); + assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); + // after the first request, all hosts are blacklisted, a single one gets resurrected each time + failureListener.assertCalled(httpHost); + assertEquals(0, e.getSuppressed().length); + } + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + if (randomBoolean()) { + // mark one host back alive through a successful request and check that all requests after that are sent to it + HttpHost selectedHost = null; + int iters = randomIntBetween(2, 10); + for (int y = 0; y < iters; y++) { + int statusCode = randomErrorNoRetryStatusCode(); + Response response; + try { + response = RestClientSingleHostTests.performRequestSyncOrAsync( + restClient, + new Request(randomHttpMethod(), "/" + statusCode) + ); + } catch (ResponseException e) { + response = 
e.getResponse(); + } + assertThat(response.getStatusCode(), equalTo(statusCode)); + if (selectedHost == null) { + selectedHost = response.getHost(); + } else { + assertThat(response.getHost(), equalTo(selectedHost)); + } + } + failureListener.assertNotCalled(); + // let the selected host catch up on number of failures, it gets selected a consecutive number of times as it's the one + // selected to be retried earlier (due to lower number of failures) till all the hosts have the same number of failures + for (int y = 0; y < i + 1; y++) { + retryEndpoint = randomErrorRetryEndpoint(); + try { + RestClientSingleHostTests.performRequestSyncOrAsync( + restClient, + new Request(randomHttpMethod(), retryEndpoint) + ); + fail("request should have failed"); + } catch (ResponseException e) { + Response response = e.getResponse(); + assertThat(response.getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); + assertThat(response.getHost(), equalTo(selectedHost)); + failureListener.assertCalled(selectedHost); + } catch (IOException e) { + HttpHost httpHost = HttpHost.create(e.getMessage()); + assertThat(httpHost, equalTo(selectedHost)); + failureListener.assertCalled(selectedHost); + } + } + } + } + } + + @Test + public void testNodeSelector() throws Exception { + NodeSelector firstPositionOnly = restClientNodes -> { + boolean found = false; + for (Iterator itr = restClientNodes.iterator(); itr.hasNext();) { + if (nodes.get(0) == itr.next()) { + found = true; + } else { + itr.remove(); + } + } + assertTrue(found); + }; + Rest5Client restClient = createRestClient(firstPositionOnly); + int rounds = randomIntBetween(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. + */ + Request request = new Request("GET", "/200"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(nodes.get(0).getHost(), response.getHost()); + } + } + + @Test + public void testSetNodes() throws Exception { + Rest5Client restClient = createRestClient(NodeSelector.SKIP_DEDICATED_MASTERS); + List newNodes = new ArrayList<>(nodes.size()); + for (int i = 0; i < nodes.size(); i++) { + Node.Roles roles = i == 0 + ? new Node.Roles(new TreeSet<>(Arrays.asList("data", "ingest"))) + : new Node.Roles(new TreeSet<>(Arrays.asList("master"))); + newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles, null)); + } + restClient.setNodes(newNodes); + int rounds = randomIntBetween(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. + */ + Request request = new Request("GET", "/200"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(newNodes.get(0).getHost(), response.getHost()); + } + } + + private static String randomErrorRetryEndpoint() { + switch (randomIntBetween(0, 3)) { + case 0: + return "/" + randomErrorRetryStatusCode(); + case 1: + return "/coe"; + case 2: + return "/soe"; + case 3: + return "/ioe"; + } + throw new UnsupportedOperationException(); + } + + /** + * Build a mutable {@link Set} containing all the {@link Node#getHost() hosts} + * in use by the test. 
+ */ + private Set hostsSet() { + Set hosts = new HashSet<>(); + for (Node node : nodes) { + hosts.add(node.getHost()); + } + return hosts; + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientSingleHostIntegTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientSingleHostIntegTests.java new file mode 100644 index 000000000..834e949f7 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientSingleHostIntegTests.java @@ -0,0 +1,453 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import com.sun.net.httpserver.Headers; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.hc.client5.http.async.methods.SimpleHttpRequest; +import org.apache.hc.client5.http.async.methods.SimpleHttpResponse; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.async.HttpAsyncClients; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.getAllStatusCodes; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomHttpMethod; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomStatusCode; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.hamcrest.Matchers.instanceOf; +import 
static org.hamcrest.Matchers.startsWith; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/* + * Integration test to check interaction between {@link Rest5Client} and {@link org.apache.hc.client5.http + * .async.HttpAsyncClient}. + * Works against a real http server, one single host. + */ + +public class RestClientSingleHostIntegTests extends RestClientTestCase { + + private HttpServer httpServer; + private Rest5Client restClient; + private String pathPrefix; + private Header[] defaultHeaders; + private WaitForCancelHandler waitForCancelHandler; + + @BeforeEach + public void startHttpServer() throws Exception { + // set version.properties, just for testing, version won't be updated + System.setProperty("versions.elasticsearch","8.17.0"); + pathPrefix = randomBoolean() ? "/testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5) : ""; + httpServer = createHttpServer(); + defaultHeaders = RestClientTestUtil.randomHeaders("Header-default"); + restClient = createRestClient(false, true, true); + } + + private HttpServer createHttpServer() throws Exception { + HttpServer mockServer = + MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + mockServer.start(); + // returns a different status code depending on the path + for (int statusCode : getAllStatusCodes()) { + mockServer.createContext(pathPrefix + "/" + statusCode, new ResponseHandler(statusCode)); + } + waitForCancelHandler = new WaitForCancelHandler(); + mockServer.createContext(pathPrefix + "/wait", waitForCancelHandler); + return mockServer; + } + + private static class WaitForCancelHandler implements HttpHandler { + + private final CountDownLatch cancelHandlerLatch = new CountDownLatch(1); + + void cancelDone() { + cancelHandlerLatch.countDown(); + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + try { + cancelHandlerLatch.await(); + } catch (InterruptedException ignore) { + } finally { + exchange.sendResponseHeaders(200, 0); + exchange.close(); + } + } + } + + private static class ResponseHandler implements HttpHandler { + private final int statusCode; + + ResponseHandler(int statusCode) { + this.statusCode = statusCode; + } + + @Override + public void handle(HttpExchange httpExchange) throws IOException { + // copy request body to response body so we can verify it was sent + StringBuilder body = new StringBuilder(); + try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), UTF_8)) { + char[] buffer = new char[256]; + int read; + while ((read = reader.read(buffer)) != -1) { + body.append(buffer, 0, read); + } + } + // copy request headers to response headers so we can verify they were sent + Headers requestHeaders = httpExchange.getRequestHeaders(); + Headers responseHeaders = httpExchange.getResponseHeaders(); + for (Map.Entry<String, List<String>> header : requestHeaders.entrySet()) { + responseHeaders.put(header.getKey(), header.getValue()); + } + httpExchange.getRequestBody().close(); + httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? 
-1 : body.length()); + if (body.length() > 0) { + try (OutputStream out = httpExchange.getResponseBody()) { + out.write(body.toString().getBytes(UTF_8)); + } + } + httpExchange.close(); + } + } + + private Rest5Client createRestClient(final boolean useAuth, final boolean usePreemptiveAuth, + final boolean enableMetaHeader) { + HttpHost host = new HttpHost(httpServer.getAddress().getHostString(), + httpServer.getAddress().getPort()); + + final Rest5ClientBuilder restClientBuilder = + Rest5Client.builder(host).setDefaultHeaders(defaultHeaders); + if (pathPrefix.length() > 0) { + restClientBuilder.setPathPrefix(pathPrefix); + } + + restClientBuilder.setMetaHeaderEnabled(enableMetaHeader); + + + if (useAuth) { + // provide the username/password for every request + var creds = Base64.getEncoder().encodeToString("user:pass".getBytes()); + + HttpAsyncClientBuilder httpclientBuilder = HttpAsyncClients.custom(); + + httpclientBuilder.setDefaultHeaders(Arrays.asList(new BasicHeader("Authorization", + "Basic " + creds))); + + if (!usePreemptiveAuth) { + httpclientBuilder + .disableAuthCaching() + .setTargetAuthenticationStrategy(new DefaultAuthenticationStrategy()); + } + restClientBuilder.setHttpClient(httpclientBuilder.build()); + } + + return restClientBuilder.build(); + } + + @AfterEach + public void stopHttpServers() throws IOException { + restClient.close(); + restClient = null; + httpServer.stop(0); + httpServer = null; + } + + /* + * Tests sending a bunch of async requests works well (e.g. no TimeoutException from the leased pool) + * See https://github.com/elastic/elasticsearch/issues/24069 + */ + @Test + public void testManyAsyncRequests() throws Exception { + int iters = randomIntBetween(500, 1000); + final CountDownLatch latch = new CountDownLatch(iters); + final List exceptions = new CopyOnWriteArrayList<>(); + for (int i = 0; i < iters; i++) { + Request request = new Request("PUT", "/200"); + request.setEntity(new StringEntity("{}", ContentType.APPLICATION_JSON)); + restClient.performRequestAsync(request, new ResponseListener() { + @Override + public void onSuccess(Response response) { + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + exceptions.add(exception); + latch.countDown(); + } + }); + } + + assertTrue("timeout waiting for requests to be sent", latch.await(10, TimeUnit.SECONDS)); + if (exceptions.isEmpty() == false) { + AssertionError error = new AssertionError( + "expected no failures but got some. 
see suppressed for first 10 of [" + exceptions.size() + "] failures" + ); + for (Exception exception : exceptions.subList(0, Math.min(10, exceptions.size()))) { + error.addSuppressed(exception); + } + throw error; + } + } + + @Test + public void testCancelAsyncRequest() throws Exception { + Request request = new Request(randomHttpMethod(), "/wait"); + CountDownLatch requestLatch = new CountDownLatch(1); + AtomicReference error = new AtomicReference<>(); + Cancellable cancellable = restClient.performRequestAsync(request, new ResponseListener() { + @Override + public void onSuccess(Response response) { + throw new AssertionError("onResponse called unexpectedly"); + } + + @Override + public void onFailure(Exception exception) { + error.set(exception); + requestLatch.countDown(); + } + }); + cancellable.cancel(); + waitForCancelHandler.cancelDone(); + assertTrue(requestLatch.await(5, TimeUnit.SECONDS)); + assertThat(error.get(), instanceOf(CancellationException.class)); + } + + /** + * This test verifies some assumptions that we rely upon around the way the async http client works + * when reusing the same request + * throughout multiple retries, and the use of the + * {@link org.apache.hc.client5.http.classic.methods.HttpUriRequestBase#abort()} method. + * In fact the low-level REST client reuses the same request instance throughout multiple retries, and + * relies on the http client + * to set the future ref to the request properly so that when abort is called, the proper future gets + * cancelled. + */ + @Test + public void testRequestResetAndAbort() throws Exception { + try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) { + client.start(); + HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), + httpServer.getAddress().getPort()); + HttpGet httpGet = new HttpGet(httpHost.toURI() + pathPrefix + "/200"); + + // calling abort before the request is sent is a no-op + httpGet.abort(); + assertTrue(httpGet.isAborted()); + + { + httpGet.reset(); + assertFalse(httpGet.isAborted()); + httpGet.abort(); + Future future = client.execute(SimpleHttpRequest.copy(httpGet), null); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + try { + future.get(); + fail("expected cancellation exception"); + } catch (CancellationException e) { + // expected + } + assertTrue(future.isCancelled()); + } + { + httpGet.reset(); + Future future = client.execute(SimpleHttpRequest.copy(httpGet), null); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + + assertFalse(httpGet.isAborted()); + httpGet.abort(); + assertTrue(httpGet.isAborted()); + try { + assertTrue(future.isDone()); + future.get(); + } catch (CancellationException e) { + // expected sometimes - if the future was cancelled before executing successfully + } + } + { + httpGet.reset(); + assertFalse(httpGet.isAborted()); + Future future = client.execute(SimpleHttpRequest.copy(httpGet), null); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + + assertFalse(httpGet.isAborted()); + assertEquals(200, future.get().getCode()); + assertFalse(future.isCancelled()); + } + } + } + + /** + * End to end test for delete with body. We test it explicitly as it is not supported + * out of the box by {@link HttpAsyncClients}. + * Exercises the test http server ability to send back whatever body it received. + */ + @Test + public void testDeleteWithBody() throws Exception { + bodyTest("DELETE"); + } + + /** + * End to end test for get with body. 
We test it explicitly as it is not supported + * out of the box by {@link HttpAsyncClients}. + * Exercises the test http server ability to send back whatever body it received. + */ + @Test + public void testGetWithBody() throws Exception { + bodyTest("GET"); + } + + @Test + public void testEncodeParams() throws Exception { + { + Request request = new Request("PUT", "/200"); + request.addParameter("routing", "this/is/the/routing"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(pathPrefix + "/200?routing=this%2Fis%2Fthe%2Frouting", + response.getRequestLine().getUri()); + } + { + Request request = new Request("PUT", "/200"); + request.addParameter("routing", "this|is|the|routing"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(pathPrefix + "/200?routing=this%7Cis%7Cthe%7Crouting", + response.getRequestLine().getUri()); + } + { + Request request = new Request("PUT", "/200"); + request.addParameter("routing", "routing#1"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(pathPrefix + "/200?routing=routing%231", response.getRequestLine().getUri()); + } + { + Request request = new Request("PUT", "/200"); + request.addParameter("routing", "中文"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(pathPrefix + "/200?routing=%E4%B8%AD%E6%96%87", response.getRequestLine().getUri()); + } + { + Request request = new Request("PUT", "/200"); + request.addParameter("routing", "foo bar"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(pathPrefix + "/200?routing=foo%20bar", response.getRequestLine().getUri()); + } + { + Request request = new Request("PUT", "/200"); + request.addParameter("routing", "foo+bar"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(pathPrefix + "/200?routing=foo%2Bbar", response.getRequestLine().getUri()); + } + { + Request request = new Request("PUT", "/200"); + request.addParameter("routing", "foo/bar"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(pathPrefix + "/200?routing=foo%2Fbar", response.getRequestLine().getUri()); + } + { + Request request = new Request("PUT", "/200"); + request.addParameter("routing", "foo^bar"); + Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); + assertEquals(pathPrefix + "/200?routing=foo%5Ebar", response.getRequestLine().getUri()); + } + } + + /** + * Verify that credentials are sent on the first request with preemptive auth enabled (default when + * provided with credentials). 
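+ * <p>Illustrative sketch (editorial, not part of the original test): here the credentials come from {@code createRestClient(true, true, true)} above, which pre-configures the underlying Apache client with a default {@code Authorization} header, roughly: + * <pre>{@code + * String creds = Base64.getEncoder().encodeToString("user:pass".getBytes()); + * CloseableHttpAsyncClient http = HttpAsyncClients.custom() + * .setDefaultHeaders(Arrays.asList(new BasicHeader("Authorization", "Basic " + creds))) + * .build(); + * Rest5Client client = Rest5Client.builder(host).setHttpClient(http).build(); + * }</pre>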
+ */ + @Test + public void testPreemptiveAuthEnabled() throws Exception { + final String[] methods = {"POST", "PUT", "GET", "DELETE"}; + + try (Rest5Client restClient = createRestClient(true, true, true)) { + for (final String method : methods) { + final Response response = bodyTest(restClient, method); + + assertThat(response.getHeader("Authorization"), startsWith("Basic")); + } + } + } + + private Response bodyTest(final String method) throws Exception { + return bodyTest(restClient, method); + } + + private Response bodyTest(final Rest5Client client, final String method) throws Exception { + int statusCode = randomStatusCode(); + return bodyTest(client, method, statusCode, new Header[0]); + } + + private Response bodyTest(Rest5Client client, String method, int statusCode, Header[] headers) throws Exception { + String requestBody = "{ \"field\": \"value\" }"; + Request request = new Request(method, "/" + statusCode); + request.setJsonEntity(requestBody); + RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Header header : headers) { + options.addHeader(header.getName(), header.getValue()); + } + request.setOptions(options); + Response esResponse; + try { + esResponse = RestClientSingleHostTests.performRequestSyncOrAsync(client, request); + } catch (ResponseException e) { + esResponse = e.getResponse(); + } + assertEquals(method, esResponse.getRequestLine().getMethod()); + assertEquals(statusCode, esResponse.getStatusCode()); + assertEquals(pathPrefix + "/" + statusCode, esResponse.getRequestLine().getUri()); + assertEquals(requestBody, EntityUtils.toString(esResponse.getEntity())); + + return esResponse; + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientSingleHostTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientSingleHostTests.java new file mode 100644 index 000000000..89f90f913 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientSingleHostTests.java @@ -0,0 +1,692 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.protocol.HttpClientContext; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpRequest; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncDataProducer; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; + +import javax.net.ssl.SSLHandshakeException; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.lang.reflect.Field; +import java.net.SocketTimeoutException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; + +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.canHaveBody; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.getAllErrorStatusCodes; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.getHttpMethods; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.getOkStatusCodes; +import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestUtil.randomStatusCode; +import static java.util.Collections.singletonList; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; 
+import static org.junit.Assert.fail;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.nullable;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for basic functionality of {@link Rest5Client} against a single host: tests http requests being
+ * sent, headers,
+ * body, different status codes and corresponding responses/exceptions.
+ * Relies on a mock http client to intercept requests and return desired responses based on request path.
+ */
+public class RestClientSingleHostTests extends RestClientTestCase {
+    private static final Log logger = LogFactory.getLog(RestClientSingleHostTests.class);
+
+    private ExecutorService exec = Executors.newFixedThreadPool(1);
+    private Rest5Client restClient;
+    private Header[] defaultHeaders;
+    private Node node;
+    private CloseableHttpAsyncClient httpClient;
+    private HostsTrackingFailureListener failureListener;
+    private boolean strictDeprecationMode;
+
+    @BeforeEach
+    public void createRestClient() {
+        httpClient = mockHttpClient(exec);
+        defaultHeaders = RestClientTestUtil.randomHeaders("Header-default");
+        node = new Node(new HttpHost("localhost", 9200));
+        failureListener = new HostsTrackingFailureListener();
+        strictDeprecationMode = randomBoolean();
+        restClient = new Rest5Client(
+            this.httpClient,
+            defaultHeaders,
+            singletonList(node),
+            null,
+            failureListener,
+            NodeSelector.ANY,
+            strictDeprecationMode,
+            false,
+            false
+        );
+    }
+
+    @SuppressWarnings("unchecked")
+    static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec) {
+        CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class);
+        when(
+            httpClient.execute(
+                any(AsyncRequestProducer.class),
+                any(AsyncResponseConsumer.class),
+                any(HttpClientContext.class),
+                nullable(FutureCallback.class)
+            )
+        ).thenAnswer((Answer<Future<HttpResponse>>) invocationOnMock -> {
+            final AsyncRequestProducer requestProducer =
+                (AsyncRequestProducer) invocationOnMock.getArguments()[0];
+            final FutureCallback<HttpResponse> futureCallback =
+                (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[3];
+            // Call the callback asynchronously to better simulate how the async http client works
+            return exec.submit(() -> {
+                if (futureCallback != null) {
+                    try {
+                        HttpResponse httpResponse = responseOrException(requestProducer);
+                        futureCallback.completed(httpResponse);
+                    } catch (Exception e) {
+                        futureCallback.failed(e);
+                    }
+                    return null;
+                }
+                return responseOrException(requestProducer);
+            });
+        });
+        return httpClient;
+    }
+
+    private static HttpResponse responseOrException(AsyncDataProducer requestProducer) throws Exception {
+        // request is private in BasicRequestProducer, need to make it accessible first
+        Field requestField = requestProducer.getClass().getDeclaredField("request");
+        requestField.setAccessible(true);
+        final HttpRequest request = (HttpRequest) requestField.get(requestProducer);
+        final HttpHost httpHost = new HttpHost(request.getAuthority().getHostName(),
+            request.getAuthority().getPort());
+        // return the desired status code or exception depending on the path
+        switch (request.getPath()) {
+            case "/soe":
+                throw new SocketTimeoutException(httpHost.toString());
+            case "/coe":
+                throw new ConnectTimeoutException(httpHost.toString());
+            case "/ioe":
+                throw new IOException(httpHost.toString());
+            case "/closed":
+                throw new ConnectionClosedException();
+            case "/handshake":
+                throw new SSLHandshakeException("");
+            case "/uri":
+                throw new URISyntaxException("", "");
+            case "/runtime":
+                throw new RuntimeException();
+            default:
+                int statusCode = Integer.parseInt(request.getPath().substring(1));
+
+                final BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(statusCode, "");
+                Optional<HttpEntity> entity = retrieveEntity(requestProducer);
+
+                // return the same body that was sent
+                if (entity.isPresent()) {
+                    assertTrue("the entity is not repeatable, cannot set it to the response directly",
+                        entity.get().isRepeatable());
+                    httpResponse.setEntity(entity.get());
+                }
+                // return the same headers that were sent
+                httpResponse.setHeaders(request.getHeaders());
+                return httpResponse;
+        }
+    }
+
+    private static Optional<HttpEntity> retrieveEntity(AsyncDataProducer requestProducer)
+        throws NoSuchFieldException, IllegalAccessException {
+        // entity is in the dataProducer field, both are private
+        Field dataProducerField = requestProducer.getClass().getDeclaredField("dataProducer");
+        dataProducerField.setAccessible(true);
+        final BasicAsyncEntityProducer dataProducer =
+            (BasicAsyncEntityProducer) dataProducerField.get(requestProducer);
+
+        if (dataProducer != null) {
+            Field entityField = dataProducer.getClass().getDeclaredField("entity");
+            entityField.setAccessible(true);
+            return Optional.ofNullable((HttpEntity) entityField.get(dataProducer));
+        }
+        return Optional.empty();
+    }
+
+    /**
+     * Shutdown the executor so we don't leak threads into other test runs.
+     */
+    @AfterEach
+    public void shutdownExec() {
+        exec.shutdown();
+    }
+
+    /**
+     * Verifies the content of the {@link HttpRequest} that's internally created and passed through to the
+     * http client
+     */
+    @SuppressWarnings("unchecked")
+    @Test
+    public void testInternalHttpRequest() throws Exception {
+        ArgumentCaptor<AsyncRequestProducer> requestArgumentCaptor =
+            ArgumentCaptor.forClass(AsyncRequestProducer.class);
+        int times = 0;
+        for (String httpMethod : getHttpMethods()) {
+            HttpRequest expectedRequest = performRandomRequest(httpMethod);
+            verify(httpClient, times(++times)).execute(
+                requestArgumentCaptor.capture(),
+                any(AsyncResponseConsumer.class),
+                any(HttpClientContext.class),
+                nullable(FutureCallback.class)
+            );
+            AsyncRequestProducer requestProducer = requestArgumentCaptor.getValue();
+            Field requestField = requestProducer.getClass().getDeclaredField("request");
+            requestField.setAccessible(true);
+            final HttpRequest actualRequest = (HttpRequest) requestField.get(requestProducer);
+            assertEquals(expectedRequest.getRequestUri(), actualRequest.getRequestUri());
+            assertArrayEquals(expectedRequest.getHeaders(), actualRequest.getHeaders());
+            if (canHaveBody(expectedRequest) && expectedRequest instanceof BasicClassicHttpRequest) {
+                Optional<HttpEntity> actualEntity = retrieveEntity(requestProducer);
+                if (actualEntity.isPresent()) {
+                    HttpEntity expectedEntity = ((BasicClassicHttpRequest) expectedRequest).getEntity();
+                    assertEquals(EntityUtils.toString(expectedEntity),
+                        EntityUtils.toString(actualEntity.get()));
+                }
+            }
+        }
+    }
+
+    /**
+     * End to end test for ok status codes
+     */
+    @Test
+    public void testOkStatusCodes() throws Exception {
+        for (String method : getHttpMethods()) {
+            for (int okStatusCode : getOkStatusCodes()) {
+                Response response = performRequestSyncOrAsync(restClient, new Request(method,
+                    "/" + okStatusCode));
+                assertThat(response.getStatusCode(), equalTo(okStatusCode));
+            }
+        }
+        failureListener.assertNotCalled();
+    }
+
+    /**
+     * End to end test for error status codes: they should cause an exception to be thrown
+     */
+    @Test
+    public void
testErrorStatusCodes() throws Exception { + for (String method : getHttpMethods()) { + // error status codes should cause an exception to be thrown + for (int errorStatusCode : getAllErrorStatusCodes()) { + try { + Request request = new Request(method, "/" + errorStatusCode); + Response response = restClient.performRequest(request); + fail("request should have failed"); + } catch (ResponseException e) { + assertEquals(errorStatusCode, e.getResponse().getStatusCode()); + assertExceptionStackContainsCallingMethod(e); + } + } + } + } + + @Test + public void testPerformRequestIOExceptions() throws Exception { + for (String method : getHttpMethods()) { + // IOExceptions should be let bubble up + try { + restClient.performRequest(new Request(method, "/ioe")); + fail("request should have failed"); + } catch (IOException e) { + // And we do all that so the thrown exception has our method in the stacktrace + assertExceptionStackContainsCallingMethod(e); + } + failureListener.assertCalled(singletonList(node)); + try { + restClient.performRequest(new Request(method, "/coe")); + fail("request should have failed"); + } catch (ConnectTimeoutException e) { + // And we do all that so the thrown exception has our method in the stacktrace + assertExceptionStackContainsCallingMethod(e); + } + failureListener.assertCalled(singletonList(node)); + try { + restClient.performRequest(new Request(method, "/soe")); + fail("request should have failed"); + } catch (SocketTimeoutException e) { + // And we do all that so the thrown exception has our method in the stacktrace + assertExceptionStackContainsCallingMethod(e); + } + failureListener.assertCalled(singletonList(node)); + try { + restClient.performRequest(new Request(method, "/closed")); + fail("request should have failed"); + } catch (ConnectionClosedException e) { + // And we do all that so the thrown exception has our method in the stacktrace + assertExceptionStackContainsCallingMethod(e); + } + failureListener.assertCalled(singletonList(node)); + try { + restClient.performRequest(new Request(method, "/handshake")); + fail("request should have failed"); + } catch (SSLHandshakeException e) { + // And we do all that so the thrown exception has our method in the stacktrace + assertExceptionStackContainsCallingMethod(e); + } + failureListener.assertCalled(singletonList(node)); + } + } + + @Test + public void testPerformRequestRuntimeExceptions() throws Exception { + for (String method : getHttpMethods()) { + try { + restClient.performRequest(new Request(method, "/runtime")); + fail("request should have failed"); + } catch (RuntimeException e) { + // And we do all that so the thrown exception has our method in the stacktrace + assertExceptionStackContainsCallingMethod(e); + } + failureListener.assertCalled(singletonList(node)); + } + } + + @Test + public void testPerformRequestExceptions() throws Exception { + for (String method : getHttpMethods()) { + try { + restClient.performRequest(new Request(method, "/uri")); + fail("request should have failed"); + } catch (RuntimeException e) { + assertThat(e.getCause(), instanceOf(URISyntaxException.class)); + // And we do all that so the thrown exception has our method in the stacktrace + assertExceptionStackContainsCallingMethod(e); + } + failureListener.assertCalled(singletonList(node)); + } + } + + /** + * End to end test for request and response body. Exercises the mock http client ability to send back + * whatever body it has received. 
+ */ + @Test + public void testBody() throws Exception { + String body = "{ \"field\": \"value\" }"; + StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON); + for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) { + for (int okStatusCode : getOkStatusCodes()) { + Request request = new Request(method, "/" + okStatusCode); + request.setEntity(entity); + Response response = restClient.performRequest(request); + assertThat(response.getStatusCode(), equalTo(okStatusCode)); + assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); + } + for (int errorStatusCode : getAllErrorStatusCodes()) { + Request request = new Request(method, "/" + errorStatusCode); + request.setEntity(entity); + try { + restClient.performRequest(request); + fail("request should have failed"); + } catch (ResponseException e) { + Response response = e.getResponse(); + assertThat(response.getStatusCode(), equalTo(errorStatusCode)); + assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); + assertExceptionStackContainsCallingMethod(e); + } + } + } + for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { + Request request = new Request(method, "/" + randomStatusCode()); + request.setEntity(entity); + try { + performRequestSyncOrAsync(restClient, request); + fail("request should have failed"); + } catch (UnsupportedOperationException e) { + assertThat(e.getMessage(), equalTo(method + " with body is not supported")); + } + } + } + + /** + * End to end test for request and response headers. Exercises the mock http client ability to send back + * whatever headers it has received. + */ + @Test + public void testHeaders() throws Exception { + for (String method : getHttpMethods()) { + final Header[] requestHeaders = RestClientTestUtil.randomHeaders( "Header"); + final int statusCode = randomStatusCode(); + Request request = new Request(method, "/" + statusCode); + RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Header requestHeader : requestHeaders) { + options.addHeader(requestHeader.getName(), requestHeader.getValue()); + } + request.setOptions(options); + Response esResponse; + try { + esResponse = performRequestSyncOrAsync(restClient, request); + } catch (ResponseException e) { + esResponse = e.getResponse(); + } + assertThat(esResponse.getStatusCode(), equalTo(statusCode)); + assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), + Collections.emptySet()); + assertFalse(esResponse.hasWarnings()); + } + } + + @Test + public void testDeprecationWarnings() throws Exception { + String chars = randomAsciiAlphanumOfLength(5); + assertDeprecationWarnings(singletonList("poorly formatted " + chars), singletonList("poorly " + + "formatted " + chars)); + assertDeprecationWarnings(singletonList(formatWarningWithoutDate(chars)), singletonList(chars)); + assertDeprecationWarnings(singletonList(formatWarning(chars)), singletonList(chars)); + assertDeprecationWarnings( + Arrays.asList(formatWarning(chars), "another one", "and another"), + Arrays.asList(chars, "another one", "and another") + ); + assertDeprecationWarnings(Arrays.asList("ignorable one", "and another"), Arrays.asList("ignorable " + + "one", "and another")); + assertDeprecationWarnings(singletonList("exact"), singletonList("exact")); + assertDeprecationWarnings(Collections.emptyList(), Collections.emptyList()); + + String proxyWarning = "112 - \"network down\" \"Sat, 25 Aug 2012 23:34:45 GMT\""; + assertDeprecationWarnings(singletonList(proxyWarning), 
singletonList(proxyWarning));
+    }
+
+    private enum DeprecationWarningOption {
+        PERMISSIVE {
+            protected WarningsHandler warningsHandler() {
+                return WarningsHandler.PERMISSIVE;
+            }
+        },
+        STRICT {
+            protected WarningsHandler warningsHandler() {
+                return WarningsHandler.STRICT;
+            }
+        },
+        FILTERED {
+            protected WarningsHandler warningsHandler() {
+                return new WarningsHandler() {
+                    @Override
+                    public boolean warningsShouldFailRequest(List<String> warnings) {
+                        for (String warning : warnings) {
+                            if (false == warning.startsWith("ignorable")) {
+                                return true;
+                            }
+                        }
+                        return false;
+                    }
+                };
+            }
+        },
+        EXACT {
+            protected WarningsHandler warningsHandler() {
+                return new WarningsHandler() {
+                    @Override
+                    public boolean warningsShouldFailRequest(List<String> warnings) {
+                        return false == warnings.equals(Arrays.asList("exact"));
+                    }
+                };
+            }
+        };
+
+        protected abstract WarningsHandler warningsHandler();
+    }
+
+    private void assertDeprecationWarnings(List<String> warningHeaderTexts, List<String> warningBodyTexts) throws Exception {
+        String method = randomFrom(getHttpMethods());
+        Request request = new Request(method, "/200");
+        RequestOptions.Builder options = request.getOptions().toBuilder();
+        for (String warningHeaderText : warningHeaderTexts) {
+            options.addHeader("Warning", warningHeaderText);
+        }
+
+        final boolean expectFailure;
+        if (randomBoolean()) {
+            logger.info("checking strictWarningsMode=[" + strictDeprecationMode + "] and warnings=" + warningBodyTexts);
+            expectFailure = strictDeprecationMode && false == warningBodyTexts.isEmpty();
+        } else {
+            DeprecationWarningOption warningOption = randomFrom(DeprecationWarningOption.values());
+            logger.info("checking warningOption=" + warningOption + " and warnings=" + warningBodyTexts);
+            options.setWarningsHandler(warningOption.warningsHandler());
+            expectFailure = warningOption.warningsHandler().warningsShouldFailRequest(warningBodyTexts);
+        }
+        request.setOptions(options);
+
+        Response response;
+        if (expectFailure) {
+            try {
+                performRequestSyncOrAsync(restClient, request);
+                fail("expected WarningFailureException from warnings");
+                return;
+            } catch (WarningFailureException e) {
+                if (false == warningBodyTexts.isEmpty()) {
+                    assertThat(e.getMessage(), containsString("\nWarnings: " + warningBodyTexts));
+                }
+                response = e.getResponse();
+            }
+        } else {
+            response = performRequestSyncOrAsync(restClient, request);
+        }
+        assertEquals(false == warningBodyTexts.isEmpty(), response.hasWarnings());
+        assertEquals(warningBodyTexts, response.getWarnings());
+    }
+
+    /**
+     * Emulates Elasticsearch's HeaderWarningLogger.formatWarning in simple
+     * cases. We don't have that available because we're testing against 1.7.
+ */ + private static String formatWarningWithoutDate(String warningBody) { + final String hash = new String(new byte[40], StandardCharsets.UTF_8).replace('\0', 'e'); + return "299 Elasticsearch-1.2.2-SNAPSHOT-" + hash + " \"" + warningBody + "\""; + } + + private static String formatWarning(String warningBody) { + return formatWarningWithoutDate(warningBody) + " \"Mon, 01 Jan 2001 00:00:00 GMT\""; + } + + private HttpUriRequest performRandomRequest(String method) throws Exception { + String uriAsString = "/" + randomStatusCode(); + Request request = new Request(method, uriAsString); + URIBuilder uriBuilder = new URIBuilder(uriAsString); + if (randomBoolean()) { + int numParams = randomIntBetween(1, 3); + for (int i = 0; i < numParams; i++) { + String name = "param-" + i; + String value = randomAsciiAlphanumOfLengthBetween(3, 10); + request.addParameter(name, value); + uriBuilder.addParameter(name, value); + } + } + URI uri = uriBuilder.build(); + + HttpUriRequestBase expectedRequest; + switch (method) { + case "DELETE": + expectedRequest = new HttpDeleteWithEntity(uri); + break; + case "GET": + expectedRequest = new HttpGetWithEntity(uri); + break; + case "HEAD": + expectedRequest = new HttpHead(uri); + break; + case "OPTIONS": + expectedRequest = new HttpOptions(uri); + break; + case "PATCH": + expectedRequest = new HttpPatch(uri); + break; + case "POST": + expectedRequest = new HttpPost(uri); + break; + case "PUT": + expectedRequest = new HttpPut(uri); + break; + case "TRACE": + expectedRequest = new HttpTrace(uri); + break; + default: + throw new UnsupportedOperationException("method not supported: " + method); + } + + if (canHaveBody(expectedRequest) && randomBoolean()) { + HttpEntity entity = new StringEntity(randomAsciiAlphanumOfLengthBetween(10, 100), + ContentType.APPLICATION_JSON); + (expectedRequest).setEntity(entity); + request.setEntity(entity); + } + + final Set uniqueNames = new HashSet<>(); + if (randomBoolean()) { + Header[] headers = RestClientTestUtil.randomHeaders("Header"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Header header : headers) { + options.addHeader(header.getName(), header.getValue()); + expectedRequest.addHeader(new RequestOptions.ReqHeader(header.getName(), header.getValue())); + uniqueNames.add(header.getName()); + } + request.setOptions(options); + } + for (Header defaultHeader : defaultHeaders) { + // request level headers override default headers + if (uniqueNames.contains(defaultHeader.getName()) == false) { + expectedRequest.addHeader(defaultHeader); + } + } + try { + performRequestSyncOrAsync(restClient, request); + } catch (Exception e) { + // all good + } + return expectedRequest; + } + + static Response performRequestSyncOrAsync(Rest5Client restClient, Request request) throws Exception { + // randomize between sync and async methods + if (randomBoolean()) { + return restClient.performRequest(request); + } else { + final AtomicReference exceptionRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + restClient.performRequestAsync(request, new ResponseListener() { + @Override + public void onSuccess(Response response) { + responseRef.set(response); + latch.countDown(); + + } + + @Override + public void onFailure(Exception exception) { + exceptionRef.set(exception); + latch.countDown(); + } + }); + latch.await(); + if (exceptionRef.get() != null) { + throw exceptionRef.get(); + } + return responseRef.get(); + } + } + + 
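+    // Note: performRequestSyncOrAsync (above) randomly exercises both execution paths: the plain
+    // synchronous call, and the asynchronous call bridged back to the test thread through a
+    // CountDownLatch, rethrowing any exception captured by the listener.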
/** + * Asserts that the provided {@linkplain Exception} contains the method + * that called this somewhere on its stack. This is + * normally the case for synchronous calls but {@link Rest5Client} performs + * synchronous calls by performing asynchronous calls and blocking the + * current thread until the call returns so it has to take special care + * to make sure that the caller shows up in the exception. We use this + * assertion to make sure that we don't break that "special care". + */ + private static void assertExceptionStackContainsCallingMethod(Throwable t) { + // 0 is getStackTrace + // 1 is this method + // 2 is the caller, what we want + StackTraceElement myMethod = Thread.currentThread().getStackTrace()[2]; + for (StackTraceElement se : t.getStackTrace()) { + if (se.getClassName().equals(myMethod.getClassName()) && se.getMethodName().equals(myMethod.getMethodName())) { + return; + } + } + StringWriter stack = new StringWriter(); + t.printStackTrace(new PrintWriter(stack)); + fail("didn't find the calling method (looks like " + myMethod + ") in:\n" + stack); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTestCase.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTestCase.java new file mode 100644 index 000000000..3d1a7dd31 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTestCase.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hc.core5.http.Header; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public abstract class RestClientTestCase { + + /** + * Assert that the actual headers are the expected ones given the original default and request headers. + * Some headers can be ignored, + * for instance in case the http client is adding its own automatically. 
+     *
+     * @param defaultHeaders the default headers set to the REST client instance
+     * @param requestHeaders the request headers sent with a particular request
+     * @param actualHeaders  the actual headers as a result of the provided default and request headers
+     * @param ignoreHeaders  header keys to be ignored as they are not part of default nor request headers,
+     *                       yet they
+     *                       will be part of the actual ones
+     */
+    protected static void assertHeaders(
+        final Header[] defaultHeaders,
+        final Header[] requestHeaders,
+        final Header[] actualHeaders,
+        final Set<String> ignoreHeaders
+    ) {
+        final Map<String, List<String>> expectedHeaders = new HashMap<>();
+        final Set<String> requestHeaderKeys = new HashSet<>();
+        for (final Header header : requestHeaders) {
+            final String name = header.getName();
+            addValueToListEntry(expectedHeaders, name, header.getValue());
+            requestHeaderKeys.add(name);
+        }
+        for (final Header defaultHeader : defaultHeaders) {
+            final String name = defaultHeader.getName();
+            if (requestHeaderKeys.contains(name) == false) {
+                addValueToListEntry(expectedHeaders, name, defaultHeader.getValue());
+            }
+        }
+        Set<String> actualIgnoredHeaders = new HashSet<>();
+        for (Header responseHeader : actualHeaders) {
+            final String name = responseHeader.getName();
+            if (ignoreHeaders.contains(name)) {
+                expectedHeaders.remove(name);
+                actualIgnoredHeaders.add(name);
+                continue;
+            }
+            final String value = responseHeader.getValue();
+            final List<String> values = expectedHeaders.get(name);
+            assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value,
+                values);
+            assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));
+            if (values.isEmpty()) {
+                expectedHeaders.remove(name);
+            }
+        }
+        assertEquals("some headers meant to be ignored were not part of the actual headers", ignoreHeaders,
+            actualIgnoredHeaders);
+        assertTrue("some headers that were sent weren't returned " + expectedHeaders,
+            expectedHeaders.isEmpty());
+    }
+
+    private static void addValueToListEntry(final Map<String, List<String>> map, final String name,
+                                            final String value) {
+        List<String> values = map.get(name);
+        if (values == null) {
+            values = new ArrayList<>();
+            map.put(name, values);
+        }
+        values.add(value);
+    }
+
+    public static boolean inFipsJvm() {
+        return Boolean.parseBoolean(System.getProperty("tests.fips.enabled"));
+    }
+
+    public static Random getRandom() {
+        return ThreadLocalRandom.current();
+    }
+
+    public static int randomIntBetween(int min, int max) {
+        return getRandom().ints(min, max)
+            .findFirst()
+            .getAsInt();
+    }
+
+    public static long randomLongBetween(long min, long max) {
+        return getRandom().longs(min, max)
+            .findFirst()
+            .getAsLong();
+    }
+
+    public static boolean randomBoolean() {
+        return getRandom().nextBoolean();
+    }
+
+    public static String randomAsciiAlphanumOfLength(int length) {
+        return RandomStringUtils.randomAlphanumeric(length);
+    }
+
+    public static String randomAsciiAlphanumOfLengthBetween(int min, int max) {
+        return RandomStringUtils.randomAlphanumeric(min, max);
+    }
+
+    public static String randomAsciiLettersOfLength(int length) {
+        return RandomStringUtils.randomAlphabetic(length);
+    }
+
+    public static String randomAsciiLettersOfLengthBetween(int min, int max) {
+        return RandomStringUtils.randomAlphabetic(min, max);
+    }
+
+    public static <T> T randomFrom(T[] array) {
+        checkZeroLength(array.length);
+        int index = randomIntBetween(0, array.length);
+        return array[index];
+    }
+
+    public static <T> T randomFrom(List<T> list) {
+        int index = randomIntBetween(0, list.size());
+        return list.get(index);
+    }
+
+    public static byte[] randomBytesOfLength(int length) {
+        byte[] b = new byte[length];
+        new Random().nextBytes(b);
+        return b;
+    }
+
+    public static boolean rarely() {
+        return randomIntBetween(0, 100) >= 90;
+    }
+
+    public static boolean frequently() {
+        return !rarely();
+    }
+
+    private static void checkZeroLength(int length) {
+        if (length == 0) {
+            throw new IllegalArgumentException("Can't pick a random object from an empty array.");
+        }
+    }
+}
diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTestUtil.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTestUtil.java
new file mode 100644
index 000000000..13c2822ac
--- /dev/null
+++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTestUtil.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package co.elastic.clients.transport.rest5_client.low_level;
+
+import org.apache.hc.core5.http.Header;
+import org.apache.hc.core5.http.HttpRequest;
+import org.apache.hc.core5.http.message.BasicHeader;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase.randomAsciiLettersOfLengthBetween;
+import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase.randomBoolean;
+import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase.randomFrom;
+import static co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase.randomIntBetween;
+
+final class RestClientTestUtil {
+
+    private static final String[] HTTP_METHODS = new String[]{"DELETE", "HEAD", "GET", "OPTIONS", "PATCH",
+        "POST", "PUT", "TRACE"};
+    private static final List<Integer> ALL_STATUS_CODES;
+    private static final List<Integer> OK_STATUS_CODES = Arrays.asList(200, 201);
+    private static final List<Integer> ALL_ERROR_STATUS_CODES;
+    private static List<Integer> ERROR_NO_RETRY_STATUS_CODES = Arrays.asList(500, 501);
+    private static List<Integer> ERROR_RETRY_STATUS_CODES = Arrays.asList(502, 503, 504);
+
+    static {
+        ALL_ERROR_STATUS_CODES = new ArrayList<>(ERROR_RETRY_STATUS_CODES);
+        ALL_ERROR_STATUS_CODES.addAll(ERROR_NO_RETRY_STATUS_CODES);
+        ALL_STATUS_CODES = new ArrayList<>(ALL_ERROR_STATUS_CODES);
+        ALL_STATUS_CODES.addAll(OK_STATUS_CODES);
+    }
+
+    private RestClientTestUtil() {
+
+    }
+
+    static boolean canHaveBody(HttpRequest httpRequest) {
+        return httpRequest.getMethod().contains("PUT") || httpRequest.getMethod().contains("POST") ||
+            httpRequest.getMethod().contains("PATCH") ||
+            httpRequest instanceof HttpDeleteWithEntity || httpRequest instanceof HttpGetWithEntity;
+    }
+
+    static String[] getHttpMethods() {
+        return HTTP_METHODS;
+    }
+
+    static String randomHttpMethod() {
+        return randomFrom(HTTP_METHODS);
+    }
+
+    static int randomStatusCode() {
+        return randomFrom(ALL_STATUS_CODES);
+    }
+
+    static int randomOkStatusCode() {
+        return randomFrom(OK_STATUS_CODES);
+    }
+
+    static int randomErrorNoRetryStatusCode() {
+        return randomFrom(List.of(500, 501));
+    }
+
+    static int randomErrorRetryStatusCode() {
+        return randomFrom(ERROR_RETRY_STATUS_CODES);
+    }
+
+    static List<Integer> getOkStatusCodes() {
+        return OK_STATUS_CODES;
+    }
+
+    static List<Integer> getAllErrorStatusCodes() {
+        return ALL_ERROR_STATUS_CODES;
+    }
+
+    static List<Integer> getAllStatusCodes() {
+        return ALL_STATUS_CODES;
+    }
+
+    /**
+     * Create a random number of {@link Header}s.
+     * Generated header names will either be the {@code baseName} plus its index, or exactly the provided
+     * {@code baseName}, so that we also test support for multiple headers with the same key
+     * and different values.
+     */
+    static Header[] randomHeaders(final String baseName) {
+        int numHeaders = randomIntBetween(0, 5);
+        final Header[] headers = new Header[numHeaders];
+        for (int i = 0; i < numHeaders; i++) {
+            String headerName = baseName;
+            // randomly exercise the code path that supports multiple headers with same key
+            if (randomBoolean()) {
+                headerName = headerName + i;
+            }
+            headers[i] = new BasicHeader(headerName, randomAsciiLettersOfLengthBetween(3, 10));
+        }
+        return headers;
+    }
+}
diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTests.java
new file mode 100644
index 000000000..dc12c386a
--- /dev/null
+++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/RestClientTests.java
@@ -0,0 +1,458 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package co.elastic.clients.transport.rest5_client.low_level; + +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; + +import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RestClientTests extends RestClientTestCase { + + @Test + public void testCloseIsIdempotent() throws IOException { + List nodes = singletonList(new Node(new HttpHost("localhost", 9200))); + CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class); + Rest5Client restClient = new Rest5Client(closeableHttpAsyncClient, new Header[0], nodes, null, null, + null, false, false, false); + restClient.close(); + verify(closeableHttpAsyncClient, times(1)).close(); + restClient.close(); + verify(closeableHttpAsyncClient, times(2)).close(); + restClient.close(); + verify(closeableHttpAsyncClient, times(3)).close(); + } + + @Test + public void testPerformAsyncWithUnsupportedMethod() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + try (Rest5Client restClient = createRestClient()) { + restClient.performRequestAsync(new Request("unsupported", randomAsciiLettersOfLength(5)), + new ResponseListener() { + @Override + public void onSuccess(Response response) { + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked" + + " http client"); + } + + @Override + public void onFailure(Exception exception) { + try { + assertThat(exception, instanceOf(UnsupportedOperationException.class)); + assertEquals("http method not supported: unsupported", exception.getMessage()); + } finally { + latch.countDown(); + } + } + }); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); + } + } + + @Test + public void testPerformAsyncWithWrongEndpoint() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + try (Rest5Client restClient = createRestClient()) { + restClient.performRequestAsync(new Request("GET", "::http:///"), new ResponseListener() { + @Override + public void onSuccess(Response response) { + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked" + + " http client"); + } + + @Override + public void onFailure(Exception exception) { + try { + assertThat(exception, 
instanceOf(IllegalArgumentException.class)); + assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); + } finally { + latch.countDown(); + } + } + }); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); + } + } + + @Test + public void testBuildUriLeavesPathUntouched() { + final Map emptyMap = Collections.emptyMap(); + { + URI uri = Rest5Client.buildUri("/foo$bar", "/index/type/id", emptyMap); + assertEquals("/foo$bar/index/type/id", uri.getPath()); + } + { + URI uri = Rest5Client.buildUri("/", "/*", emptyMap); + assertEquals("/*", uri.getPath()); + } + { + URI uri = Rest5Client.buildUri("/", "*", emptyMap); + assertEquals("/*", uri.getPath()); + } + { + URI uri = Rest5Client.buildUri(null, "*", emptyMap); + assertEquals("*", uri.getPath()); + } + { + URI uri = Rest5Client.buildUri("", "*", emptyMap); + assertEquals("*", uri.getPath()); + } + { + URI uri = Rest5Client.buildUri(null, "/*", emptyMap); + assertEquals("/*", uri.getPath()); + } + { + URI uri = Rest5Client.buildUri(null, "/foo$bar/ty/pe/i/d", emptyMap); + assertEquals("/foo$bar/ty/pe/i/d", uri.getPath()); + } + { + URI uri = Rest5Client.buildUri(null, "/index/type/id", Collections.singletonMap("foo$bar", "x/y" + + "/z")); + assertEquals("/index/type/id", uri.getPath()); + assertEquals("foo$bar=x/y/z", uri.getQuery()); + } + } + + @Test + public void testSetNodesWrongArguments() throws IOException { + try (Rest5Client restClient = createRestClient()) { + restClient.setNodes(null); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("node list must not be null or empty", e.getMessage()); + } + try (Rest5Client restClient = createRestClient()) { + restClient.setNodes(Collections.emptyList()); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("node list must not be null or empty", e.getMessage()); + } + try (Rest5Client restClient = createRestClient()) { + restClient.setNodes(Collections.singletonList(null)); + fail("setNodes should have failed"); + } catch (NullPointerException e) { + assertEquals("node cannot be null", e.getMessage()); + } + try (Rest5Client restClient = createRestClient()) { + restClient.setNodes(Arrays.asList(new Node(new HttpHost("localhost", 9200)), null, + new Node(new HttpHost("localhost", 9201)))); + fail("setNodes should have failed"); + } catch (NullPointerException e) { + assertEquals("node cannot be null", e.getMessage()); + } + } + + @Test + public void testSetNodesPreservesOrdering() throws Exception { + try (Rest5Client restClient = createRestClient()) { + List nodes = randomNodes(); + restClient.setNodes(nodes); + assertEquals(nodes, restClient.getNodes()); + } + } + + private static List randomNodes() { + int numNodes = randomIntBetween(1, 10); + List nodes = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + nodes.add(new Node(new HttpHost("host-" + i, 9200))); + } + return nodes; + } + + @Test + public void testSetNodesDuplicatedHosts() throws Exception { + try (Rest5Client restClient = createRestClient()) { + int numNodes = randomIntBetween(1, 10); + List nodes = new ArrayList<>(numNodes); + Node node = new Node(new HttpHost("host", 9200)); + for (int i = 0; i < numNodes; i++) { + nodes.add(node); + } + restClient.setNodes(nodes); + assertEquals(1, restClient.getNodes().size()); + assertEquals(node, restClient.getNodes().get(0)); + } + } + + @Test + public void testSelectHosts() throws IOException { + Node n1 = new 
Node(new HttpHost("1"), null, null, "1", null, null); + Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null); + Node n3 = new Node(new HttpHost("3"), null, null, "3", null, null); + + NodeSelector not1 = new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext(); ) { + if ("1".equals(itr.next().getVersion())) { + itr.remove(); + } + } + } + + @Override + public String toString() { + return "NOT 1"; + } + }; + NodeSelector noNodes = new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext(); ) { + itr.next(); + itr.remove(); + } + } + + @Override + public String toString() { + return "NONE"; + } + }; + + List nodes = Arrays.asList(n1, n2, n3); + + Map emptyBlacklist = Collections.emptyMap(); + + // Normal cases where the node selector doesn't reject all living nodes + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodes, emptyBlacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodes, emptyBlacklist, not1); + + /* + * Try a NodeSelector that excludes all nodes. This should + * throw an exception + */ + { + String message = "NodeSelector [NONE] rejected all nodes, living: [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]] and dead: null"; + assertEquals(message, assertSelectAllRejected(nodes, emptyBlacklist, noNodes)); + } + + // Mark all the nodes dead for a few test cases + { + final AtomicLong time = new AtomicLong(0L); + Supplier timeSupplier = time::get; + Map blacklist = new HashMap<>(); + blacklist.put(n1.getHost(), new DeadHostState(timeSupplier)); + blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier))); + blacklist.put(n3.getHost(), + new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier)))); + + /* + * case when fewer nodes than blacklist, won't result in any IllegalCapacityException + */ + { + List fewerNodeTuple = Arrays.asList(n1, n2); + assertSelectLivingHosts(Arrays.asList(n1), fewerNodeTuple, blacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2), fewerNodeTuple, blacklist, not1); + } + + /* + * selectHosts will revive a single host regardless of + * blacklist time. It'll revive the node that is closest + * to being revived that the NodeSelector is ok with. + */ + assertEquals(singletonList(n1), Rest5Client.selectNodes(nodes, blacklist, new AtomicInteger(), + NodeSelector.ANY)); + assertEquals(singletonList(n2), Rest5Client.selectNodes(nodes, blacklist, new AtomicInteger(), + not1)); + + /* + * Try a NodeSelector that excludes all nodes. This should + * return a failure, but a different failure than when the + * blacklist is empty so that the caller knows that all of + * their nodes are blacklisted AND blocked. + */ + String message = "NodeSelector [NONE] rejected all nodes, living: [] and dead: [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]]"; + assertEquals(message, assertSelectAllRejected(nodes, blacklist, noNodes)); + + /* + * Now lets wind the clock forward, past the timeout for one of + * the dead nodes. We should return it. + */ + time.set(new DeadHostState(timeSupplier).getDeadUntilNanos()); + assertSelectLivingHosts(Arrays.asList(n1), nodes, blacklist, NodeSelector.ANY); + + /* + * But if the NodeSelector rejects that node then we'll pick the + * first on that the NodeSelector doesn't reject. 
+ */ + assertSelectLivingHosts(Arrays.asList(n2), nodes, blacklist, not1); + + /* + * If we wind the clock way into the future, past any of the + * blacklist timeouts then we function as though the nodes aren't + * in the blacklist at all. + */ + time.addAndGet(DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS); + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodes, blacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodes, blacklist, not1); + } + } + + private void assertSelectLivingHosts( + List expectedNodes, + List nodes, + Map blacklist, + NodeSelector nodeSelector + ) throws IOException { + int iterations = 1000; + AtomicInteger lastNodeIndex = new AtomicInteger(0); + assertEquals(expectedNodes, Rest5Client.selectNodes(nodes, blacklist, lastNodeIndex, nodeSelector)); + // Calling it again rotates the set of results + for (int i = 1; i < iterations; i++) { + Collections.rotate(expectedNodes, 1); + assertEquals("iteration " + i, expectedNodes, Rest5Client.selectNodes(nodes, blacklist, + lastNodeIndex, nodeSelector)); + } + } + + /** + * Assert that {@link Rest5Client#selectNodes} fails on the provided arguments. + * + * @return the message in the exception thrown by the failure + */ + private static String assertSelectAllRejected( + List nodes, + Map blacklist, + NodeSelector nodeSelector + ) { + try { + Rest5Client.selectNodes(nodes, blacklist, new AtomicInteger(0), nodeSelector); + throw new AssertionError("expected selectHosts to fail"); + } catch (IOException e) { + return e.getMessage(); + } + } + + private static Rest5Client createRestClient() { + List nodes = Collections.singletonList(new Node(new HttpHost("localhost", 9200))); + return new Rest5Client(mock(CloseableHttpAsyncClient.class), new Header[]{}, nodes, null, null, null + , false, false, false); + } + + @Test + public void testRoundRobin() throws IOException { + int numNodes = randomIntBetween(2, 10); + AuthCache authCache = new BasicAuthCache(); + List nodes = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + Node node = new Node(new HttpHost("localhost", 9200 + i)); + nodes.add(node); + authCache.put(node.getHost(), new BasicScheme()); + } + + // test the transition from negative to positive values + AtomicInteger lastNodeIndex = new AtomicInteger(-numNodes); + assertNodes(nodes, lastNodeIndex, 50); + assertEquals(-numNodes + 50, lastNodeIndex.get()); + + // test the highest positive values up to MAX_VALUE + lastNodeIndex.set(Integer.MAX_VALUE - numNodes * 10); + assertNodes(nodes, lastNodeIndex, numNodes * 10); + assertEquals(Integer.MAX_VALUE, lastNodeIndex.get()); + + // test the transition from MAX_VALUE to MIN_VALUE + // this is the only time where there is most likely going to be a jump from a node + // to another one that's not necessarily the next one. 
+ assertEquals(Integer.MIN_VALUE, lastNodeIndex.incrementAndGet()); + assertNodes(nodes, lastNodeIndex, 50); + assertEquals(Integer.MIN_VALUE + 50, lastNodeIndex.get()); + } + + @Test + public void testIsRunning() { + List nodes = Collections.singletonList(new Node(new HttpHost("localhost", 9200))); + CloseableHttpAsyncClient client = mock(CloseableHttpAsyncClient.class); + Rest5Client restClient = new Rest5Client(client, new Header[]{}, nodes, null, null, null, false, + false, false); + + when(client.getStatus()).thenReturn(IOReactorStatus.ACTIVE); + assertTrue(restClient.isRunning()); + + when(client.getStatus()).thenReturn(IOReactorStatus.INACTIVE); + assertFalse(restClient.isRunning()); + } + + private static void assertNodes(List nodes, AtomicInteger lastNodeIndex, int runs) throws IOException { + int distance = lastNodeIndex.get() % nodes.size(); + /* + * Collections.rotate is not super intuitive: distance 1 means that the last element will become + * the first and so on, + * while distance -1 means that the second element will become the first and so on. + */ + int expectedOffset = distance > 0 ? nodes.size() - distance : Math.abs(distance); + for (int i = 0; i < runs; i++) { + Iterable selectedNodes = Rest5Client.selectNodes( + nodes, + Collections.emptyMap(), + lastNodeIndex, + NodeSelector.ANY + ); + List expectedNodes = nodes; + int index = 0; + for (Node actualNode : selectedNodes) { + Node expectedNode = expectedNodes.get((index + expectedOffset) % expectedNodes.size()); + assertSame(expectedNode, actualNode); + index++; + } + expectedOffset--; + if (expectedOffset < 0) { + expectedOffset += nodes.size(); + } + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/documentation/RestClientDocumentation.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/documentation/RestClientDocumentation.java new file mode 100644 index 000000000..e9c030f7e --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/documentation/RestClientDocumentation.java @@ -0,0 +1,461 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.transport.rest5_client.low_level.documentation; + +import co.elastic.clients.transport.rest5_client.low_level.Cancellable; +import co.elastic.clients.transport.rest5_client.low_level.HttpAsyncResponseConsumerFactory; +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.NodeSelector; +import co.elastic.clients.transport.rest5_client.low_level.Request; +import co.elastic.clients.transport.rest5_client.low_level.RequestOptions; +import co.elastic.clients.transport.rest5_client.low_level.Response; +import co.elastic.clients.transport.rest5_client.low_level.ResponseListener; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.transport.rest5_client.low_level.Rest5ClientBuilder; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClients; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.ssl.SSLContextBuilder; +import org.apache.hc.core5.ssl.SSLContexts; +import org.apache.hc.core5.util.Timeout; + +import javax.net.ssl.SSLContext; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.util.Base64; +import java.util.Iterator; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * This class is used to generate the Java low-level REST client documentation. + * You need to wrap your code between two tags like: + * // tag::example[] + * // end::example[] + *
<p>
+ * Where example is your tag name. + *
<p>
+ * Then in the documentation, you can extract what is between tag and end tags with + * ["source","java",subs="attributes,callouts,macros"] + * -------------------------------------------------- + * include-tagged::{doc-tests}/RestClientDocumentation.java[example] + * -------------------------------------------------- + *
<p>
+ * Note that this is not a test class as we are only interested in testing that docs snippets compile. We + * don't want + * to send requests to a node and we don't even have the tools to do it. + */ +@SuppressWarnings("unused") +public class RestClientDocumentation { + private static final String TOKEN = "DUMMY"; + + // tag::rest-client-options-singleton + private static final RequestOptions COMMON_OPTIONS; + + static { + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + builder.addHeader("Authorization", "Bearer " + TOKEN); // <1> + builder.setHttpAsyncResponseConsumerFactory( // <2> + HttpAsyncResponseConsumerFactory.DEFAULT); + COMMON_OPTIONS = builder.build(); + } + // end::rest-client-options-singleton + + @SuppressWarnings("unused") + public void usage() throws IOException, InterruptedException, ParseException { + + //tag::rest-client-init + Rest5Client restClient = Rest5Client.builder( + new HttpHost("http", "localhost", 9200), + new HttpHost("http", "localhost", 9201)).build(); + //end::rest-client-init + + //tag::rest-client-close + restClient.close(); + //end::rest-client-close + + { + //tag::rest-client-init-default-headers + Rest5ClientBuilder builder = Rest5Client.builder( + new HttpHost("http", "localhost", 9200)); + Header[] defaultHeaders = new Header[]{new BasicHeader("header", "value")}; + builder.setDefaultHeaders(defaultHeaders); // <1> + //end::rest-client-init-default-headers + } + { + //tag::rest-client-init-node-selector + Rest5ClientBuilder builder = Rest5Client.builder( + new HttpHost("http", "localhost", 9200)); + builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); // <1> + //end::rest-client-init-node-selector + } + { + //tag::rest-client-init-allocation-aware-selector + Rest5ClientBuilder builder = Rest5Client.builder( + new HttpHost("http", "localhost", 9200)); + // <1> + builder.setNodeSelector(nodes -> { // <1> + /* + * Prefer any node that belongs to rack_one. If none is around + * we will go to another rack till it's time to try and revive + * some of the nodes that belong to rack_one. 
+                 */
+                boolean foundOne = false;
+                for (Node node : nodes) {
+                    String rackId = node.getAttributes().get("rack_id").get(0);
+                    if ("rack_one".equals(rackId)) {
+                        foundOne = true;
+                        break;
+                    }
+                }
+                if (foundOne) {
+                    Iterator<Node> nodesIt = nodes.iterator();
+                    while (nodesIt.hasNext()) {
+                        Node node = nodesIt.next();
+                        String rackId = node.getAttributes().get("rack_id").get(0);
+                        if ("rack_one".equals(rackId) == false) {
+                            nodesIt.remove();
+                        }
+                    }
+                }
+            });
+            //end::rest-client-init-allocation-aware-selector
+        }
+        {
+            //tag::rest-client-init-failure-listener
+            Rest5ClientBuilder builder = Rest5Client.builder(
+                new HttpHost("http", "localhost", 9200));
+            builder.setFailureListener(new Rest5Client.FailureListener() {
+                @Override
+                public void onFailure(Node node) {
+                    // <1>
+                }
+            });
+            //end::rest-client-init-failure-listener
+        }
+        {
+            //tag::rest-client-init-request-custom-client
+            PoolingAsyncClientConnectionManager connectionManager =
+                PoolingAsyncClientConnectionManagerBuilder.create()
+                    .setMaxConnPerRoute(5)
+                    .build();
+
+            CloseableHttpAsyncClient httpclient = HttpAsyncClients.custom()
+                .setConnectionManager(connectionManager)
+                .build();
+            Rest5ClientBuilder builder = Rest5Client.builder(
+                new HttpHost("http", "localhost", 9200));
+            builder.setHttpClient(httpclient);
+            //end::rest-client-init-request-custom-client
+        }
+
+        {
+            //tag::rest-client-sync
+            Request request = new Request(
+                "GET", // <1>
+                "/"); // <2>
+            Response response = restClient.performRequest(request);
+            //end::rest-client-sync
+        }
+        {
+            //tag::rest-client-async
+            Request request = new Request(
+                "GET", // <1>
+                "/"); // <2>
+            Cancellable cancellable = restClient.performRequestAsync(request,
+                new ResponseListener() {
+                    @Override
+                    public void onSuccess(Response response) {
+                        // <3>
+                    }
+
+                    @Override
+                    public void onFailure(Exception exception) {
+                        // <4>
+                    }
+                });
+            //end::rest-client-async
+        }
+        {
+            Request request = new Request("GET", "/");
+            //tag::rest-client-parameters
+            request.addParameter("pretty", "true");
+            //end::rest-client-parameters
+            //tag::rest-client-body
+            request.setEntity(new StringEntity(
+                "{\"json\":\"text\"}",
+                ContentType.APPLICATION_JSON));
+            //end::rest-client-body
+            //tag::rest-client-body-shorter
+            request.setJsonEntity("{\"json\":\"text\"}");
+            //end::rest-client-body-shorter
+            //tag::rest-client-options-set-singleton
+            request.setOptions(COMMON_OPTIONS);
+            //end::rest-client-options-set-singleton
+            {
+                //tag::rest-client-options-customize-header
+                RequestOptions.Builder options = COMMON_OPTIONS.toBuilder();
+                options.addHeader("cats", "knock things off of other things");
+                request.setOptions(options);
+                //end::rest-client-options-customize-header
+            }
+        }
+        {
+            HttpEntity[] documents = new HttpEntity[10];
+            //tag::rest-client-async-example
+            final CountDownLatch latch = new CountDownLatch(documents.length);
+            for (int i = 0; i < documents.length; i++) {
+                Request request = new Request("PUT", "/posts/doc/" + i);
+                //let's assume that the documents are stored in an HttpEntity array
+                request.setEntity(documents[i]);
+                restClient.performRequestAsync(
+                    request,
+                    new ResponseListener() {
+                        @Override
+                        public void onSuccess(Response response) {
+                            // <1>
+                            latch.countDown();
+                        }
+
+                        @Override
+                        public void onFailure(Exception exception) {
+                            // <2>
+                            latch.countDown();
+                        }
+                    }
+                );
+            }
+            latch.await();
+            //end::rest-client-async-example
+        }
+        {
+            //tag::rest-client-async-cancel
+            Request request = new Request("GET", "/posts/_search");
+            Cancellable cancellable =
restClient.performRequestAsync( + request, + new ResponseListener() { + @Override + public void onSuccess(Response response) { + // <1> + } + + @Override + public void onFailure(Exception exception) { + // <2> + } + } + ); + cancellable.cancel(); + //end::rest-client-async-cancel + } + { + //tag::rest-client-response2 + Response response = restClient.performRequest(new Request("GET", "/")); + RequestLine requestLine = response.getRequestLine(); // <1> + HttpHost host = response.getHost(); // <2> + int statusCode = response.getStatusCode(); // <3> + Header[] headers = response.getHeaders(); // <4> + String responseBody = EntityUtils.toString(response.getEntity()); // <5> + //end::rest-client-response2 + } + } + + @SuppressWarnings("unused") + public void commonConfiguration() throws Exception { + { + //tag::rest-client-config-timeouts + RequestConfig.Builder requestConfigBuilder = RequestConfig.custom() + .setConnectTimeout(Timeout.of(5000, TimeUnit.MILLISECONDS)); + + CloseableHttpAsyncClient httpclient = HttpAsyncClients.custom() + .setDefaultRequestConfig(requestConfigBuilder.build()) + .build(); + + Rest5ClientBuilder builder = Rest5Client.builder( + new HttpHost("localhost", 9200)) + .setHttpClient(httpclient); + //end::rest-client-config-timeouts + } + { + //tag::rest-client-config-request-options-timeouts + RequestConfig requestConfig = RequestConfig.custom() + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + .setConnectionRequestTimeout(Timeout.ofMilliseconds(60000)) + .build(); + RequestOptions options = RequestOptions.DEFAULT.toBuilder() + .setRequestConfig(requestConfig) + .build(); + //end::rest-client-config-request-options-timeouts + } + { + //tag::rest-client-config-threads + CloseableHttpAsyncClient httpclient = HttpAsyncClients.custom() + .setIOReactorConfig(IOReactorConfig.custom() + .setIoThreadCount(1).build()) + .build(); + + Rest5ClientBuilder builder = Rest5Client.builder( + new HttpHost("localhost", 9200)) + .setHttpClient(httpclient); + //end::rest-client-config-threads + } + { + //tag::rest-client-config-basic-auth + + var creds = Base64.getEncoder().encodeToString("user:test-user-password".getBytes()); + + Rest5ClientBuilder restClient = Rest5Client.builder(new HttpHost("https", "localhost", + 9200)) + .setDefaultHeaders(new Header[]{ + new BasicHeader("Authorization", "Basic " + creds) + }); + + //end::rest-client-config-basic-auth + } + { + //tag::rest-client-config-disable-preemptive-auth + HttpHost host = new HttpHost("http", "localhost", 9200); + + var creds = Base64.getEncoder().encodeToString("user:test-user-password".getBytes()); + + CloseableHttpAsyncClient httpclient = HttpAsyncClients.custom() + .disableAuthCaching() + .build(); + + Rest5ClientBuilder restClient = Rest5Client.builder(new HttpHost("https", "localhost", + 9200)) + .setHttpClient(httpclient) + .setDefaultHeaders(new Header[]{ + new BasicHeader("Authorization", "Basic " + creds) + }); + //end::rest-client-config-disable-preemptive-auth + } + { + String keyStorePass = ""; + //tag::rest-client-config-encrypted-communication + Path trustStorePath = Paths.get("/path/to/truststore.p12"); + KeyStore truststore = KeyStore.getInstance("pkcs12"); + try (InputStream is = Files.newInputStream(trustStorePath)) { + truststore.load(is, keyStorePass.toCharArray()); + } + SSLContextBuilder sslBuilder = SSLContexts.custom() + .loadTrustMaterial(truststore, null); + final SSLContext sslContext = sslBuilder.build(); + Rest5ClientBuilder builder = Rest5Client.builder( + new HttpHost("https", "localhost", + 
9200)) + .setSSLContext(sslContext); + //end::rest-client-config-encrypted-communication + } + { + //tag::rest-client-config-trust-ca-pem + Path caCertificatePath = Paths.get("/path/to/ca.crt"); + CertificateFactory factory = + CertificateFactory.getInstance("X.509"); + Certificate trustedCa; + try (InputStream is = Files.newInputStream(caCertificatePath)) { + trustedCa = factory.generateCertificate(is); + } + KeyStore trustStore = KeyStore.getInstance("pkcs12"); + trustStore.load(null, null); + trustStore.setCertificateEntry("ca", trustedCa); + SSLContextBuilder sslContextBuilder = SSLContexts.custom() + .loadTrustMaterial(trustStore, null); + final SSLContext sslContext = sslContextBuilder.build(); + Rest5Client.builder( + new HttpHost("https", "localhost", + 9200)) + .setSSLContext(sslContext); + //end::rest-client-config-trust-ca-pem + } + { + String trustStorePass = ""; + String keyStorePass = ""; + //tag::rest-client-config-mutual-tls-authentication + Path trustStorePath = Paths.get("/path/to/your/truststore.p12"); + Path keyStorePath = Paths.get("/path/to/your/keystore.p12"); + KeyStore trustStore = KeyStore.getInstance("pkcs12"); + KeyStore keyStore = KeyStore.getInstance("pkcs12"); + try (InputStream is = Files.newInputStream(trustStorePath)) { + trustStore.load(is, trustStorePass.toCharArray()); + } + try (InputStream is = Files.newInputStream(keyStorePath)) { + keyStore.load(is, keyStorePass.toCharArray()); + } + SSLContextBuilder sslBuilder = SSLContexts.custom() + .loadTrustMaterial(trustStore, null) + .loadKeyMaterial(keyStore, keyStorePass.toCharArray()); + final SSLContext sslContext = sslBuilder.build(); + Rest5Client.builder( + new HttpHost("https", "localhost", + 9200)) + .setSSLContext(sslContext); + //end::rest-client-config-mutual-tls-authentication + } + { + //tag::rest-client-auth-bearer-token + Rest5ClientBuilder builder = Rest5Client.builder( + new HttpHost("https", "localhost", + 9200)); + Header[] defaultHeaders = + new Header[]{new BasicHeader("Authorization", + "Bearer u6iuAxZ0RG1Kcm5jVFI4eU4tZU9aVFEwT2F3")}; + builder.setDefaultHeaders(defaultHeaders); + //end::rest-client-auth-bearer-token + } + { + //tag::rest-client-auth-api-key + String apiKeyId = "uqlEyn8B_gQ_jlvwDIvM"; + String apiKeySecret = "HxHWk2m4RN-V_qg9cDpuX"; + String apiKeyAuth = + Base64.getEncoder().encodeToString( + (apiKeyId + ":" + apiKeySecret) + .getBytes(StandardCharsets.UTF_8)); + Rest5ClientBuilder builder = Rest5Client.builder( + new HttpHost("https", "localhost", + 9200)); + Header[] defaultHeaders = + new Header[]{new BasicHeader("Authorization", + "ApiKey " + apiKeyAuth)}; + builder.setDefaultHeaders(defaultHeaders); + //end::rest-client-auth-api-key + } + + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferParseTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferParseTests.java new file mode 100644 index 000000000..e2ecc3c24 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferParseTests.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase; +import com.fasterxml.jackson.core.JsonFactory; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.hasSize; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; + +/** + * Test parsing the response from the {@code /_nodes/http} API from fixed + * versions of Elasticsearch. + */ +public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase { + + private void checkFile(String file, Node... expected) throws IOException { + InputStream in = this.getClass().getResourceAsStream(file); + if (in == null) { + throw new IllegalArgumentException("Couldn't find [" + file + "]"); + } + try { + HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); + List nodes = ElasticsearchNodesSniffer.readHosts(entity, ElasticsearchNodesSniffer.Scheme.HTTP, new JsonFactory()); + /* + * Use these assertions because the error messages are nicer + * than hasItems and we know the results are in order because + * that is how we generated the file. 
+ */ + assertThat(nodes, hasSize(expected.length)); + for (int i = 0; i < expected.length; i++) { + assertEquals(expected[i], nodes.get(i)); + } + } finally { + in.close(); + } + } + + @Test + public void test2x() throws IOException { + checkFile( + "2.0.0_nodes_http.json", + node(9200, "m1", "2.0.0", true, false, false), + node(9201, "m2", "2.0.0", true, true, false), + node(9202, "m3", "2.0.0", true, false, false), + node(9203, "d1", "2.0.0", false, true, false), + node(9204, "d2", "2.0.0", false, true, false), + node(9205, "d3", "2.0.0", false, true, false), + node(9206, "c1", "2.0.0", false, false, false), + node(9207, "c2", "2.0.0", false, false, false) + ); + } + + @Test + public void test5x() throws IOException { + checkFile( + "5.0.0_nodes_http.json", + node(9200, "m1", "5.0.0", true, false, true), + node(9201, "m2", "5.0.0", true, true, true), + node(9202, "m3", "5.0.0", true, false, true), + node(9203, "d1", "5.0.0", false, true, true), + node(9204, "d2", "5.0.0", false, true, true), + node(9205, "d3", "5.0.0", false, true, true), + node(9206, "c1", "5.0.0", false, false, true), + node(9207, "c2", "5.0.0", false, false, true) + ); + } + + @Test + public void test6x() throws IOException { + checkFile( + "6.0.0_nodes_http.json", + node(9200, "m1", "6.0.0", true, false, true), + node(9201, "m2", "6.0.0", true, true, true), + node(9202, "m3", "6.0.0", true, false, true), + node(9203, "d1", "6.0.0", false, true, true), + node(9204, "d2", "6.0.0", false, true, true), + node(9205, "d3", "6.0.0", false, true, true), + node(9206, "c1", "6.0.0", false, false, true), + node(9207, "c2", "6.0.0", false, false, true) + ); + } + + @Test + public void test7x() throws IOException { + checkFile( + "7.3.0_nodes_http.json", + node(9200, "m1", "7.3.0", "master", "ingest"), + node(9201, "m2", "7.3.0", "master", "data", "ingest"), + node(9202, "m3", "7.3.0", "master", "ingest"), + node(9203, "d1", "7.3.0", "data", "ingest", "ml"), + node(9204, "d2", "7.3.0", "data", "ingest"), + node(9205, "d3", "7.3.0", "data", "ingest"), + node(9206, "c1", "7.3.0", "ingest"), + node(9207, "c2", "7.3.0", "ingest") + ); + } + + @Test + public void testParsingPublishAddressWithPreES7Format() throws IOException { + InputStream in = this.getClass().getResourceAsStream("es6_nodes_publication_address_format.json"); + + HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); + List nodes = ElasticsearchNodesSniffer.readHosts(entity, ElasticsearchNodesSniffer.Scheme.HTTP, new JsonFactory()); + + assertEquals("127.0.0.1", nodes.get(0).getHost().getHostName()); + assertEquals(9200, nodes.get(0).getHost().getPort()); + assertEquals("http", nodes.get(0).getHost().getSchemeName()); + } + + @Test + public void testParsingPublishAddressWithES7Format() throws IOException { + InputStream in = this.getClass().getResourceAsStream("es7_nodes_publication_address_format.json"); + + HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); + List nodes = ElasticsearchNodesSniffer.readHosts(entity, ElasticsearchNodesSniffer.Scheme.HTTP, new JsonFactory()); + + assertEquals("elastic.test", nodes.get(0).getHost().getHostName()); + assertEquals(9200, nodes.get(0).getHost().getPort()); + assertEquals("http", nodes.get(0).getHost().getSchemeName()); + } + + private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) { + final Set roles = new TreeSet<>(); + if (master) { + roles.add("master"); + } + if (data) { + roles.add("data"); + } + if (ingest) { + 
            roles.add("ingest");
+        }
+        return node(port, name, version, roles);
+    }
+
+    private Node node(int port, String name, String version, String... roles) {
+        return node(port, name, version, new TreeSet<>(Arrays.asList(roles)));
+    }
+
+    private Node node(int port, String name, String version, Set<String> roles) {
+        HttpHost host = new HttpHost("127.0.0.1", port);
+        Set<HttpHost> boundHosts = new HashSet<>(2);
+        boundHosts.add(host);
+        boundHosts.add(new HttpHost("[::1]", port));
+        Map<String, List<String>> attributes = new HashMap<>();
+        attributes.put("dummy", singletonList("everyone_has_me"));
+        attributes.put("number", singletonList(name.substring(1)));
+        attributes.put("array", Arrays.asList(name.substring(0, 1), name.substring(1)));
+        return new Node(host, boundHosts, name, version, new Node.Roles(new TreeSet<>(roles)), attributes);
+    }
+
+}
diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferTests.java
new file mode 100644
index 000000000..a7694eac2
--- /dev/null
+++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/ElasticsearchNodesSnifferTests.java
@@ -0,0 +1,378 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.Response; +import co.elastic.clients.transport.rest5_client.low_level.ResponseException; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; + +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.StringWriter; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.startsWith; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; + +public class ElasticsearchNodesSnifferTests extends RestClientTestCase { + + private static int sniffRequestTimeout; + private static ElasticsearchNodesSniffer.Scheme scheme; + private static SniffResponse sniffResponse; + private static HttpServer httpServer; + + @BeforeAll + public static void startHttpServer() throws IOException { + sniffRequestTimeout = randomIntBetween(1000, 10000); + scheme = randomFrom(ElasticsearchNodesSniffer.Scheme.values()); + if (rarely()) { + sniffResponse = SniffResponse.buildFailure(); + } else { + sniffResponse = buildSniffResponse(scheme); + } + httpServer = createHttpServer(sniffResponse, sniffRequestTimeout); + httpServer.start(); + } + + @AfterAll + public static void stopHttpServer() throws IOException { + httpServer.stop(0); + } + + @Test + public void testConstructorValidation() throws IOException { + try { + new ElasticsearchNodesSniffer(null, 1, ElasticsearchNodesSniffer.Scheme.HTTP); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("restClient cannot be null", e.getMessage()); + } + HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + try (Rest5Client restClient = Rest5Client.builder(httpHost).build()) { + try { + new ElasticsearchNodesSniffer(restClient, 1, null); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals(e.getMessage(), "scheme cannot be null"); + } + try { + new ElasticsearchNodesSniffer( + restClient, + randomIntBetween(Integer.MIN_VALUE, 0), + ElasticsearchNodesSniffer.Scheme.HTTP + ); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0"); + } + } + } + + @Test + public void testSniffNodes() throws IOException { + HttpHost httpHost = new 
HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + try (Rest5Client restClient = Rest5Client.builder(httpHost).build()) { + ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(restClient, sniffRequestTimeout, scheme); + try { + List sniffedNodes = sniffer.sniff(); + if (sniffResponse.isFailure) { + fail("sniffNodes should have failed"); + } + assertEquals(sniffResponse.result, sniffedNodes); + } catch (ResponseException e) { + Response response = e.getResponse(); + if (sniffResponse.isFailure) { + final String errorPrefix = "method [GET], host [" + + httpHost + + "], URI [/_nodes/http?timeout=" + + sniffRequestTimeout + + "ms], status line [HTTP/1.1"; + assertThat(e.getMessage(), startsWith(errorPrefix)); + assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); + assertThat(response.getHost(), equalTo(httpHost)); + assertThat(response.getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); + assertThat( + response.getRequestLine().toString(), + equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1") + ); + } else { + fail("sniffNodes should have succeeded: " + response.getStatusCode()); + } + } + } + } + + private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeoutMillis) throws IOException { + HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.createContext("/_nodes/http", new ResponseHandler(sniffTimeoutMillis, sniffResponse)); + return httpServer; + } + + private static class ResponseHandler implements HttpHandler { + private final int sniffTimeoutMillis; + private final SniffResponse sniffResponse; + + ResponseHandler(int sniffTimeoutMillis, SniffResponse sniffResponse) { + this.sniffTimeoutMillis = sniffTimeoutMillis; + this.sniffResponse = sniffResponse; + } + + @Override + public void handle(HttpExchange httpExchange) throws IOException { + if (httpExchange.getRequestMethod().equals(HttpGet.METHOD_NAME)) { + if (httpExchange.getRequestURI().getRawQuery().equals("timeout=" + sniffTimeoutMillis + "ms")) { + String nodesInfoBody = sniffResponse.nodesInfoBody; + httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length()); + try (OutputStream out = httpExchange.getResponseBody()) { + out.write(nodesInfoBody.getBytes(StandardCharsets.UTF_8)); + return; + } + } + } + httpExchange.sendResponseHeaders(404, 0); + httpExchange.close(); + } + } + + private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme scheme) throws IOException { + int numNodes = randomIntBetween(1, 5); + List nodes = new ArrayList<>(numNodes); + JsonFactory jsonFactory = new JsonFactory(); + StringWriter writer = new StringWriter(); + JsonGenerator generator = jsonFactory.createGenerator(writer); + generator.writeStartObject(); + if (getRandom().nextBoolean()) { + generator.writeStringField("cluster_name", "elasticsearch"); + } + if (getRandom().nextBoolean()) { + generator.writeObjectFieldStart("bogus_object"); + generator.writeEndObject(); + } + generator.writeObjectFieldStart("nodes"); + for (int i = 0; i < numNodes; i++) { + String nodeId = randomAsciiLettersOfLengthBetween(5, 10); + String host = "host" + i; + int port = randomIntBetween(9200, 9299); + HttpHost publishHost = new HttpHost( scheme.toString(), host, port); + Set boundHosts = new HashSet<>(); + boundHosts.add(publishHost); + + if (randomBoolean()) { + int bound = 
randomIntBetween(1, 5); + for (int b = 0; b < bound; b++) { + boundHosts.add(new HttpHost(scheme.toString(), host + b, port)); + } + } + + int numAttributes = randomIntBetween(0, 5); + Map> attributes = new HashMap<>(numAttributes); + for (int j = 0; j < numAttributes; j++) { + int numValues = frequently() ? 1 : randomIntBetween(2, 5); + List values = new ArrayList<>(); + for (int v = 0; v < numValues; v++) { + values.add(j + "value" + v); + } + attributes.put("attr" + j, values); + } + + final Set nodeRoles = new TreeSet<>(); + if (randomBoolean()) { + nodeRoles.add("master"); + } + if (randomBoolean()) { + nodeRoles.add("data"); + } + if (randomBoolean()) { + nodeRoles.add("data_content"); + } + if (randomBoolean()) { + nodeRoles.add("data_hot"); + } + if (randomBoolean()) { + nodeRoles.add("data_warm"); + } + if (randomBoolean()) { + nodeRoles.add("data_cold"); + } + if (randomBoolean()) { + nodeRoles.add("data_frozen"); + } + if (randomBoolean()) { + nodeRoles.add("ingest"); + } + + Node node = new Node( + publishHost, + boundHosts, + randomAsciiAlphanumOfLength(5), + randomAsciiAlphanumOfLength(5), + new Node.Roles(nodeRoles), + attributes + ); + + generator.writeObjectFieldStart(nodeId); + if (getRandom().nextBoolean()) { + generator.writeObjectFieldStart("bogus_object"); + generator.writeEndObject(); + } + if (getRandom().nextBoolean()) { + generator.writeArrayFieldStart("bogus_array"); + generator.writeStartObject(); + generator.writeEndObject(); + generator.writeEndArray(); + } + boolean isHttpEnabled = rarely() == false; + if (isHttpEnabled) { + nodes.add(node); + generator.writeObjectFieldStart("http"); + generator.writeArrayFieldStart("bound_address"); + for (HttpHost bound : boundHosts) { + generator.writeString(bound.toHostString()); + } + generator.writeEndArray(); + if (getRandom().nextBoolean()) { + generator.writeObjectFieldStart("bogus_object"); + generator.writeEndObject(); + } + generator.writeStringField("publish_address", publishHost.toHostString()); + if (getRandom().nextBoolean()) { + generator.writeNumberField("max_content_length_in_bytes", 104857600); + } + generator.writeEndObject(); + } + + List roles = Arrays.asList( + new String[] { "master", "data", "ingest", "data_content", "data_hot", "data_warm", "data_cold", "data_frozen" } + ); + Collections.shuffle(roles, getRandom()); + generator.writeArrayFieldStart("roles"); + for (String role : roles) { + if ("master".equals(role) && node.getRoles().isMasterEligible()) { + generator.writeString("master"); + } + if ("data".equals(role) && node.getRoles().hasDataRole()) { + generator.writeString("data"); + } + if ("data_content".equals(role) && node.getRoles().hasDataContentRole()) { + generator.writeString("data_content"); + } + if ("data_hot".equals(role) && node.getRoles().hasDataHotRole()) { + generator.writeString("data_hot"); + } + if ("data_warm".equals(role) && node.getRoles().hasDataWarmRole()) { + generator.writeString("data_warm"); + } + if ("data_cold".equals(role) && node.getRoles().hasDataColdRole()) { + generator.writeString("data_cold"); + } + if ("data_frozen".equals(role) && node.getRoles().hasDataFrozenRole()) { + generator.writeString("data_frozen"); + } + if ("ingest".equals(role) && node.getRoles().isIngest()) { + generator.writeString("ingest"); + } + } + generator.writeEndArray(); + + generator.writeFieldName("version"); + generator.writeString(node.getVersion()); + generator.writeFieldName("name"); + generator.writeString(node.getName()); + + if (numAttributes > 0) { + 
generator.writeObjectFieldStart("attributes"); + for (Map.Entry> entry : attributes.entrySet()) { + if (entry.getValue().size() == 1) { + generator.writeStringField(entry.getKey(), entry.getValue().get(0)); + } else { + for (int v = 0; v < entry.getValue().size(); v++) { + generator.writeStringField(entry.getKey() + "." + v, entry.getValue().get(v)); + } + } + } + generator.writeEndObject(); + } + generator.writeEndObject(); + } + generator.writeEndObject(); + generator.writeEndObject(); + generator.close(); + return SniffResponse.buildResponse(writer.toString(), nodes); + } + + private static class SniffResponse { + private final String nodesInfoBody; + private final int nodesInfoResponseCode; + private final List result; + private final boolean isFailure; + + SniffResponse(String nodesInfoBody, List result, boolean isFailure) { + this.nodesInfoBody = nodesInfoBody; + this.result = result; + this.isFailure = isFailure; + if (isFailure) { + this.nodesInfoResponseCode = randomErrorResponseCode(); + } else { + this.nodesInfoResponseCode = 200; + } + } + + static SniffResponse buildFailure() { + return new SniffResponse("", Collections.emptyList(), true); + } + + static SniffResponse buildResponse(String nodesInfoBody, List nodes) { + return new SniffResponse(nodesInfoBody, nodes, false); + } + } + + private static int randomErrorResponseCode() { + return randomIntBetween(400, 599); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/MockNodesSniffer.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/MockNodesSniffer.java new file mode 100644 index 000000000..8e54410d9 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/MockNodesSniffer.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Node; +import org.apache.hc.core5.http.HttpHost; + +import java.util.Collections; +import java.util.List; + +/** + * Mock implementation of {@link NodesSniffer}. Useful to prevent any connection attempt while testing builders etc. 
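+ * <p>In these tests it is typically handed to the sniffer builder, roughly as exercised by
+ * {@code SnifferBuilderTests} and {@code SniffOnFailureListenerTests} (a usage sketch, not an
+ * additional API):
+ * <pre>{@code
+ * Sniffer sniffer = Sniffer.builder(restClient)
+ *     .setNodesSniffer(new MockNodesSniffer())
+ *     .build();
+ * }</pre>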
+ */ +class MockNodesSniffer implements NodesSniffer { + @Override + public List sniff() { + return Collections.singletonList(new Node(new HttpHost("localhost", 9200))); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SniffOnFailureListenerTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SniffOnFailureListenerTests.java new file mode 100644 index 000000000..9f826cae4 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SniffOnFailureListenerTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase; +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class SniffOnFailureListenerTests extends RestClientTestCase { + + @Test + public void testSetSniffer() throws Exception { + SniffOnFailureListener listener = new SniffOnFailureListener(); + + try { + listener.onFailure(null); + fail("should have failed"); + } catch (IllegalStateException e) { + assertEquals("sniffer was not set, unable to sniff on failure", e.getMessage()); + } + + try { + listener.setSniffer(null); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("sniffer must not be null", e.getMessage()); + } + + try (Rest5Client restClient = Rest5Client.builder(new HttpHost("localhost", 9200)).build()) { + try (Sniffer sniffer = Sniffer.builder(restClient).setNodesSniffer(new MockNodesSniffer()).build()) { + listener.setSniffer(sniffer); + try { + listener.setSniffer(sniffer); + fail("should have failed"); + } catch (IllegalStateException e) { + assertEquals("sniffer can only be set once", e.getMessage()); + } + listener.onFailure(new Node(new HttpHost("localhost", 9200))); + } + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferBuilderTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferBuilderTests.java new file mode 100644 index 000000000..3322b7f69 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferBuilderTests.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase; +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +public class SnifferBuilderTests extends RestClientTestCase { + + @Test + public void testBuild() throws Exception { + int numNodes = randomIntBetween(1, 5); + HttpHost[] hosts = new HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); + } + + try (Rest5Client client = Rest5Client.builder(hosts).build()) { + try { + Sniffer.builder(null).build(); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("restClient cannot be null", e.getMessage()); + } + + try { + Sniffer.builder(client).setSniffIntervalMillis(randomIntBetween(Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage()); + } + + try { + Sniffer.builder(client).setSniffAfterFailureDelayMillis(randomIntBetween(Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage()); + } + + try { + Sniffer.builder(client).setNodesSniffer(null); + fail("should have failed"); + } catch (NullPointerException e) { + assertEquals("nodesSniffer cannot be null", e.getMessage()); + } + + try (Sniffer sniffer = Sniffer.builder(client).build()) { + assertNotNull(sniffer); + } + + SnifferBuilder builder = Sniffer.builder(client); + if (getRandom().nextBoolean()) { + builder.setSniffIntervalMillis(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (getRandom().nextBoolean()) { + builder.setSniffAfterFailureDelayMillis(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (getRandom().nextBoolean()) { + builder.setNodesSniffer(new MockNodesSniffer()); + } + + try (Sniffer sniffer = builder.build()) { + assertNotNull(sniffer); + } + } + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferTests.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferTests.java new file mode 100644 index 000000000..f7d543f05 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/SnifferTests.java @@ -0,0 +1,662 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer; + +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.transport.rest5_client.low_level.RestClientTestCase; +import org.apache.hc.core5.http.HttpHost; +import org.junit.jupiter.api.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class SnifferTests extends RestClientTestCase { + + /** + * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link NodesSniffer} implementation + * to retrieve nodes and set them (when not empty) to the provided {@link Rest5Client} instance. 
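+     * <p>For context, outside of tests a sniffer is typically wired to a client roughly as below.
+     * This is only a sketch: the host and the one-minute interval are illustrative values, and the
+     * chained builder calls mirror the ones exercised in {@code SnifferBuilderTests}.
+     * <pre>{@code
+     * Rest5Client restClient = Rest5Client.builder(new HttpHost("http", "localhost", 9200)).build();
+     * Sniffer sniffer = Sniffer.builder(restClient)
+     *     .setSniffIntervalMillis(60000)
+     *     .build();
+     * // on shutdown, release the sniffer and the client
+     * sniffer.close();
+     * restClient.close();
+     * }</pre>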
+ */ + @Test + public void testSniff() throws IOException { + Node initialNode = new Node(new HttpHost("localhost", 9200)); + try (Rest5Client restClient = Rest5Client.builder(initialNode).build()) { + Sniffer.Scheduler noOpScheduler = new Sniffer.Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + return mock(Future.class); + } + + @Override + public void shutdown() { + + } + }; + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); + int iters = randomIntBetween(5, 30); + try (Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 1000L, -1)) { + { + assertEquals(1, restClient.getNodes().size()); + Node node = restClient.getNodes().get(0); + assertEquals("localhost", node.getHost().getHostName()); + assertEquals(9200, node.getHost().getPort()); + } + int emptyList = 0; + int failures = 0; + int runs = 0; + List lastNodes = Collections.singletonList(initialNode); + for (int i = 0; i < iters; i++) { + try { + runs++; + sniffer.sniff(); + if (nodesSniffer.failures.get() > failures) { + failures++; + fail("should have failed given that nodesSniffer says it threw an exception"); + } else if (nodesSniffer.emptyList.get() > emptyList) { + emptyList++; + assertEquals(lastNodes, restClient.getNodes()); + } else { + assertNotEquals(lastNodes, restClient.getNodes()); + List expectedNodes = CountingNodesSniffer.buildNodes(runs); + assertEquals(expectedNodes, restClient.getNodes()); + lastNodes = restClient.getNodes(); + } + } catch (IOException e) { + if (nodesSniffer.failures.get() > failures) { + failures++; + assertEquals("communication breakdown", e.getMessage()); + } + } + } + assertEquals(nodesSniffer.emptyList.get(), emptyList); + assertEquals(nodesSniffer.failures.get(), failures); + assertEquals(nodesSniffer.runs.get(), runs); + } + } + } + + /** + * Test multiple sniffing rounds by mocking the {@link Sniffer.Scheduler} as well as the {@link NodesSniffer}. + * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled. + * The {@link CountingNodesSniffer} doesn't make any network connection but may throw exception or return no nodes, which makes + * it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling. + * The {@link Sniffer.Scheduler} implementation submits rather than scheduling tasks, meaning that it doesn't respect the + * requested sniff delays while allowing to assert that the requested delays for each requested run and the following one are the + * expected values. 
+     */
+    @Test
+    public void testOrdinarySniffRounds() throws Exception {
+        final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
+        long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
+        Rest5Client restClient = mock(Rest5Client.class);
+        CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
+        final int iters = randomIntBetween(30, 100);
+        final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
+        final CountDownLatch completionLatch = new CountDownLatch(1);
+        final AtomicInteger runs = new AtomicInteger(iters);
+        final ExecutorService executor = Executors.newSingleThreadExecutor();
+        final AtomicReference<Future<?>> lastFuture = new AtomicReference<>();
+        final AtomicReference<Sniffer.Task> lastTask = new AtomicReference<>();
+        Sniffer.Scheduler scheduler = new Sniffer.Scheduler() {
+            @Override
+            public Future<?> schedule(Sniffer.Task task, long delayMillis) {
+                assertEquals(sniffInterval, task.nextTaskDelay);
+                int numberOfRuns = runs.getAndDecrement();
+                if (numberOfRuns == iters) {
+                    // the first call is to schedule the first sniff round from the Sniffer constructor, with delay 0
+                    assertEquals(0L, delayMillis);
+                    assertEquals(sniffInterval, task.nextTaskDelay);
+                } else {
+                    // all of the subsequent times "schedule" is called with delay set to the configured sniff interval
+                    assertEquals(sniffInterval, delayMillis);
+                    assertEquals(sniffInterval, task.nextTaskDelay);
+                    if (numberOfRuns == 0) {
+                        completionLatch.countDown();
+                        return null;
+                    }
+                }
+                // we submit rather than scheduling to make the test quick and not depend on time
+                Future<?> future = executor.submit(task);
+                futures.add(future);
+                if (numberOfRuns == 1) {
+                    lastFuture.set(future);
+                    lastTask.set(task);
+                }
+                return future;
+            }
+
+            @Override
+            public void shutdown() {
+                // the executor is closed externally, shutdown is tested separately
+            }
+        };
+        try {
+            new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
+            assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS));
+            assertEquals(iters, futures.size());
+            // the last future is the only one that may not be completed yet, as the count down happens
+            // while scheduling the next round which is still part of the execution of the runnable itself.
+            assertTrue(lastTask.get().hasStarted());
+            lastFuture.get().get();
+            for (Future<?> future : futures) {
+                assertTrue(future.isDone());
+                future.get();
+            }
+        } finally {
+            executor.shutdown();
+            assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
+        }
+        int totalRuns = nodesSniffer.runs.get();
+        assertEquals(iters, totalRuns);
+        int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get();
+        verify(restClient, times(setNodesRuns)).setNodes(anyCollection());
+        verifyNoMoreInteractions(restClient);
+    }
+
+    /**
+     * Test that {@link Sniffer#close()} shuts down the underlying {@link Sniffer.Scheduler}, and that such calls are idempotent.
+     * Also verifies that the next scheduled round gets cancelled.
+ */ + @Test + public void testClose() { + final Future future = mock(Future.class); + long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); + long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); + Rest5Client restClient = mock(Rest5Client.class); + final AtomicInteger shutdown = new AtomicInteger(0); + final AtomicBoolean initialized = new AtomicBoolean(false); + Sniffer.Scheduler scheduler = new Sniffer.Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + if (initialized.compareAndSet(false, true)) { + // run from the same thread so the sniffer gets for sure initialized and the scheduled task gets cancelled on close + task.run(); + } + return future; + } + + @Override + public void shutdown() { + shutdown.incrementAndGet(); + } + }; + + Sniffer sniffer = new Sniffer(restClient, new MockNodesSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay); + assertEquals(0, shutdown.get()); + int iters = randomIntBetween(3, 10); + for (int i = 1; i <= iters; i++) { + sniffer.close(); + verify(future, times(i)).cancel(false); + assertEquals(i, shutdown.get()); + } + } + + @Test + public void testSniffOnFailureNotInitialized() { + Rest5Client restClient = mock(Rest5Client.class); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); + long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); + long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); + final AtomicInteger scheduleCalls = new AtomicInteger(0); + Sniffer.Scheduler scheduler = new Sniffer.Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + scheduleCalls.incrementAndGet(); + return null; + } + + @Override + public void shutdown() {} + }; + + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + for (int i = 0; i < 10; i++) { + sniffer.sniffOnFailure(); + } + assertEquals(1, scheduleCalls.get()); + int totalRuns = nodesSniffer.runs.get(); + assertEquals(0, totalRuns); + int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); + verify(restClient, times(setNodesRuns)).setNodes(anyCollection()); + verifyNoMoreInteractions(restClient); + } + + /** + * Test behaviour when a bunch of onFailure sniffing rounds are triggered in parallel. Each run will always + * schedule a subsequent afterFailure round. Also, for each onFailure round that starts, the net scheduled round + * (either afterFailure or ordinary) gets cancelled. 
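+     * <p>Outside of tests, sniffing on failure is enabled by wiring the pieces together roughly as
+     * below. This is only a sketch: the delay is an illustrative value and it assumes that
+     * {@code SniffOnFailureListener} can be registered through the client builder's
+     * {@code setFailureListener}, as the listener tests in this package suggest.
+     * <pre>{@code
+     * SniffOnFailureListener listener = new SniffOnFailureListener();
+     * Rest5Client restClient = Rest5Client.builder(new HttpHost("http", "localhost", 9200))
+     *     .setFailureListener(listener)
+     *     .build();
+     * Sniffer sniffer = Sniffer.builder(restClient)
+     *     .setSniffAfterFailureDelayMillis(30000)
+     *     .build();
+     * listener.setSniffer(sniffer);
+     * }</pre>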
+ */ + @Test + public void testSniffOnFailure() throws Exception { + Rest5Client restClient = mock(Rest5Client.class); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); + final AtomicBoolean initializing = new AtomicBoolean(true); + final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); + final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); + int minNumOnFailureRounds = randomIntBetween(5, 10); + final CountDownLatch initializingLatch = new CountDownLatch(1); + final Set ordinaryRoundsTasks = new CopyOnWriteArraySet<>(); + final AtomicReference> initializingFuture = new AtomicReference<>(); + final Set onFailureTasks = new CopyOnWriteArraySet<>(); + final Set afterFailureTasks = new CopyOnWriteArraySet<>(); + final AtomicBoolean onFailureCompleted = new AtomicBoolean(false); + final CountDownLatch completionLatch = new CountDownLatch(1); + final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + Sniffer.Scheduler scheduler = new Sniffer.Scheduler() { + @Override + public Future schedule(final Sniffer.Task task, long delayMillis) { + if (initializing.compareAndSet(true, false)) { + assertEquals(0L, delayMillis); + Future future = executor.submit(new Runnable() { + @Override + public void run() { + try { + task.run(); + } finally { + // we need to make sure that the sniffer is initialized, so the sniffOnFailure + // call does what it needs to do. Otherwise nothing happens until initialized. + initializingLatch.countDown(); + } + } + }); + assertTrue(initializingFuture.compareAndSet(null, future)); + return future; + } + if (delayMillis == 0L) { + Future future = executor.submit(task); + onFailureTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + if (delayMillis == sniffAfterFailureDelay) { + Future future = scheduleOrSubmit(task); + afterFailureTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + + assertEquals(sniffInterval, delayMillis); + assertEquals(sniffInterval, task.nextTaskDelay); + + if (onFailureCompleted.get() && onFailureTasks.size() == afterFailureTasks.size()) { + completionLatch.countDown(); + return mock(Future.class); + } + + Future future = scheduleOrSubmit(task); + ordinaryRoundsTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + + private Future scheduleOrSubmit(Sniffer.Task task) { + if (randomBoolean()) { + return executor.schedule(task, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS); + } else { + return executor.submit(task); + } + } + + @Override + public void shutdown() {} + }; + final Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + assertTrue("timeout waiting for sniffer to get initialized", initializingLatch.await(1000, TimeUnit.MILLISECONDS)); + + ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20)); + Set> onFailureFutures = new CopyOnWriteArraySet<>(); + try { + // with tasks executing quickly one after each other, it is very likely that the onFailure round gets skipped + // as another round is already running. We retry till enough runs get through as that's what we want to test. 
+ while (onFailureTasks.size() < minNumOnFailureRounds) { + onFailureFutures.add(onFailureExecutor.submit(new Runnable() { + @Override + public void run() { + sniffer.sniffOnFailure(); + } + })); + } + assertThat(onFailureFutures.size(), greaterThanOrEqualTo(minNumOnFailureRounds)); + for (Future onFailureFuture : onFailureFutures) { + assertNull(onFailureFuture.get()); + } + onFailureCompleted.set(true); + } finally { + onFailureExecutor.shutdown(); + onFailureExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS); + } + + assertFalse(initializingFuture.get().isCancelled()); + assertTrue(initializingFuture.get().isDone()); + assertNull(initializingFuture.get().get()); + + assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS)); + assertThat(onFailureTasks.size(), greaterThanOrEqualTo(minNumOnFailureRounds)); + assertEquals(onFailureTasks.size(), afterFailureTasks.size()); + + for (Sniffer.ScheduledTask onFailureTask : onFailureTasks) { + assertFalse(onFailureTask.future.isCancelled()); + assertTrue(onFailureTask.future.isDone()); + assertNull(onFailureTask.future.get()); + assertTrue(onFailureTask.task.hasStarted()); + assertFalse(onFailureTask.task.isSkipped()); + } + + int cancelledTasks = 0; + int completedTasks = onFailureTasks.size() + 1; + for (Sniffer.ScheduledTask afterFailureTask : afterFailureTasks) { + if (assertTaskCancelledOrCompleted(afterFailureTask)) { + completedTasks++; + } else { + cancelledTasks++; + } + } + + assertThat(ordinaryRoundsTasks.size(), greaterThan(0)); + for (Sniffer.ScheduledTask task : ordinaryRoundsTasks) { + if (assertTaskCancelledOrCompleted(task)) { + completedTasks++; + } else { + cancelledTasks++; + } + } + assertEquals(onFailureTasks.size(), cancelledTasks); + + assertEquals(completedTasks, nodesSniffer.runs.get()); + int setNodesRuns = nodesSniffer.runs.get() - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); + verify(restClient, times(setNodesRuns)).setNodes(anyCollection()); + verifyNoMoreInteractions(restClient); + } finally { + executor.shutdown(); + executor.awaitTermination(1000L, TimeUnit.MILLISECONDS); + } + } + + private static boolean assertTaskCancelledOrCompleted(Sniffer.ScheduledTask task) throws ExecutionException, InterruptedException { + if (task.task.isSkipped()) { + assertTrue(task.future.isCancelled()); + try { + task.future.get(); + fail("cancellation exception should have been thrown"); + } catch (CancellationException ignore) {} + return false; + } else { + try { + assertNull(task.future.get()); + } catch (CancellationException ignore) { + assertTrue(task.future.isCancelled()); + } + assertTrue(task.future.isDone()); + assertTrue(task.task.hasStarted()); + return true; + } + } + + @Test + public void testTaskCancelling() throws Exception { + Rest5Client restClient = mock(Rest5Client.class); + NodesSniffer nodesSniffer = mock(NodesSniffer.class); + Sniffer.Scheduler noOpScheduler = new Sniffer.Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + return null; + } + + @Override + public void shutdown() {} + }; + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L); + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + int numIters = randomIntBetween(50, 100); + for (int i = 0; i < numIters; i++) { + Sniffer.Task task = sniffer.new Task(0L); + TaskWrapper wrapper = new TaskWrapper(task); + Future future; + if (rarely()) { + future = 
executor.schedule(wrapper, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS); + } else { + future = executor.submit(wrapper); + } + Sniffer.ScheduledTask scheduledTask = new Sniffer.ScheduledTask(task, future); + boolean skip = scheduledTask.skip(); + try { + assertNull(future.get()); + } catch (CancellationException ignore) { + assertTrue(future.isCancelled()); + } + + if (skip) { + // the task was either cancelled before starting, in which case it will never start (thanks to Future#cancel), + // or skipped, in which case it will run but do nothing (thanks to Task#skip). + // Here we want to make sure that whenever skip returns true, the task either won't run or it won't do anything, + // otherwise we may end up with parallel sniffing tracks given that each task schedules the following one. We need to + // make sure that onFailure takes scheduling over while at the same time ordinary rounds don't go on. + assertFalse(task.hasStarted()); + assertTrue(task.isSkipped()); + assertTrue(future.isCancelled()); + assertTrue(future.isDone()); + } else { + // if a future is cancelled when its execution has already started, future#get throws CancellationException before + // completion. The execution continues though so we use a latch to try and wait for the task to be completed. + // Here we want to make sure that whenever skip returns false, the task will be completed, otherwise we may be + // missing to schedule the following round, which means no sniffing will ever happen again besides on failure sniffing. + assertTrue(wrapper.await()); + // the future may or may not be cancelled but the task has for sure started and completed + assertTrue(task.toString(), task.hasStarted()); + assertFalse(task.isSkipped()); + assertTrue(future.isDone()); + } + // subsequent cancel calls return false for sure + int cancelCalls = randomIntBetween(1, 10); + for (int j = 0; j < cancelCalls; j++) { + assertFalse(scheduledTask.skip()); + } + } + } finally { + executor.shutdown(); + executor.awaitTermination(1000, TimeUnit.MILLISECONDS); + } + } + + /** + * Wraps a {@link Sniffer.Task} and allows to wait for its completion. This is needed to verify + * that tasks are either never started or always completed. Calling {@link Future#get()} against a cancelled future will + * throw {@link CancellationException} straight-away but the execution of the task will continue if it had already started, + * in which case {@link Future#cancel(boolean)} returns true which is not very helpful. + */ + private static final class TaskWrapper implements Runnable { + final Sniffer.Task task; + final CountDownLatch completionLatch = new CountDownLatch(1); + + TaskWrapper(Sniffer.Task task) { + this.task = task; + } + + @Override + public void run() { + try { + task.run(); + } finally { + completionLatch.countDown(); + } + } + + boolean await() throws InterruptedException { + return completionLatch.await(1000, TimeUnit.MILLISECONDS); + } + } + + /** + * Mock {@link NodesSniffer} implementation used for testing, which most of the times return a fixed node. + * It rarely throws exception or return an empty list of nodes, to make sure that such situations are properly handled. + * It also asserts that it never gets called concurrently, based on the assumption that only one sniff run can be run + * at a given point in time. 
+ */ + private static class CountingNodesSniffer implements NodesSniffer { + private final AtomicInteger runs = new AtomicInteger(0); + private final AtomicInteger failures = new AtomicInteger(0); + private final AtomicInteger emptyList = new AtomicInteger(0); + + @Override + public List<Node> sniff() throws IOException { + int run = runs.incrementAndGet(); + if (rarely()) { + failures.incrementAndGet(); + // check that if communication breaks, sniffer keeps on working + throw new IOException("communication breakdown"); + } + if (rarely()) { + emptyList.incrementAndGet(); + return Collections.emptyList(); + } + return buildNodes(run); + } + + private static List<Node> buildNodes(int run) { + int size = run % 5 + 1; + assert size > 0; + List<Node> nodes = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + nodes.add(new Node(new HttpHost("sniffed-" + run, 9200 + i))); + } + return nodes; + } + } + + @Test + public void testDefaultSchedulerSchedule() { + Rest5Client restClient = mock(Rest5Client.class); + NodesSniffer nodesSniffer = mock(NodesSniffer.class); + Sniffer.Scheduler noOpScheduler = new Sniffer.Scheduler() { + @Override + public Future<?> schedule(Sniffer.Task task, long delayMillis) { + return mock(Future.class); + } + + @Override + public void shutdown() { + + } + }; + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L); + Sniffer.Task task = sniffer.new Task(randomLongBetween(1, Long.MAX_VALUE)); + + ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class); + final ScheduledFuture<?> mockedFuture = mock(ScheduledFuture.class); + when(scheduledExecutorService.schedule(any(Runnable.class), any(Long.class), any(TimeUnit.class))).then( + new Answer<ScheduledFuture<?>>() { + @Override + public ScheduledFuture<?> answer(InvocationOnMock invocationOnMock) { + return mockedFuture; + } + } + ); + Sniffer.DefaultScheduler scheduler = new Sniffer.DefaultScheduler(scheduledExecutorService); + long delay = randomLongBetween(1, Long.MAX_VALUE); + Future<?> future = scheduler.schedule(task, delay); + assertSame(mockedFuture, future); + verify(scheduledExecutorService).schedule(task, delay, TimeUnit.MILLISECONDS); + verifyNoMoreInteractions(scheduledExecutorService, mockedFuture); + } + + @Test + public void testDefaultSchedulerThreadFactory() { + Sniffer.DefaultScheduler defaultScheduler = new Sniffer.DefaultScheduler(); + try { + ScheduledExecutorService executorService = defaultScheduler.executor; + assertThat(executorService, instanceOf(ScheduledThreadPoolExecutor.class)); + assertThat(executorService, instanceOf(ScheduledThreadPoolExecutor.class)); + ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor) executorService; + assertTrue(executor.getRemoveOnCancelPolicy()); + assertFalse(executor.getContinueExistingPeriodicTasksAfterShutdownPolicy()); + assertTrue(executor.getExecuteExistingDelayedTasksAfterShutdownPolicy()); + assertThat(executor.getThreadFactory(), instanceOf(Sniffer.SnifferThreadFactory.class)); + int iters = randomIntBetween(3, 10); + for (int i = 1; i <= iters; i++) { + Thread thread = executor.getThreadFactory().newThread(new Runnable() { + @Override + public void run() { + + } + }); + assertThat(thread.getName(), equalTo("es_rest_client_sniffer[T#" + i + "]")); + assertThat(thread.isDaemon(), is(true)); + } + } finally { + defaultScheduler.shutdown(); + } + } + + @Test + public void testDefaultSchedulerShutdown() throws Exception { + ScheduledThreadPoolExecutor executor = mock(ScheduledThreadPoolExecutor.class); + 
Sniffer.DefaultScheduler defaultScheduler = new Sniffer.DefaultScheduler(executor); + defaultScheduler.shutdown(); + verify(executor).shutdown(); + verify(executor).awaitTermination(1000, TimeUnit.MILLISECONDS); + verify(executor).shutdownNow(); + verifyNoMoreInteractions(executor); + + when(executor.awaitTermination(1000, TimeUnit.MILLISECONDS)).thenReturn(true); + defaultScheduler.shutdown(); + verify(executor, times(2)).shutdown(); + verify(executor, times(2)).awaitTermination(1000, TimeUnit.MILLISECONDS); + verifyNoMoreInteractions(executor); + } +} diff --git a/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/documentation/SnifferDocumentation.java b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/documentation/SnifferDocumentation.java new file mode 100644 index 000000000..995d16670 --- /dev/null +++ b/java-client/src/test/java/co/elastic/clients/transport/rest5_client/low_level/sniffer/documentation/SnifferDocumentation.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.transport.rest5_client.low_level.sniffer.documentation; + +import org.apache.hc.core5.http.HttpHost; +import co.elastic.clients.transport.rest5_client.low_level.Node; +import co.elastic.clients.transport.rest5_client.low_level.Rest5Client; +import co.elastic.clients.transport.rest5_client.low_level.sniffer.ElasticsearchNodesSniffer; +import co.elastic.clients.transport.rest5_client.low_level.sniffer.NodesSniffer; +import co.elastic.clients.transport.rest5_client.low_level.sniffer.SniffOnFailureListener; +import co.elastic.clients.transport.rest5_client.low_level.sniffer.Sniffer; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * This class is used to generate the Java low-level REST client documentation. + * You need to wrap your code between two tags like: + * // tag::example[] + * // end::example[] + * + * Where example is your tag name. + * + * Then in the documentation, you can extract what is between tag and end tags with + * ["source","java",subs="attributes,callouts,macros"] + * -------------------------------------------------- + * include-tagged::{doc-tests}/SnifferDocumentation.java[example] + * -------------------------------------------------- + * + * Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want + * to send requests to a node and we don't even have the tools to do it. 
+ */ +@SuppressWarnings("unused") +public class SnifferDocumentation { + + @SuppressWarnings("unused") + public void usage() throws IOException, URISyntaxException { + { + //tag::sniffer-init + Rest5Client restClient = Rest5Client.builder( + HttpHost.create("http://localhost:9200")) + .build(); + Sniffer sniffer = Sniffer.builder(restClient).build(); + //end::sniffer-init + + //tag::sniffer-close + sniffer.close(); + restClient.close(); + //end::sniffer-close + } + { + //tag::sniffer-interval + Rest5Client restClient = Rest5Client.builder( + new HttpHost("localhost", 9200)) + .build(); + Sniffer sniffer = Sniffer.builder(restClient) + .setSniffIntervalMillis(60000).build(); + //end::sniffer-interval + } + { + //tag::sniff-on-failure + SniffOnFailureListener sniffOnFailureListener = + new SniffOnFailureListener(); + Rest5Client restClient = Rest5Client.builder( + new HttpHost("localhost", 9200)) + .setFailureListener(sniffOnFailureListener) // <1> + .build(); + Sniffer sniffer = Sniffer.builder(restClient) + .setSniffAfterFailureDelayMillis(30000) // <2> + .build(); + sniffOnFailureListener.setSniffer(sniffer); // <3> + //end::sniff-on-failure + } + { + //tag::sniffer-https + Rest5Client restClient = Rest5Client.builder( + new HttpHost("localhost", 9200)) + .build(); + NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer( + restClient, + ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, + ElasticsearchNodesSniffer.Scheme.HTTPS); + Sniffer sniffer = Sniffer.builder(restClient) + .setNodesSniffer(nodesSniffer).build(); + //end::sniffer-https + } + { + //tag::sniff-request-timeout + Rest5Client restClient = Rest5Client.builder( + new HttpHost("localhost", 9200)) + .build(); + NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer( + restClient, + TimeUnit.SECONDS.toMillis(5), + ElasticsearchNodesSniffer.Scheme.HTTP); + Sniffer sniffer = Sniffer.builder(restClient) + .setNodesSniffer(nodesSniffer).build(); + //end::sniff-request-timeout + } + { + //tag::custom-nodes-sniffer + Rest5Client restClient = Rest5Client.builder( + HttpHost.create("http://localhost:9200")) + .build(); + NodesSniffer nodesSniffer = new NodesSniffer() { + @Override + public List sniff() throws IOException { + return null; // <1> + } + }; + Sniffer sniffer = Sniffer.builder(restClient) + .setNodesSniffer(nodesSniffer).build(); + //end::custom-nodes-sniffer + } + } +} diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/2.0.0_nodes_http.json b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/2.0.0_nodes_http.json new file mode 100644 index 000000000..22dc4ec13 --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/2.0.0_nodes_http.json @@ -0,0 +1,201 @@ +{ + "cluster_name": "elasticsearch", + "nodes": { + "qr-SOrELSaGW8SlU8nflBw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9200", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "data": "false", + "array.1": "1", + "master": "true" + }, + "http": { + "bound_address": [ + "127.0.0.1:9200", + "[::1]:9200" + ], + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 + } + }, + "osfiXxUOQzCVIs-eepgSCA": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", 
+ "build": "de54438", + "http_address": "127.0.0.1:9201", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2", + "master": "true" + }, + "http": { + "bound_address": [ + "127.0.0.1:9201", + "[::1]:9201" + ], + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 + } + }, + "lazeJFiIQ8eHHV4GeIdMPg": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9202", + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "data": "false", + "array.1": "3", + "master": "true" + }, + "http": { + "bound_address": [ + "127.0.0.1:9202", + "[::1]:9202" + ], + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 + } + }, + "t9WxK-fNRsqV5G0Mm09KpQ": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9203", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1", + "master": "false" + }, + "http": { + "bound_address": [ + "127.0.0.1:9203", + "[::1]:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 + } + }, + "wgoDzluvTViwUjEsmVesKw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9204", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2", + "master": "false" + }, + "http": { + "bound_address": [ + "127.0.0.1:9204", + "[::1]:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 + } + }, + "6j_t3pPhSm-oRTyypTzu5g": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9205", + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3", + "master": "false" + }, + "http": { + "bound_address": [ + "127.0.0.1:9205", + "[::1]:9205" + ], + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 + } + }, + "PaEkm0z7Ssiuyfkh3aASag": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9206", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "data": "false", + "array.1": "1", + "master": "false" + }, + "http": { + "bound_address": [ + "127.0.0.1:9206", + "[::1]:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 + } + }, + "LAFKr2K_QmupqnM_atJqkQ": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9207", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "data": "false", + "array.1": "2", + "master": "false" + }, + "http": { + "bound_address": [ + "127.0.0.1:9207", + "[::1]:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 + } + } + } +} diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/5.0.0_nodes_http.json 
b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/5.0.0_nodes_http.json new file mode 100644 index 000000000..135843823 --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/5.0.0_nodes_http.json @@ -0,0 +1,217 @@ +{ + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 + }, + "cluster_name": "elasticsearch", + "nodes": { + "0S4r3NurTYSFSb8R9SxwWA": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "master", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 + } + }, + "k_CBrMXARkS57Qb5-3Mw5g": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "master", + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 + } + }, + "6eynRPQ1RleJTeGDuTR9mw": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "master", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 + } + }, + "cbGC-ay1QNWaESvEh5513w": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 + } + }, + "LexndPpXR2ytYsU5fTElnQ": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 + } + }, + "SbNG1DKYSBu20zfOz2gDZQ": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 + } + }, + "fM4H-m2WTDWmsGsL7jIJew": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", 
+ "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 + } + }, + "pFoh7d0BTbqqI3HKd9na5A": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 + } + } + } +} diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/6.0.0_nodes_http.json b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/6.0.0_nodes_http.json new file mode 100644 index 000000000..f0535dfdf --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/6.0.0_nodes_http.json @@ -0,0 +1,217 @@ +{ + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 + }, + "cluster_name": "elasticsearch", + "nodes": { + "ikXK_skVTfWkhONhldnbkw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 + } + }, + "TMHa34w4RqeuYoHCfJGXZg": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 + } + }, + "lzaMRJTVT166sgVZdQ5thA": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 + } + }, + "tGP5sUecSd6BLTWk1NWF8Q": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 + } + }, + "c1UgW5ROTkSa2YnM_T56tw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": 
"everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 + } + }, + "QM9yjqjmS72MstpNYV_trg": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 + } + }, + "wLtzAssoQYeX_4TstgCj0Q": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 + } + }, + "ONOzpst8TH-ZebG7fxGwaA": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 + } + } + } +} diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/7.3.0_nodes_http.json b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/7.3.0_nodes_http.json new file mode 100644 index 000000000..893aa4885 --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/7.3.0_nodes_http.json @@ -0,0 +1,218 @@ +{ + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 + }, + "cluster_name": "elasticsearch", + "nodes": { + "ikXK_skVTfWkhONhldnbkw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "7.3.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 + } + }, + "TMHa34w4RqeuYoHCfJGXZg": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "7.3.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 + } + }, + "lzaMRJTVT166sgVZdQ5thA": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "7.3.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": 
"3" + }, + "http": { + "bound_address": [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 + } + }, + "tGP5sUecSd6BLTWk1NWF8Q": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "7.3.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest", + "ml" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 + } + }, + "c1UgW5ROTkSa2YnM_T56tw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "7.3.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 + } + }, + "QM9yjqjmS72MstpNYV_trg": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "7.3.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 + } + }, + "wLtzAssoQYeX_4TstgCj0Q": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "7.3.0", + "build_hash": "8f0685b", + "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 + } + }, + "ONOzpst8TH-ZebG7fxGwaA": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "7.3.0", + "build_hash": "8f0685b", + "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 + } + } + } +} diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/create_test_nodes_info.bash b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/create_test_nodes_info.bash new file mode 100644 index 000000000..aa20ddffc --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/create_test_nodes_info.bash @@ -0,0 +1,107 @@ +#!/bin/bash + +# Recreates the v_nodes_http.json files in this directory. This is +# meant to be an "every once in a while" thing that we do only when +# we want to add a new version of Elasticsearch or configure the +# nodes differently. That is why we don't do this in gradle. It also +# allows us to play fast and loose with error handling. 
If something +# goes wrong you have to manually clean up which is good because it +# leaves around the kinds of things that we need to debug the failure. + +# I built this file so the next time I have to regenerate these +# v_nodes_http.json files I won't have to reconfigure Elasticsearch +# from scratch. While I was at it I took the time to make sure that +# when we do rebuild the files they don't jump around too much. That +# way the diffs are smaller. + +set -e + +script_path="$( cd "$(dirname "$0")" ; pwd -P )" +work=$(mktemp -d) +pushd ${work} >> /dev/null +echo Working in ${work} + +wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/2.0.0/elasticsearch-2.0.0.tar.gz +wget https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-5.0.0.tar.gz +wget https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.tar.gz +sha1sum -c - << __SHAs +e369d8579bd3a2e8b5344278d5043f19f14cac88 elasticsearch-2.0.0.tar.gz +d25f6547bccec9f0b5ea7583815f96a6f50849e0 elasticsearch-5.0.0.tar.gz +__SHAs +sha512sum -c - << __SHAs +25bb622d2fc557d8b8eded634a9b333766f7b58e701359e1bcfafee390776eb323cb7ea7a5e02e8803e25d8b1d3aabec0ec1b0cf492d0bab5689686fe440181c elasticsearch-6.0.0.tar.gz +__SHAs + + +function do_version() { + local version=$1 + local nodes='m1 m2 m3 d1 d2 d3 c1 c2' + rm -rf ${version} + mkdir -p ${version} + pushd ${version} >> /dev/null + + tar xf ../elasticsearch-${version}.tar.gz + local http_port=9200 + for node in ${nodes}; do + mkdir ${node} + cp -r elasticsearch-${version}/* ${node} + local master=$([[ "$node" =~ ^m.* ]] && echo true || echo false) + local data=$([[ "$node" =~ ^d.* ]] && echo true || echo false) + # m2 is always master and data for these test just so we have a node like that + data=$([[ "$node" == 'm2' ]] && echo true || echo ${data}) + local attr=$([ ${version} == '2.0.0' ] && echo '' || echo '.attr') + local transport_port=$((http_port+100)) + + cat >> ${node}/config/elasticsearch.yml << __ES_YML +node.name: ${node} +node.master: ${master} +node.data: ${data} +node${attr}.dummy: everyone_has_me +node${attr}.number: ${node:1} +node${attr}.array: [${node:0:1}, ${node:1}] +http.port: ${http_port} +transport.tcp.port: ${transport_port} +discovery.zen.minimum_master_nodes: 3 +discovery.zen.ping.unicast.hosts: ['localhost:9300','localhost:9301','localhost:9302'] +__ES_YML + + if [ ${version} != '2.0.0' ]; then + perl -pi -e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options + fi + + echo "starting ${version}/${node}..." + ${node}/bin/elasticsearch -d -p ${node}/pidfile + + ((http_port++)) + done + + echo "waiting for cluster to form" + # got to wait for all the nodes + until curl -s localhost:9200; do + sleep .25 + done + + echo "waiting for all nodes to join" + until [ $(echo ${nodes} | wc -w) -eq $(curl -s localhost:9200/_cat/nodes | wc -l) ]; do + sleep .25 + done + + # jq sorts the nodes by their http host so the file doesn't jump around when we regenerate it + curl -s localhost:9200/_nodes/http?pretty \ + | jq '[to_entries[] | ( select(.key == "nodes").value|to_entries|sort_by(.value.http.publish_address)|from_entries|{"key": "nodes", "value": .} ) // .] | from_entries' \ + > ${script_path}/${version}_nodes_http.json + + for node in ${nodes}; do + echo "stopping ${version}/${node}..." 
+ kill $(cat ${node}/pidfile) + done + + popd >> /dev/null +} + +JAVA_HOME=$JAVA8_HOME do_version 2.0.0 +JAVA_HOME=$JAVA8_HOME do_version 5.0.0 +JAVA_HOME=$JAVA8_HOME do_version 6.0.0 + +popd >> /dev/null +rm -rf ${work} diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/es6_nodes_publication_address_format.json b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/es6_nodes_publication_address_format.json new file mode 100644 index 000000000..7ded043b8 --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/es6_nodes_publication_address_format.json @@ -0,0 +1,30 @@ +{ + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 + }, + "cluster_name": "elasticsearch", + "nodes": { + "ikXK_skVTfWkhONhldnbkw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "ingest" + ], + "attributes": { }, + "http": { + "bound_address": [ + "127.0.0.1:9200" + ], + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 + } + } + } +} diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/es7_nodes_publication_address_format.json b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/es7_nodes_publication_address_format.json new file mode 100644 index 000000000..295bf3cbd --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/es7_nodes_publication_address_format.json @@ -0,0 +1,30 @@ +{ + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 + }, + "cluster_name": "elasticsearch", + "nodes": { + "ikXK_skVTfWkhONhldnbkw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "ingest" + ], + "attributes": { }, + "http": { + "bound_address": [ + "elastic.test:9200" + ], + "publish_address": "elastic.test/127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 + } + } + } +} diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/readme.txt b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/readme.txt new file mode 100644 index 000000000..c6dd32a04 --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/sniffer/readme.txt @@ -0,0 +1,6 @@ +`*_node_http.json` contains files created by spinning up toy clusters with a +few nodes in different configurations locally at various versions. They are +for testing `ElasticsearchNodesSniffer` against different versions of +Elasticsearch. + +See create_test_nodes_info.bash for how to create these. 
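[Editor's illustration, not part of the diff above.] The readme notes that the `*_nodes_http.json` fixtures are plain `_nodes/http` responses consumed by the `ElasticsearchNodesSniffer` tests; the addresses the sniffer ultimately extracts live under `nodes.*.http.publish_address`. Below is a minimal sketch of how such a fixture could be sanity-checked, assuming Jackson databind is available on the test classpath and using the resource path of the fixtures added in this patch; the class name `NodesHttpFixtureReader` is hypothetical and does not use the sniffer's internal API.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

public class NodesHttpFixtureReader {

    /** Collects the http.publish_address of every node entry in a *_nodes_http.json fixture. */
    static List<String> publishAddresses(InputStream fixture) throws IOException {
        JsonNode root = new ObjectMapper().readTree(fixture);
        List<String> addresses = new ArrayList<>();
        // "nodes" maps node id -> node info; only the "http" section matters for sniffing
        for (JsonNode node : root.path("nodes")) {
            JsonNode publish = node.path("http").path("publish_address");
            if (!publish.isMissingNode()) {
                addresses.add(publish.asText());
            }
        }
        return addresses;
    }

    public static void main(String[] args) throws Exception {
        try (InputStream in = NodesHttpFixtureReader.class.getResourceAsStream(
                "/co/elastic/clients/transport/rest5_client/low_level/sniffer/2.0.0_nodes_http.json")) {
            // prints 127.0.0.1:9200 through 127.0.0.1:9207 for the 2.0.0 fixture
            publishAddresses(in).forEach(System.out::println);
        }
    }
}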
diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/test.crt b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/test.crt new file mode 100644 index 000000000..b5edfcfa3 --- /dev/null +++ b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/test.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEATCCAumgAwIBAgIEObhDZDANBgkqhkiG9w0BAQsFADBnMQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDU1vdW50YWluIFZpZXcxEDAOBgNVBAoT +B2VsYXN0aWMxDTALBgNVBAsTBHRlc3QxEjAQBgNVBAMTCXRlc3Qgbm9kZTAeFw0x +NzA3MTcxNjEyNTZaFw0yNzA3MTUxNjEyNTZaMGcxCzAJBgNVBAYTAlVTMQswCQYD +VQQIEwJDQTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4GA1UEChMHZWxhc3Rp +YzENMAsGA1UECxMEdGVzdDESMBAGA1UEAxMJdGVzdCBub2RlMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnXtuGIgAq6vWzUD34HXkYF+0u103hb8d1h35 +kjeuNApkUhS6x/VbuNp7TpWmprfDgG5w9TourHvyiqcQMDEWrBunS6rmKo1jK1Wm +le3qA3F2l9VIZSNeeYQgezmzuElEPPmBjN8XBByIWKYjZcGd5u7DiquPUh9QLIev +itgB2jfi9D8ewyvaSbVAQuQwyIaDN9L74wKyMC8EuzzAWNSDjgIhhwcR5qg17msa +ItyM44/3hik+ObIGpMlLSxQu2V1U9bOaq48JjQBLHVg1vzC9VzGuNdEb8haFnhJN +UrdESdHymbtBSUvy30iB+kHq5R8wQ4pC+WxChQnbA2GskuFrMQIDAQABo4G0MIGx +MIGPBgNVHREEgYcwgYSHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAGCCWxvY2FsaG9z +dIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghdsb2NhbGhvc3Q0 +LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5sb2NhbGRvbWFp +bjYwHQYDVR0OBBYEFFwNcqIKfGBCBGo9faQJ3TsHmp0SMA0GCSqGSIb3DQEBCwUA +A4IBAQBvUJTRjSOf/+vtyS3OokwRilg1ZGF3psg0DWhjH2ehIRfNibU1Y8FVQo3I +VU8LjcIUK1cN85z+AsYqLXo/C4qmJPydQ1tGpQL7uIrPD4h+Xh3tY6A2DKRJRQFO +w2LjswPidGufMztpPbXxLREqvkvn80VkDnc44UPxYfHvZFqYwYyxZccA5mm+BhYu +IerjfvgX+8zMWIQZOd+jRq8EaVTmVK2Azwwhc5ImWfc0DA3pmGPdECzE4N0VVoIJ +N8PCVltXXP3F7K3LoT6CLSiJ3c/IDVNoVS4pRV6R6Y4oIKD9T/T1kAgAvOrUGRWY +ejWQ41GdUmkmxrqCaMbVCO4s72BC +-----END CERTIFICATE----- diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/test.der b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/test.der new file mode 100644 index 0000000000000000000000000000000000000000..454bfd286bd97937020cb8d1535c7cb3eec43557 GIT binary patch literal 1218 zcmV;z1U>sOf&{(-0RS)!1_>&LNQUrs4#*Aqyhl|0)hbn0G)eo7>EF? 
ztJcjx_uzHpU|+PmT{nfl9o8NBk~gk23S?3gy2tffxY~P8m8Pb*!+>sZ^*S!Bd-95> z5HK+otQ)6Gs^%(XizNwq*i zg`N^kQny4&(ejzQK}k#U-$;S_LF(lnFhhz$`D{Xk3EKlajxI^3zY*DMf42|U+NcycGN5Ii(&fd7Vo1LaD{Jj zk^&xUtM1`nB%&;_e1ejavA5F)$JNV-kx=7#3=1 zNR_IxrXGQvev4;>7K$A&AgJXrpFZ9tBu-XU3Uf%X^y(KqMsL0Z-C#fGDTJnM2BQ?+ zKE>d@a-CM0qU0BWzEYKKBGDEJ^dka+fdITy>*NbhtzpqD5?=qK)<@mW-;493$$ORU z8{^b}t5lA~o?5U70h1-GV?nHhZXY@>$0xT2_4l9h5S-b#TRao!cqSaPrDKxeqJOjo zml{5yQNEYHnnJMOUHUBmo4Q=cM1qpnio#8Uxx~|?($N?p><9S#EFe*7KU@iKo|^)J zfdGPA6Co{I{$ZaM#M7<2E=U`i@W9cZVz3Rl(n@%p zC!K#o3d*d6FqrbI!Bq4gl1aW28R!`EhPQJRp7x7Sbl81yUf$@cT zR|0{70Hk{CRiE_&-^6Tw?fk}J`EyuV91q}E=;#|V)sX2+oC7H>FyawDL5pH3YlGNf7Lm>kJ z5SLk)6U0@>%TGx)kQ3)MG&e9eG&eLeG%_`d66ZAnaZRCI`Z=VDQ3*K^7+D#Zn;7{S z44N3Zn3@9EiBm+{dKY&Mx?Dh3UI&DWy9-kNdy3Y1&zCVq0?3+gn8M zW^BmU&9hebbM`WL%SM>)H`p6)xX$#V^e3^_c|yKH+g&^_eww-4(bN0WeUHXpj<24| z8#s44{mgM{<-Ekjav*FHySkd1LILvkfpKRps}T$r5>2Z+690q z4;e5vapvSFC+1}27nd}N!kKztR!V*@FxfP5!G%ql#1U*{H6}=k%#akBAuBR7kOeu2 zk420{B!;(W5m!xu6HAtD?Gnzr*6g$93L!@ePE8)StWekcf6T@8>!Hn2u@bR@rlREmQe3 z*E!m4Dbw#AUFZ2b>f&VICND7KS>ox+=yy2j@n+^nCE4?htut-6e$*Az+UNcJvug@p zxy3{0kBJ}Or$o&-*t0S9IK#8deQaWSimx8m{Sg0s=1fG3q~-m^ZtGbxL!O1KZ8*=P zSUgEB^1BHS@5>phh28)F literal 0 HcmV?d00001 diff --git a/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/testks.jks b/java-client/src/test/resources/co/elastic/clients/transport/rest5_client/low_level/testks.jks new file mode 100644 index 0000000000000000000000000000000000000000..cd706db5e3834f4b44493530bca17697f32d8de2 GIT binary patch literal 2381 zcmcgt`9IYA8vlOB80%QF%P^K4yYI*ll4T;A?8cHMVh&T*Y{!}zYjfo2&U*Xwz`-tYJ4{loKmp6B&EKURON0ssIz4)Cv__YA&G z3_fNKX6Ang002DBTG4J7uREh5mJ7*;DV;dAlA=s5+KPl4wULQJdoL8~Pe7D3^*Cpn+O9sr03 zISo5KN{!m#CIL0)yz>t}H?Dji7ewQeMTpah_;lIM9lfx^{MI46EY=H2Gb87zSV!L9 zBNwGc)hcRNP-{ZbigjTAb+b#asOW1;eDvzR6aB?nZ0b51o)HL=(P6?em8T=Dtimf@ zkw!zNEH-cfeLsno#O=RbxSvAmmq_5eGP;=CTVxI0y~COhkJiND1UdLS2Ol7#CcE^d zL>{Nth-L14Z4j+X`yfibe=ag4rVlxJp#US7Q|y@QX^G~`3fl?pyl5Y^R8rIQWW4Id zQ{z=r`vr-qyIpkx)5AVH*xUo_`4Nx4b(N?wT{OYbRG2-rcbCh$ubPg4`&+@r5D zzPUX@zCiejz)?}CTe3&^u7rVdbjjY6C5l@V4|cw6NVPj+>;MVdCH6G z;*UbVqziZ{8{q|$yRDA6AbU?YM#cWH7~gF>>!8r(I*ID&@IdyK9Vh)mhm9(9QprF* zTcmmB5ofTGhE^wgc6e}~gaOj0+F~@7M8SRe&>AqUy`aoNh?gL%tOLG=Z`F!b~nE6fm|g zn~sp%GU*wT?a;|Lx{14y5rz@Zi*Ei`1^Asfnk+f1QoG!rBKWDm!N)n4tjm$w;#(Ms z*y4S9lR%xR>-qfN9B<|yeOK)-bH747=ZNpA3?IQ6RWFHIE8SgO`efbu{fcV~?x(fP{fMBSHJgUi@HE~B!hGqp zOO=HE;j=bw>XQc_Lz$N7+B!oz5O}9z4>`!sl^^yX__= zt-G_fDHBHnrXd?sZVgtfT~YRfOVPRZGOfh@@i4+lcQw=8yf9tQpirKyrJ1>K}mXUYH+n6o>({TY+BG7mQ%>U&6=* z!|7mnA?}kO1}>zLcYv3_Gsz2WMIr`aIF6U>vChoJK=g3-^C!7rkPyp>!omgjC;Itg z&OjU|l#Yw(grdE?U5St|9}=SqsbW+yDi|fK%5^@Z(h0@>hyMQ(p@3qii*S4+=qMmN z08v0@7zG4@>`*WMc%Za&v|oF7I$+-EMoqJWYD_z76t$P8`b?SG)ttMjYum12Jk&V7 zsHmP6>E*Mnu22@bNiF7pFuY}Fi}583GKns-grfA-Wjf!$M+>b?2e(*eKQE{W(|_ zqzG@m_}u1gR1!=yj)8OGH|*MNL9vesbIH50(wEe#5QVStcy5LF4wl>1k4ux85&=BQ z2HOs`T4SDJht6*D#{Bb)k$L@P!=cU0W*tNP=9B^XyUy|_5eP@s-Sg0mVfsMlT$VO- z8v~;Q0kD8v1Cc8sa`JH>QJipc97K+egWo;Aws9QCLw5Rs6ebUE7iSN5Z$Ezu&)-nt zFXZa&c|0~0=D%fS3g7?mX^-+LlgcTR%4w4dggQxx4bBVawnyH{XAX1Hh2PQ)FJzj~ zV0fH;=5%9^r~A0EAmDBKU?@>M=V)d1wfsQ-HBPDx*3~(vs9PE7?jqurC(74^6*Ip;MfXd)H`2nLpdhX1nEE+M3Y~js@@+eX5olR{E5MTxf=!t-%Qh!Tb~aJm hJ4YDTZ?{bYj5Ah4y>@bn_^A>sIjjET5{u3>b{aXM4 literal 0 HcmV?d00001