diff --git a/ChangeLog.md b/ChangeLog.md index 40bf7228d..f453fe068 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) a ### Added +- added support for Stream Transactions - added support for named indices - added support for TTL indices - added minReplicationAttribute for collections and graphs diff --git a/src/main/java/com/arangodb/ArangoCollection.java b/src/main/java/com/arangodb/ArangoCollection.java index 7ece5889d..423399fcc 100644 --- a/src/main/java/com/arangodb/ArangoCollection.java +++ b/src/main/java/com/arangodb/ArangoCollection.java @@ -36,24 +36,25 @@ /** * Interface for operations on ArangoDB collection level. - * + * * @see Collection API Documentation * @see Documents API Documentation * @author Mark Vollmary * @author Heiko Kernbach + * @author Michele Rastelli */ public interface ArangoCollection extends ArangoSerializationAccessor { /** * The the handler of the database the collection is within - * + * * @return database handler */ public ArangoDatabase db(); /** * The name of the collection - * + * * @return collection name */ public String name(); @@ -61,7 +62,7 @@ public interface ArangoCollection extends ArangoSerializationAccessor { /** * Creates a new document from the given document, unless there is already a document with the _key given. If no * _key is given, a new unique _key is generated automatically. - * + * * @see API * Documentation * @param value @@ -74,7 +75,7 @@ public interface ArangoCollection extends ArangoSerializationAccessor { /** * Creates a new document from the given document, unless there is already a document with the _key given. If no * _key is given, a new unique _key is generated automatically. 
- * + * * @see API * Documentation * @param value @@ -89,7 +90,7 @@ public interface ArangoCollection extends ArangoSerializationAccessor { /** * Creates new documents from the given documents, unless there is already a document with the _key given. If no * _key is given, a new unique _key is generated automatically. - * + * * @see API * Documentation * @param values @@ -102,7 +103,7 @@ public interface ArangoCollection extends ArangoSerializationAccessor { /** * Creates new documents from the given documents, unless there is already a document with the _key given. If no * _key is given, a new unique _key is generated automatically. - * + * * @see API * Documentation * @param values @@ -118,7 +119,7 @@ MultiDocumentEntity> insertDocuments( /** * Bulk imports the given values into the collection. - * + * * @param values * a list of Objects that will be stored as documents * @return information about the import @@ -128,7 +129,7 @@ MultiDocumentEntity> insertDocuments( /** * Bulk imports the given values into the collection. - * + * * @param values * a list of Objects that will be stored as documents * @param options @@ -157,7 +158,7 @@ Collection importDocuments(Collection values, DocumentI /** * Bulk imports the given values into the collection. - * + * * @param values * JSON-encoded array of objects that will be stored as documents * @return information about the import @@ -167,7 +168,7 @@ Collection importDocuments(Collection values, DocumentI /** * Bulk imports the given values into the collection. - * + * * @param values * JSON-encoded array of objects that will be stored as documents * @param options @@ -179,7 +180,7 @@ Collection importDocuments(Collection values, DocumentI /** * Retrieves the document with the given {@code key} from the collection. - * + * * @see API * Documentation * @param key @@ -193,7 +194,7 @@ Collection importDocuments(Collection values, DocumentI /** * Retrieves the document with the given {@code key} from the collection. 
- * + * * @see API * Documentation * @param key @@ -209,7 +210,7 @@ Collection importDocuments(Collection values, DocumentI /** * Retrieves multiple documents with the given {@code _key} from the collection. - * + * * @param keys * The keys of the documents * @param type @@ -221,7 +222,7 @@ Collection importDocuments(Collection values, DocumentI /** * Retrieves multiple documents with the given {@code _key} from the collection. - * + * * @param keys * The keys of the documents * @param type @@ -237,7 +238,7 @@ MultiDocumentEntity getDocuments(Collection keys, Class type, /** * Replaces the document with {@code key} with the one in the body, provided there is such a document and no * precondition is violated - * + * * @see API * Documentation * @param key @@ -252,7 +253,7 @@ MultiDocumentEntity getDocuments(Collection keys, Class type, /** * Replaces the document with {@code key} with the one in the body, provided there is such a document and no * precondition is violated - * + * * @see API * Documentation * @param key @@ -270,7 +271,7 @@ DocumentUpdateEntity replaceDocument(String key, T value, DocumentReplace /** * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are * specified by the _key attributes in the documents in values. - * + * * @see API * Documentation * @param values @@ -283,7 +284,7 @@ DocumentUpdateEntity replaceDocument(String key, T value, DocumentReplace /** * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are * specified by the _key attributes in the documents in values. - * + * * @see API * Documentation * @param values @@ -301,7 +302,7 @@ MultiDocumentEntity> replaceDocuments( * Partially updates the document identified by document-key. The value must contain a document with the attributes * to patch (the patch document). 
All attributes from the patch document will be added to the existing document if * they do not yet exist, and overwritten in the existing document if they do exist there. - * + * * @see API * Documentation * @param key @@ -317,7 +318,7 @@ MultiDocumentEntity> replaceDocuments( * Partially updates the document identified by document-key. The value must contain a document with the attributes * to patch (the patch document). All attributes from the patch document will be added to the existing document if * they do not yet exist, and overwritten in the existing document if they do exist there. - * + * * @see API * Documentation * @param key @@ -337,7 +338,7 @@ DocumentUpdateEntity updateDocument(String key, T value, DocumentUpdateOp * values. Vales must contain a list of document updates with the attributes to patch (the patch documents). All * attributes from the patch documents will be added to the existing documents if they do not yet exist, and * overwritten in the existing documents if they do exist there. - * + * * @see API * Documentation * @param values @@ -352,7 +353,7 @@ DocumentUpdateEntity updateDocument(String key, T value, DocumentUpdateOp * values. Vales must contain a list of document updates with the attributes to patch (the patch documents). All * attributes from the patch documents will be added to the existing documents if they do not yet exist, and * overwritten in the existing documents if they do exist there. - * + * * @see API * Documentation * @param values @@ -368,7 +369,7 @@ MultiDocumentEntity> updateDocuments( /** * Deletes the document with the given {@code key} from the collection. - * + * * @see API * Documentation * @param key @@ -380,7 +381,7 @@ MultiDocumentEntity> updateDocuments( /** * Deletes the document with the given {@code key} from the collection. 
- * + * * @see API * Documentation * @param key @@ -398,7 +399,7 @@ DocumentDeleteEntity deleteDocument(String key, Class type, DocumentDe /** * Deletes multiple documents from the collection. - * + * * @see API * Documentation @@ -411,7 +412,7 @@ DocumentDeleteEntity deleteDocument(String key, Class type, DocumentDe /** * Deletes multiple documents from the collection. - * + * * @see API * Documentation @@ -432,7 +433,7 @@ MultiDocumentEntity> deleteDocuments( /** * Checks if the document exists by reading a single document head - * + * * @see API * Documentation @@ -444,7 +445,7 @@ MultiDocumentEntity> deleteDocuments( /** * Checks if the document exists by reading a single document head - * + * * @see API * Documentation @@ -460,7 +461,7 @@ MultiDocumentEntity> deleteDocuments( /** * Fetches information about the index with the given {@code id} and returns it. - * + * * @see API Documentation * @param id * The index-handle @@ -471,7 +472,7 @@ MultiDocumentEntity> deleteDocuments( /** * Deletes the index with the given {@code id} from the collection. - * + * * @see API Documentation * @param id * The index-handle @@ -482,7 +483,7 @@ MultiDocumentEntity> deleteDocuments( /** * Creates a hash index for the collection if it does not already exist. - * + * * @see API Documentation * @param fields * A list of attribute paths @@ -495,7 +496,7 @@ MultiDocumentEntity> deleteDocuments( /** * Creates a skip-list index for the collection, if it does not already exist. - * + * * @see API * Documentation * @param fields @@ -509,7 +510,7 @@ MultiDocumentEntity> deleteDocuments( /** * Creates a persistent index for the collection, if it does not already exist. - * + * * @see API * Documentation * @param fields @@ -523,7 +524,7 @@ MultiDocumentEntity> deleteDocuments( /** * Creates a geo-spatial index for the collection, if it does not already exist. 
- * + * * @see API * Documentation * @param fields @@ -537,7 +538,7 @@ MultiDocumentEntity> deleteDocuments( /** * Creates a fulltext index for the collection, if it does not already exist. - * + * * @see API * Documentation * @param fields @@ -565,7 +566,7 @@ MultiDocumentEntity> deleteDocuments( /** * Fetches a list of all indexes on this collection. - * + * * @see API * Documentation @@ -576,14 +577,14 @@ MultiDocumentEntity> deleteDocuments( /** * Checks whether the collection exists - * + * * @return true if the collection exists, otherwise false */ boolean exists() throws ArangoDBException; /** * Removes all documents from the collection, but leaves the indexes intact - * + * * @see API * Documentation * @return information about the collection @@ -591,9 +592,21 @@ MultiDocumentEntity> deleteDocuments( */ CollectionEntity truncate() throws ArangoDBException; + /** + * Removes all documents from the collection, but leaves the indexes intact + * + * @see API + * Documentation + * @param options + * @return information about the collection + * @since ArangoDB 3.5.0 + * @throws ArangoDBException + */ + CollectionEntity truncate(CollectionTruncateOptions options) throws ArangoDBException; + /** * Counts the documents in a collection - * + * * @see API * Documentation @@ -602,9 +615,22 @@ MultiDocumentEntity> deleteDocuments( */ CollectionPropertiesEntity count() throws ArangoDBException; + /** + * Counts the documents in a collection + * + * @see API + * Documentation + * @param options + * @return information about the collection, including the number of documents + * @since ArangoDB 3.5.0 + * @throws ArangoDBException + */ + CollectionPropertiesEntity count(CollectionCountOptions options) throws ArangoDBException; + /** * Creates a collection for this collection's name, then returns collection information from the server. 
- * + * * @see API * Documentation * @return information about the collection @@ -615,7 +641,7 @@ MultiDocumentEntity> deleteDocuments( /** * Creates a collection with the given {@code options} for this collection's name, then returns collection * information from the server. - * + * * @see API * Documentation * @param options @@ -627,7 +653,7 @@ MultiDocumentEntity> deleteDocuments( /** * Deletes the collection from the database. - * + * * @see API * Documentation * @throws ArangoDBException @@ -636,7 +662,7 @@ MultiDocumentEntity> deleteDocuments( /** * Deletes the collection from the database. - * + * * @see API * Documentation * @param isSystem @@ -649,7 +675,7 @@ MultiDocumentEntity> deleteDocuments( /** * Tells the server to load the collection into memory. - * + * * @see API * Documentation * @return information about the collection @@ -660,7 +686,7 @@ MultiDocumentEntity> deleteDocuments( /** * Tells the server to remove the collection from memory. This call does not delete any documents. You can use the * collection afterwards; in which case it will be loaded into memory, again. 
- * + * * @see API * Documentation * @return information about the collection @@ -670,7 +696,7 @@ MultiDocumentEntity> deleteDocuments( /** * Returns information about the collection - * + * * @see API * Documentation @@ -681,7 +707,7 @@ MultiDocumentEntity> deleteDocuments( /** * Reads the properties of the specified collection - * + * * @see API * Documentation @@ -692,7 +718,7 @@ MultiDocumentEntity> deleteDocuments( /** * Changes the properties of the collection - * + * * @see API * Documentation @@ -705,7 +731,7 @@ MultiDocumentEntity> deleteDocuments( /** * Renames the collection - * + * * @see API * Documentation * @param newName @@ -717,7 +743,7 @@ MultiDocumentEntity> deleteDocuments( /** * Retrieve the collections revision - * + * * @see API * Documentation * @return information about the collection, including the collections revision @@ -728,7 +754,7 @@ MultiDocumentEntity> deleteDocuments( /** * Grants or revoke access to the collection for user user. You need permission to the _system database in order to * execute this call. - * + * * @see API * Documentation @@ -743,7 +769,7 @@ MultiDocumentEntity> deleteDocuments( /** * Revokes access to the collection for user user. You need permission to the _system database in order to execute * this call. - * + * * @see API * Documentation @@ -755,7 +781,7 @@ MultiDocumentEntity> deleteDocuments( /** * Clear the collection access level, revert back to the default access level. 
- * + * * @see API * Documentation @@ -768,7 +794,7 @@ MultiDocumentEntity> deleteDocuments( /** * Get the collection access level - * + * * @see * API Documentation * @param user diff --git a/src/main/java/com/arangodb/ArangoDB.java b/src/main/java/com/arangodb/ArangoDB.java index ff786e24c..bd866ea49 100644 --- a/src/main/java/com/arangodb/ArangoDB.java +++ b/src/main/java/com/arangodb/ArangoDB.java @@ -27,13 +27,7 @@ import javax.net.ssl.SSLContext; -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.LoadBalancingStrategy; -import com.arangodb.entity.LogEntity; -import com.arangodb.entity.LogLevelEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.ServerRole; -import com.arangodb.entity.UserEntity; +import com.arangodb.entity.*; import com.arangodb.internal.ArangoContext; import com.arangodb.internal.ArangoDBImpl; import com.arangodb.internal.ArangoDefaults; @@ -86,6 +80,7 @@ * * * @author Mark Vollmary + * @author Michele Rastelli */ public interface ArangoDB extends ArangoSerializationAccessor { @@ -717,6 +712,16 @@ public synchronized ArangoDB build() { */ ArangoDBVersion getVersion() throws ArangoDBException; + /** + * Returns the server storage engine. + * + * @see API + * Documentation + * @return the storage engine name + * @throws ArangoDBException + */ + ArangoDBEngine getEngine() throws ArangoDBException; + /** * Returns the server role. 
* diff --git a/src/main/java/com/arangodb/ArangoDatabase.java b/src/main/java/com/arangodb/ArangoDatabase.java index 6f643aadb..355da7774 100644 --- a/src/main/java/com/arangodb/ArangoDatabase.java +++ b/src/main/java/com/arangodb/ArangoDatabase.java @@ -23,197 +23,174 @@ import java.util.Collection; import java.util.Map; -import com.arangodb.entity.AqlExecutionExplainEntity; -import com.arangodb.entity.AqlFunctionEntity; -import com.arangodb.entity.AqlParseEntity; -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.DatabaseEntity; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryCachePropertiesEntity; -import com.arangodb.entity.QueryEntity; -import com.arangodb.entity.QueryTrackingPropertiesEntity; -import com.arangodb.entity.TraversalEntity; -import com.arangodb.entity.ViewEntity; -import com.arangodb.entity.ViewType; -import com.arangodb.model.AqlFunctionCreateOptions; -import com.arangodb.model.AqlFunctionDeleteOptions; -import com.arangodb.model.AqlFunctionGetOptions; -import com.arangodb.model.AqlQueryExplainOptions; -import com.arangodb.model.AqlQueryOptions; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.CollectionsReadOptions; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.GraphCreateOptions; -import com.arangodb.model.TransactionOptions; -import com.arangodb.model.TraversalOptions; +import com.arangodb.entity.*; +import com.arangodb.model.*; import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; /** * Interface for operations on ArangoDB database level. 
- * + * + * @author Mark Vollmary + * @author Michele Rastelli * @see Databases API Documentation * @see Query API Documentation - * @author Mark Vollmary */ public interface ArangoDatabase extends ArangoSerializationAccessor { /** * Return the main entry point for the ArangoDB driver - * + * * @return main entry point */ ArangoDB arango(); /** * Returns the name of the database - * + * * @return database name */ String name(); /** * Returns the server name and version number. - * - * @see API - * Documentation + * * @return the server version, number * @throws ArangoDBException + * @see API + * Documentation */ ArangoDBVersion getVersion() throws ArangoDBException; + /** + * Returns the name of the used storage engine. + * + * @return the storage engine name + * @throws ArangoDBException + * @see API + * Documentation + */ + ArangoDBEngine getEngine() throws ArangoDBException; + /** * Checks whether the database exists - * + * * @return true if the database exists, otherwise false */ boolean exists() throws ArangoDBException; /** * Retrieves a list of all databases the current user can access - * - * @see API - * Documentation + * * @return a list of all databases the current user can access * @throws ArangoDBException + * @see API + * Documentation */ Collection getAccessibleDatabases() throws ArangoDBException; /** * Returns a {@code ArangoCollection} instance for the given collection name. - * - * @param name - * Name of the collection + * + * @param name Name of the collection * @return collection handler */ ArangoCollection collection(String name); /** * Creates a collection for the given collection's name, then returns collection information from the server. 
- * - * @see API - * Documentation - * @param name - * The name of the collection + * + * @param name The name of the collection * @return information about the collection * @throws ArangoDBException + * @see API + * Documentation */ CollectionEntity createCollection(String name) throws ArangoDBException; /** * Creates a collection with the given {@code options} for this collection's name, then returns collection * information from the server. - * - * @see API - * Documentation - * @param name - * The name of the collection - * @param options - * Additional options, can be null + * + * @param name The name of the collection + * @param options Additional options, can be null * @return information about the collection * @throws ArangoDBException + * @see API + * Documentation */ CollectionEntity createCollection(String name, CollectionCreateOptions options) throws ArangoDBException; /** * Fetches all collections from the database and returns an list of collection descriptions. - * - * @see API - * Documentation + * * @return list of information about all collections * @throws ArangoDBException + * @see API + * Documentation */ Collection getCollections() throws ArangoDBException; /** * Fetches all collections from the database and returns an list of collection descriptions. 
- * - * @see API - * Documentation - * @param options - * Additional options, can be null + * + * @param options Additional options, can be null * @return list of information about all collections * @throws ArangoDBException + * @see API + * Documentation */ Collection getCollections(CollectionsReadOptions options) throws ArangoDBException; /** * Returns an index - * - * @see API Documentation - * @param id - * The index-handle + * + * @param id The index-handle * @return information about the index * @throws ArangoDBException + * @see API Documentation */ IndexEntity getIndex(String id) throws ArangoDBException; /** * Deletes an index - * - * @see API Documentation - * @param id - * The index-handle + * + * @param id The index-handle * @return the id of the index * @throws ArangoDBException + * @see API Documentation */ String deleteIndex(String id) throws ArangoDBException; /** * Creates the database - * - * @see API - * Documentation + * * @return true if the database was created successfully. * @throws ArangoDBException + * @see API + * Documentation */ Boolean create() throws ArangoDBException; /** * Deletes the database from the server. - * - * @see API - * Documentation + * * @return true if the database was dropped successfully * @throws ArangoDBException + * @see API + * Documentation */ Boolean drop() throws ArangoDBException; /** * Grants or revoke access to the database for user {@code user}. You need permission to the _system database in * order to execute this call. 
- * - * @see - * API Documentation - * @param user - * The name of the user - * @param permissions - * The permissions the user grant + * + * @param user The name of the user + * @param permissions The permissions the user grant * @throws ArangoDBException + * @see + * API Documentation */ void grantAccess(String user, Permissions permissions) throws ArangoDBException; @@ -221,80 +198,70 @@ public interface ArangoDatabase extends ArangoSerializationAccessor { * Grants access to the database for user {@code user}. You need permission to the _system database in order to * execute this call. * - * @see - * API Documentation - * @param user - * The name of the user + * @param user The name of the user * @throws ArangoDBException + * @see + * API Documentation */ void grantAccess(String user) throws ArangoDBException; /** * Revokes access to the database dbname for user {@code user}. You need permission to the _system database in order * to execute this call. - * - * @see - * API Documentation - * @param user - * The name of the user + * + * @param user The name of the user * @throws ArangoDBException + * @see + * API Documentation */ void revokeAccess(String user) throws ArangoDBException; /** * Clear the database access level, revert back to the default access level. - * + * + * @param user The name of the user + * @throws ArangoDBException * @see - * API Documentation - * @param user - * The name of the user + * API Documentation * @since ArangoDB 3.2.0 - * @throws ArangoDBException */ void resetAccess(String user) throws ArangoDBException; /** * Sets the default access level for collections within this database for the user {@code user}. You need permission * to the _system database in order to execute this call. 
- * - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @since ArangoDB 3.2.0 + * + * @param user The name of the user + * @param permissions The permissions the user grant * @throws ArangoDBException + * @since ArangoDB 3.2.0 */ void grantDefaultCollectionAccess(String user, Permissions permissions) throws ArangoDBException; /** * Get specific database access level - * - * @see API - * Documentation - * @param user - * The name of the user + * + * @param user The name of the user * @return permissions of the user - * @since ArangoDB 3.2.0 * @throws ArangoDBException + * @see API + * Documentation + * @since ArangoDB 3.2.0 */ Permissions getPermissions(String user) throws ArangoDBException; /** * Performs a database query using the given {@code query} and {@code bindVars}, then returns a new * {@code ArangoCursor} instance for the result list. - * - * @see API - * Documentation - * @param query - * An AQL query string - * @param bindVars - * key/value pairs defining the variables to bind the query to - * @param options - * Additional options that will be passed to the query API, can be null - * @param type - * The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) + * + * @param query An AQL query string + * @param bindVars key/value pairs defining the variables to bind the query to + * @param options Additional options that will be passed to the query API, can be null + * @param type The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) * @return cursor of the results * @throws ArangoDBException + * @see API + * Documentation */ ArangoCursor query(String query, Map bindVars, AqlQueryOptions options, Class type) throws ArangoDBException; @@ -302,80 +269,67 @@ ArangoCursor query(String query, Map bindVars, AqlQueryOp /** * Performs a database query using the given {@code query}, then returns a new {@code ArangoCursor} instance for the * result 
list. - * - * @see API - * Documentation - * @param query - * An AQL query string - * @param options - * Additional options that will be passed to the query API, can be null - * @param type - * The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) + * + * @param query An AQL query string + * @param options Additional options that will be passed to the query API, can be null + * @param type The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) * @return cursor of the results * @throws ArangoDBException + * @see API + * Documentation */ ArangoCursor query(String query, AqlQueryOptions options, Class type) throws ArangoDBException; /** * Performs a database query using the given {@code query} and {@code bindVars}, then returns a new * {@code ArangoCursor} instance for the result list. - * - * @see API - * Documentation - * @param query - * An AQL query string - * @param bindVars - * key/value pairs defining the variables to bind the query to - * @param type - * The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) + * + * @param query An AQL query string + * @param bindVars key/value pairs defining the variables to bind the query to + * @param type The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) * @return cursor of the results * @throws ArangoDBException + * @see API + * Documentation */ ArangoCursor query(String query, Map bindVars, Class type) throws ArangoDBException; /** * Performs a database query using the given {@code query}, then returns a new {@code ArangoCursor} instance for the * result list. 
- * - * @see API - * Documentation - * @param query - * An AQL query string - * @param type - * The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) + * + * @param query An AQL query string + * @param type The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) * @return cursor of the results * @throws ArangoDBException + * @see API + * Documentation */ ArangoCursor query(String query, Class type) throws ArangoDBException; /** * Return an cursor from the given cursor-ID if still existing - * - * @see API - * Documentation - * @param cursorId - * The ID of the cursor - * @param type - * The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) + * + * @param cursorId The ID of the cursor + * @param type The type of the result (POJO class, VPackSlice, String for JSON, or Collection/List/Map) * @return cursor of the results * @throws ArangoDBException + * @see API + * Documentation */ ArangoCursor cursor(String cursorId, Class type) throws ArangoDBException; /** * Explain an AQL query and return information about it - * - * @see API - * Documentation - * @param query - * the query which you want explained - * @param bindVars - * key/value pairs representing the bind parameters - * @param options - * Additional options, can be null + * + * @param query the query which you want explained + * @param bindVars key/value pairs representing the bind parameters + * @param options Additional options, can be null * @return information about the query * @throws ArangoDBException + * @see API + * Documentation */ AqlExecutionExplainEntity explainQuery(String query, Map bindVars, AqlQueryExplainOptions options) throws ArangoDBException; @@ -383,167 +337,156 @@ AqlExecutionExplainEntity explainQuery(String query, Map bindVar /** * Parse an AQL query and return information about it This method is for query validation only. 
To actually query * the database, see {@link ArangoDatabase#query(String, Map, AqlQueryOptions, Class)} - * - * @see API - * Documentation - * @param query - * the query which you want parse + * + * @param query the query which you want parse * @return imformation about the query * @throws ArangoDBException + * @see API + * Documentation */ AqlParseEntity parseQuery(String query) throws ArangoDBException; /** * Clears the AQL query cache - * - * @see API - * Documentation + * * @throws ArangoDBException + * @see API + * Documentation */ void clearQueryCache() throws ArangoDBException; /** * Returns the global configuration for the AQL query cache - * - * @see API - * Documentation + * * @return configuration for the AQL query cache * @throws ArangoDBException + * @see API + * Documentation */ QueryCachePropertiesEntity getQueryCacheProperties() throws ArangoDBException; /** * Changes the configuration for the AQL query cache. Note: changing the properties may invalidate all results in * the cache. 
- * - * @see API - * Documentation - * @param properties - * properties to be set + * + * @param properties properties to be set * @return current set of properties * @throws ArangoDBException + * @see API + * Documentation */ QueryCachePropertiesEntity setQueryCacheProperties(QueryCachePropertiesEntity properties) throws ArangoDBException; /** * Returns the configuration for the AQL query tracking - * - * @see API - * Documentation + * * @return configuration for the AQL query tracking * @throws ArangoDBException + * @see API + * Documentation */ QueryTrackingPropertiesEntity getQueryTrackingProperties() throws ArangoDBException; /** * Changes the configuration for the AQL query tracking - * - * @see API - * Documentation - * @param properties - * properties to be set + * + * @param properties properties to be set * @return current set of properties * @throws ArangoDBException + * @see API + * Documentation */ QueryTrackingPropertiesEntity setQueryTrackingProperties(QueryTrackingPropertiesEntity properties) throws ArangoDBException; /** * Returns a list of currently running AQL queries - * - * @see API - * Documentation + * * @return a list of currently running AQL queries * @throws ArangoDBException + * @see API + * Documentation */ Collection getCurrentlyRunningQueries() throws ArangoDBException; /** * Returns a list of slow running AQL queries - * - * @see API - * Documentation + * * @return a list of slow running AQL queries * @throws ArangoDBException + * @see API + * Documentation */ Collection getSlowQueries() throws ArangoDBException; /** * Clears the list of slow AQL queries - * - * @see API - * Documentation + * * @throws ArangoDBException + * @see API + * Documentation */ void clearSlowQueries() throws ArangoDBException; /** * Kills a running query. The query will be terminated at the next cancelation point. 
- * - * @see API - * Documentation - * @param id - * The id of the query + * + * @param id The id of the query * @throws ArangoDBException + * @see API + * Documentation */ void killQuery(String id) throws ArangoDBException; /** * Create a new AQL user function - * - * @see API - * Documentation - * @param name - * A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"` - * @param code - * A String evaluating to a JavaScript function - * @param options - * Additional options, can be null + * + * @param name A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"` + * @param code A String evaluating to a JavaScript function + * @param options Additional options, can be null * @throws ArangoDBException + * @see API + * Documentation */ void createAqlFunction(String name, String code, AqlFunctionCreateOptions options) throws ArangoDBException; /** * Deletes the AQL user function with the given name from the database. - * - * @see API - * Documentation - * @param name - * The name of the user function to delete - * @param options - * Additional options, can be null + * + * @param name The name of the user function to delete + * @param options Additional options, can be null * @return number of deleted functions (since ArangoDB 3.4.0) * @throws ArangoDBException + * @see API + * Documentation */ Integer deleteAqlFunction(String name, AqlFunctionDeleteOptions options) throws ArangoDBException; /** * Gets all reqistered AQL user functions - * - * @see API - * Documentation - * @param options - * Additional options, can be null + * + * @param options Additional options, can be null * @return all reqistered AQL user functions * @throws ArangoDBException + * @see API + * Documentation */ Collection getAqlFunctions(AqlFunctionGetOptions options) throws ArangoDBException; /** * Returns a {@code ArangoGraph} instance for the given graph name. 
- * - * @param name - * Name of the graph + * + * @param name Name of the graph * @return graph handler */ ArangoGraph graph(String name); @@ -551,155 +494,181 @@ QueryTrackingPropertiesEntity setQueryTrackingProperties(QueryTrackingProperties /** * Create a new graph in the graph module. The creation of a graph requires the name of the graph and a definition * of its edges. - * - * @see API - * Documentation - * @param name - * Name of the graph - * @param edgeDefinitions - * An array of definitions for the edge + * + * @param name Name of the graph + * @param edgeDefinitions An array of definitions for the edge * @return information about the graph * @throws ArangoDBException + * @see API + * Documentation */ GraphEntity createGraph(String name, Collection edgeDefinitions) throws ArangoDBException; /** * Create a new graph in the graph module. The creation of a graph requires the name of the graph and a definition * of its edges. - * - * @see API - * Documentation - * @param name - * Name of the graph - * @param edgeDefinitions - * An array of definitions for the edge - * @param options - * Additional options, can be null + * + * @param name Name of the graph + * @param edgeDefinitions An array of definitions for the edge + * @param options Additional options, can be null * @return information about the graph * @throws ArangoDBException + * @see API + * Documentation */ GraphEntity createGraph(String name, Collection edgeDefinitions, GraphCreateOptions options) throws ArangoDBException; /** * Lists all graphs known to the graph module - * - * @see API - * Documentation + * * @return graphs stored in this database * @throws ArangoDBException + * @see API + * Documentation */ Collection getGraphs() throws ArangoDBException; /** * Performs a server-side transaction and returns its return value. - * - * @see API - * Documentation - * @param action - * A String evaluating to a JavaScript function to be executed on the server. 
- * @param type - * The type of the result (POJO class, VPackSlice or String for JSON) - * @param options - * Additional options, can be null + * + * @param action A String evaluating to a JavaScript function to be executed on the server. + * @param type The type of the result (POJO class, VPackSlice or String for JSON) + * @param options Additional options, can be null * @return the result of the transaction if it succeeded * @throws ArangoDBException + * @see API + * Documentation */ T transaction(String action, Class type, TransactionOptions options) throws ArangoDBException; + /** + * Begins a Stream Transaction. + * + * @param options Additional options, can be null + * @return information about the transaction + * @throws ArangoDBException + * @see API + * Documentation + * @since ArangoDB 3.5.0 + */ + StreamTransactionEntity beginStreamTransaction(StreamTransactionOptions options) throws ArangoDBException; + + /** + * Aborts a Stream Transaction. + * + * @return information about the transaction + * @throws ArangoDBException + * @see API + * Documentation + */ + StreamTransactionEntity abortStreamTransaction(String id) throws ArangoDBException; + + /** + * Gets information about a Stream Transaction. + * + * @return information about the transaction + * @throws ArangoDBException + * @see + * API Documentation + * @since ArangoDB 3.5.0 + */ + StreamTransactionEntity getStreamTransaction(String id) throws ArangoDBException; + + /** + * Commits a Stream Transaction. 
+ * + * @return information about the transaction + * @throws ArangoDBException + * @see + * API Documentation + * @since ArangoDB 3.5.0 + */ + StreamTransactionEntity commitStreamTransaction(String id) throws ArangoDBException; + /** * Retrieves information about the current database - * - * @see API - * Documentation + * * @return information about the current database * @throws ArangoDBException + * @see API + * Documentation */ DatabaseEntity getInfo() throws ArangoDBException; /** * Execute a server-side traversal - * - * @see API - * Documentation - * @param vertexClass - * The type of the vertex documents (POJO class, VPackSlice or String for JSON) - * @param edgeClass - * The type of the edge documents (POJO class, VPackSlice or String for JSON) - * @param options - * Additional options + * + * @param vertexClass The type of the vertex documents (POJO class, VPackSlice or String for JSON) + * @param edgeClass The type of the edge documents (POJO class, VPackSlice or String for JSON) + * @param options Additional options * @return Result of the executed traversal * @throws ArangoDBException + * @see API + * Documentation */ TraversalEntity executeTraversal(Class vertexClass, Class edgeClass, TraversalOptions options) throws ArangoDBException; /** * Reads a single document - * - * @see API - * Documentation - * @param id - * The id of the document - * @param type - * The type of the document (POJO class, VPackSlice or String for JSON) + * + * @param id The id of the document + * @param type The type of the document (POJO class, VPackSlice or String for JSON) * @return the document identified by the id * @throws ArangoDBException + * @see API + * Documentation */ T getDocument(String id, Class type) throws ArangoDBException; /** * Reads a single document - * - * @see API - * Documentation - * @param id - * The id of the document - * @param type - * The type of the document (POJO class, VPackSlice or String for JSON) - * @param options - * Additional options, 
can be null + * + * @param id The id of the document + * @param type The type of the document (POJO class, VPackSlice or String for JSON) + * @param options Additional options, can be null * @return the document identified by the id * @throws ArangoDBException + * @see API + * Documentation */ T getDocument(String id, Class type, DocumentReadOptions options) throws ArangoDBException; /** * Reload the routing table. - * - * @see API - * Documentation + * * @throws ArangoDBException + * @see API + * Documentation */ void reloadRouting() throws ArangoDBException; /** * Returns a new {@link ArangoRoute} instance for the given path (relative to the database) that can be used to * perform arbitrary requests. - * - * @param path - * The database-relative URL of the route + * + * @param path The database-relative URL of the route * @return {@link ArangoRoute} */ ArangoRoute route(String... path); /** * Fetches all views from the database and returns an list of view descriptions. - * - * @see API Documentation + * * @return list of information about all views * @throws ArangoDBException + * @see API Documentation * @since ArangoDB 3.4.0 */ Collection getViews() throws ArangoDBException; /** * Returns a {@code ArangoView} instance for the given view name. - * - * @param name - * Name of the view + * + * @param name Name of the view * @return view handler * @since ArangoDB 3.4.0 */ @@ -707,9 +676,8 @@ TraversalEntity executeTraversal(Class vertexClass, Class edg /** * Returns a {@code ArangoSearch} instance for the given ArangoSearch view name. - * - * @param name - * Name of the view + * + * @param name Name of the view * @return ArangoSearch view handler * @since ArangoDB 3.4.0 */ @@ -717,29 +685,25 @@ TraversalEntity executeTraversal(Class vertexClass, Class edg /** * Creates a view of the given {@code type}, then returns view information from the server. 
- * - * @param name - * The name of the view - * @param type - * The type of the view + * + * @param name The name of the view + * @param type The type of the view * @return information about the view - * @since ArangoDB 3.4.0 * @throws ArangoDBException + * @since ArangoDB 3.4.0 */ ViewEntity createView(String name, ViewType type) throws ArangoDBException; /** * Creates a ArangoSearch view with the given {@code options}, then returns view information from the server. - * - * @see API - * Documentation - * @param name - * The name of the view - * @param options - * Additional options, can be null + * + * @param name The name of the view + * @param options Additional options, can be null * @return information about the view - * @since ArangoDB 3.4.0 * @throws ArangoDBException + * @see API + * Documentation + * @since ArangoDB 3.4.0 */ ViewEntity createArangoSearch(String name, ArangoSearchCreateOptions options) throws ArangoDBException; diff --git a/src/main/java/com/arangodb/entity/ArangoDBEngine.java b/src/main/java/com/arangodb/entity/ArangoDBEngine.java new file mode 100644 index 000000000..72d2c9300 --- /dev/null +++ b/src/main/java/com/arangodb/entity/ArangoDBEngine.java @@ -0,0 +1,47 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Michele Rastelli + * @see API + * Documentation + */ +public class ArangoDBEngine implements Entity { + + public enum StorageEngineName { + mmfiles, rocksdb + } + + private StorageEngineName name; + + public ArangoDBEngine() { + super(); + } + + /** + * @return the storage engine name + */ + public StorageEngineName getName() { + return name; + } + +} diff --git a/src/main/java/com/arangodb/entity/StreamTransactionEntity.java b/src/main/java/com/arangodb/entity/StreamTransactionEntity.java new file mode 100644 index 000000000..948a7b15c --- /dev/null +++ b/src/main/java/com/arangodb/entity/StreamTransactionEntity.java @@ -0,0 +1,46 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.5.0 + */ +public class StreamTransactionEntity implements Entity { + + private String id; + private StreamTransactionStatus status; + + public enum StreamTransactionStatus { + running, committed, aborted + } + + public String getId() { + return id; + } + + public StreamTransactionStatus getStatus() { + return status; + } + +} diff --git a/src/main/java/com/arangodb/internal/ArangoCollectionImpl.java b/src/main/java/com/arangodb/internal/ArangoCollectionImpl.java index 690e8f43f..16f8d81c7 100644 --- a/src/main/java/com/arangodb/internal/ArangoCollectionImpl.java +++ b/src/main/java/com/arangodb/internal/ArangoCollectionImpl.java @@ -50,6 +50,7 @@ /** * @author Mark Vollmary + * @author Michele Rastelli * */ public class ArangoCollectionImpl extends InternalArangoCollection @@ -349,12 +350,22 @@ public boolean exists() throws ArangoDBException { @Override public CollectionEntity truncate() throws ArangoDBException { - return executor.execute(truncateRequest(), CollectionEntity.class); + return truncate(null); + } + + @Override + public CollectionEntity truncate(CollectionTruncateOptions options) throws ArangoDBException { + return executor.execute(truncateRequest(options), CollectionEntity.class); } @Override public CollectionPropertiesEntity count() throws ArangoDBException { - return executor.execute(countRequest(), CollectionPropertiesEntity.class); + return count(null); + } + + @Override + public CollectionPropertiesEntity count(CollectionCountOptions options) throws ArangoDBException { + return executor.execute(countRequest(options), CollectionPropertiesEntity.class); } @Override diff --git a/src/main/java/com/arangodb/internal/ArangoDBImpl.java b/src/main/java/com/arangodb/internal/ArangoDBImpl.java index 47d589402..efdc34782 100644 --- 
a/src/main/java/com/arangodb/internal/ArangoDBImpl.java +++ b/src/main/java/com/arangodb/internal/ArangoDBImpl.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.Collection; +import com.arangodb.entity.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,12 +31,6 @@ import com.arangodb.ArangoDBException; import com.arangodb.ArangoDatabase; import com.arangodb.Protocol; -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.LogEntity; -import com.arangodb.entity.LogLevelEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.ServerRole; -import com.arangodb.entity.UserEntity; import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; import com.arangodb.internal.http.HttpCommunication; import com.arangodb.internal.http.HttpProtocol; @@ -58,6 +53,7 @@ /** * @author Mark Vollmary * @author Heiko Kernbach + * @author Michele Rastelli * */ public class ArangoDBImpl extends InternalArangoDB implements ArangoDB { @@ -163,6 +159,11 @@ public ArangoDBVersion getVersion() throws ArangoDBException { return db().getVersion(); } + @Override + public ArangoDBEngine getEngine() throws ArangoDBException { + return db().getEngine(); + } + @Override public ServerRole getRole() throws ArangoDBException { return executor.execute(getRoleRequest(), getRoleResponseDeserializer()); diff --git a/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java b/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java index c99a886e0..5db0e6795 100644 --- a/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java +++ b/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java @@ -32,37 +32,11 @@ import com.arangodb.ArangoRoute; import com.arangodb.ArangoSearch; import com.arangodb.ArangoView; -import com.arangodb.entity.AqlExecutionExplainEntity; -import com.arangodb.entity.AqlFunctionEntity; -import com.arangodb.entity.AqlParseEntity; -import com.arangodb.entity.ArangoDBVersion; -import 
com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.CursorEntity; -import com.arangodb.entity.DatabaseEntity; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryCachePropertiesEntity; -import com.arangodb.entity.QueryEntity; -import com.arangodb.entity.QueryTrackingPropertiesEntity; -import com.arangodb.entity.TraversalEntity; -import com.arangodb.entity.ViewEntity; -import com.arangodb.entity.ViewType; +import com.arangodb.entity.*; import com.arangodb.internal.cursor.ArangoCursorImpl; import com.arangodb.internal.net.HostHandle; import com.arangodb.internal.util.DocumentUtil; -import com.arangodb.model.AqlFunctionCreateOptions; -import com.arangodb.model.AqlFunctionDeleteOptions; -import com.arangodb.model.AqlFunctionGetOptions; -import com.arangodb.model.AqlQueryExplainOptions; -import com.arangodb.model.AqlQueryOptions; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.CollectionsReadOptions; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.GraphCreateOptions; -import com.arangodb.model.TransactionOptions; -import com.arangodb.model.TraversalOptions; +import com.arangodb.model.*; import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; import com.arangodb.util.ArangoCursorInitializer; import com.arangodb.velocypack.Type; @@ -70,7 +44,7 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli */ public class ArangoDatabaseImpl extends InternalArangoDatabase implements ArangoDatabase { @@ -86,6 +60,11 @@ public ArangoDBVersion getVersion() throws ArangoDBException { return executor.execute(getVersionRequest(), ArangoDBVersion.class); } + @Override + public ArangoDBEngine getEngine() throws ArangoDBException { + return executor.execute(getEngineRequest(), ArangoDBEngine.class); + } + @Override public boolean exists() throws 
ArangoDBException { try { @@ -122,8 +101,8 @@ public CollectionEntity createCollection(final String name, final CollectionCrea @Override public Collection getCollections() throws ArangoDBException { - return executor.execute(getCollectionsRequest(new CollectionsReadOptions()), - getCollectionsResponseDeserializer()); + return executor + .execute(getCollectionsRequest(new CollectionsReadOptions()), getCollectionsResponseDeserializer()); } @Override @@ -188,22 +167,20 @@ public Permissions getPermissions(final String user) throws ArangoDBException { @Override public ArangoCursor query( - final String query, - final Map bindVars, - final AqlQueryOptions options, - final Class type) throws ArangoDBException { - + final String query, final Map bindVars, final AqlQueryOptions options, final Class type) + throws ArangoDBException { + final Request request = queryRequest(query, bindVars, options); final HostHandle hostHandle = new HostHandle(); final CursorEntity result = executor.execute(request, CursorEntity.class, hostHandle); - + return createCursor(result, type, options, hostHandle); - + } @Override - public ArangoCursor query(final String query, final Map bindVars, final Class type) - throws ArangoDBException { + public ArangoCursor query( + final String query, final Map bindVars, final Class type) throws ArangoDBException { return query(query, bindVars, null, type); } @@ -221,16 +198,17 @@ public ArangoCursor query(final String query, final Class type) throws @Override public ArangoCursor cursor(final String cursorId, final Class type) throws ArangoDBException { final HostHandle hostHandle = new HostHandle(); - final CursorEntity result = executor.execute(queryNextRequest(cursorId, null, null), CursorEntity.class, hostHandle); + final CursorEntity result = executor + .execute(queryNextRequest(cursorId, null, null), CursorEntity.class, hostHandle); return createCursor(result, type, null, hostHandle); } private ArangoCursor createCursor( - final CursorEntity result, - 
final Class type, - final AqlQueryOptions options, - final HostHandle hostHandle) { - + final CursorEntity result, + final Class type, + final AqlQueryOptions options, + final HostHandle hostHandle) { + final ArangoCursorExecute execute = new ArangoCursorExecute() { @Override public CursorEntity next(final String id, Map meta) { @@ -242,16 +220,16 @@ public void close(final String id, Map meta) { executor.execute(queryCloseRequest(id, options, meta), Void.class, hostHandle); } }; - - return cursorInitializer != null ? cursorInitializer.createInstance(this, execute, type, result) - : new ArangoCursorImpl(this, execute, type, result); + + return cursorInitializer != null ? + cursorInitializer.createInstance(this, execute, type, result) : + new ArangoCursorImpl(this, execute, type, result); } @Override public AqlExecutionExplainEntity explainQuery( - final String query, - final Map bindVars, - final AqlQueryExplainOptions options) throws ArangoDBException { + final String query, final Map bindVars, final AqlQueryExplainOptions options) + throws ArangoDBException { return executor.execute(explainQueryRequest(query, bindVars, options), AqlExecutionExplainEntity.class); } @@ -310,8 +288,8 @@ public void killQuery(final String id) throws ArangoDBException { } @Override - public void createAqlFunction(final String name, final String code, final AqlFunctionCreateOptions options) - throws ArangoDBException { + public void createAqlFunction( + final String name, final String code, final AqlFunctionCreateOptions options) throws ArangoDBException { executor.execute(createAqlFunctionRequest(name, code, options), Void.class); } @@ -335,14 +313,13 @@ public ArangoGraph graph(final String name) { public GraphEntity createGraph(final String name, final Collection edgeDefinitions) throws ArangoDBException { return executor.execute(createGraphRequest(name, edgeDefinitions, new GraphCreateOptions()), - createGraphResponseDeserializer()); + createGraphResponseDeserializer()); } 
@Override public GraphEntity createGraph( - final String name, - final Collection edgeDefinitions, - final GraphCreateOptions options) throws ArangoDBException { + final String name, final Collection edgeDefinitions, final GraphCreateOptions options) + throws ArangoDBException { return executor.execute(createGraphRequest(name, edgeDefinitions, options), createGraphResponseDeserializer()); } @@ -357,6 +334,26 @@ public T transaction(final String action, final Class type, final Transac return executor.execute(transactionRequest(action, options), transactionResponseDeserializer(type)); } + @Override + public StreamTransactionEntity beginStreamTransaction(StreamTransactionOptions options) throws ArangoDBException { + return executor.execute(beginStreamTransactionRequest(options), streamTransactionResponseDeserializer()); + } + + @Override + public StreamTransactionEntity abortStreamTransaction(String id) throws ArangoDBException { + return executor.execute(abortStreamTransactionRequest(id), streamTransactionResponseDeserializer()); + } + + @Override + public StreamTransactionEntity getStreamTransaction(String id) throws ArangoDBException { + return executor.execute(getStreamTransactionRequest(id), streamTransactionResponseDeserializer()); + } + + @Override + public StreamTransactionEntity commitStreamTransaction(String id) throws ArangoDBException { + return executor.execute(commitStreamTransactionRequest(id), streamTransactionResponseDeserializer()); + } + @Override public DatabaseEntity getInfo() throws ArangoDBException { return executor.execute(getInfoRequest(), getInfoResponseDeserializer()); @@ -364,9 +361,8 @@ public DatabaseEntity getInfo() throws ArangoDBException { @Override public TraversalEntity executeTraversal( - final Class vertexClass, - final Class edgeClass, - final TraversalOptions options) throws ArangoDBException { + final Class vertexClass, final Class edgeClass, final TraversalOptions options) + throws ArangoDBException { final Request request = 
executeTraversalRequest(options); return executor.execute(request, executeTraversalResponseDeserializer(vertexClass, edgeClass)); } @@ -398,7 +394,7 @@ protected ArangoDatabaseImpl setCursorInitializer(final ArangoCursorInitializer @Override public ArangoRoute route(final String... path) { - return new ArangoRouteImpl(this, createPath(path), Collections. emptyMap()); + return new ArangoRouteImpl(this, createPath(path), Collections.emptyMap()); } @Override diff --git a/src/main/java/com/arangodb/internal/InternalArangoCollection.java b/src/main/java/com/arangodb/internal/InternalArangoCollection.java index 310cc023a..f3594440f 100644 --- a/src/main/java/com/arangodb/internal/InternalArangoCollection.java +++ b/src/main/java/com/arangodb/internal/InternalArangoCollection.java @@ -50,7 +50,7 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli */ public abstract class InternalArangoCollection, D extends InternalArangoDatabase, E extends ArangoExecutor> extends ArangoExecuteable { @@ -72,6 +72,8 @@ public abstract class InternalArangoCollection, D private static final String OLD = "old"; private static final String SILENT = "silent"; + private static final String TRANSACTION_ID = "x-arango-trx-id"; + private final D db; protected volatile String name; @@ -97,13 +99,13 @@ protected Request insertDocumentRequest(final T value, final DocumentCreateO request.putQueryParam(RETURN_OLD, params.getReturnOld()); request.putQueryParam(SILENT, params.getSilent()); request.putQueryParam(OVERWRITE, params.getOverwrite()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); request.setBody(util(Serializer.CUSTOM).serialize(value)); return request; } protected ResponseDeserializer> insertDocumentResponseDeserializer( - final T value, - final DocumentCreateOptions options) { + final T value, final DocumentCreateOptions options) { return new ResponseDeserializer>() { @SuppressWarnings("unchecked") @Override @@ -137,15 +139,15 @@ protected Request 
insertDocumentsRequest(final Collection values, final D request.putQueryParam(RETURN_OLD, params.getReturnOld()); request.putQueryParam(SILENT, params.getSilent()); request.putQueryParam(OVERWRITE, params.getOverwrite()); - request.setBody(util(Serializer.CUSTOM).serialize(values, - new ArangoSerializer.Options().serializeNullValues(false).stringAsJson(true))); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.setBody(util(Serializer.CUSTOM) + .serialize(values, new ArangoSerializer.Options().serializeNullValues(false).stringAsJson(true))); return request; } @SuppressWarnings("unchecked") protected ResponseDeserializer>> insertDocumentsResponseDeserializer( - final Collection values, - final DocumentCreateOptions params) { + final Collection values, final DocumentCreateOptions params) { return new ResponseDeserializer>>() { @Override public MultiDocumentEntity> deserialize(final Response response) @@ -162,7 +164,7 @@ public MultiDocumentEntity> deserialize(final Response r final Collection documentsAndErrors = new ArrayList(); final VPackSlice body = response.getBody(); if (body.isArray()) { - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { + for (final Iterator iterator = body.arrayIterator(); iterator.hasNext(); ) { final VPackSlice next = iterator.next(); if (next.get(ArangoResponseField.ERROR).isTrue()) { final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); @@ -211,10 +213,11 @@ protected Request importDocumentsRequest(final DocumentImportOptions options) { protected Request getDocumentRequest(final String key, final DocumentReadOptions options) { final Request request = request(db.name(), RequestType.GET, PATH_API_DOCUMENT, - DocumentUtil.createDocumentHandle(name, key)); + DocumentUtil.createDocumentHandle(name, key)); final DocumentReadOptions params = (options != null ? 
options : new DocumentReadOptions()); request.putHeaderParam(ArangoRequestParam.IF_NONE_MATCH, params.getIfNoneMatch()); request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); if (params.getAllowDirtyRead() == Boolean.TRUE) { RequestUtils.allowDirtyRead(request); } @@ -226,7 +229,8 @@ protected Request getDocumentsRequest(final Collection keys, final Docum final Request request = request(db.name(), RequestType.PUT, PATH_API_DOCUMENT, name) .putQueryParam("onlyget", true) .putHeaderParam(ArangoRequestParam.IF_NONE_MATCH, params.getIfNoneMatch()) - .putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()).setBody(util().serialize(keys)); + .putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()).setBody(util().serialize(keys)) + .putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); if (params.getAllowDirtyRead() == Boolean.TRUE) { RequestUtils.allowDirtyRead(request); } @@ -234,8 +238,7 @@ protected Request getDocumentsRequest(final Collection keys, final Docum } protected ResponseDeserializer> getDocumentsResponseDeserializer( - final Class type, - final DocumentReadOptions options) { + final Class type, final DocumentReadOptions options) { return new ResponseDeserializer>() { @SuppressWarnings("unchecked") @Override @@ -245,7 +248,7 @@ public MultiDocumentEntity deserialize(final Response response) throws VPackE final Collection errors = new ArrayList(); final Collection documentsAndErrors = new ArrayList(); final VPackSlice body = response.getBody(); - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { + for (final Iterator iterator = body.arrayIterator(); iterator.hasNext(); ) { final VPackSlice next = iterator.next(); if (next.get(ArangoResponseField.ERROR).isTrue()) { final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); @@ -266,13 +269,12 @@ public MultiDocumentEntity 
deserialize(final Response response) throws VPackE } protected Request replaceDocumentRequest( - final String key, - final T value, - final DocumentReplaceOptions options) { + final String key, final T value, final DocumentReplaceOptions options) { final Request request = request(db.name(), RequestType.PUT, PATH_API_DOCUMENT, - DocumentUtil.createDocumentHandle(name, key)); + DocumentUtil.createDocumentHandle(name, key)); final DocumentReplaceOptions params = (options != null ? options : new DocumentReplaceOptions()); request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); request.putQueryParam(IGNORE_REVS, params.getIgnoreRevs()); request.putQueryParam(RETURN_NEW, params.getReturnNew()); @@ -283,8 +285,7 @@ protected Request replaceDocumentRequest( } protected ResponseDeserializer> replaceDocumentResponseDeserializer( - final T value, - final DocumentReplaceOptions options) { + final T value, final DocumentReplaceOptions options) { return new ResponseDeserializer>() { @SuppressWarnings("unchecked") @Override @@ -312,20 +313,20 @@ public DocumentUpdateEntity deserialize(final Response response) throws VPack protected Request replaceDocumentsRequest(final Collection values, final DocumentReplaceOptions params) { final Request request = request(db.name(), RequestType.PUT, PATH_API_DOCUMENT, name); request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); request.putQueryParam(IGNORE_REVS, params.getIgnoreRevs()); request.putQueryParam(RETURN_NEW, params.getReturnNew()); request.putQueryParam(RETURN_OLD, params.getReturnOld()); request.putQueryParam(SILENT, params.getSilent()); - 
request.setBody(util(Serializer.CUSTOM).serialize(values, - new ArangoSerializer.Options().serializeNullValues(false).stringAsJson(true))); + request.setBody(util(Serializer.CUSTOM) + .serialize(values, new ArangoSerializer.Options().serializeNullValues(false).stringAsJson(true))); return request; } @SuppressWarnings("unchecked") protected ResponseDeserializer>> replaceDocumentsResponseDeserializer( - final Collection values, - final DocumentReplaceOptions params) { + final Collection values, final DocumentReplaceOptions params) { return new ResponseDeserializer>>() { @Override public MultiDocumentEntity> deserialize(final Response response) @@ -342,7 +343,7 @@ public MultiDocumentEntity> deserialize(final Response r final Collection documentsAndErrors = new ArrayList(); final VPackSlice body = response.getBody(); if (body.isArray()) { - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { + for (final Iterator iterator = body.arrayIterator(); iterator.hasNext(); ) { final VPackSlice next = iterator.next(); if (next.get(ArangoResponseField.ERROR).isTrue()) { final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); @@ -373,9 +374,10 @@ public MultiDocumentEntity> deserialize(final Response r protected Request updateDocumentRequest(final String key, final T value, final DocumentUpdateOptions options) { final Request request = request(db.name(), RequestType.PATCH, PATH_API_DOCUMENT, - DocumentUtil.createDocumentHandle(name, key)); + DocumentUtil.createDocumentHandle(name, key)); final DocumentUpdateOptions params = (options != null ? 
options : new DocumentUpdateOptions()); request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); request.putQueryParam(ArangoRequestParam.KEEP_NULL, params.getKeepNull()); request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); request.putQueryParam(MERGE_OBJECTS, params.getMergeObjects()); @@ -389,8 +391,7 @@ protected Request updateDocumentRequest(final String key, final T value, fin } protected ResponseDeserializer> updateDocumentResponseDeserializer( - final T value, - final DocumentUpdateOptions options) { + final T value, final DocumentUpdateOptions options) { return new ResponseDeserializer>() { @SuppressWarnings("unchecked") @Override @@ -419,6 +420,7 @@ protected Request updateDocumentsRequest(final Collection values, final D final Request request = request(db.name(), RequestType.PATCH, PATH_API_DOCUMENT, name); final Boolean keepNull = params.getKeepNull(); request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); request.putQueryParam(ArangoRequestParam.KEEP_NULL, keepNull); request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); request.putQueryParam(MERGE_OBJECTS, params.getMergeObjects()); @@ -426,17 +428,15 @@ protected Request updateDocumentsRequest(final Collection values, final D request.putQueryParam(RETURN_NEW, params.getReturnNew()); request.putQueryParam(RETURN_OLD, params.getReturnOld()); request.putQueryParam(SILENT, params.getSilent()); - request.setBody(util(Serializer.CUSTOM).serialize(values, - new ArangoSerializer.Options() - .serializeNullValues(params.getSerializeNull() == null || params.getSerializeNull()) - .stringAsJson(true))); + request.setBody(util(Serializer.CUSTOM).serialize(values, new ArangoSerializer.Options() + .serializeNullValues(params.getSerializeNull() == null || 
params.getSerializeNull()) + .stringAsJson(true))); return request; } @SuppressWarnings("unchecked") protected ResponseDeserializer>> updateDocumentsResponseDeserializer( - final Collection values, - final DocumentUpdateOptions params) { + final Collection values, final DocumentUpdateOptions params) { return new ResponseDeserializer>>() { @Override public MultiDocumentEntity> deserialize(final Response response) @@ -453,7 +453,7 @@ public MultiDocumentEntity> deserialize(final Response r final Collection documentsAndErrors = new ArrayList(); final VPackSlice body = response.getBody(); if (body.isArray()) { - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { + for (final Iterator iterator = body.arrayIterator(); iterator.hasNext(); ) { final VPackSlice next = iterator.next(); if (next.get(ArangoResponseField.ERROR).isTrue()) { final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); @@ -484,9 +484,10 @@ public MultiDocumentEntity> deserialize(final Response r protected Request deleteDocumentRequest(final String key, final DocumentDeleteOptions options) { final Request request = request(db.name(), RequestType.DELETE, PATH_API_DOCUMENT, - DocumentUtil.createDocumentHandle(name, key)); + DocumentUtil.createDocumentHandle(name, key)); final DocumentDeleteOptions params = (options != null ? 
options : new DocumentDeleteOptions()); request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); request.putQueryParam(RETURN_OLD, params.getReturnOld()); request.putQueryParam(SILENT, params.getSilent()); @@ -494,7 +495,7 @@ protected Request deleteDocumentRequest(final String key, final DocumentDeleteOp } protected ResponseDeserializer> deleteDocumentResponseDeserializer( - final Class type) { + final Class type) { return new ResponseDeserializer>() { @SuppressWarnings("unchecked") @Override @@ -513,6 +514,7 @@ public DocumentDeleteEntity deserialize(final Response response) throws VPack protected Request deleteDocumentsRequest(final Collection keys, final DocumentDeleteOptions options) { final Request request = request(db.name(), RequestType.DELETE, PATH_API_DOCUMENT, name); final DocumentDeleteOptions params = (options != null ? 
options : new DocumentDeleteOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); request.putQueryParam(RETURN_OLD, params.getReturnOld()); request.putQueryParam(SILENT, params.getSilent()); @@ -521,7 +523,7 @@ protected Request deleteDocumentsRequest(final Collection keys, final Doc } protected ResponseDeserializer>> deleteDocumentsResponseDeserializer( - final Class type) { + final Class type) { return new ResponseDeserializer>>() { @SuppressWarnings("unchecked") @Override @@ -533,7 +535,7 @@ public MultiDocumentEntity> deserialize(final Response r final Collection documentsAndErrors = new ArrayList(); final VPackSlice body = response.getBody(); if (body.isArray()) { - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { + for (final Iterator iterator = body.arrayIterator(); iterator.hasNext(); ) { final VPackSlice next = iterator.next(); if (next.get(ArangoResponseField.ERROR).isTrue()) { final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); @@ -560,8 +562,9 @@ public MultiDocumentEntity> deserialize(final Response r protected Request documentExistsRequest(final String key, final DocumentExistsOptions options) { final Request request = request(db.name(), RequestType.HEAD, PATH_API_DOCUMENT, - DocumentUtil.createDocumentHandle(name, key)); + DocumentUtil.createDocumentHandle(name, key)); final DocumentExistsOptions params = (options != null ? 
options : new DocumentExistsOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); request.putHeaderParam(ArangoRequestParam.IF_NONE_MATCH, params.getIfNoneMatch()); return request; @@ -600,7 +603,7 @@ protected Request createHashIndexRequest(final Iterable fields, final Ha final Request request = request(db.name(), RequestType.POST, PATH_API_INDEX); request.putQueryParam(COLLECTION, name); request.setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new HashIndexOptions(), fields))); + util().serialize(OptionsBuilder.build(options != null ? options : new HashIndexOptions(), fields))); return request; } @@ -608,17 +611,16 @@ protected Request createSkiplistIndexRequest(final Iterable fields, fina final Request request = request(db.name(), RequestType.POST, PATH_API_INDEX); request.putQueryParam(COLLECTION, name); request.setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new SkiplistIndexOptions(), fields))); + util().serialize(OptionsBuilder.build(options != null ? options : new SkiplistIndexOptions(), fields))); return request; } protected Request createPersistentIndexRequest( - final Iterable fields, - final PersistentIndexOptions options) { + final Iterable fields, final PersistentIndexOptions options) { final Request request = request(db.name(), RequestType.POST, PATH_API_INDEX); request.putQueryParam(COLLECTION, name); - request.setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new PersistentIndexOptions(), fields))); + request.setBody(util().serialize( + OptionsBuilder.build(options != null ? 
options : new PersistentIndexOptions(), fields))); return request; } @@ -626,7 +628,7 @@ protected Request createGeoIndexRequest(final Iterable fields, final Geo final Request request = request(db.name(), RequestType.POST, PATH_API_INDEX); request.putQueryParam(COLLECTION, name); request.setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new GeoIndexOptions(), fields))); + util().serialize(OptionsBuilder.build(options != null ? options : new GeoIndexOptions(), fields))); return request; } @@ -634,7 +636,7 @@ protected Request createFulltextIndexRequest(final Iterable fields, fina final Request request = request(db.name(), RequestType.POST, PATH_API_INDEX); request.putQueryParam(COLLECTION, name); request.setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new FulltextIndexOptions(), fields))); + util().serialize(OptionsBuilder.build(options != null ? options : new FulltextIndexOptions(), fields))); return request; } @@ -662,12 +664,18 @@ public Collection deserialize(final Response response) throws VPack }; } - protected Request truncateRequest() { - return request(db.name(), RequestType.PUT, PATH_API_COLLECTION, name, "truncate"); + protected Request truncateRequest(final CollectionTruncateOptions options) { + final Request request = request(db.name(), RequestType.PUT, PATH_API_COLLECTION, name, "truncate"); + final CollectionTruncateOptions params = (options != null ? options : new CollectionTruncateOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + return request; } - protected Request countRequest() { - return request(db.name(), RequestType.GET, PATH_API_COLLECTION, name, "count"); + protected Request countRequest(final CollectionCountOptions options) { + final Request request = request(db.name(), RequestType.GET, PATH_API_COLLECTION, name, "count"); + final CollectionCountOptions params = (options != null ? 
options : new CollectionCountOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + return request; } protected Request dropRequest(final Boolean isSystem) { @@ -708,17 +716,17 @@ protected Request getRevisionRequest() { protected Request grantAccessRequest(final String user, final Permissions permissions) { return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_USER, user, ArangoRequestParam.DATABASE, - db.name(), name).setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); + db.name(), name).setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); } protected Request resetAccessRequest(final String user) { return request(ArangoRequestParam.SYSTEM, RequestType.DELETE, PATH_API_USER, user, ArangoRequestParam.DATABASE, - db.name(), name); + db.name(), name); } protected Request getPermissionsRequest(final String user) { return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_USER, user, ArangoRequestParam.DATABASE, - db.name(), name); + db.name(), name); } protected ResponseDeserializer getPermissionsResponseDeserialzer() { diff --git a/src/main/java/com/arangodb/internal/InternalArangoDatabase.java b/src/main/java/com/arangodb/internal/InternalArangoDatabase.java index daad3e2fc..d48002d5f 100644 --- a/src/main/java/com/arangodb/internal/InternalArangoDatabase.java +++ b/src/main/java/com/arangodb/internal/InternalArangoDatabase.java @@ -25,35 +25,11 @@ import java.util.Iterator; import java.util.Map; -import com.arangodb.entity.AqlFunctionEntity; -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.DatabaseEntity; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.entity.PathEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryCachePropertiesEntity; -import com.arangodb.entity.QueryTrackingPropertiesEntity; -import 
com.arangodb.entity.TraversalEntity; -import com.arangodb.entity.ViewEntity; -import com.arangodb.entity.ViewType; +import com.arangodb.entity.*; import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; import com.arangodb.internal.util.ArangoSerializationFactory.Serializer; import com.arangodb.internal.util.RequestUtils; -import com.arangodb.model.AqlFunctionCreateOptions; -import com.arangodb.model.AqlFunctionDeleteOptions; -import com.arangodb.model.AqlFunctionGetOptions; -import com.arangodb.model.AqlQueryExplainOptions; -import com.arangodb.model.AqlQueryOptions; -import com.arangodb.model.AqlQueryParseOptions; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.CollectionsReadOptions; -import com.arangodb.model.GraphCreateOptions; -import com.arangodb.model.OptionsBuilder; -import com.arangodb.model.TransactionOptions; -import com.arangodb.model.TraversalOptions; -import com.arangodb.model.UserAccessOptions; -import com.arangodb.model.ViewCreateOptions; +import com.arangodb.model.*; import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; import com.arangodb.model.arangosearch.ArangoSearchOptionsBuilder; import com.arangodb.util.ArangoSerializer; @@ -66,15 +42,17 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli */ public abstract class InternalArangoDatabase, E extends ArangoExecutor> extends ArangoExecuteable { protected static final String PATH_API_DATABASE = "/_api/database"; private static final String PATH_API_VERSION = "/_api/version"; + private static final String PATH_API_ENGINE = "/_api/engine"; private static final String PATH_API_CURSOR = "/_api/cursor"; private static final String PATH_API_TRANSACTION = "/_api/transaction"; + private static final String PATH_API_BEGIN_STREAM_TRANSACTION = "/_api/transaction/begin"; private static final String PATH_API_AQLFUNCTION = "/_api/aqlfunction"; private static final String PATH_API_EXPLAIN = "/_api/explain"; private static final String 
PATH_API_QUERY = "/_api/query"; @@ -87,6 +65,8 @@ public abstract class InternalArangoDatabase, E ex private static final String PATH_API_ADMIN_ROUTING_RELOAD = "/_admin/routing/reload"; private static final String PATH_API_USER = "/_api/user"; + private static final String TRANSACTION_ID = "x-arango-trx-id"; + private final String name; private final A arango; @@ -116,14 +96,16 @@ protected Request getVersionRequest() { return request(name, RequestType.GET, PATH_API_VERSION); } + protected Request getEngineRequest() { + return request(name, RequestType.GET, PATH_API_ENGINE); + } + protected Request createCollectionRequest(final String name, final CollectionCreateOptions options) { - - VPackSlice body = util().serialize(OptionsBuilder.build(options != null ? options : new CollectionCreateOptions(), name)); - - return request( - name(), - RequestType.POST, - InternalArangoCollection.PATH_API_COLLECTION).setBody(body); + + VPackSlice body = util() + .serialize(OptionsBuilder.build(options != null ? 
options : new CollectionCreateOptions(), name)); + + return request(name(), RequestType.POST, InternalArangoCollection.PATH_API_COLLECTION).setBody(body); } protected Request getCollectionsRequest(final CollectionsReadOptions options) { @@ -160,22 +142,22 @@ public Boolean deserialize(final Response response) throws VPackException { protected Request grantAccessRequest(final String user, final Permissions permissions) { return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_USER, user, ArangoRequestParam.DATABASE, - name).setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); + name).setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); } protected Request resetAccessRequest(final String user) { return request(ArangoRequestParam.SYSTEM, RequestType.DELETE, PATH_API_USER, user, ArangoRequestParam.DATABASE, - name); + name); } protected Request updateUserDefaultCollectionAccessRequest(final String user, final Permissions permissions) { return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_USER, user, ArangoRequestParam.DATABASE, - name, "*").setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); + name, "*").setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); } protected Request getPermissionsRequest(final String user) { return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_USER, user, ArangoRequestParam.DATABASE, - name); + name); } protected ResponseDeserializer getPermissionsResponseDeserialzer() { @@ -195,58 +177,57 @@ public Permissions deserialize(final Response response) throws VPackException { } protected Request queryRequest( - final String query, - final Map bindVars, - final AqlQueryOptions options) { + final String query, final Map bindVars, final AqlQueryOptions options) { final AqlQueryOptions opt = options != null ? 
options : new AqlQueryOptions(); - final Request request = request(name, RequestType.POST, PATH_API_CURSOR).setBody( - util().serialize(OptionsBuilder.build(opt, query, bindVars != null - ? util().serialize(bindVars, new ArangoSerializer.Options().serializeNullValues(true)) : null))); + final Request request = request(name, RequestType.POST, PATH_API_CURSOR).setBody(util().serialize(OptionsBuilder + .build(opt, query, bindVars != null ? + util().serialize(bindVars, new ArangoSerializer.Options().serializeNullValues(true)) : + null))); if (opt.getAllowDirtyRead() == Boolean.TRUE) { RequestUtils.allowDirtyRead(request); } + request.putHeaderParam(TRANSACTION_ID, opt.getStreamTransactionId()); return request; } protected Request queryNextRequest(final String id, final AqlQueryOptions options, Map meta) { - + final Request request = request(name, RequestType.PUT, PATH_API_CURSOR, id); - - if(meta != null) { + + if (meta != null) { request.getHeaderParam().putAll(meta); } - + final AqlQueryOptions opt = options != null ? options : new AqlQueryOptions(); - + if (opt.getAllowDirtyRead() == Boolean.TRUE) { RequestUtils.allowDirtyRead(request); } + request.putHeaderParam(TRANSACTION_ID, opt.getStreamTransactionId()); return request; } protected Request queryCloseRequest(final String id, final AqlQueryOptions options, Map meta) { - + final Request request = request(name, RequestType.DELETE, PATH_API_CURSOR, id); - if(meta != null) { + if (meta != null) { request.getHeaderParam().putAll(meta); } - + final AqlQueryOptions opt = options != null ? 
options : new AqlQueryOptions(); - + if (opt.getAllowDirtyRead() == Boolean.TRUE) { RequestUtils.allowDirtyRead(request); } - + request.putHeaderParam(TRANSACTION_ID, opt.getStreamTransactionId()); return request; } protected Request explainQueryRequest( - final String query, - final Map bindVars, - final AqlQueryExplainOptions options) { + final String query, final Map bindVars, final AqlQueryExplainOptions options) { return request(name, RequestType.POST, PATH_API_EXPLAIN).setBody(util().serialize( - OptionsBuilder.build(options != null ? options : new AqlQueryExplainOptions(), query, bindVars))); + OptionsBuilder.build(options != null ? options : new AqlQueryExplainOptions(), query, bindVars))); } protected Request parseQueryRequest(final String query) { @@ -291,11 +272,9 @@ protected Request killQueryRequest(final String id) { } protected Request createAqlFunctionRequest( - final String name, - final String code, - final AqlFunctionCreateOptions options) { + final String name, final String code, final AqlFunctionCreateOptions options) { return request(name(), RequestType.POST, PATH_API_AQLFUNCTION).setBody(util().serialize( - OptionsBuilder.build(options != null ? options : new AqlFunctionCreateOptions(), name, code))); + OptionsBuilder.build(options != null ? 
options : new AqlFunctionCreateOptions(), name, code))); } protected Request deleteAqlFunctionRequest(final String name, final AqlFunctionDeleteOptions options) { @@ -320,7 +299,9 @@ public Integer deserialize(final Response response) throws VPackException { } } return count; - }; + } + + ; }; } @@ -346,11 +327,9 @@ public Collection deserialize(final Response response) throws } protected Request createGraphRequest( - final String name, - final Collection edgeDefinitions, - final GraphCreateOptions options) { + final String name, final Collection edgeDefinitions, final GraphCreateOptions options) { return request(name(), RequestType.POST, InternalArangoGraph.PATH_API_GHARIAL).setBody(util().serialize( - OptionsBuilder.build(options != null ? options : new GraphCreateOptions(), name, edgeDefinitions))); + OptionsBuilder.build(options != null ? options : new GraphCreateOptions(), name, edgeDefinitions))); } protected ResponseDeserializer createGraphResponseDeserializer() { @@ -378,7 +357,7 @@ public Collection deserialize(final Response response) throws VPack protected Request transactionRequest(final String action, final TransactionOptions options) { return request(name, RequestType.POST, PATH_API_TRANSACTION).setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new TransactionOptions(), action))); + util().serialize(OptionsBuilder.build(options != null ? options : new TransactionOptions(), action))); } protected ResponseDeserializer transactionResponseDeserializer(final Class type) { @@ -397,6 +376,33 @@ public T deserialize(final Response response) throws VPackException { }; } + protected Request beginStreamTransactionRequest(final StreamTransactionOptions options) { + return request(name, RequestType.POST, PATH_API_BEGIN_STREAM_TRANSACTION) + .setBody(util().serialize(options != null ? 
options : new StreamTransactionOptions())); + } + + protected Request abortStreamTransactionRequest(String id) { + return request(name, RequestType.DELETE, PATH_API_TRANSACTION, id); + } + + protected Request getStreamTransactionRequest(String id) { + return request(name, RequestType.GET, PATH_API_TRANSACTION, id); + } + + protected Request commitStreamTransactionRequest(String id) { + return request(name, RequestType.PUT, PATH_API_TRANSACTION, id); + } + + protected ResponseDeserializer streamTransactionResponseDeserializer() { + return new ResponseDeserializer() { + @Override + public StreamTransactionEntity deserialize(final Response response) throws VPackException { + return util() + .deserialize(response.getBody().get(ArangoResponseField.RESULT), StreamTransactionEntity.class); + } + }; + } + protected Request getInfoRequest() { return request(name, RequestType.GET, PATH_API_DATABASE, "current"); } @@ -417,8 +423,7 @@ protected Request executeTraversalRequest(final TraversalOptions options) { @SuppressWarnings("hiding") protected ResponseDeserializer> executeTraversalResponseDeserializer( - final Class vertexClass, - final Class edgeClass) { + final Class vertexClass, final Class edgeClass) { return new ResponseDeserializer>() { @Override public TraversalEntity deserialize(final Response response) throws VPackException { @@ -427,7 +432,7 @@ public TraversalEntity deserialize(final Response response) throws VPackEx result.setVertices(deserializeVertices(vertexClass, visited)); final Collection> paths = new ArrayList>(); - for (final Iterator iterator = visited.get("paths").arrayIterator(); iterator.hasNext();) { + for (final Iterator iterator = visited.get("paths").arrayIterator(); iterator.hasNext(); ) { final PathEntity path = new PathEntity(); final VPackSlice next = iterator.next(); path.setEdges(deserializeEdges(edgeClass, next)); @@ -444,7 +449,7 @@ public TraversalEntity deserialize(final Response response) throws VPackEx protected Collection 
deserializeVertices(final Class vertexClass, final VPackSlice vpack) throws VPackException { final Collection vertices = new ArrayList(); - for (final Iterator iterator = vpack.get("vertices").arrayIterator(); iterator.hasNext();) { + for (final Iterator iterator = vpack.get("vertices").arrayIterator(); iterator.hasNext(); ) { vertices.add((V) util(Serializer.CUSTOM).deserialize(iterator.next(), vertexClass)); } return vertices; @@ -454,7 +459,7 @@ protected Collection deserializeVertices(final Class vertexClass, fina protected Collection deserializeEdges(final Class edgeClass, final VPackSlice next) throws VPackException { final Collection edges = new ArrayList(); - for (final Iterator iteratorEdge = next.get("edges").arrayIterator(); iteratorEdge.hasNext();) { + for (final Iterator iteratorEdge = next.get("edges").arrayIterator(); iteratorEdge.hasNext(); ) { edges.add((E) util(Serializer.CUSTOM).deserialize(iteratorEdge.next(), edgeClass)); } return edges; @@ -486,6 +491,6 @@ protected Request createViewRequest(final String name, final ViewType type) { protected Request createArangoSearchRequest(final String name, final ArangoSearchCreateOptions options) { return request(name(), RequestType.POST, InternalArangoView.PATH_API_VIEW).setBody(util().serialize( - ArangoSearchOptionsBuilder.build(options != null ? options : new ArangoSearchCreateOptions(), name))); + ArangoSearchOptionsBuilder.build(options != null ? 
options : new ArangoSearchCreateOptions(), name))); } } diff --git a/src/main/java/com/arangodb/model/AqlQueryOptions.java b/src/main/java/com/arangodb/model/AqlQueryOptions.java index 21864f46f..dd50faec6 100644 --- a/src/main/java/com/arangodb/model/AqlQueryOptions.java +++ b/src/main/java/com/arangodb/model/AqlQueryOptions.java @@ -30,9 +30,9 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli * @see API - * Documentation + * Documentation */ public class AqlQueryOptions implements Serializable { @@ -48,6 +48,7 @@ public class AqlQueryOptions implements Serializable { private Options options; @Expose(serialize = false) private Boolean allowDirtyRead; + private String streamTransactionId; public AqlQueryOptions() { super(); @@ -58,11 +59,10 @@ public Boolean getCount() { } /** - * @param count - * indicates whether the number of documents in the result set should be returned in the "count" - * attribute of the result. Calculating the "count" attribute might have a performance impact for some - * queries in the future so this option is turned off by default, and "count" is only returned when - * requested. + * @param count indicates whether the number of documents in the result set should be returned in the "count" + * attribute of the result. Calculating the "count" attribute might have a performance impact for some + * queries in the future so this option is turned off by default, and "count" is only returned when + * requested. * @return options */ public AqlQueryOptions count(final Boolean count) { @@ -75,8 +75,7 @@ public Integer getTtl() { } /** - * @param ttl - * The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically + * @param ttl The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically * after the specified amount of time. This is useful to ensure garbage collection of cursors that are * not fully fetched by clients. 
If not set, a server-defined value will be used. * @return options @@ -91,10 +90,9 @@ public Integer getBatchSize() { } /** - * @param batchSize - * maximum number of result documents to be transferred from the server to the client in one roundtrip. - * If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 - * is disallowed. + * @param batchSize maximum number of result documents to be transferred from the server to the client in one roundtrip. + * If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 + * is disallowed. * @return options */ public AqlQueryOptions batchSize(final Integer batchSize) { @@ -107,12 +105,11 @@ public Long getMemoryLimit() { } /** - * @param memoryLimit - * the maximum number of memory (measured in bytes) that the query is allowed to use. If set, then the - * query will fail with error "resource limit exceeded" in case it allocates too much memory. A value of - * 0 indicates that there is no memory limit. - * @since ArangoDB 3.1.0 + * @param memoryLimit the maximum number of memory (measured in bytes) that the query is allowed to use. If set, then the + * query will fail with error "resource limit exceeded" in case it allocates too much memory. A value of + * 0 indicates that there is no memory limit. * @return options + * @since ArangoDB 3.1.0 */ public AqlQueryOptions memoryLimit(final Long memoryLimit) { this.memoryLimit = memoryLimit; @@ -124,10 +121,9 @@ public Boolean getCache() { } /** - * @param cache - * flag to determine whether the AQL query cache shall be used. If set to false, then any query cache - * lookup will be skipped for the query. If set to true, it will lead to the query cache being checked - * for the query if the query cache mode is either on or demand. + * @param cache flag to determine whether the AQL query cache shall be used. If set to false, then any query cache + * lookup will be skipped for the query. 
If set to true, it will lead to the query cache being checked + * for the query if the query cache mode is either on or demand. * @return options */ public AqlQueryOptions cache(final Boolean cache) { @@ -140,8 +136,7 @@ protected VPackSlice getBindVars() { } /** - * @param bindVars - * key/value pairs representing the bind parameters + * @param bindVars key/value pairs representing the bind parameters * @return options */ protected AqlQueryOptions bindVars(final VPackSlice bindVars) { @@ -154,8 +149,7 @@ protected String getQuery() { } /** - * @param query - * the query which you want parse + * @param query the query which you want parse * @return options */ protected AqlQueryOptions query(final String query) { @@ -168,12 +162,11 @@ public Boolean getFailOnWarning() { } /** - * @param failOnWarning - * When set to true, the query will throw an exception and abort instead of producing a warning. This - * option should be used during development to catch potential issues early. When the attribute is set to - * false, warnings will not be propagated to exceptions and will be returned with the query result. There - * is also a server configuration option --query.fail-on-warning for setting the default value for - * failOnWarning so it does not need to be set on a per-query level. + * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a warning. This + * option should be used during development to catch potential issues early. When the attribute is set to + * false, warnings will not be propagated to exceptions and will be returned with the query result. There + * is also a server configuration option --query.fail-on-warning for setting the default value for + * failOnWarning so it does not need to be set on a per-query level. 
* @return options */ public AqlQueryOptions failOnWarning(final Boolean failOnWarning) { @@ -183,16 +176,15 @@ public AqlQueryOptions failOnWarning(final Boolean failOnWarning) { /** * @return If set to true, then the additional query profiling information will be returned in the sub-attribute - * profile of the extra return attribute if the query result is not served from the query cache. + * profile of the extra return attribute if the query result is not served from the query cache. */ public Boolean getProfile() { return options != null ? options.profile : null; } /** - * @param profile - * If set to true, then the additional query profiling information will be returned in the sub-attribute - * profile of the extra return attribute if the query result is not served from the query cache. + * @param profile If set to true, then the additional query profiling information will be returned in the sub-attribute + * profile of the extra return attribute if the query result is not served from the query cache. * @return options */ public AqlQueryOptions profile(final Boolean profile) { @@ -205,10 +197,9 @@ public Long getMaxTransactionSize() { } /** - * @param maxTransactionSize - * Transaction size limit in bytes. Honored by the RocksDB storage engine only. - * @since ArangoDB 3.2.0 + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. * @return options + * @since ArangoDB 3.2.0 */ public AqlQueryOptions maxTransactionSize(final Long maxTransactionSize) { getOptions().maxTransactionSize = maxTransactionSize; @@ -220,11 +211,10 @@ public Long getMaxWarningCount() { } /** - * @param maxWarningCount - * Limits the maximum number of warnings a query will return. The number of warnings a query will return - * is limited to 10 by default, but that number can be increased or decreased by setting this attribute. - * @since ArangoDB 3.2.0 + * @param maxWarningCount Limits the maximum number of warnings a query will return. 
The number of warnings a query will return + * is limited to 10 by default, but that number can be increased or decreased by setting this attribute. * @return options + * @since ArangoDB 3.2.0 */ public AqlQueryOptions maxWarningCount(final Long maxWarningCount) { getOptions().maxWarningCount = maxWarningCount; @@ -236,11 +226,10 @@ public Long getIntermediateCommitCount() { } /** - * @param intermediateCommitCount - * Maximum number of operations after which an intermediate commit is performed automatically. Honored by - * the RocksDB storage engine only. - * @since ArangoDB 3.2.0 + * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed automatically. Honored by + * the RocksDB storage engine only. * @return options + * @since ArangoDB 3.2.0 */ public AqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) { getOptions().intermediateCommitCount = intermediateCommitCount; @@ -252,11 +241,10 @@ public Long getIntermediateCommitSize() { } /** - * @param intermediateCommitSize - * Maximum total size of operations after which an intermediate commit is performed automatically. - * Honored by the RocksDB storage engine only. - * @since ArangoDB 3.2.0 + * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed automatically. + * Honored by the RocksDB storage engine only. * @return options + * @since ArangoDB 3.2.0 */ public AqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) { getOptions().intermediateCommitSize = intermediateCommitSize; @@ -268,12 +256,11 @@ public Double getSatelliteSyncWait() { } /** - * @param satelliteSyncWait - * This enterprise parameter allows to configure how long a DBServer will have time to bring the - * satellite collections involved in the query into sync. The default value is 60.0 (seconds). When the - * max time has been reached the query will be stopped. 
- * @since ArangoDB 3.2.0 + * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to bring the + * satellite collections involved in the query into sync. The default value is 60.0 (seconds). When the + * max time has been reached the query will be stopped. * @return options + * @since ArangoDB 3.2.0 */ public AqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) { getOptions().satelliteSyncWait = satelliteSyncWait; @@ -285,15 +272,14 @@ public Boolean getSkipInaccessibleCollections() { } /** - * @param skipInaccessibleCollections - * AQL queries (especially graph traversals) will treat collection to which a user has no access rights - * as if these collections were empty. Instead of returning a forbidden access error, your queries will - * execute normally. This is intended to help with certain use-cases: A graph contains several - * collections and different users execute AQL queries on that graph. You can now naturally limit the - * accessible results by changing the access rights of users on collections. This feature is only - * available in the Enterprise Edition. - * @since ArangoDB 3.2.0 + * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collection to which a user has no access rights + * as if these collections were empty. Instead of returning a forbidden access error, your queries will + * execute normally. This is intended to help with certain use-cases: A graph contains several + * collections and different users execute AQL queries on that graph. You can now naturally limit the + * accessible results by changing the access rights of users on collections. This feature is only + * available in the Enterprise Edition. 
* @return options + * @since ArangoDB 3.2.0 */ public AqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) { getOptions().skipInaccessibleCollections = skipInaccessibleCollections; @@ -305,16 +291,15 @@ public Boolean getFullCount() { } /** - * @param fullCount - * if set to true and the query contains a LIMIT clause, then the result will have an extra attribute - * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } } }. The - * fullCount attribute will contain the number of documents in the result before the last LIMIT in the - * query was applied. It can be used to count the number of documents that match certain filter criteria, - * but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. - * Note that setting the option will disable a few LIMIT optimizations and may lead to more documents - * being processed, and thus make queries run longer. Note that the fullCount attribute will only be - * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used in the - * query. + * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra attribute + * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } } }. The + * fullCount attribute will contain the number of documents in the result before the last LIMIT in the + * query was applied. It can be used to count the number of documents that match certain filter criteria, + * but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. + * Note that setting the option will disable a few LIMIT optimizations and may lead to more documents + * being processed, and thus make queries run longer. 
Note that the fullCount attribute will only be + * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used in the + * query. * @return options */ public AqlQueryOptions fullCount(final Boolean fullCount) { @@ -327,9 +312,7 @@ public Integer getMaxPlans() { } /** - * - * @param maxPlans - * Limits the maximum number of plans that are created by the AQL query optimizer. + * @param maxPlans Limits the maximum number of plans that are created by the AQL query optimizer. * @return options */ public AqlQueryOptions maxPlans(final Integer maxPlans) { @@ -342,11 +325,9 @@ public Collection getRules() { } /** - * - * @param rules - * A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the - * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to enable - * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules + * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the + * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to enable + * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules * @return options */ public AqlQueryOptions rules(final Collection rules) { @@ -359,19 +340,17 @@ public Boolean getStream() { } /** - * - * @param stream - * Specify true and the query will be executed in a streaming fashion. The query result is not stored on - * the server, but calculated on the fly. Beware: long-running queries will need to hold the collection - * locks for as long as the query cursor exists. When set to false a query will be executed right away in - * its entirety. In that case query results are either returned right away (if the resultset is small - * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the - * ttl). 
It is advisable to only use this option on short-running queries or without exclusive locks - * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not work - * on streaming queries. Additionally query statistics, warnings and profiling data will only be - * available after the query is finished. The default value is false - * @since ArangoDB 3.4.0 + * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not stored on + * the server, but calculated on the fly. Beware: long-running queries will need to hold the collection + * locks for as long as the query cursor exists. When set to false a query will be executed right away in + * its entirety. In that case query results are either returned right away (if the resultset is small + * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the + * ttl). It is advisable to only use this option on short-running queries or without exclusive locks + * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not work + * on streaming queries. Additionally query statistics, warnings and profiling data will only be + * available after the query is finished. The default value is false * @return options + * @since ArangoDB 3.4.0 */ public AqlQueryOptions stream(final Boolean stream) { getOptions().stream = stream; @@ -384,7 +363,7 @@ public Collection getShardIds() { /** * Restrict query to shards by given ids. This is an internal option. Use at your own risk. - * + * * @param shardIds * @return options */ @@ -439,12 +418,11 @@ private static class Optimizer { } /** + * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup. + * @return options * @see API - * Documentation - * @param allowDirtyRead - * Set to {@code true} allows reading from followers in an active-failover setup. 
+ * Documentation * @since ArangoDB 3.4.0 - * @return options */ public AqlQueryOptions allowDirtyRead(final Boolean allowDirtyRead) { this.allowDirtyRead = allowDirtyRead; @@ -455,4 +433,18 @@ public Boolean getAllowDirtyRead() { return allowDirtyRead; } + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. + * @return options + * @since ArangoDB 3.5.0 + */ + public AqlQueryOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + } diff --git a/src/main/java/com/arangodb/model/CollectionCountOptions.java b/src/main/java/com/arangodb/model/CollectionCountOptions.java new file mode 100644 index 000000000..0104de3f5 --- /dev/null +++ b/src/main/java/com/arangodb/model/CollectionCountOptions.java @@ -0,0 +1,48 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Michele Rastelli + */ +public class CollectionCountOptions { + + private String streamTransactionId; + + public CollectionCountOptions() { + super(); + } + + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. + * @return options + * @since ArangoDB 3.5.0 + */ + public CollectionCountOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + +} diff --git a/src/main/java/com/arangodb/model/CollectionTruncateOptions.java b/src/main/java/com/arangodb/model/CollectionTruncateOptions.java new file mode 100644 index 000000000..c2f203220 --- /dev/null +++ b/src/main/java/com/arangodb/model/CollectionTruncateOptions.java @@ -0,0 +1,48 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Michele Rastelli + */ +public class CollectionTruncateOptions { + + private String streamTransactionId; + + public CollectionTruncateOptions() { + super(); + } + + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. + * @return options + * @since ArangoDB 3.5.0 + */ + public CollectionTruncateOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + +} diff --git a/src/main/java/com/arangodb/model/DocumentCreateOptions.java b/src/main/java/com/arangodb/model/DocumentCreateOptions.java index 93dad4897..7b30bc069 100644 --- a/src/main/java/com/arangodb/model/DocumentCreateOptions.java +++ b/src/main/java/com/arangodb/model/DocumentCreateOptions.java @@ -22,9 +22,9 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli * @see API - * Documentation + * Documentation */ public class DocumentCreateOptions { @@ -33,6 +33,7 @@ public class DocumentCreateOptions { private Boolean returnOld; private Boolean overwrite; private Boolean silent; + private String streamTransactionId; public DocumentCreateOptions() { super(); @@ -43,8 +44,7 @@ public Boolean getWaitForSync() { } /** - * @param waitForSync - * Wait until document has been synced to disk. + * @param waitForSync Wait until document has been synced to disk. * @return options */ public DocumentCreateOptions waitForSync(final Boolean waitForSync) { @@ -57,8 +57,7 @@ public Boolean getReturnNew() { } /** - * @param returnNew - * Return additionally the complete new document under the attribute new in the result. + * @param returnNew Return additionally the complete new document under the attribute new in the result. 
* @return options */ public DocumentCreateOptions returnNew(final Boolean returnNew) { @@ -71,11 +70,10 @@ public Boolean getReturnOld() { } /** - * @param returnOld - * Additionally return the complete old document under the attribute old in the result. Only available if - * the {@code overwrite} option is used. - * @since ArangoDB 3.4 + * @param returnOld Additionally return the complete old document under the attribute old in the result. Only + * available if the {@code overwrite} option is used. * @return options + * @since ArangoDB 3.4 */ public DocumentCreateOptions returnOld(final Boolean returnOld) { this.returnOld = returnOld; @@ -87,12 +85,11 @@ public Boolean getOverwrite() { } /** - * @param overwrite - * If set to true, the insert becomes a replace-insert. If a document with the same {@code _key} - * already exists the new document is not rejected with unique constraint violated but will replace the - * old document. - * @since ArangoDB 3.4 + * @param overwrite If set to true, the insert becomes a replace-insert. If a document with the same {@code _key} + * already exists the new document is not rejected with unique constraint violated but will replace + * the old document. * @return options + * @since ArangoDB 3.4 */ public DocumentCreateOptions overwrite(final Boolean overwrite) { this.overwrite = overwrite; @@ -104,9 +101,8 @@ public Boolean getSilent() { } /** - * @param silent - * If set to true, an empty object will be returned as response. No meta-data will be returned for the - * created document. This option can be used to save some network traffic. + * @param silent If set to true, an empty object will be returned as response. No meta-data will be returned for the + * created document. This option can be used to save some network traffic. 
* @return options */ public DocumentCreateOptions silent(final Boolean silent) { @@ -114,4 +110,18 @@ public DocumentCreateOptions silent(final Boolean silent) { return this; } + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. + * @return options + * @since ArangoDB 3.5.0 + */ + public DocumentCreateOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + } diff --git a/src/main/java/com/arangodb/model/DocumentDeleteOptions.java b/src/main/java/com/arangodb/model/DocumentDeleteOptions.java index 6908af233..642654e39 100644 --- a/src/main/java/com/arangodb/model/DocumentDeleteOptions.java +++ b/src/main/java/com/arangodb/model/DocumentDeleteOptions.java @@ -22,9 +22,9 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli * @see API - * Documentation + * Documentation */ public class DocumentDeleteOptions { @@ -32,6 +32,7 @@ public class DocumentDeleteOptions { private String ifMatch; private Boolean returnOld; private Boolean silent; + private String streamTransactionId; public DocumentDeleteOptions() { super(); @@ -42,8 +43,7 @@ public Boolean getWaitForSync() { } /** - * @param waitForSync - * Wait until deletion operation has been synced to disk. + * @param waitForSync Wait until deletion operation has been synced to disk. 
* @return options */ public DocumentDeleteOptions waitForSync(final Boolean waitForSync) { @@ -56,8 +56,7 @@ public String getIfMatch() { } /** - * @param ifMatch - * remove a document based on a target revision + * @param ifMatch remove a document based on a target revision * @return options */ public DocumentDeleteOptions ifMatch(final String ifMatch) { @@ -70,9 +69,8 @@ public Boolean getReturnOld() { } /** - * @param returnOld - * Return additionally the complete previous revision of the changed document under the attribute old in - * the result. + * @param returnOld Return additionally the complete previous revision of the changed document under the attribute old in + * the result. * @return options */ public DocumentDeleteOptions returnOld(final Boolean returnOld) { @@ -85,9 +83,8 @@ public Boolean getSilent() { } /** - * @param silent - * If set to true, an empty object will be returned as response. No meta-data will be returned for the - * created document. This option can be used to save some network traffic. + * @param silent If set to true, an empty object will be returned as response. No meta-data will be returned for the + * created document. This option can be used to save some network traffic. * @return options */ public DocumentDeleteOptions silent(final Boolean silent) { @@ -95,4 +92,18 @@ public DocumentDeleteOptions silent(final Boolean silent) { return this; } + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. 
+ * @return options + * @since ArangoDB 3.5.0 + */ + public DocumentDeleteOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + } diff --git a/src/main/java/com/arangodb/model/DocumentExistsOptions.java b/src/main/java/com/arangodb/model/DocumentExistsOptions.java index 17d7784b5..b139f09cd 100644 --- a/src/main/java/com/arangodb/model/DocumentExistsOptions.java +++ b/src/main/java/com/arangodb/model/DocumentExistsOptions.java @@ -22,15 +22,16 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli * @see API - * Documentation + * Documentation */ public class DocumentExistsOptions { private String ifNoneMatch; private String ifMatch; private boolean catchException; + private String streamTransactionId; public DocumentExistsOptions() { super(); @@ -42,8 +43,7 @@ public String getIfNoneMatch() { } /** - * @param ifNoneMatch - * document revision must not contain If-None-Match + * @param ifNoneMatch document revision must not contain If-None-Match * @return options */ public DocumentExistsOptions ifNoneMatch(final String ifNoneMatch) { @@ -56,8 +56,7 @@ public String getIfMatch() { } /** - * @param ifMatch - * document revision must contain If-Match + * @param ifMatch document revision must contain If-Match * @return options */ public DocumentExistsOptions ifMatch(final String ifMatch) { @@ -70,8 +69,7 @@ public boolean isCatchException() { } /** - * @param catchException - * whether or not catch possible thrown exceptions + * @param catchException whether or not catch possible thrown exceptions * @return options */ public DocumentExistsOptions catchException(final boolean catchException) { @@ -79,4 +77,18 @@ public DocumentExistsOptions catchException(final boolean catchException) { return this; } + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. 
+ * @return options + * @since ArangoDB 3.5.0 + */ + public DocumentExistsOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + } diff --git a/src/main/java/com/arangodb/model/DocumentReadOptions.java b/src/main/java/com/arangodb/model/DocumentReadOptions.java index 7350792f7..d8a0d658c 100644 --- a/src/main/java/com/arangodb/model/DocumentReadOptions.java +++ b/src/main/java/com/arangodb/model/DocumentReadOptions.java @@ -24,7 +24,8 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli + * * @see API * Documentation */ @@ -35,6 +36,7 @@ public class DocumentReadOptions { private boolean catchException; @Expose(serialize = false) private Boolean allowDirtyRead; + private String streamTransactionId; public DocumentReadOptions() { super(); @@ -100,4 +102,18 @@ public Boolean getAllowDirtyRead() { return allowDirtyRead; } + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. 
+ * @return options + * @since ArangoDB 3.5.0 + */ + public DocumentReadOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + } diff --git a/src/main/java/com/arangodb/model/DocumentReplaceOptions.java b/src/main/java/com/arangodb/model/DocumentReplaceOptions.java index 0d046e54f..aa737b3db 100644 --- a/src/main/java/com/arangodb/model/DocumentReplaceOptions.java +++ b/src/main/java/com/arangodb/model/DocumentReplaceOptions.java @@ -22,7 +22,8 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli + * * @see API * Documentation */ @@ -34,6 +35,7 @@ public class DocumentReplaceOptions { private Boolean returnNew; private Boolean returnOld; private Boolean silent; + private String streamTransactionId; public DocumentReplaceOptions() { super(); @@ -127,4 +129,18 @@ public DocumentReplaceOptions silent(final Boolean silent) { return this; } + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. 
+ * @return options + * @since ArangoDB 3.5.0 + */ + public DocumentReplaceOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + } diff --git a/src/main/java/com/arangodb/model/DocumentUpdateOptions.java b/src/main/java/com/arangodb/model/DocumentUpdateOptions.java index e92e3021d..b495e5e3f 100644 --- a/src/main/java/com/arangodb/model/DocumentUpdateOptions.java +++ b/src/main/java/com/arangodb/model/DocumentUpdateOptions.java @@ -22,9 +22,9 @@ /** * @author Mark Vollmary - * + * @author Michele Rastelli * @see API - * Documentation + * Documentation */ public class DocumentUpdateOptions { @@ -37,6 +37,7 @@ public class DocumentUpdateOptions { private Boolean returnOld; private Boolean serializeNull; private Boolean silent; + private String streamTransactionId; public DocumentUpdateOptions() { super(); @@ -47,11 +48,10 @@ public Boolean getKeepNull() { } /** - * @param keepNull - * If the intention is to delete existing attributes with the patch command, the URL query parameter - * keepNull can be used with a value of false. This will modify the behavior of the patch command to - * remove any attributes from the existing document that are contained in the patch document with an - * attribute value of null. + * @param keepNull If the intention is to delete existing attributes with the patch command, the URL query parameter + * keepNull can be used with a value of false. This will modify the behavior of the patch command to + * remove any attributes from the existing document that are contained in the patch document with an + * attribute value of null. * @return options */ public DocumentUpdateOptions keepNull(final Boolean keepNull) { @@ -64,10 +64,9 @@ public Boolean getMergeObjects() { } /** - * @param mergeObjects - * Controls whether objects (not arrays) will be merged if present in both the existing and the patch - * document. 
If set to false, the value in the patch document will overwrite the existing document's - * value. If set to true, objects will be merged. The default is true. + * @param mergeObjects Controls whether objects (not arrays) will be merged if present in both the existing and the patch + * document. If set to false, the value in the patch document will overwrite the existing document's + * value. If set to true, objects will be merged. The default is true. * @return options */ public DocumentUpdateOptions mergeObjects(final Boolean mergeObjects) { @@ -80,8 +79,7 @@ public Boolean getWaitForSync() { } /** - * @param waitForSync - * Wait until document has been synced to disk. + * @param waitForSync Wait until document has been synced to disk. * @return options */ public DocumentUpdateOptions waitForSync(final Boolean waitForSync) { @@ -94,10 +92,9 @@ public Boolean getIgnoreRevs() { } /** - * @param ignoreRevs - * By default, or if this is set to true, the _rev attributes in the given document is ignored. If this - * is set to false, then the _rev attribute given in the body document is taken as a precondition. The - * document is only updated if the current revision is the one specified. + * @param ignoreRevs By default, or if this is set to true, the _rev attributes in the given document is ignored. If this + * is set to false, then the _rev attribute given in the body document is taken as a precondition. The + * document is only updated if the current revision is the one specified. 
* @return options */ public DocumentUpdateOptions ignoreRevs(final Boolean ignoreRevs) { @@ -110,8 +107,7 @@ public String getIfMatch() { } /** - * @param ifMatch - * update a document based on target revision + * @param ifMatch update a document based on target revision * @return options */ public DocumentUpdateOptions ifMatch(final String ifMatch) { @@ -124,8 +120,7 @@ public Boolean getReturnNew() { } /** - * @param returnNew - * Return additionally the complete new document under the attribute new in the result. + * @param returnNew Return additionally the complete new document under the attribute new in the result. * @return options */ public DocumentUpdateOptions returnNew(final Boolean returnNew) { @@ -138,9 +133,8 @@ public Boolean getReturnOld() { } /** - * @param returnOld - * Return additionally the complete previous revision of the changed document under the attribute old in - * the result. + * @param returnOld Return additionally the complete previous revision of the changed document under the attribute old in + * the result. * @return options */ public DocumentUpdateOptions returnOld(final Boolean returnOld) { @@ -153,10 +147,9 @@ public Boolean getSerializeNull() { } /** - * @param serializeNull - * By default, or if this is set to true, all fields of the document which have null values are - * serialized to VelocyPack otherwise they are excluded from serialization. Use this to update single - * fields from a stored document. + * @param serializeNull By default, or if this is set to true, all fields of the document which have null values are + * serialized to VelocyPack otherwise they are excluded from serialization. Use this to update single + * fields from a stored document. * @return options */ public DocumentUpdateOptions serializeNull(final Boolean serializeNull) { @@ -169,9 +162,8 @@ public Boolean getSilent() { } /** - * @param silent - * If set to true, an empty object will be returned as response. 
No meta-data will be returned for the - * created document. This option can be used to save some network traffic. + * @param silent If set to true, an empty object will be returned as response. No meta-data will be returned for the + * created document. This option can be used to save some network traffic. * @return options */ public DocumentUpdateOptions silent(final Boolean silent) { @@ -179,4 +171,18 @@ public DocumentUpdateOptions silent(final Boolean silent) { return this; } + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. + * @return options + * @since ArangoDB 3.5.0 + */ + public DocumentUpdateOptions streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return this; + } + } diff --git a/src/main/java/com/arangodb/model/StreamTransactionOptions.java b/src/main/java/com/arangodb/model/StreamTransactionOptions.java new file mode 100644 index 000000000..4e3307275 --- /dev/null +++ b/src/main/java/com/arangodb/model/StreamTransactionOptions.java @@ -0,0 +1,123 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.5.0 + */ +public class StreamTransactionOptions { + + private final TransactionCollectionOptions collections; + private Integer lockTimeout; + private Boolean waitForSync; + private Long maxTransactionSize; + + public StreamTransactionOptions() { + super(); + collections = new TransactionCollectionOptions(); + } + + public Integer getLockTimeout() { + return lockTimeout; + } + + /** + * @param lockTimeout an optional numeric value that can be used to set a timeout for waiting on collection locks. If not + * specified, a default value will be used. Setting lockTimeout to 0 will make ArangoDB not time out + * waiting for a lock. + * @return options + */ + public StreamTransactionOptions lockTimeout(final Integer lockTimeout) { + this.lockTimeout = lockTimeout; + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync an optional boolean flag that, if set, will force the transaction to write all data to disk before + * returning + * @return options + */ + public StreamTransactionOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + /** + * @param read contains the array of collection-names to be used in the transaction (mandatory) for read + * @return options + */ + public StreamTransactionOptions readCollections(final String... read) { + collections.read(read); + return this; + } + + /** + * @param write contains the array of collection-names to be used in the transaction (mandatory) for write + * @return options + */ + public StreamTransactionOptions writeCollections(final String... 
write) { + collections.write(write); + return this; + } + + /** + * @param exclusive contains the array of collection-names to be used in the transaction (mandatory) for exclusive write + * @return options + */ + public StreamTransactionOptions exclusiveCollections(final String... exclusive) { + collections.exclusive(exclusive); + return this; + } + + /** + * @param allowImplicit Collections that will be written to in the transaction must be declared with the write attribute or it + * will fail, whereas non-declared collections from which is solely read will be added lazily. The + * optional attribute allowImplicit can be set to false to let transactions fail in case of undeclared + * collections for reading. Collections for reading should be fully declared if possible, to avoid + * deadlocks. + * @return options + */ + public StreamTransactionOptions allowImplicit(final Boolean allowImplicit) { + collections.allowImplicit(allowImplicit); + return this; + } + + public Long getMaxTransactionSize() { + return maxTransactionSize; + } + + /** + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. + * @return options + */ + public StreamTransactionOptions maxTransactionSize(final Long maxTransactionSize) { + this.maxTransactionSize = maxTransactionSize; + return this; + } + +} diff --git a/src/main/java/com/arangodb/model/TransactionCollectionOptions.java b/src/main/java/com/arangodb/model/TransactionCollectionOptions.java new file mode 100644 index 000000000..551b87698 --- /dev/null +++ b/src/main/java/com/arangodb/model/TransactionCollectionOptions.java @@ -0,0 +1,73 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import java.util.Arrays; +import java.util.Collection; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class TransactionCollectionOptions { + + private Collection read; + private Collection write; + private Collection exclusive; + private Boolean allowImplicit; + + public Collection getRead() { + return read; + } + + public TransactionCollectionOptions read(final String... read) { + this.read = Arrays.asList(read); + return this; + } + + public Collection getWrite() { + return write; + } + + public TransactionCollectionOptions write(final String... write) { + this.write = Arrays.asList(write); + return this; + } + + public Collection getExclusive() { + return exclusive; + } + + public TransactionCollectionOptions exclusive(final String... 
exclusive) { + this.exclusive = Arrays.asList(exclusive); + return this; + } + + public Boolean getAllowImplicit() { + return allowImplicit; + } + + public TransactionCollectionOptions allowImplicit(final Boolean allowImplicit) { + this.allowImplicit = allowImplicit; + return this; + } + +} diff --git a/src/main/java/com/arangodb/model/TransactionOptions.java b/src/main/java/com/arangodb/model/TransactionOptions.java index 4bf7b19f2..99d4e0c60 100644 --- a/src/main/java/com/arangodb/model/TransactionOptions.java +++ b/src/main/java/com/arangodb/model/TransactionOptions.java @@ -20,14 +20,11 @@ package com.arangodb.model; -import java.util.Arrays; -import java.util.Collection; - /** * @author Mark Vollmary - * + * @author Michele Rastelli * @see API - * Documentation + * Documentation */ public class TransactionOptions { @@ -37,8 +34,6 @@ public class TransactionOptions { private Integer lockTimeout; private Boolean waitForSync; private Long maxTransactionSize; - private Long intermediateCommitCount; - private Long intermediateCommitSize; public TransactionOptions() { super(); @@ -50,8 +45,7 @@ protected String getAction() { } /** - * @param action - * the actual transaction operations to be executed, in the form of stringified JavaScript code + * @param action the actual transaction operations to be executed, in the form of stringified JavaScript code * @return options */ protected TransactionOptions action(final String action) { @@ -64,8 +58,7 @@ public Object getParams() { } /** - * @param params - * optional arguments passed to action + * @param params optional arguments passed to action * @return options */ public TransactionOptions params(final Object params) { @@ -78,10 +71,9 @@ public Integer getLockTimeout() { } /** - * @param lockTimeout - * an optional numeric value that can be used to set a timeout for waiting on collection locks. If not - * specified, a default value will be used. 
Setting lockTimeout to 0 will make ArangoDB not time out - * waiting for a lock. + * @param lockTimeout an optional numeric value that can be used to set a timeout for waiting on collection locks. If not + * specified, a default value will be used. Setting lockTimeout to 0 will make ArangoDB not time out + * waiting for a lock. * @return options */ public TransactionOptions lockTimeout(final Integer lockTimeout) { @@ -94,9 +86,8 @@ public Boolean getWaitForSync() { } /** - * @param waitForSync - * an optional boolean flag that, if set, will force the transaction to write all data to disk before - * returning + * @param waitForSync an optional boolean flag that, if set, will force the transaction to write all data to disk before + * returning * @return options */ public TransactionOptions waitForSync(final Boolean waitForSync) { @@ -105,8 +96,7 @@ public TransactionOptions waitForSync(final Boolean waitForSync) { } /** - * @param read - * contains the array of collection-names to be used in the transaction (mandatory) for read + * @param read contains the array of collection-names to be used in the transaction (mandatory) for read * @return options */ public TransactionOptions readCollections(final String... read) { @@ -115,8 +105,7 @@ public TransactionOptions readCollections(final String... read) { } /** - * @param write - * contains the array of collection-names to be used in the transaction (mandatory) for write + * @param write contains the array of collection-names to be used in the transaction (mandatory) for write * @return options */ public TransactionOptions writeCollections(final String... write) { @@ -125,10 +114,9 @@ public TransactionOptions writeCollections(final String... 
write) { } /** - * @param exclusive - * contains the array of collection-names to be used in the transaction (mandatory) for exclusive write - * @since ArangoDB 3.4.0 + * @param exclusive contains the array of collection-names to be used in the transaction (mandatory) for exclusive write * @return options + * @since ArangoDB 3.4.0 */ public TransactionOptions exclusiveCollections(final String... exclusive) { collections.exclusive(exclusive); @@ -136,12 +124,11 @@ public TransactionOptions exclusiveCollections(final String... exclusive) { } /** - * @param allowImplicit - * Collections that will be written to in the transaction must be declared with the write attribute or it - * will fail, whereas non-declared collections from which is solely read will be added lazily. The - * optional attribute allowImplicit can be set to false to let transactions fail in case of undeclared - * collections for reading. Collections for reading should be fully declared if possible, to avoid - * deadlocks. + * @param allowImplicit Collections that will be written to in the transaction must be declared with the write attribute or it + * will fail, whereas non-declared collections from which is solely read will be added lazily. The + * optional attribute allowImplicit can be set to false to let transactions fail in case of undeclared + * collections for reading. Collections for reading should be fully declared if possible, to avoid + * deadlocks. * @return options */ public TransactionOptions allowImplicit(final Boolean allowImplicit) { @@ -154,91 +141,13 @@ public Long getMaxTransactionSize() { } /** - * @param maxTransactionSize - * Transaction size limit in bytes. Honored by the RocksDB storage engine only. - * @since ArangoDB 3.2.0 + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. 
* @return options + * @since ArangoDB 3.2.0 */ public TransactionOptions maxTransactionSize(final Long maxTransactionSize) { this.maxTransactionSize = maxTransactionSize; return this; } - public Long getIntermediateCommitCount() { - return intermediateCommitCount; - } - - /** - * @param intermediateCommitCount - * Maximum number of operations after which an intermediate commit is performed automatically. Honored by - * the RocksDB storage engine only. - * @since ArangoDB 3.2.0 - * @return options - */ - public TransactionOptions intermediateCommitCount(final Long intermediateCommitCount) { - this.intermediateCommitCount = intermediateCommitCount; - return this; - } - - public Long getIntermediateCommitSize() { - return intermediateCommitSize; - } - - /** - * @param intermediateCommitSize - * Maximum total size of operations after which an intermediate commit is performed automatically. - * Honored by the RocksDB storage engine only. - * @since ArangoDB 3.2.0 - * @return options - */ - public TransactionOptions intermediateCommitSize(final Long intermediateCommitSize) { - this.intermediateCommitSize = intermediateCommitSize; - return this; - } - - public static class TransactionCollectionOptions { - - private Collection read; - private Collection write; - private Collection exclusive; - private Boolean allowImplicit; - - public Collection getRead() { - return read; - } - - public TransactionCollectionOptions read(final String... read) { - this.read = Arrays.asList(read); - return this; - } - - public Collection getWrite() { - return write; - } - - public TransactionCollectionOptions write(final String... write) { - this.write = Arrays.asList(write); - return this; - } - - public Collection getExclusive() { - return exclusive; - } - - public TransactionCollectionOptions exclusive(final String... 
exclusive) { - this.exclusive = Arrays.asList(exclusive); - return this; - } - - public Boolean getAllowImplicit() { - return allowImplicit; - } - - public TransactionCollectionOptions allowImplicit(final Boolean allowImplicit) { - this.allowImplicit = allowImplicit; - return this; - } - - } - } diff --git a/src/test/java/com/arangodb/ArangoDatabaseTest.java b/src/test/java/com/arangodb/ArangoDatabaseTest.java index ab8447f2f..7df9cb947 100644 --- a/src/test/java/com/arangodb/ArangoDatabaseTest.java +++ b/src/test/java/com/arangodb/ArangoDatabaseTest.java @@ -20,1518 +20,1497 @@ package com.arangodb; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.arangodb.ArangoDB.Builder; -import com.arangodb.entity.AqlExecutionExplainEntity; +import com.arangodb.entity.*; import com.arangodb.entity.AqlExecutionExplainEntity.ExecutionPlan; -import com.arangodb.entity.AqlFunctionEntity; -import com.arangodb.entity.AqlParseEntity; -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.BaseDocument; -import 
com.arangodb.entity.BaseEdgeDocument; -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.CollectionPropertiesEntity; -import com.arangodb.entity.CollectionType; import com.arangodb.entity.CursorEntity.Warning; -import com.arangodb.entity.DatabaseEntity; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.License; -import com.arangodb.entity.PathEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryCachePropertiesEntity; import com.arangodb.entity.QueryCachePropertiesEntity.CacheMode; -import com.arangodb.entity.QueryEntity; -import com.arangodb.entity.QueryExecutionState; -import com.arangodb.entity.QueryTrackingPropertiesEntity; -import com.arangodb.entity.ServerRole; -import com.arangodb.entity.ShardingStrategy; -import com.arangodb.entity.TraversalEntity; -import com.arangodb.model.AqlFunctionDeleteOptions; -import com.arangodb.model.AqlQueryOptions; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.CollectionsReadOptions; -import com.arangodb.model.GraphCreateOptions; -import com.arangodb.model.TransactionOptions; -import com.arangodb.model.TraversalOptions; +import com.arangodb.model.*; import com.arangodb.model.TraversalOptions.Direction; import com.arangodb.util.MapBuilder; import com.arangodb.velocypack.VPackBuilder; import com.arangodb.velocypack.VPackSlice; import com.arangodb.velocypack.ValueType; import com.arangodb.velocypack.exception.VPackException; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.CoreMatchers.notNullValue; +import 
static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.*; +import static org.junit.Assert.*; /** * @author Mark Vollmary - * + * @author Michele Rastelli */ @RunWith(Parameterized.class) public class ArangoDatabaseTest extends BaseTest { - - Logger LOG = LoggerFactory.getLogger(ArangoDatabaseTest.class); - - private static final String COLLECTION_NAME = "db_test"; - private static final String GRAPH_NAME = "graph_test"; - - public ArangoDatabaseTest(final Builder builder) { - super(builder); - } - - @Before - public void setUp() { - try { - ArangoCollection c = db.collection(COLLECTION_NAME); - c.drop(); - } catch (final ArangoDBException e) { - } - - try { - ArangoCollection c = db.collection(COLLECTION_NAME + "1"); - c.drop(); - } catch (final ArangoDBException e) { - } - - try { - ArangoCollection c = db.collection(COLLECTION_NAME + "2"); - c.drop(); - } catch (final ArangoDBException e) { - } - - try { - ArangoCollection c = db.collection(COLLECTION_NAME + "edge"); - c.drop(); - } catch (final ArangoDBException e) { - } - - try { - ArangoCollection c = db.collection(COLLECTION_NAME + "from"); - c.drop(); - } catch (final ArangoDBException e) { - } - - try { - ArangoCollection c = db.collection(COLLECTION_NAME + "to"); - c.drop(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void create() { - try { - final Boolean result = arangoDB.db(BaseTest.TEST_DB + "_1").create(); - assertThat(result, is(true)); - } finally { - try { - arangoDB.db(BaseTest.TEST_DB + "_1").drop(); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void getVersion() { - final ArangoDBVersion version = db.getVersion(); - assertThat(version, is(notNullValue())); - assertThat(version.getServer(), is(notNullValue())); - assertThat(version.getVersion(), is(notNullValue())); - } - - @Test - public void exists() { - assertThat(db.exists(), is(true)); - assertThat(arangoDB.db("no").exists(), is(false)); - } - - @Test - public void 
getAccessibleDatabases() { - final Collection dbs = db.getAccessibleDatabases(); - assertThat(dbs, is(notNullValue())); - assertThat(dbs.size(), greaterThan(0)); - assertThat(dbs, hasItem("_system")); - } - - @Test - public void createCollection() { - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, null); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithReplicationFactor() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, - new CollectionCreateOptions().replicationFactor(2)); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getReplicationFactor(), is(2)); - assertThat(db.collection(COLLECTION_NAME).getProperties().getSatellite(), is(nullValue())); - } catch (final ArangoDBException e) { - e.printStackTrace(); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - - } - - @Test - public void createCollectionWithMinReplicationFactor() { - - // if we do not have version at least 3.5+ => exit - if (!requireVersion(3, 5)) { - LOG.info("Skip Test 'createCollectionWithMinReplicationFactor' because feature not implemented yet."); - return; - } - - // if we do not have a cluster => exit - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, - new CollectionCreateOptions().replicationFactor(2).minReplicationFactor(2)); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getReplicationFactor(), is(2)); - assertThat(db.collection(COLLECTION_NAME).getProperties().getMinReplicationFactor(), is(2)); - 
assertThat(db.collection(COLLECTION_NAME).getProperties().getSatellite(), is(nullValue())); - db.collection(COLLECTION_NAME).drop(); - } catch (final ArangoDBException e) { - e.printStackTrace(); - } - - } - - - @Test - public void createSatelliteCollection() { - if (arangoDB.getVersion().getLicense() == License.COMMUNITY) { - LOG.info("Skip Test 'createSatelliteCollection' on COMMUNITY VERSION"); - return; - } - - if (arangoDB.getRole() == ServerRole.SINGLE) { - LOG.info("Skip Test 'createSatelliteCollection' on SINGLE SERVER"); - return; - } - - try { - - final CollectionEntity result = db.createCollection(COLLECTION_NAME, new CollectionCreateOptions().satellite(true)); - - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getReplicationFactor(), is(nullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getSatellite(), is(true)); - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithNumberOfShards() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - - try { - - final CollectionEntity result = db.createCollection(COLLECTION_NAME, new CollectionCreateOptions().numberOfShards(2)); - - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getNumberOfShards(), is(2)); - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithShardingStrategys() { - if (!requireVersion(3, 4)) { - LOG.info("Skip Test 'createCollectionWithShardingStrategys' because feature not implemented yet."); - return; - } - - if (arangoDB.getRole() == ServerRole.SINGLE) { - LOG.info("Skip Test 'createCollectionWithShardingStrategys' on SINGLE SERVER"); - return; - } - - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, new 
CollectionCreateOptions().shardingStrategy(ShardingStrategy.COMMUNITY_COMPAT.getInternalName())); - - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getShardingStrategy(), is(ShardingStrategy.COMMUNITY_COMPAT.getInternalName())); - - } catch (ArangoDBException e) { - System.out.println(e); - assertTrue(false); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithSmartJoinAttribute() { - if (!requireVersion(3, 5)) { - LOG.info("Skip Test 'createCollectionWithSmartJoinAttribute' because feature not implemented yet."); - return; - } - - if (arangoDB.getVersion().getLicense() == License.COMMUNITY) { - LOG.info("Skip Test 'createCollectionWithSmartJoinAttribute' on COMMUNITY SERVER"); - return; - } - - if (arangoDB.getRole() == ServerRole.SINGLE) { - LOG.info("Skip Test 'createCollectionWithSmartJoinAttribute' on SINGLE SERVER"); - return; - } - - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, new CollectionCreateOptions().smartJoinAttribute("test123").shardKeys("_key:")); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getSmartJoinAttribute(), is("test123")); - - } catch (Exception e) { - System.out.println(e); - assertTrue(false); - } finally { - try { - db.collection(COLLECTION_NAME).drop(); - } catch (Exception e) { - System.out.println(e); - } - } - } - - @Test - public void createCollectionWithSmartJoinAttributeWrong() { - if (!requireVersion(3, 5)) { - LOG.info("Skip Test 'createCollectionWithSmartJoinAttributeWrong' because feature not implemented yet."); - return; - } - - if (arangoDB.getVersion().getLicense() == License.COMMUNITY) { - LOG.info("Skip Test 'createCollectionWithSmartJoinAttributeWrong' on COMMUNITY SERVER"); - return; - } - - if (arangoDB.getRole() == 
ServerRole.SINGLE) { - LOG.info("Skip Test 'createCollectionWithSmartJoinAttributeWrong' on SINGLE SERVER"); - return; - } - - try { - db.createCollection(COLLECTION_NAME, new CollectionCreateOptions().smartJoinAttribute("test123")); - } catch (ArangoDBException e) { - assertThat(e.getErrorNum(), is(4006)); - assertThat(e.getResponseCode(), is(500)); - } finally { - try { - db.collection(COLLECTION_NAME).drop(); - assertTrue(false); - } catch (Exception e) { - System.out.println(e); - } - } - } - - @Test - public void createCollectionWithNumberOfShardsAndShardKey() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - LOG.info("Skip Test 'createCollectionWithNumberOfShardsAndShardKey' on SINGLE SERVER"); - return; - } - - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, - new CollectionCreateOptions().numberOfShards(2).shardKeys("a")); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - final CollectionPropertiesEntity properties = db.collection(COLLECTION_NAME).getProperties(); - assertThat(properties.getNumberOfShards(), is(2)); - assertThat(properties.getShardKeys().size(), is(1)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithNumberOfShardsAndShardKeys() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - LOG.info("Skip Test 'createCollectionWithNumberOfShardsAndShardKeys' on SINGLE SERVER"); - return; - } - - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, - new CollectionCreateOptions().numberOfShards(2).shardKeys("a", "b")); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - final CollectionPropertiesEntity properties = db.collection(COLLECTION_NAME).getProperties(); - assertThat(properties.getNumberOfShards(), is(2)); - assertThat(properties.getShardKeys().size(), is(2)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void 
createCollectionWithDistributeShardsLike() { - - if (arangoDB.getVersion().getLicense() == License.ENTERPRISE && arangoDB.getRole() != ServerRole.SINGLE) { - - - final Integer numberOfShards = 3; - - db.createCollection(COLLECTION_NAME, new CollectionCreateOptions().numberOfShards(numberOfShards)); - db.createCollection(COLLECTION_NAME + "2", new CollectionCreateOptions().distributeShardsLike(COLLECTION_NAME)); - - assertThat(db.collection(COLLECTION_NAME).getProperties().getNumberOfShards(), is(numberOfShards)); - assertThat(db.collection(COLLECTION_NAME + "2").getProperties().getNumberOfShards(), is(numberOfShards)); - } - - } - - @Test - public void deleteCollection() { - db.createCollection(COLLECTION_NAME, null); - db.collection(COLLECTION_NAME).drop(); - try { - db.collection(COLLECTION_NAME).getInfo(); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void deleteSystemCollection() { - - if (arangoDB.getRole() != ServerRole.SINGLE) { - return; - } - - final String name = "_system_test"; - db.createCollection(name, new CollectionCreateOptions().isSystem(true)); - db.collection(name).drop(true); - try { - db.collection(name).getInfo(); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void deleteSystemCollectionFail() { - if (arangoDB.getRole() != ServerRole.SINGLE) { - return; - } - final String name = "_system_test"; - db.createCollection(name, new CollectionCreateOptions().isSystem(true)); - try { - db.collection(name).drop(); - fail(); - } catch (final ArangoDBException e) { - } - db.collection(name).drop(true); - try { - db.collection(name).getInfo(); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void getIndex() { - try { - db.createCollection(COLLECTION_NAME, null); - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final IndexEntity readResult = 
db.getIndex(createResult.getId()); - assertThat(readResult.getId(), is(createResult.getId())); - assertThat(readResult.getType(), is(createResult.getType())); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void deleteIndex() { - try { - db.createCollection(COLLECTION_NAME, null); - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final String id = db.deleteIndex(createResult.getId()); - assertThat(id, is(createResult.getId())); - try { - db.getIndex(id); - fail(); - } catch (final ArangoDBException e) { - } - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void getCollections() { - try { - final Collection systemCollections = db.getCollections(null); - db.createCollection(COLLECTION_NAME + "1", null); - db.createCollection(COLLECTION_NAME + "2", null); - final Collection collections = db.getCollections(null); - assertThat(collections.size(), is(2 + systemCollections.size())); - assertThat(collections, is(notNullValue())); - } finally { - db.collection(COLLECTION_NAME + "1").drop(); - db.collection(COLLECTION_NAME + "2").drop(); - } - } - - @Test - public void getCollectionsExcludeSystem() { - try { - final CollectionsReadOptions options = new CollectionsReadOptions().excludeSystem(true); - final Collection nonSystemCollections = db.getCollections(options); - - assertThat(nonSystemCollections.size(), is(0)); - db.createCollection(COLLECTION_NAME + "1", null); - db.createCollection(COLLECTION_NAME + "2", null); - final Collection newCollections = db.getCollections(options); - assertThat(newCollections.size(), is(2)); - assertThat(newCollections, is(notNullValue())); - } catch (final ArangoDBException e) { - System.out.println(e.getErrorMessage()); - } finally { - try { - db.collection(COLLECTION_NAME + "1").drop(); - db.collection(COLLECTION_NAME + "2").drop(); - } catch (final 
ArangoDBException e) { - } - } - } - - @Test - public void grantAccess() { - try { - arangoDB.createUser("user1", "1234", null); - db.grantAccess("user1"); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void grantAccessRW() { - try { - arangoDB.createUser("user1", "1234", null); - db.grantAccess("user1", Permissions.RW); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void grantAccessRO() { - try { - arangoDB.createUser("user1", "1234", null); - db.grantAccess("user1", Permissions.RO); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void grantAccessNONE() { - try { - arangoDB.createUser("user1", "1234", null); - db.grantAccess("user1", Permissions.NONE); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void grantAccessUserNotFound() { - db.grantAccess("user1", Permissions.RW); - } - - @Test - public void revokeAccess() { - try { - arangoDB.createUser("user1", "1234", null); - db.revokeAccess("user1"); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void revokeAccessUserNotFound() { - db.revokeAccess("user1"); - } - - @Test - public void resetAccess() { - try { - arangoDB.createUser("user1", "1234", null); - db.resetAccess("user1"); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void resetAccessUserNotFound() { - db.resetAccess("user1"); - } - - @Test - public void grantDefaultCollectionAccess() { - try { - arangoDB.createUser("user1", "1234"); - db.grantDefaultCollectionAccess("user1", Permissions.RW); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void getPermissions() { - assertThat(Permissions.RW, is(db.getPermissions("root"))); - } - - @Test - public void query() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - 
db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); - assertThat(cursor, is(notNullValue())); - for (int i = 0; i < 10; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 10)); - } - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryForEach() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); - assertThat(cursor, is(notNullValue())); - final AtomicInteger i = new AtomicInteger(0); - for (; cursor.hasNext(); cursor.next()) { - i.incrementAndGet(); - } - assertThat(i.get(), is(10)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryIterate() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); - assertThat(cursor, is(notNullValue())); - final AtomicInteger i = new AtomicInteger(0); - for (; cursor.hasNext(); cursor.next()) { - i.incrementAndGet(); - } - assertThat(i.get(), is(10)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryWithCount() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test Limit 6 return i._id", null, - new AqlQueryOptions().count(true), String.class); - assertThat(cursor, is(notNullValue())); - for (int i = 0; i < 6; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 6)); - } - 
assertThat(cursor.getCount(), is(6)); - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryWithLimitAndFullCount() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test Limit 5 return i._id", null, - new AqlQueryOptions().fullCount(true), String.class); - assertThat(cursor, is(notNullValue())); - for (int i = 0; i < 5; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 5)); - } - assertThat(cursor.getStats(), is(notNullValue())); - assertThat(cursor.getStats().getFullCount(), is(10L)); - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryWithBatchSize() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, - new AqlQueryOptions().batchSize(5).count(true), String.class); - - assertThat(cursor, is(notNullValue())); - for (int i = 0; i < 10; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 10)); - } - } catch (final ArangoDBException e) { - System.out.println(e.getErrorMessage()); - System.out.println(e.getErrorNum()); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryIterateWithBatchSize() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, - new AqlQueryOptions().batchSize(5).count(true), String.class); - - assertThat(cursor, is(notNullValue())); - final AtomicInteger i = new AtomicInteger(0); - for (; cursor.hasNext(); cursor.next()) { - i.incrementAndGet(); - } - 
assertThat(i.get(), is(10)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - /** - * ignored. takes to long - */ - @Test - @Ignore - public void queryWithTTL() throws InterruptedException { - // set TTL to 1 seconds and get the second batch after 2 seconds! - final int ttl = 1; - final int wait = 2; - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, - new AqlQueryOptions().batchSize(5).ttl(ttl), String.class); - - assertThat(cursor, is(notNullValue())); - - for (int i = 0; i < 10; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 10)); - if (i == 1) { - Thread.sleep(wait * 1000); - } - } - fail("this should fail"); - } catch (final ArangoDBException ex) { - assertThat(ex.getMessage(), is("Response: 404, Error: 1600 - cursor not found")); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void changeQueryCache() { - try { - QueryCachePropertiesEntity properties = db.getQueryCacheProperties(); - assertThat(properties, is(notNullValue())); - assertThat(properties.getMode(), is(CacheMode.off)); - assertThat(properties.getMaxResults(), greaterThan(0L)); - - properties.setMode(CacheMode.on); - properties = db.setQueryCacheProperties(properties); - assertThat(properties, is(notNullValue())); - assertThat(properties.getMode(), is(CacheMode.on)); - - properties = db.getQueryCacheProperties(); - assertThat(properties.getMode(), is(CacheMode.on)); - } finally { - final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); - properties.setMode(CacheMode.off); - db.setQueryCacheProperties(properties); - } - } - - @Test - public void queryWithCache() throws InterruptedException { - if (arangoDB.getRole() != ServerRole.SINGLE) { - return; - } - try { - db.createCollection(COLLECTION_NAME, null); - for (int i 
= 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); - properties.setMode(CacheMode.on); - db.setQueryCacheProperties(properties); - - final ArangoCursor cursor = db.query("FOR t IN db_test FILTER t.age >= 10 SORT t.age RETURN t._id", - null, new AqlQueryOptions().cache(true), String.class); - - assertThat(cursor, is(notNullValue())); - assertThat(cursor.isCached(), is(false)); - - final ArangoCursor cachedCursor = db.query( - "FOR t IN db_test FILTER t.age >= 10 SORT t.age RETURN t._id", null, new AqlQueryOptions().cache(true), - String.class); - - assertThat(cachedCursor, is(notNullValue())); - assertThat(cachedCursor.isCached(), is(true)); - - } finally { - db.collection(COLLECTION_NAME).drop(); - final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); - properties.setMode(CacheMode.off); - db.setQueryCacheProperties(properties); - } - } - - @Test - public void queryWithMemoryLimit() { - try { - db.query("RETURN 'bla'", null, new AqlQueryOptions().memoryLimit(1L), String.class); - fail(); - } catch (final ArangoDBException e) { - assertThat(e.getErrorNum(), is(32)); - } - } - - @Test(expected = ArangoDBException.class) - public void queryWithFailOnWarningTrue() { - db.query("RETURN 1 / 0", null, new AqlQueryOptions().failOnWarning(true), String.class); - } - - @Test - public void queryWithFailOnWarningFalse() { - final ArangoCursor cursor = db.query("RETURN 1 / 0", null, new AqlQueryOptions().failOnWarning(false), - String.class); - assertThat(cursor.next(), is("null")); - } - - @Test - public void queryWithMaxWarningCount() { - final ArangoCursor cursorWithWarnings = db.query("RETURN 1 / 0", null, new AqlQueryOptions(), - String.class); - assertThat(cursorWithWarnings.getWarnings().size(), is(1)); - final ArangoCursor cursorWithLimitedWarnings = db.query("RETURN 1 / 0", null, - new 
AqlQueryOptions().maxWarningCount(0L), String.class); - final Collection warnings = cursorWithLimitedWarnings.getWarnings(); - if (warnings != null) { - assertThat(warnings.size(), is(0)); - } - } - - @Test - public void queryCursor() { - try { - db.createCollection(COLLECTION_NAME, null); - final int numbDocs = 10; - for (int i = 0; i < numbDocs; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final int batchSize = 5; - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, - new AqlQueryOptions().batchSize(batchSize).count(true), String.class); - assertThat(cursor, is(notNullValue())); - assertThat(cursor.getCount(), is(numbDocs)); - - final ArangoCursor cursor2 = db.cursor(cursor.getId(), String.class); - assertThat(cursor2, is(notNullValue())); - assertThat(cursor2.getCount(), is(numbDocs)); - assertThat(cursor2.hasNext(), is(true)); - - for (int i = 0; i < batchSize; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != batchSize)); - } - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void changeQueryTrackingProperties() { - try { - QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties(); - assertThat(properties, is(notNullValue())); - assertThat(properties.getEnabled(), is(true)); - assertThat(properties.getTrackSlowQueries(), is(true)); - assertThat(properties.getMaxQueryStringLength(), greaterThan(0L)); - assertThat(properties.getMaxSlowQueries(), greaterThan(0L)); - assertThat(properties.getSlowQueryThreshold(), greaterThan(0L)); - properties.setEnabled(false); - properties = db.setQueryTrackingProperties(properties); - assertThat(properties, is(notNullValue())); - assertThat(properties.getEnabled(), is(false)); - properties = db.getQueryTrackingProperties(); - assertThat(properties.getEnabled(), is(false)); - } finally { - final QueryTrackingPropertiesEntity properties = new QueryTrackingPropertiesEntity(); - 
properties.setEnabled(true); - db.setQueryTrackingProperties(properties); - } - } - - @Test - public void queryWithBindVars() throws InterruptedException { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - final BaseDocument baseDocument = new BaseDocument(); - baseDocument.addAttribute("age", 20 + i); - db.collection(COLLECTION_NAME).insertDocument(baseDocument, null); - } - final Map bindVars = new HashMap(); - bindVars.put("@coll", COLLECTION_NAME); - bindVars.put("age", 25); - - final ArangoCursor cursor = db.query("FOR t IN @@coll FILTER t.age >= @age SORT t.age RETURN t._id", - bindVars, null, String.class); - - assertThat(cursor, is(notNullValue())); - - for (int i = 0; i < 5; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 5)); - } - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryWithWarning() { - final ArangoCursor cursor = arangoDB.db().query("return 1/0", null, null, String.class); - - assertThat(cursor, is(notNullValue())); - assertThat(cursor.getWarnings(), is(notNullValue())); - } - - @Test - public void queryStream() { - if (requireVersion(3, 4)) { - final ArangoCursor cursor = db.query("FOR i IN 1..2 RETURN i", null, - new AqlQueryOptions().stream(true).count(true), VPackSlice.class); - assertThat(cursor, is(notNullValue())); - assertThat(cursor.getCount(), is(nullValue())); - } - } - - @Test - public void queryClose() throws IOException { - final ArangoCursor cursor = arangoDB.db().query("for i in 1..2 return i", null, - new AqlQueryOptions().batchSize(1), String.class); - cursor.close(); - int count = 0; - try { - for (; cursor.hasNext(); cursor.next(), count++) { - } - fail(); - } catch (final ArangoDBException e) { - assertThat(count, is(1)); - } - - } - - @Test - public void queryNoResults() throws IOException { - try { - db.createCollection(COLLECTION_NAME); - final ArangoCursor cursor = db.query("FOR i IN @@col RETURN i", - new 
MapBuilder().put("@col", COLLECTION_NAME).get(), null, BaseDocument.class); - cursor.close(); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryWithNullBindParam() throws IOException { - try { - db.createCollection(COLLECTION_NAME); - final ArangoCursor cursor = db.query("FOR i IN @@col FILTER i.test == @test RETURN i", - new MapBuilder().put("@col", COLLECTION_NAME).put("test", null).get(), null, BaseDocument.class); - cursor.close(); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryAllowDirtyRead() throws IOException { - try { - db.createCollection(COLLECTION_NAME); - final ArangoCursor cursor = db.query("FOR i IN @@col FILTER i.test == @test RETURN i", - new MapBuilder().put("@col", COLLECTION_NAME).put("test", null).get(), - new AqlQueryOptions().allowDirtyRead(true), BaseDocument.class); - cursor.close(); - } catch (ArangoDBException e) { - System.out.println(e); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void explainQuery() { - final AqlExecutionExplainEntity explain = arangoDB.db().explainQuery("for i in 1..1 return i", null, null); - assertThat(explain, is(notNullValue())); - assertThat(explain.getPlan(), is(notNullValue())); - assertThat(explain.getPlans(), is(nullValue())); - final ExecutionPlan plan = explain.getPlan(); - assertThat(plan.getCollections().size(), is(0)); - assertThat(plan.getEstimatedCost(), greaterThan(0)); - assertThat(plan.getEstimatedNrItems(), greaterThan(0)); - assertThat(plan.getVariables().size(), is(2)); - assertThat(plan.getNodes().size(), is(greaterThan(0))); - } - - @Test - public void parseQuery() { - final AqlParseEntity parse = arangoDB.db().parseQuery("for i in 1..1 return i"); - assertThat(parse, is(notNullValue())); - assertThat(parse.getBindVars(), is(empty())); - assertThat(parse.getCollections().size(), is(0)); - assertThat(parse.getAst().size(), is(1)); - } - - @Test - @Ignore - public void 
getCurrentlyRunningQueries() throws InterruptedException, ExecutionException { - final Thread t = new Thread() { - @Override - public void run() { - super.run(); - db.query("return sleep(0.2)", null, null, Void.class); - } - }; - t.start(); - Thread.sleep(100); - try { - final Collection currentlyRunningQueries = db.getCurrentlyRunningQueries(); - assertThat(currentlyRunningQueries, is(notNullValue())); - assertThat(currentlyRunningQueries.size(), is(1)); - final QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); - assertThat(queryEntity.getQuery(), is("return sleep(0.2)")); - assertThat(queryEntity.getState(), is(QueryExecutionState.EXECUTING)); - } finally { - t.join(); - } - } - - @Test - @Ignore - public void getAndClearSlowQueries() throws InterruptedException, ExecutionException { - final QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties(); - final Long slowQueryThreshold = properties.getSlowQueryThreshold(); - try { - properties.setSlowQueryThreshold(1L); - db.setQueryTrackingProperties(properties); - - db.query("return sleep(1.1)", null, null, Void.class); - final Collection slowQueries = db.getSlowQueries(); - assertThat(slowQueries, is(notNullValue())); - assertThat(slowQueries.size(), is(1)); - final QueryEntity queryEntity = slowQueries.iterator().next(); - assertThat(queryEntity.getQuery(), is("return sleep(1.1)")); - - db.clearSlowQueries(); - assertThat(db.getSlowQueries().size(), is(0)); - } finally { - properties.setSlowQueryThreshold(slowQueryThreshold); - db.setQueryTrackingProperties(properties); - } - } - - @Test - @Ignore - public void killQuery() throws InterruptedException, ExecutionException { - final Thread t = new Thread() { - @Override - public void run() { - super.run(); - try { - db.query("return sleep(0.2)", null, null, Void.class); - fail(); - } catch (final ArangoDBException e) { - } - } - }; - t.start(); - Thread.sleep(100); - final Collection currentlyRunningQueries = 
db.getCurrentlyRunningQueries(); - assertThat(currentlyRunningQueries, is(notNullValue())); - assertThat(currentlyRunningQueries.size(), is(1)); - - final QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); - db.killQuery(queryEntity.getId()); - } - - @Test - public void createGetDeleteAqlFunction() { - final Collection aqlFunctionsInitial = db.getAqlFunctions(null); - assertThat(aqlFunctionsInitial, is(empty())); - try { - db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit", - "function (celsius) { return celsius * 1.8 + 32; }", null); - - final Collection aqlFunctions = db.getAqlFunctions(null); - assertThat(aqlFunctions.size(), is(greaterThan(aqlFunctionsInitial.size()))); - } finally { - final Integer deleteCount = db.deleteAqlFunction("myfunctions::temperature::celsiustofahrenheit", null); - // compatibility with ArangoDB < 3.4 - if (requireVersion(3, 4)) { - assertThat(deleteCount, is(1)); - } else { - assertThat(deleteCount, is(nullValue())); - } - final Collection aqlFunctions = db.getAqlFunctions(null); - assertThat(aqlFunctions.size(), is(aqlFunctionsInitial.size())); - } - } - - @Test - public void createGetDeleteAqlFunctionWithNamespace() { - final Collection aqlFunctionsInitial = db.getAqlFunctions(null); - assertThat(aqlFunctionsInitial, is(empty())); - try { - db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit1", - "function (celsius) { return celsius * 1.8 + 32; }", null); - db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit2", - "function (celsius) { return celsius * 1.8 + 32; }", null); - - } finally { - final Integer deleteCount = db.deleteAqlFunction("myfunctions::temperature", - new AqlFunctionDeleteOptions().group(true)); - // compatibility with ArangoDB < 3.4 - if (requireVersion(3, 4)) { - assertThat(deleteCount, is(2)); - } else { - assertThat(deleteCount, is(nullValue())); - } - final Collection aqlFunctions = db.getAqlFunctions(null); - assertThat(aqlFunctions.size(), 
is(aqlFunctionsInitial.size())); - } - } - - @Test - public void createGraph() { - try { - final GraphEntity result = db.createGraph(GRAPH_NAME, null, null); - assertThat(result, is(notNullValue())); - assertThat(result.getName(), is(GRAPH_NAME)); - } finally { - db.graph(GRAPH_NAME).drop(); - } - } - - @Test - public void createGraphReplicationFaktor() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - try { - final String edgeCollection = COLLECTION_NAME + "edge"; - final String fromCollection = COLLECTION_NAME + "from"; - final String toCollection = COLLECTION_NAME + "to"; - final Collection edgeDefinitions = Arrays - .asList(new EdgeDefinition().collection(edgeCollection).from(fromCollection).to(toCollection)); - final GraphEntity result = db.createGraph(GRAPH_NAME, edgeDefinitions, - new GraphCreateOptions().replicationFactor(2)); - assertThat(result, is(notNullValue())); - for (final String collection : Arrays.asList(edgeCollection, fromCollection, toCollection)) { - final CollectionPropertiesEntity properties = db.collection(collection).getProperties(); - assertThat(properties.getReplicationFactor(), is(2)); - } - } finally { - db.graph(GRAPH_NAME).drop(); - } - } - - @Test - public void createGraphNumberOfShards() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - try { - final String edgeCollection = COLLECTION_NAME + "edge"; - final String fromCollection = COLLECTION_NAME + "from"; - final String toCollection = COLLECTION_NAME + "to"; - final Collection edgeDefinitions = Arrays - .asList(new EdgeDefinition().collection(edgeCollection).from(fromCollection).to(toCollection)); - final GraphEntity result = db.createGraph(GRAPH_NAME, edgeDefinitions, - new GraphCreateOptions().numberOfShards(2)); - assertThat(result, is(notNullValue())); - for (final String collection : Arrays.asList(edgeCollection, fromCollection, toCollection)) { - final CollectionPropertiesEntity properties = db.collection(collection).getProperties(); - 
assertThat(properties.getNumberOfShards(), is(2)); - } - } finally { - db.graph(GRAPH_NAME).drop(); - } - } - - @Test - public void getGraphs() { - try { - db.createGraph(GRAPH_NAME, null, null); - final Collection graphs = db.getGraphs(); - assertThat(graphs, is(notNullValue())); - assertThat(graphs.size(), is(1)); - assertThat(graphs.iterator().next().getName(), is(GRAPH_NAME)); - } finally { - db.graph(GRAPH_NAME).drop(); - } - } - - @Test - public void transactionString() { - final TransactionOptions options = new TransactionOptions().params("test"); - final String result = db.transaction("function (params) {return params;}", String.class, options); - assertThat(result, is("test")); - } - - @Test - public void transactionNumber() { - final TransactionOptions options = new TransactionOptions().params(5); - final Integer result = db.transaction("function (params) {return params;}", Integer.class, options); - assertThat(result, is(5)); - } - - @Test - public void transactionVPack() throws VPackException { - final TransactionOptions options = new TransactionOptions().params(new VPackBuilder().add("test").slice()); - final VPackSlice result = db.transaction("function (params) {return params;}", VPackSlice.class, options); - assertThat(result.isString(), is(true)); - assertThat(result.getAsString(), is("test")); - } - - @Test - public void transactionVPackObject() throws VPackException { - final VPackSlice params = new VPackBuilder().add(ValueType.OBJECT).add("foo", "hello").add("bar", "world") - .close().slice(); - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", - String.class, options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionVPackArray() throws VPackException { - final VPackSlice params = new VPackBuilder().add(ValueType.ARRAY).add("hello").add("world").close().slice(); - final 
TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, - options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionMap() { - final Map params = new MapBuilder().put("foo", "hello").put("bar", "world").get(); - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", - String.class, options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionArray() { - final String[] params = new String[] { "hello", "world" }; - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, - options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionCollection() { - final Collection params = new ArrayList(); - params.add("hello"); - params.add("world"); - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, - options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionInsertJson() { - try { - db.createCollection(COLLECTION_NAME); - final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"0\"}") - .writeCollections(COLLECTION_NAME); - //@formatter:off - db.transaction("function (params) { " - + "var db = require('internal').db;" - + "db." 
+ COLLECTION_NAME + ".save(JSON.parse(params));" - + "}", Void.class, options); - //@formatter:on - assertThat(db.collection(COLLECTION_NAME).count().getCount(), is(1L)); - assertThat(db.collection(COLLECTION_NAME).getDocument("0", String.class), is(notNullValue())); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void transactionExclusiveWrite() { - if (!requireVersion(3, 4)) { - return; - } - try { - db.createCollection(COLLECTION_NAME); - final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"0\"}") - .exclusiveCollections(COLLECTION_NAME); - //@formatter:off - db.transaction("function (params) { " - + "var db = require('internal').db;" - + "db." + COLLECTION_NAME + ".save(JSON.parse(params));" - + "}", Void.class, options); - //@formatter:on - assertThat(db.collection(COLLECTION_NAME).count().getCount(), is(1L)); - assertThat(db.collection(COLLECTION_NAME).getDocument("0", String.class), is(notNullValue())); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void transactionEmpty() { - db.transaction("function () {}", null, null); - } - - @Test - public void transactionallowImplicit() { - try { - db.createCollection("someCollection", null); - db.createCollection("someOtherCollection", null); - final String action = "function (params) {" + "var db = require('internal').db;" - + "return {'a':db.someCollection.all().toArray()[0], 'b':db.someOtherCollection.all().toArray()[0]};" - + "}"; - final TransactionOptions options = new TransactionOptions().readCollections("someCollection"); - db.transaction(action, VPackSlice.class, options); - try { - options.allowImplicit(false); - db.transaction(action, VPackSlice.class, options); - fail(); - } catch (final ArangoDBException e) { - } - } finally { - db.collection("someCollection").drop(); - db.collection("someOtherCollection").drop(); - } - } - - protected static class TransactionTestEntity { - private String value; - - public 
TransactionTestEntity() { - super(); - } - } - - @Test - public void transactionPojoReturn() { - final String action = "function() { return {'value':'hello world'}; }"; - final TransactionTestEntity res = db.transaction(action, TransactionTestEntity.class, new TransactionOptions()); - assertThat(res, is(notNullValue())); - assertThat(res.value, is("hello world")); - } - - @Test - public void getInfo() { - final DatabaseEntity info = db.getInfo(); - assertThat(info, is(notNullValue())); - assertThat(info.getId(), is(notNullValue())); - assertThat(info.getName(), is(TEST_DB)); - assertThat(info.getPath(), is(notNullValue())); - assertThat(info.getIsSystem(), is(false)); - } - - @Test - public void executeTraversal() { - try { - db.createCollection("person", null); - db.createCollection("knows", new CollectionCreateOptions().type(CollectionType.EDGES)); - for (final String e : new String[] { "Alice", "Bob", "Charlie", "Dave", "Eve" }) { - final BaseDocument doc = new BaseDocument(); - doc.setKey(e); - db.collection("person").insertDocument(doc, null); - } - for (final String[] e : new String[][] { new String[] { "Alice", "Bob" }, new String[] { "Bob", "Charlie" }, - new String[] { "Bob", "Dave" }, new String[] { "Eve", "Alice" }, new String[] { "Eve", "Bob" } }) { - final BaseEdgeDocument edge = new BaseEdgeDocument(); - edge.setKey(e[0] + "_knows_" + e[1]); - edge.setFrom("person/" + e[0]); - edge.setTo("person/" + e[1]); - db.collection("knows").insertDocument(edge, null); - } - final TraversalOptions options = new TraversalOptions().edgeCollection("knows").startVertex("person/Alice") - .direction(Direction.outbound); - final TraversalEntity traversal = db.executeTraversal(BaseDocument.class, - BaseEdgeDocument.class, options); - - assertThat(traversal, is(notNullValue())); - - final Collection vertices = traversal.getVertices(); - assertThat(vertices, is(notNullValue())); - assertThat(vertices.size(), is(4)); - - final Iterator verticesIterator = 
vertices.iterator(); - final Collection v = Arrays.asList(new String[] { "Alice", "Bob", "Charlie", "Dave" }); - for (; verticesIterator.hasNext();) { - assertThat(v.contains(verticesIterator.next().getKey()), is(true)); - } - - final Collection> paths = traversal.getPaths(); - assertThat(paths, is(notNullValue())); - assertThat(paths.size(), is(4)); - - assertThat(paths.iterator().hasNext(), is(true)); - final PathEntity first = paths.iterator().next(); - assertThat(first, is(notNullValue())); - assertThat(first.getEdges().size(), is(0)); - assertThat(first.getVertices().size(), is(1)); - assertThat(first.getVertices().iterator().next().getKey(), is("Alice")); - } finally { - db.collection("person").drop(); - db.collection("knows").drop(); - } - } - - @Test - public void getDocument() { - try { - db.createCollection(COLLECTION_NAME); - final BaseDocument value = new BaseDocument(); - value.setKey("123"); - db.collection(COLLECTION_NAME).insertDocument(value); - final BaseDocument document = db.getDocument(COLLECTION_NAME + "/123", BaseDocument.class); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is("123")); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void shouldIncludeExceptionMessage() { - if (!requireVersion(3, 2)) { - final String exceptionMessage = "My error context"; - final String action = "function (params) {" + "throw '" + exceptionMessage + "';" + "}"; - try { - db.transaction(action, VPackSlice.class, null); - fail(); - } catch (final ArangoDBException e) { - assertTrue(e.getException().contains(exceptionMessage)); - } - } - } - - @Test(expected = ArangoDBException.class) - public void getDocumentWrongId() { - db.getDocument("123", BaseDocument.class); - } - - @Test - public void reloadRouting() { - db.reloadRouting(); - } + + Logger LOG = LoggerFactory.getLogger(ArangoDatabaseTest.class); + + private static final String COLLECTION_NAME = "db_test"; + private static final String 
GRAPH_NAME = "graph_test"; + + public ArangoDatabaseTest(final Builder builder) { + super(builder); + } + + @Before + public void setUp() { + try { + ArangoCollection c = db.collection(COLLECTION_NAME); + c.drop(); + } catch (final ArangoDBException e) { + } + + try { + ArangoCollection c = db.collection(COLLECTION_NAME + "1"); + c.drop(); + } catch (final ArangoDBException e) { + } + + try { + ArangoCollection c = db.collection(COLLECTION_NAME + "2"); + c.drop(); + } catch (final ArangoDBException e) { + } + + try { + ArangoCollection c = db.collection(COLLECTION_NAME + "edge"); + c.drop(); + } catch (final ArangoDBException e) { + } + + try { + ArangoCollection c = db.collection(COLLECTION_NAME + "from"); + c.drop(); + } catch (final ArangoDBException e) { + } + + try { + ArangoCollection c = db.collection(COLLECTION_NAME + "to"); + c.drop(); + } catch (final ArangoDBException e) { + } + } + + @Test + public void create() { + try { + final Boolean result = arangoDB.db(BaseTest.TEST_DB + "_1").create(); + assertThat(result, is(true)); + } finally { + try { + arangoDB.db(BaseTest.TEST_DB + "_1").drop(); + } catch (final ArangoDBException e) { + } + } + } + + @Test + public void getVersion() { + final ArangoDBVersion version = db.getVersion(); + assertThat(version, is(notNullValue())); + assertThat(version.getServer(), is(notNullValue())); + assertThat(version.getVersion(), is(notNullValue())); + } + + @Test + public void getEngine() { + final ArangoDBEngine engine = db.getEngine(); + assertThat(engine, is(notNullValue())); + assertThat(engine.getName(), is(notNullValue())); + } + + @Test + public void exists() { + assertThat(db.exists(), is(true)); + assertThat(arangoDB.db("no").exists(), is(false)); + } + + @Test + public void getAccessibleDatabases() { + final Collection dbs = db.getAccessibleDatabases(); + assertThat(dbs, is(notNullValue())); + assertThat(dbs.size(), greaterThan(0)); + assertThat(dbs, hasItem("_system")); + } + + @Test + public void 
createCollection() { + try { + final CollectionEntity result = db.createCollection(COLLECTION_NAME, null); + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void createCollectionWithReplicationFactor() { + if (arangoDB.getRole() == ServerRole.SINGLE) { + return; + } + try { + final CollectionEntity result = db + .createCollection(COLLECTION_NAME, new CollectionCreateOptions().replicationFactor(2)); + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + assertThat(db.collection(COLLECTION_NAME).getProperties().getReplicationFactor(), is(2)); + assertThat(db.collection(COLLECTION_NAME).getProperties().getSatellite(), is(nullValue())); + } catch (final ArangoDBException e) { + e.printStackTrace(); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + + } + + @Test + public void createCollectionWithMinReplicationFactor() { + + // if we do not have version at least 3.5+ => exit + if (!requireVersion(3, 5)) { + LOG.info("Skip Test 'createCollectionWithMinReplicationFactor' because feature not implemented yet."); + return; + } + + // if we do not have a cluster => exit + if (arangoDB.getRole() == ServerRole.SINGLE) { + return; + } + + try { + final CollectionEntity result = db.createCollection(COLLECTION_NAME, + new CollectionCreateOptions().replicationFactor(2).minReplicationFactor(2)); + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + assertThat(db.collection(COLLECTION_NAME).getProperties().getReplicationFactor(), is(2)); + assertThat(db.collection(COLLECTION_NAME).getProperties().getMinReplicationFactor(), is(2)); + assertThat(db.collection(COLLECTION_NAME).getProperties().getSatellite(), is(nullValue())); + db.collection(COLLECTION_NAME).drop(); + } catch (final ArangoDBException e) { + e.printStackTrace(); + } + + } + + @Test + public void 
createSatelliteCollection() { + if (arangoDB.getVersion().getLicense() == License.COMMUNITY) { + LOG.info("Skip Test 'createSatelliteCollection' on COMMUNITY VERSION"); + return; + } + + if (arangoDB.getRole() == ServerRole.SINGLE) { + LOG.info("Skip Test 'createSatelliteCollection' on SINGLE SERVER"); + return; + } + + try { + + final CollectionEntity result = db + .createCollection(COLLECTION_NAME, new CollectionCreateOptions().satellite(true)); + + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + assertThat(db.collection(COLLECTION_NAME).getProperties().getReplicationFactor(), is(nullValue())); + assertThat(db.collection(COLLECTION_NAME).getProperties().getSatellite(), is(true)); + + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void createCollectionWithNumberOfShards() { + if (arangoDB.getRole() == ServerRole.SINGLE) { + return; + } + + try { + + final CollectionEntity result = db + .createCollection(COLLECTION_NAME, new CollectionCreateOptions().numberOfShards(2)); + + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + assertThat(db.collection(COLLECTION_NAME).getProperties().getNumberOfShards(), is(2)); + + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void createCollectionWithShardingStrategys() { + if (!requireVersion(3, 4)) { + LOG.info("Skip Test 'createCollectionWithShardingStrategys' because feature not implemented yet."); + return; + } + + if (arangoDB.getRole() == ServerRole.SINGLE) { + LOG.info("Skip Test 'createCollectionWithShardingStrategys' on SINGLE SERVER"); + return; + } + + try { + final CollectionEntity result = db.createCollection(COLLECTION_NAME, new CollectionCreateOptions() + .shardingStrategy(ShardingStrategy.COMMUNITY_COMPAT.getInternalName())); + + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + 
assertThat(db.collection(COLLECTION_NAME).getProperties().getShardingStrategy(), + is(ShardingStrategy.COMMUNITY_COMPAT.getInternalName())); + + } catch (ArangoDBException e) { + System.out.println(e); + assertTrue(false); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void createCollectionWithSmartJoinAttribute() { + if (!requireVersion(3, 5)) { + LOG.info("Skip Test 'createCollectionWithSmartJoinAttribute' because feature not implemented yet."); + return; + } + + if (arangoDB.getVersion().getLicense() == License.COMMUNITY) { + LOG.info("Skip Test 'createCollectionWithSmartJoinAttribute' on COMMUNITY SERVER"); + return; + } + + if (arangoDB.getRole() == ServerRole.SINGLE) { + LOG.info("Skip Test 'createCollectionWithSmartJoinAttribute' on SINGLE SERVER"); + return; + } + + try { + final CollectionEntity result = db.createCollection(COLLECTION_NAME, + new CollectionCreateOptions().smartJoinAttribute("test123").shardKeys("_key:")); + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + assertThat(db.collection(COLLECTION_NAME).getProperties().getSmartJoinAttribute(), is("test123")); + + } catch (Exception e) { + System.out.println(e); + assertTrue(false); + } finally { + try { + db.collection(COLLECTION_NAME).drop(); + } catch (Exception e) { + System.out.println(e); + } + } + } + + @Test + public void createCollectionWithSmartJoinAttributeWrong() { + if (!requireVersion(3, 5)) { + LOG.info("Skip Test 'createCollectionWithSmartJoinAttributeWrong' because feature not implemented yet."); + return; + } + + if (arangoDB.getVersion().getLicense() == License.COMMUNITY) { + LOG.info("Skip Test 'createCollectionWithSmartJoinAttributeWrong' on COMMUNITY SERVER"); + return; + } + + if (arangoDB.getRole() == ServerRole.SINGLE) { + LOG.info("Skip Test 'createCollectionWithSmartJoinAttributeWrong' on SINGLE SERVER"); + return; + } + + try { + db.createCollection(COLLECTION_NAME, new 
CollectionCreateOptions().smartJoinAttribute("test123")); + } catch (ArangoDBException e) { + assertThat(e.getErrorNum(), is(4006)); + assertThat(e.getResponseCode(), is(500)); + } finally { + try { + db.collection(COLLECTION_NAME).drop(); + assertTrue(false); + } catch (Exception e) { + System.out.println(e); + } + } + } + + @Test + public void createCollectionWithNumberOfShardsAndShardKey() { + if (arangoDB.getRole() == ServerRole.SINGLE) { + LOG.info("Skip Test 'createCollectionWithNumberOfShardsAndShardKey' on SINGLE SERVER"); + return; + } + + try { + final CollectionEntity result = db + .createCollection(COLLECTION_NAME, new CollectionCreateOptions().numberOfShards(2).shardKeys("a")); + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + final CollectionPropertiesEntity properties = db.collection(COLLECTION_NAME).getProperties(); + assertThat(properties.getNumberOfShards(), is(2)); + assertThat(properties.getShardKeys().size(), is(1)); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void createCollectionWithNumberOfShardsAndShardKeys() { + if (arangoDB.getRole() == ServerRole.SINGLE) { + LOG.info("Skip Test 'createCollectionWithNumberOfShardsAndShardKeys' on SINGLE SERVER"); + return; + } + + try { + final CollectionEntity result = db.createCollection(COLLECTION_NAME, + new CollectionCreateOptions().numberOfShards(2).shardKeys("a", "b")); + assertThat(result, is(notNullValue())); + assertThat(result.getId(), is(notNullValue())); + final CollectionPropertiesEntity properties = db.collection(COLLECTION_NAME).getProperties(); + assertThat(properties.getNumberOfShards(), is(2)); + assertThat(properties.getShardKeys().size(), is(2)); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void createCollectionWithDistributeShardsLike() { + + if (arangoDB.getVersion().getLicense() == License.ENTERPRISE && arangoDB.getRole() != ServerRole.SINGLE) { + + final Integer 
numberOfShards = 3; + + db.createCollection(COLLECTION_NAME, new CollectionCreateOptions().numberOfShards(numberOfShards)); + db.createCollection(COLLECTION_NAME + "2", + new CollectionCreateOptions().distributeShardsLike(COLLECTION_NAME)); + + assertThat(db.collection(COLLECTION_NAME).getProperties().getNumberOfShards(), is(numberOfShards)); + assertThat(db.collection(COLLECTION_NAME + "2").getProperties().getNumberOfShards(), is(numberOfShards)); + } + + } + + @Test + public void deleteCollection() { + db.createCollection(COLLECTION_NAME, null); + db.collection(COLLECTION_NAME).drop(); + try { + db.collection(COLLECTION_NAME).getInfo(); + fail(); + } catch (final ArangoDBException e) { + } + } + + @Test + public void deleteSystemCollection() { + + if (arangoDB.getRole() != ServerRole.SINGLE) { + return; + } + + final String name = "_system_test"; + db.createCollection(name, new CollectionCreateOptions().isSystem(true)); + db.collection(name).drop(true); + try { + db.collection(name).getInfo(); + fail(); + } catch (final ArangoDBException e) { + } + } + + @Test + public void deleteSystemCollectionFail() { + if (arangoDB.getRole() != ServerRole.SINGLE) { + return; + } + final String name = "_system_test"; + db.createCollection(name, new CollectionCreateOptions().isSystem(true)); + try { + db.collection(name).drop(); + fail(); + } catch (final ArangoDBException e) { + } + db.collection(name).drop(true); + try { + db.collection(name).getInfo(); + fail(); + } catch (final ArangoDBException e) { + } + } + + @Test + public void getIndex() { + try { + db.createCollection(COLLECTION_NAME, null); + final Collection fields = new ArrayList(); + fields.add("a"); + final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); + final IndexEntity readResult = db.getIndex(createResult.getId()); + assertThat(readResult.getId(), is(createResult.getId())); + assertThat(readResult.getType(), is(createResult.getType())); + } finally { + 
db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void deleteIndex() { + try { + db.createCollection(COLLECTION_NAME, null); + final Collection fields = new ArrayList(); + fields.add("a"); + final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); + final String id = db.deleteIndex(createResult.getId()); + assertThat(id, is(createResult.getId())); + try { + db.getIndex(id); + fail(); + } catch (final ArangoDBException e) { + } + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void getCollections() { + try { + final Collection systemCollections = db.getCollections(null); + db.createCollection(COLLECTION_NAME + "1", null); + db.createCollection(COLLECTION_NAME + "2", null); + final Collection collections = db.getCollections(null); + assertThat(collections.size(), is(2 + systemCollections.size())); + assertThat(collections, is(notNullValue())); + } finally { + db.collection(COLLECTION_NAME + "1").drop(); + db.collection(COLLECTION_NAME + "2").drop(); + } + } + + @Test + public void getCollectionsExcludeSystem() { + try { + final CollectionsReadOptions options = new CollectionsReadOptions().excludeSystem(true); + final Collection nonSystemCollections = db.getCollections(options); + + assertThat(nonSystemCollections.size(), is(0)); + db.createCollection(COLLECTION_NAME + "1", null); + db.createCollection(COLLECTION_NAME + "2", null); + final Collection newCollections = db.getCollections(options); + assertThat(newCollections.size(), is(2)); + assertThat(newCollections, is(notNullValue())); + } catch (final ArangoDBException e) { + System.out.println(e.getErrorMessage()); + } finally { + try { + db.collection(COLLECTION_NAME + "1").drop(); + db.collection(COLLECTION_NAME + "2").drop(); + } catch (final ArangoDBException e) { + } + } + } + + @Test + public void grantAccess() { + try { + arangoDB.createUser("user1", "1234", null); + db.grantAccess("user1"); + } finally { + 
arangoDB.deleteUser("user1"); + } + } + + @Test + public void grantAccessRW() { + try { + arangoDB.createUser("user1", "1234", null); + db.grantAccess("user1", Permissions.RW); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @Test + public void grantAccessRO() { + try { + arangoDB.createUser("user1", "1234", null); + db.grantAccess("user1", Permissions.RO); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @Test + public void grantAccessNONE() { + try { + arangoDB.createUser("user1", "1234", null); + db.grantAccess("user1", Permissions.NONE); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @Test(expected = ArangoDBException.class) + public void grantAccessUserNotFound() { + db.grantAccess("user1", Permissions.RW); + } + + @Test + public void revokeAccess() { + try { + arangoDB.createUser("user1", "1234", null); + db.revokeAccess("user1"); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @Test(expected = ArangoDBException.class) + public void revokeAccessUserNotFound() { + db.revokeAccess("user1"); + } + + @Test + public void resetAccess() { + try { + arangoDB.createUser("user1", "1234", null); + db.resetAccess("user1"); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @Test(expected = ArangoDBException.class) + public void resetAccessUserNotFound() { + db.resetAccess("user1"); + } + + @Test + public void grantDefaultCollectionAccess() { + try { + arangoDB.createUser("user1", "1234"); + db.grantDefaultCollectionAccess("user1", Permissions.RW); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @Test + public void getPermissions() { + assertThat(Permissions.RW, is(db.getPermissions("root"))); + } + + @Test + public void query() { + try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); + assertThat(cursor, 
is(notNullValue())); + for (int i = 0; i < 10; i++, cursor.next()) { + assertThat(cursor.hasNext(), is(i != 10)); + } + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void queryForEach() { + try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); + assertThat(cursor, is(notNullValue())); + final AtomicInteger i = new AtomicInteger(0); + for (; cursor.hasNext(); cursor.next()) { + i.incrementAndGet(); + } + assertThat(i.get(), is(10)); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void queryIterate() { + try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); + assertThat(cursor, is(notNullValue())); + final AtomicInteger i = new AtomicInteger(0); + for (; cursor.hasNext(); cursor.next()) { + i.incrementAndGet(); + } + assertThat(i.get(), is(10)); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void queryWithCount() { + try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in db_test Limit 6 return i._id", null, new AqlQueryOptions().count(true), + String.class); + assertThat(cursor, is(notNullValue())); + for (int i = 0; i < 6; i++, cursor.next()) { + assertThat(cursor.hasNext(), is(i != 6)); + } + assertThat(cursor.getCount(), is(6)); + + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void queryWithLimitAndFullCount() { + try { + db.createCollection(COLLECTION_NAME, 
null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in db_test Limit 5 return i._id", null, new AqlQueryOptions().fullCount(true), + String.class); + assertThat(cursor, is(notNullValue())); + for (int i = 0; i < 5; i++, cursor.next()) { + assertThat(cursor.hasNext(), is(i != 5)); + } + assertThat(cursor.getStats(), is(notNullValue())); + assertThat(cursor.getStats().getFullCount(), is(10L)); + + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void queryWithBatchSize() { + try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in db_test return i._id", null, new AqlQueryOptions().batchSize(5).count(true), + String.class); + + assertThat(cursor, is(notNullValue())); + for (int i = 0; i < 10; i++, cursor.next()) { + assertThat(cursor.hasNext(), is(i != 10)); + } + } catch (final ArangoDBException e) { + System.out.println(e.getErrorMessage()); + System.out.println(e.getErrorNum()); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void queryIterateWithBatchSize() { + try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in db_test return i._id", null, new AqlQueryOptions().batchSize(5).count(true), + String.class); + + assertThat(cursor, is(notNullValue())); + final AtomicInteger i = new AtomicInteger(0); + for (; cursor.hasNext(); cursor.next()) { + i.incrementAndGet(); + } + assertThat(i.get(), is(10)); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + /** + * ignored. 
takes to long + */ + @Test + @Ignore + public void queryWithTTL() throws InterruptedException { + // set TTL to 1 seconds and get the second batch after 2 seconds! + final int ttl = 1; + final int wait = 2; + try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in db_test return i._id", null, new AqlQueryOptions().batchSize(5).ttl(ttl), + String.class); + + assertThat(cursor, is(notNullValue())); + + for (int i = 0; i < 10; i++, cursor.next()) { + assertThat(cursor.hasNext(), is(i != 10)); + if (i == 1) { + Thread.sleep(wait * 1000); + } + } + fail("this should fail"); + } catch (final ArangoDBException ex) { + assertThat(ex.getMessage(), is("Response: 404, Error: 1600 - cursor not found")); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void changeQueryCache() { + try { + QueryCachePropertiesEntity properties = db.getQueryCacheProperties(); + assertThat(properties, is(notNullValue())); + assertThat(properties.getMode(), is(CacheMode.off)); + assertThat(properties.getMaxResults(), greaterThan(0L)); + + properties.setMode(CacheMode.on); + properties = db.setQueryCacheProperties(properties); + assertThat(properties, is(notNullValue())); + assertThat(properties.getMode(), is(CacheMode.on)); + + properties = db.getQueryCacheProperties(); + assertThat(properties.getMode(), is(CacheMode.on)); + } finally { + final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); + properties.setMode(CacheMode.off); + db.setQueryCacheProperties(properties); + } + } + + @Test + public void queryWithCache() throws InterruptedException { + if (arangoDB.getRole() != ServerRole.SINGLE) { + return; + } + try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + + final 
QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); + properties.setMode(CacheMode.on); + db.setQueryCacheProperties(properties); + + final ArangoCursor cursor = db + .query("FOR t IN db_test FILTER t.age >= 10 SORT t.age RETURN t._id", null, + new AqlQueryOptions().cache(true), String.class); + + assertThat(cursor, is(notNullValue())); + assertThat(cursor.isCached(), is(false)); + + final ArangoCursor cachedCursor = db + .query("FOR t IN db_test FILTER t.age >= 10 SORT t.age RETURN t._id", null, + new AqlQueryOptions().cache(true), String.class); + + assertThat(cachedCursor, is(notNullValue())); + assertThat(cachedCursor.isCached(), is(true)); + + } finally { + db.collection(COLLECTION_NAME).drop(); + final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); + properties.setMode(CacheMode.off); + db.setQueryCacheProperties(properties); + } + } + + @Test + public void queryWithMemoryLimit() { + try { + db.query("RETURN 'bla'", null, new AqlQueryOptions().memoryLimit(1L), String.class); + fail(); + } catch (final ArangoDBException e) { + assertThat(e.getErrorNum(), is(32)); + } + } + + @Test(expected = ArangoDBException.class) + public void queryWithFailOnWarningTrue() { + db.query("RETURN 1 / 0", null, new AqlQueryOptions().failOnWarning(true), String.class); + } + + @Test + public void queryWithFailOnWarningFalse() { + final ArangoCursor cursor = db + .query("RETURN 1 / 0", null, new AqlQueryOptions().failOnWarning(false), String.class); + assertThat(cursor.next(), is("null")); + } + + @Test + public void queryWithMaxWarningCount() { + final ArangoCursor cursorWithWarnings = db + .query("RETURN 1 / 0", null, new AqlQueryOptions(), String.class); + assertThat(cursorWithWarnings.getWarnings().size(), is(1)); + final ArangoCursor cursorWithLimitedWarnings = db + .query("RETURN 1 / 0", null, new AqlQueryOptions().maxWarningCount(0L), String.class); + final Collection warnings = cursorWithLimitedWarnings.getWarnings(); + if 
(warnings != null) { + assertThat(warnings.size(), is(0)); + } + } + + @Test + public void queryCursor() { + try { + db.createCollection(COLLECTION_NAME, null); + final int numbDocs = 10; + for (int i = 0; i < numbDocs; i++) { + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); + } + + final int batchSize = 5; + final ArangoCursor cursor = db.query("for i in db_test return i._id", null, + new AqlQueryOptions().batchSize(batchSize).count(true), String.class); + assertThat(cursor, is(notNullValue())); + assertThat(cursor.getCount(), is(numbDocs)); + + final ArangoCursor cursor2 = db.cursor(cursor.getId(), String.class); + assertThat(cursor2, is(notNullValue())); + assertThat(cursor2.getCount(), is(numbDocs)); + assertThat(cursor2.hasNext(), is(true)); + + for (int i = 0; i < batchSize; i++, cursor.next()) { + assertThat(cursor.hasNext(), is(i != batchSize)); + } + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void changeQueryTrackingProperties() { + try { + QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties(); + assertThat(properties, is(notNullValue())); + assertThat(properties.getEnabled(), is(true)); + assertThat(properties.getTrackSlowQueries(), is(true)); + assertThat(properties.getMaxQueryStringLength(), greaterThan(0L)); + assertThat(properties.getMaxSlowQueries(), greaterThan(0L)); + assertThat(properties.getSlowQueryThreshold(), greaterThan(0L)); + properties.setEnabled(false); + properties = db.setQueryTrackingProperties(properties); + assertThat(properties, is(notNullValue())); + assertThat(properties.getEnabled(), is(false)); + properties = db.getQueryTrackingProperties(); + assertThat(properties.getEnabled(), is(false)); + } finally { + final QueryTrackingPropertiesEntity properties = new QueryTrackingPropertiesEntity(); + properties.setEnabled(true); + db.setQueryTrackingProperties(properties); + } + } + + @Test + public void queryWithBindVars() throws InterruptedException { 
+ try { + db.createCollection(COLLECTION_NAME, null); + for (int i = 0; i < 10; i++) { + final BaseDocument baseDocument = new BaseDocument(); + baseDocument.addAttribute("age", 20 + i); + db.collection(COLLECTION_NAME).insertDocument(baseDocument, null); + } + final Map bindVars = new HashMap(); + bindVars.put("@coll", COLLECTION_NAME); + bindVars.put("age", 25); + + final ArangoCursor cursor = db + .query("FOR t IN @@coll FILTER t.age >= @age SORT t.age RETURN t._id", bindVars, null, + String.class); + + assertThat(cursor, is(notNullValue())); + + for (int i = 0; i < 5; i++, cursor.next()) { + assertThat(cursor.hasNext(), is(i != 5)); + } + + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void queryWithWarning() { + final ArangoCursor cursor = arangoDB.db().query("return 1/0", null, null, String.class); + + assertThat(cursor, is(notNullValue())); + assertThat(cursor.getWarnings(), is(notNullValue())); + } + + @Test + public void queryStream() { + if (requireVersion(3, 4)) { + final ArangoCursor cursor = db + .query("FOR i IN 1..2 RETURN i", null, new AqlQueryOptions().stream(true).count(true), + VPackSlice.class); + assertThat(cursor, is(notNullValue())); + assertThat(cursor.getCount(), is(nullValue())); + } + } + + @Test + public void queryClose() throws IOException { + final ArangoCursor cursor = arangoDB.db() + .query("for i in 1..2 return i", null, new AqlQueryOptions().batchSize(1), String.class); + cursor.close(); + int count = 0; + try { + for (; cursor.hasNext(); cursor.next(), count++) { + } + fail(); + } catch (final ArangoDBException e) { + assertThat(count, is(1)); + } + + } + + @Test + public void queryNoResults() throws IOException { + try { + db.createCollection(COLLECTION_NAME); + final ArangoCursor cursor = db + .query("FOR i IN @@col RETURN i", new MapBuilder().put("@col", COLLECTION_NAME).get(), null, + BaseDocument.class); + cursor.close(); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + 
@Test + public void queryWithNullBindParam() throws IOException { + try { + db.createCollection(COLLECTION_NAME); + final ArangoCursor cursor = db.query("FOR i IN @@col FILTER i.test == @test RETURN i", + new MapBuilder().put("@col", COLLECTION_NAME).put("test", null).get(), null, BaseDocument.class); + cursor.close(); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void queryAllowDirtyRead() throws IOException { + try { + db.createCollection(COLLECTION_NAME); + final ArangoCursor cursor = db.query("FOR i IN @@col FILTER i.test == @test RETURN i", + new MapBuilder().put("@col", COLLECTION_NAME).put("test", null).get(), + new AqlQueryOptions().allowDirtyRead(true), BaseDocument.class); + cursor.close(); + } catch (ArangoDBException e) { + System.out.println(e); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void explainQuery() { + final AqlExecutionExplainEntity explain = arangoDB.db().explainQuery("for i in 1..1 return i", null, null); + assertThat(explain, is(notNullValue())); + assertThat(explain.getPlan(), is(notNullValue())); + assertThat(explain.getPlans(), is(nullValue())); + final ExecutionPlan plan = explain.getPlan(); + assertThat(plan.getCollections().size(), is(0)); + assertThat(plan.getEstimatedCost(), greaterThan(0)); + assertThat(plan.getEstimatedNrItems(), greaterThan(0)); + assertThat(plan.getVariables().size(), is(2)); + assertThat(plan.getNodes().size(), is(greaterThan(0))); + } + + @Test + public void parseQuery() { + final AqlParseEntity parse = arangoDB.db().parseQuery("for i in 1..1 return i"); + assertThat(parse, is(notNullValue())); + assertThat(parse.getBindVars(), is(empty())); + assertThat(parse.getCollections().size(), is(0)); + assertThat(parse.getAst().size(), is(1)); + } + + @Test + @Ignore + public void getCurrentlyRunningQueries() throws InterruptedException, ExecutionException { + final Thread t = new Thread() { + @Override + public void run() { + super.run(); + 
db.query("return sleep(0.2)", null, null, Void.class); + } + }; + t.start(); + Thread.sleep(100); + try { + final Collection currentlyRunningQueries = db.getCurrentlyRunningQueries(); + assertThat(currentlyRunningQueries, is(notNullValue())); + assertThat(currentlyRunningQueries.size(), is(1)); + final QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); + assertThat(queryEntity.getQuery(), is("return sleep(0.2)")); + assertThat(queryEntity.getState(), is(QueryExecutionState.EXECUTING)); + } finally { + t.join(); + } + } + + @Test + @Ignore + public void getAndClearSlowQueries() throws InterruptedException, ExecutionException { + final QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties(); + final Long slowQueryThreshold = properties.getSlowQueryThreshold(); + try { + properties.setSlowQueryThreshold(1L); + db.setQueryTrackingProperties(properties); + + db.query("return sleep(1.1)", null, null, Void.class); + final Collection slowQueries = db.getSlowQueries(); + assertThat(slowQueries, is(notNullValue())); + assertThat(slowQueries.size(), is(1)); + final QueryEntity queryEntity = slowQueries.iterator().next(); + assertThat(queryEntity.getQuery(), is("return sleep(1.1)")); + + db.clearSlowQueries(); + assertThat(db.getSlowQueries().size(), is(0)); + } finally { + properties.setSlowQueryThreshold(slowQueryThreshold); + db.setQueryTrackingProperties(properties); + } + } + + @Test + @Ignore + public void killQuery() throws InterruptedException, ExecutionException { + final Thread t = new Thread() { + @Override + public void run() { + super.run(); + try { + db.query("return sleep(0.2)", null, null, Void.class); + fail(); + } catch (final ArangoDBException e) { + } + } + }; + t.start(); + Thread.sleep(100); + final Collection currentlyRunningQueries = db.getCurrentlyRunningQueries(); + assertThat(currentlyRunningQueries, is(notNullValue())); + assertThat(currentlyRunningQueries.size(), is(1)); + + final QueryEntity queryEntity = 
currentlyRunningQueries.iterator().next(); + db.killQuery(queryEntity.getId()); + } + + @Test + public void createGetDeleteAqlFunction() { + final Collection aqlFunctionsInitial = db.getAqlFunctions(null); + assertThat(aqlFunctionsInitial, is(empty())); + try { + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit", + "function (celsius) { return celsius * 1.8 + 32; }", null); + + final Collection aqlFunctions = db.getAqlFunctions(null); + assertThat(aqlFunctions.size(), is(greaterThan(aqlFunctionsInitial.size()))); + } finally { + final Integer deleteCount = db.deleteAqlFunction("myfunctions::temperature::celsiustofahrenheit", null); + // compatibility with ArangoDB < 3.4 + if (requireVersion(3, 4)) { + assertThat(deleteCount, is(1)); + } else { + assertThat(deleteCount, is(nullValue())); + } + final Collection aqlFunctions = db.getAqlFunctions(null); + assertThat(aqlFunctions.size(), is(aqlFunctionsInitial.size())); + } + } + + @Test + public void createGetDeleteAqlFunctionWithNamespace() { + final Collection aqlFunctionsInitial = db.getAqlFunctions(null); + assertThat(aqlFunctionsInitial, is(empty())); + try { + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit1", + "function (celsius) { return celsius * 1.8 + 32; }", null); + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit2", + "function (celsius) { return celsius * 1.8 + 32; }", null); + + } finally { + final Integer deleteCount = db + .deleteAqlFunction("myfunctions::temperature", new AqlFunctionDeleteOptions().group(true)); + // compatibility with ArangoDB < 3.4 + if (requireVersion(3, 4)) { + assertThat(deleteCount, is(2)); + } else { + assertThat(deleteCount, is(nullValue())); + } + final Collection aqlFunctions = db.getAqlFunctions(null); + assertThat(aqlFunctions.size(), is(aqlFunctionsInitial.size())); + } + } + + @Test + public void createGraph() { + try { + final GraphEntity result = db.createGraph(GRAPH_NAME, null, null); + assertThat(result, 
is(notNullValue())); + assertThat(result.getName(), is(GRAPH_NAME)); + } finally { + db.graph(GRAPH_NAME).drop(); + } + } + + @Test + public void createGraphReplicationFaktor() { + if (arangoDB.getRole() == ServerRole.SINGLE) { + return; + } + try { + final String edgeCollection = COLLECTION_NAME + "edge"; + final String fromCollection = COLLECTION_NAME + "from"; + final String toCollection = COLLECTION_NAME + "to"; + final Collection edgeDefinitions = Arrays + .asList(new EdgeDefinition().collection(edgeCollection).from(fromCollection).to(toCollection)); + final GraphEntity result = db + .createGraph(GRAPH_NAME, edgeDefinitions, new GraphCreateOptions().replicationFactor(2)); + assertThat(result, is(notNullValue())); + for (final String collection : Arrays.asList(edgeCollection, fromCollection, toCollection)) { + final CollectionPropertiesEntity properties = db.collection(collection).getProperties(); + assertThat(properties.getReplicationFactor(), is(2)); + } + } finally { + db.graph(GRAPH_NAME).drop(); + } + } + + @Test + public void createGraphNumberOfShards() { + if (arangoDB.getRole() == ServerRole.SINGLE) { + return; + } + try { + final String edgeCollection = COLLECTION_NAME + "edge"; + final String fromCollection = COLLECTION_NAME + "from"; + final String toCollection = COLLECTION_NAME + "to"; + final Collection edgeDefinitions = Arrays + .asList(new EdgeDefinition().collection(edgeCollection).from(fromCollection).to(toCollection)); + final GraphEntity result = db + .createGraph(GRAPH_NAME, edgeDefinitions, new GraphCreateOptions().numberOfShards(2)); + assertThat(result, is(notNullValue())); + for (final String collection : Arrays.asList(edgeCollection, fromCollection, toCollection)) { + final CollectionPropertiesEntity properties = db.collection(collection).getProperties(); + assertThat(properties.getNumberOfShards(), is(2)); + } + } finally { + db.graph(GRAPH_NAME).drop(); + } + } + + @Test + public void getGraphs() { + try { + db.createGraph(GRAPH_NAME, 
null, null); + final Collection graphs = db.getGraphs(); + assertThat(graphs, is(notNullValue())); + assertThat(graphs.size(), is(1)); + assertThat(graphs.iterator().next().getName(), is(GRAPH_NAME)); + } finally { + db.graph(GRAPH_NAME).drop(); + } + } + + @Test + public void transactionString() { + final TransactionOptions options = new TransactionOptions().params("test"); + final String result = db.transaction("function (params) {return params;}", String.class, options); + assertThat(result, is("test")); + } + + @Test + public void transactionNumber() { + final TransactionOptions options = new TransactionOptions().params(5); + final Integer result = db.transaction("function (params) {return params;}", Integer.class, options); + assertThat(result, is(5)); + } + + @Test + public void transactionVPack() throws VPackException { + final TransactionOptions options = new TransactionOptions().params(new VPackBuilder().add("test").slice()); + final VPackSlice result = db.transaction("function (params) {return params;}", VPackSlice.class, options); + assertThat(result.isString(), is(true)); + assertThat(result.getAsString(), is("test")); + } + + @Test + public void transactionVPackObject() throws VPackException { + final VPackSlice params = new VPackBuilder().add(ValueType.OBJECT).add("foo", "hello").add("bar", "world") + .close().slice(); + final TransactionOptions options = new TransactionOptions().params(params); + final String result = db + .transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", String.class, options); + assertThat(result, is("hello world")); + } + + @Test + public void transactionVPackArray() throws VPackException { + final VPackSlice params = new VPackBuilder().add(ValueType.ARRAY).add("hello").add("world").close().slice(); + final TransactionOptions options = new TransactionOptions().params(params); + final String result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, options); 
+ assertThat(result, is("hello world")); + } + + @Test + public void transactionMap() { + final Map params = new MapBuilder().put("foo", "hello").put("bar", "world").get(); + final TransactionOptions options = new TransactionOptions().params(params); + final String result = db + .transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", String.class, options); + assertThat(result, is("hello world")); + } + + @Test + public void transactionArray() { + final String[] params = new String[] { "hello", "world" }; + final TransactionOptions options = new TransactionOptions().params(params); + final String result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, options); + assertThat(result, is("hello world")); + } + + @Test + public void transactionCollection() { + final Collection params = new ArrayList(); + params.add("hello"); + params.add("world"); + final TransactionOptions options = new TransactionOptions().params(params); + final String result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, options); + assertThat(result, is("hello world")); + } + + @Test + public void transactionInsertJson() { + try { + db.createCollection(COLLECTION_NAME); + final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"0\"}") + .writeCollections(COLLECTION_NAME); + //@formatter:off + db.transaction("function (params) { " + + "var db = require('internal').db;" + + "db." 
+ COLLECTION_NAME + ".save(JSON.parse(params));" + + "}", Void.class, options); + //@formatter:on + assertThat(db.collection(COLLECTION_NAME).count().getCount(), is(1L)); + assertThat(db.collection(COLLECTION_NAME).getDocument("0", String.class), is(notNullValue())); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void transactionExclusiveWrite() { + if (!requireVersion(3, 4)) { + return; + } + try { + db.createCollection(COLLECTION_NAME); + final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"0\"}") + .exclusiveCollections(COLLECTION_NAME); + //@formatter:off + db.transaction("function (params) { " + + "var db = require('internal').db;" + + "db." + COLLECTION_NAME + ".save(JSON.parse(params));" + + "}", Void.class, options); + //@formatter:on + assertThat(db.collection(COLLECTION_NAME).count().getCount(), is(1L)); + assertThat(db.collection(COLLECTION_NAME).getDocument("0", String.class), is(notNullValue())); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void transactionEmpty() { + db.transaction("function () {}", null, null); + } + + @Test + public void transactionallowImplicit() { + try { + db.createCollection("someCollection", null); + db.createCollection("someOtherCollection", null); + final String action = "function (params) {" + "var db = require('internal').db;" + + "return {'a':db.someCollection.all().toArray()[0], 'b':db.someOtherCollection.all().toArray()[0]};" + + "}"; + final TransactionOptions options = new TransactionOptions().readCollections("someCollection"); + db.transaction(action, VPackSlice.class, options); + try { + options.allowImplicit(false); + db.transaction(action, VPackSlice.class, options); + fail(); + } catch (final ArangoDBException e) { + } + } finally { + db.collection("someCollection").drop(); + db.collection("someOtherCollection").drop(); + } + } + + protected static class TransactionTestEntity { + private String value; + + public 
TransactionTestEntity() { + super(); + } + } + + @Test + public void transactionPojoReturn() { + final String action = "function() { return {'value':'hello world'}; }"; + final TransactionTestEntity res = db.transaction(action, TransactionTestEntity.class, new TransactionOptions()); + assertThat(res, is(notNullValue())); + assertThat(res.value, is("hello world")); + } + + @Test + public void getInfo() { + final DatabaseEntity info = db.getInfo(); + assertThat(info, is(notNullValue())); + assertThat(info.getId(), is(notNullValue())); + assertThat(info.getName(), is(TEST_DB)); + assertThat(info.getPath(), is(notNullValue())); + assertThat(info.getIsSystem(), is(false)); + } + + @Test + public void executeTraversal() { + try { + db.createCollection("person", null); + db.createCollection("knows", new CollectionCreateOptions().type(CollectionType.EDGES)); + for (final String e : new String[] { "Alice", "Bob", "Charlie", "Dave", "Eve" }) { + final BaseDocument doc = new BaseDocument(); + doc.setKey(e); + db.collection("person").insertDocument(doc, null); + } + for (final String[] e : new String[][] { new String[] { "Alice", "Bob" }, new String[] { "Bob", "Charlie" }, + new String[] { "Bob", "Dave" }, new String[] { "Eve", "Alice" }, new String[] { "Eve", "Bob" } }) { + final BaseEdgeDocument edge = new BaseEdgeDocument(); + edge.setKey(e[0] + "_knows_" + e[1]); + edge.setFrom("person/" + e[0]); + edge.setTo("person/" + e[1]); + db.collection("knows").insertDocument(edge, null); + } + final TraversalOptions options = new TraversalOptions().edgeCollection("knows").startVertex("person/Alice") + .direction(Direction.outbound); + final TraversalEntity traversal = db + .executeTraversal(BaseDocument.class, BaseEdgeDocument.class, options); + + assertThat(traversal, is(notNullValue())); + + final Collection vertices = traversal.getVertices(); + assertThat(vertices, is(notNullValue())); + assertThat(vertices.size(), is(4)); + + final Iterator verticesIterator = 
vertices.iterator(); + final Collection v = Arrays.asList(new String[] { "Alice", "Bob", "Charlie", "Dave" }); + for (; verticesIterator.hasNext(); ) { + assertThat(v.contains(verticesIterator.next().getKey()), is(true)); + } + + final Collection> paths = traversal.getPaths(); + assertThat(paths, is(notNullValue())); + assertThat(paths.size(), is(4)); + + assertThat(paths.iterator().hasNext(), is(true)); + final PathEntity first = paths.iterator().next(); + assertThat(first, is(notNullValue())); + assertThat(first.getEdges().size(), is(0)); + assertThat(first.getVertices().size(), is(1)); + assertThat(first.getVertices().iterator().next().getKey(), is("Alice")); + } finally { + db.collection("person").drop(); + db.collection("knows").drop(); + } + } + + @Test + public void getDocument() { + try { + db.createCollection(COLLECTION_NAME); + final BaseDocument value = new BaseDocument(); + value.setKey("123"); + db.collection(COLLECTION_NAME).insertDocument(value); + final BaseDocument document = db.getDocument(COLLECTION_NAME + "/123", BaseDocument.class); + assertThat(document, is(notNullValue())); + assertThat(document.getKey(), is("123")); + } finally { + db.collection(COLLECTION_NAME).drop(); + } + } + + @Test + public void shouldIncludeExceptionMessage() { + if (!requireVersion(3, 2)) { + final String exceptionMessage = "My error context"; + final String action = "function (params) {" + "throw '" + exceptionMessage + "';" + "}"; + try { + db.transaction(action, VPackSlice.class, null); + fail(); + } catch (final ArangoDBException e) { + assertTrue(e.getException().contains(exceptionMessage)); + } + } + } + + @Test(expected = ArangoDBException.class) + public void getDocumentWrongId() { + db.getDocument("123", BaseDocument.class); + } + + @Test + public void reloadRouting() { + db.reloadRouting(); + } } diff --git a/src/test/java/com/arangodb/BaseTest.java b/src/test/java/com/arangodb/BaseTest.java index cee218aec..1aa769fda 100644 --- 
a/src/test/java/com/arangodb/BaseTest.java +++ b/src/test/java/com/arangodb/BaseTest.java @@ -23,21 +23,23 @@ import java.util.Arrays; import java.util.Collection; +import com.arangodb.entity.ArangoDBEngine; +import com.arangodb.entity.ServerRole; import org.junit.AfterClass; import org.junit.runners.Parameterized.Parameters; /** * @author Mark Vollmary - * + * @author Michele Rastelli */ public abstract class BaseTest { @Parameters public static Collection builders() { return Arrays.asList(// - new ArangoDB.Builder().useProtocol(Protocol.VST), // - new ArangoDB.Builder().useProtocol(Protocol.HTTP_JSON), // - new ArangoDB.Builder().useProtocol(Protocol.HTTP_VPACK) // + new ArangoDB.Builder().useProtocol(Protocol.VST), // + new ArangoDB.Builder().useProtocol(Protocol.HTTP_JSON), // + new ArangoDB.Builder().useProtocol(Protocol.HTTP_VPACK) // ); } @@ -51,9 +53,9 @@ public BaseTest(final ArangoDB.Builder builder) { if (arangoDB != null) { shutdown(); } - arangoDB = builder.build(); - db = arangoDB.db(TEST_DB); - + arangoDB = builder.build(); + db = arangoDB.db(TEST_DB); + // only create the database if not existing try { db.getVersion().getVersion(); @@ -75,4 +77,12 @@ protected boolean requireVersion(final int major, final int minor) { return Integer.valueOf(split[0]) >= major && Integer.valueOf(split[1]) >= minor; } + protected boolean requireStorageEngine(ArangoDBEngine.StorageEngineName name) { + return name.equals(arangoDB.getEngine().getName()); + } + + protected boolean requireSingleServer() { + return (arangoDB.getRole() == ServerRole.SINGLE); + } + } diff --git a/src/test/java/com/arangodb/ConcurrentStreamTransactionsTest.java b/src/test/java/com/arangodb/ConcurrentStreamTransactionsTest.java new file mode 100644 index 000000000..29cb3e671 --- /dev/null +++ b/src/test/java/com/arangodb/ConcurrentStreamTransactionsTest.java @@ -0,0 +1,128 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.ArangoDB.Builder; +import com.arangodb.entity.ArangoDBEngine; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.StreamTransactionEntity; +import com.arangodb.model.DocumentCreateOptions; +import com.arangodb.model.StreamTransactionOptions; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.UUID; + +import static org.junit.Assume.assumeTrue; + +/** + * @author Michele Rastelli + */ +@RunWith(Parameterized.class) +public class ConcurrentStreamTransactionsTest extends BaseTest { + + private static final String COLLECTION_NAME = "db_concurrent_stream_transactions_test"; + + public ConcurrentStreamTransactionsTest(final Builder builder) { + super(builder); + try { + if (db.collection(COLLECTION_NAME).exists()) + db.collection(COLLECTION_NAME).drop(); + + db.createCollection(COLLECTION_NAME, null); + } catch (final ArangoDBException e) { + + } + } + + @After + public void teardown() { + try { + db.collection(COLLECTION_NAME).drop(); + } catch (final ArangoDBException e) { + } + } + + @Test + public void conflictOnInsertDocumentWithNotYetCommittedTx() { + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx1 = db.beginStreamTransaction( + new 
StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + StreamTransactionEntity tx2 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + String key = UUID.randomUUID().toString(); + + // insert a document from within tx1 + db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(key), new DocumentCreateOptions().streamTransactionId(tx1.getId())); + + try { + // insert conflicting document from within tx2 + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(key), + new DocumentCreateOptions().streamTransactionId(tx2.getId())); + + throw new RuntimeException("This should never be thrown"); + } catch (ArangoDBException e) { + e.printStackTrace(); + } + + db.abortStreamTransaction(tx1.getId()); + db.abortStreamTransaction(tx2.getId()); + } + + @Test + public void conflictOnInsertDocumentWithAlreadyCommittedTx() { + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx1 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + StreamTransactionEntity tx2 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + String key = UUID.randomUUID().toString(); + + // insert a document from within tx1 + db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(key), new DocumentCreateOptions().streamTransactionId(tx1.getId())); + + // commit tx1 + db.commitStreamTransaction(tx1.getId()); + + try { + // insert conflicting document from within tx2 + db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(key), + new DocumentCreateOptions().streamTransactionId(tx2.getId())); + + throw new RuntimeException("This should never be thrown"); + } catch (ArangoDBException e) { + 
e.printStackTrace(); + db.abortStreamTransaction(tx2.getId()); + } + } +} diff --git a/src/test/java/com/arangodb/StreamTransactionTest.java b/src/test/java/com/arangodb/StreamTransactionTest.java new file mode 100644 index 000000000..71f498ecd --- /dev/null +++ b/src/test/java/com/arangodb/StreamTransactionTest.java @@ -0,0 +1,708 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.ArangoDB.Builder; +import com.arangodb.entity.*; +import com.arangodb.model.*; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.*; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.*; +import static org.junit.Assert.assertThat; +import static org.junit.Assume.assumeTrue; + +/** + * @author Michele Rastelli + */ +@RunWith(Parameterized.class) +public class StreamTransactionTest extends BaseTest { + + private static final String COLLECTION_NAME = "db_stream_transaction_test"; + + public StreamTransactionTest(final Builder builder) { + super(builder); + try { + if (db.collection(COLLECTION_NAME).exists()) + db.collection(COLLECTION_NAME).drop(); + + db.createCollection(COLLECTION_NAME, 
null); + } catch (final ArangoDBException e) { + + } + } + + @After + public void teardown() { + try { + db.collection(COLLECTION_NAME).drop(); + } catch (final ArangoDBException e) { + } + } + + @Test + public void beginStreamTransaction() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction(null); + assertThat(tx.getId(), is(notNullValue())); + assertThat(tx.getStatus(), is(StreamTransactionEntity.StreamTransactionStatus.running)); + db.abortStreamTransaction(tx.getId()); + } + + @Test(expected = ArangoDBException.class) + public void beginStreamTransactionWithNonExistingCollectionsShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.beginStreamTransaction(new StreamTransactionOptions().writeCollections("notExistingCollection")); + } + + @Test + public void abortStreamTransaction() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity begunTx = db.beginStreamTransaction(null); + StreamTransactionEntity abortedTx = db.abortStreamTransaction(begunTx.getId()); + + assertThat(abortedTx.getId(), is(notNullValue())); + assertThat(abortedTx.getId(), is(begunTx.getId())); + assertThat(abortedTx.getStatus(), is(StreamTransactionEntity.StreamTransactionStatus.aborted)); + } + + @Test + public void abortStreamTransactionTwice() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity begunTx = db.beginStreamTransaction(null); + db.abortStreamTransaction(begunTx.getId()); + db.abortStreamTransaction(begunTx.getId()); + } + + @Test(expected = 
ArangoDBException.class) + public void abortStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.abortStreamTransaction("000000"); + } + + @Test(expected = ArangoDBException.class) + public void abortStreamTransactionWithInvalidTransactionIdShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.abortStreamTransaction("invalidTransactionId"); + } + + @Test(expected = ArangoDBException.class) + public void abortCommittedStreamTransactionShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null); + db.commitStreamTransaction(createdTx.getId()); + db.abortStreamTransaction(createdTx.getId()); + } + + @Test + public void getStreamTransaction() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null); + StreamTransactionEntity gotTx = db.getStreamTransaction(createdTx.getId()); + + assertThat(gotTx.getId(), is(notNullValue())); + assertThat(gotTx.getId(), is(createdTx.getId())); + assertThat(gotTx.getStatus(), is(StreamTransactionEntity.StreamTransactionStatus.running)); + + db.abortStreamTransaction(createdTx.getId()); + } + + @Test(expected = ArangoDBException.class) + public void getStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.getStreamTransaction("000000"); + } + + 
@Test(expected = ArangoDBException.class) + public void getStreamTransactionWithInvalidTransactionIdShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.getStreamTransaction("invalidTransactionId"); + } + + @Test + public void commitStreamTransaction() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null); + StreamTransactionEntity committedTx = db.commitStreamTransaction(createdTx.getId()); + + assertThat(committedTx.getId(), is(notNullValue())); + assertThat(committedTx.getId(), is(createdTx.getId())); + assertThat(committedTx.getStatus(), is(StreamTransactionEntity.StreamTransactionStatus.committed)); + } + + @Test + public void commitStreamTransactionTwice() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null); + db.commitStreamTransaction(createdTx.getId()); + db.commitStreamTransaction(createdTx.getId()); + } + + @Test(expected = ArangoDBException.class) + public void commitStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.commitStreamTransaction("000000"); + } + + @Test(expected = ArangoDBException.class) + public void commitStreamTransactionWithInvalidTransactionIdShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.commitStreamTransaction("invalidTransactionId"); + } + + @Test(expected = ArangoDBException.class) + 
public void commitAbortedStreamTransactionShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null); + db.abortStreamTransaction(createdTx.getId()); + db.commitStreamTransaction(createdTx.getId()); + } + + @Test + public void getDocument() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(), null); + + // assert that the document is not found from within the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(externalDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())), is(nullValue())); + + db.abortStreamTransaction(tx.getId()); + } + + @Test(expected = ArangoDBException.class) + public void getDocumentWithNonExistingTransactionIdShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.collection(COLLECTION_NAME) + .getDocument("docId", BaseDocument.class, new DocumentReadOptions().streamTransactionId("123456")); + } + + @Test(expected = ArangoDBException.class) + public void getDocumentWithInvalidTransactionIdShouldThrow() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + db.collection(COLLECTION_NAME) + .getDocument("docId", BaseDocument.class, new DocumentReadOptions().streamTransactionId("abcde")); + } + + @Test + public void getDocuments() { 
+ assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + + // insert documents from outside the tx + DocumentCreateEntity externalDoc1 = db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(), null); + + DocumentCreateEntity externalDoc2 = db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(), null); + + // assert that the documents are not found from within the tx + MultiDocumentEntity documents = db.collection(COLLECTION_NAME) + .getDocuments(Arrays.asList(externalDoc1.getId(), externalDoc2.getId()), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())); + + assertThat(documents.getDocuments(), is(empty())); + + db.abortStreamTransaction(tx.getId()); + } + + @Test + public void insertDocument() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // insert a document from within the tx + DocumentCreateEntity txDoc = db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(), new DocumentCreateOptions().streamTransactionId(tx.getId())); + + // assert that the document is not found from outside the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(txDoc.getKey(), BaseDocument.class, null), + is(nullValue())); + + // assert that the document is found from within the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(txDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())), is(notNullValue())); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document 
is found after commit + assertThat(db.collection(COLLECTION_NAME).getDocument(txDoc.getKey(), BaseDocument.class, null), + is(notNullValue())); + } + + @Test + public void insertDocuments() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // insert documents from within the tx + MultiDocumentEntity> txDocs = db.collection(COLLECTION_NAME) + .insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument(), new BaseDocument()), + new DocumentCreateOptions().streamTransactionId(tx.getId())); + + List keys = txDocs.getDocuments().stream().map(DocumentEntity::getKey).collect(Collectors.toList()); + + // assert that the documents are not found from outside the tx + assertThat(db.collection(COLLECTION_NAME).getDocuments(keys, BaseDocument.class, null).getDocuments(), + is(empty())); + + // assert that the documents are found from within the tx + assertThat(db.collection(COLLECTION_NAME) + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())) + .getDocuments(), hasSize(keys.size())); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document is found after commit + assertThat(db.collection(COLLECTION_NAME).getDocuments(keys, BaseDocument.class, null).getDocuments(), + hasSize(keys.size())); + } + + @Test + public void replaceDocument() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(); + doc.addAttribute("test", "foo"); + + DocumentCreateEntity createdDoc = db.collection(COLLECTION_NAME).insertDocument(doc, null); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new 
StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // replace document from within the tx + doc.getProperties().clear(); + doc.addAttribute("test", "bar"); + db.collection(COLLECTION_NAME).replaceDocument(createdDoc.getKey(), doc, + new DocumentReplaceOptions().streamTransactionId(tx.getId())); + + // assert that the document has not been replaced from outside the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, null) + .getProperties().get("test"), is("foo")); + + // assert that the document has been replaced from within the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).getProperties().get("test"), is("bar")); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been replaced after commit + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, null) + .getProperties().get("test"), is("bar")); + } + + @Test + public void replaceDocuments() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + List docs = IntStream.range(0, 3).mapToObj(it -> new BaseDocument()) + .peek(doc -> doc.addAttribute("test", "foo")).collect(Collectors.toList()); + + List createdDocs = db.collection(COLLECTION_NAME) + .insertDocuments(docs, new DocumentCreateOptions().returnNew(true)).getDocuments().stream() + .map(DocumentCreateEntity::getNew).collect(Collectors.toList()); + + List keys = createdDocs.stream().map(BaseDocument::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + List modifiedDocs = createdDocs.stream().peek(doc -> { + doc.getProperties().clear(); 
+ doc.addAttribute("test", "bar"); + }).collect(Collectors.toList()); + + // replace document from within the tx + db.collection(COLLECTION_NAME) + .replaceDocuments(modifiedDocs, new DocumentReplaceOptions().streamTransactionId(tx.getId())); + + // assert that the documents has not been replaced from outside the tx + assertThat(db.collection(COLLECTION_NAME).getDocuments(keys, BaseDocument.class, null).getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))).collect(Collectors.toList()), everyItem(is("foo"))); + + // assert that the document has been replaced from within the tx + assertThat(db.collection(COLLECTION_NAME) + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())) + .getDocuments().stream().map(it -> ((String) it.getAttribute("test"))).collect(Collectors.toList()), + everyItem(is("bar"))); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been replaced after commit + assertThat(db.collection(COLLECTION_NAME).getDocuments(keys, BaseDocument.class, null).getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))).collect(Collectors.toList()), everyItem(is("bar"))); + } + + @Test + public void updateDocument() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(); + doc.addAttribute("test", "foo"); + + DocumentCreateEntity createdDoc = db.collection(COLLECTION_NAME).insertDocument(doc, null); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // update document from within the tx + doc.getProperties().clear(); + doc.addAttribute("test", "bar"); + db.collection(COLLECTION_NAME) + .updateDocument(createdDoc.getKey(), doc, new DocumentUpdateOptions().streamTransactionId(tx.getId())); + + // assert that 
the document has not been updated from outside the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, null) + .getProperties().get("test"), is("foo")); + + // assert that the document has been updated from within the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).getProperties().get("test"), is("bar")); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been updated after commit + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, null) + .getProperties().get("test"), is("bar")); + + } + + @Test + public void updateDocuments() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + List docs = IntStream.range(0, 3).mapToObj(it -> new BaseDocument()) + .peek(doc -> doc.addAttribute("test", "foo")).collect(Collectors.toList()); + + List createdDocs = db.collection(COLLECTION_NAME) + .insertDocuments(docs, new DocumentCreateOptions().returnNew(true)).getDocuments().stream() + .map(DocumentCreateEntity::getNew).collect(Collectors.toList()); + + List keys = createdDocs.stream().map(BaseDocument::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + List modifiedDocs = createdDocs.stream().peek(doc -> { + doc.getProperties().clear(); + doc.addAttribute("test", "bar"); + }).collect(Collectors.toList()); + + // update documents from within the tx + db.collection(COLLECTION_NAME) + .updateDocuments(modifiedDocs, new DocumentUpdateOptions().streamTransactionId(tx.getId())); + + // assert that the documents have not been updated from outside the tx + 
assertThat(db.collection(COLLECTION_NAME).getDocuments(keys, BaseDocument.class, null).getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))).collect(Collectors.toList()), everyItem(is("foo"))); + + // assert that the documents have been updated from within the tx + List values = db.collection(COLLECTION_NAME) + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())) + .getDocuments().stream().map(it -> ((String) it.getAttribute("test"))).collect(Collectors.toList()); + assertThat(values, everyItem(is("bar"))); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been updated after commit + assertThat(db.collection(COLLECTION_NAME).getDocuments(keys, BaseDocument.class, null).getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))).collect(Collectors.toList()), everyItem(is("bar"))); + } + + @Test + public void deleteDocument() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + DocumentCreateEntity createdDoc = db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(), null); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // delete document from within the tx + db.collection(COLLECTION_NAME) + .deleteDocument(createdDoc.getKey(), null, new DocumentDeleteOptions().streamTransactionId(tx.getId())); + + // assert that the document has not been deleted from outside the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, null), + is(notNullValue())); + + // assert that the document has been deleted from within the tx + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())), is(nullValue())); 
+ + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been deleted after commit + assertThat(db.collection(COLLECTION_NAME).getDocument(createdDoc.getKey(), BaseDocument.class, null), + is(nullValue())); + } + + @Test + public void deleteDocuments() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + List keys = db.collection(COLLECTION_NAME) + .insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument(), new BaseDocument()), null) + .getDocuments().stream().map(DocumentEntity::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // delete document from within the tx + db.collection(COLLECTION_NAME) + .deleteDocuments(keys, null, new DocumentDeleteOptions().streamTransactionId(tx.getId())); + + // assert that the documents has not been deleted from outside the tx + assertThat(db.collection(COLLECTION_NAME).getDocuments(keys, BaseDocument.class, null).getDocuments(), + hasSize(keys.size())); + + // assert that the document has been deleted from within the tx + assertThat(db.collection(COLLECTION_NAME) + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())) + .getDocuments(), is(empty())); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been deleted after commit + assertThat(db.collection(COLLECTION_NAME).getDocuments(keys, BaseDocument.class, null).getDocuments(), + is(empty())); + } + + @Test + public void documentExists() { + assumeTrue(requireSingleServer()); + assumeTrue(requireVersion(3, 5)); + assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + + 
+ // insert a document from outside the tx
+ DocumentCreateEntity<BaseDocument> externalDoc = db.collection(COLLECTION_NAME)
+ .insertDocument(new BaseDocument(), null);
+
+ // assert that the document is not found from within the tx
+ assertThat(db.collection(COLLECTION_NAME)
+ .documentExists(externalDoc.getKey(), new DocumentExistsOptions().streamTransactionId(tx.getId())),
+ is(false));
+
+ db.abortStreamTransaction(tx.getId());
+ }
+
+ @Test
+ public void count() {
+ assumeTrue(requireSingleServer());
+ assumeTrue(requireVersion(3, 5));
+ assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb));
+
+ Long initialCount = db.collection(COLLECTION_NAME).count().getCount();
+
+ StreamTransactionEntity tx = db
+ .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME));
+
+ // insert a document from outside the tx
+ db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null);
+
+ // assert that the document is not counted from within the tx
+ assertThat(db.collection(COLLECTION_NAME).count(new CollectionCountOptions().streamTransactionId(tx.getId()))
+ .getCount(), is(initialCount));
+
+ db.abortStreamTransaction(tx.getId());
+ }
+
+ @Test
+ public void truncate() {
+ assumeTrue(requireSingleServer());
+ assumeTrue(requireVersion(3, 5));
+ assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb));
+
+ db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null);
+
+ StreamTransactionEntity tx = db.beginStreamTransaction(
+ new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME));
+
+ // truncate the collection from within the tx
+ db.collection(COLLECTION_NAME).truncate(new CollectionTruncateOptions().streamTransactionId(tx.getId()));
+
+ // assert that the collection has not been truncated from outside the tx
+ assertThat(db.collection(COLLECTION_NAME).count().getCount(), is(greaterThan(0L)));
+
+ // assert that the collection has been truncated from within the tx
+ assertThat(db.collection(COLLECTION_NAME).count(new CollectionCountOptions().streamTransactionId(tx.getId()))
+ .getCount(), is(0L));
+
+ db.commitStreamTransaction(tx.getId());
+
+ // assert that the collection has been truncated after commit
+ assertThat(db.collection(COLLECTION_NAME).count().getCount(), is(0L));
+ }
+
+ @Test
+ public void createCursor() {
+ assumeTrue(requireSingleServer());
+ assumeTrue(requireVersion(3, 5));
+ assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb));
+
+ StreamTransactionEntity tx = db
+ .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME));
+
+ // insert a document from outside the tx
+ DocumentCreateEntity<BaseDocument> externalDoc = db.collection(COLLECTION_NAME)
+ .insertDocument(new BaseDocument(), null);
+
+ final Map<String, Object> bindVars = new HashMap<>();
+ bindVars.put("@collection", COLLECTION_NAME);
+ bindVars.put("key", externalDoc.getKey());
+
+ ArangoCursor<BaseDocument> cursor = db
+ .query("FOR doc IN @@collection FILTER doc._key == @key RETURN doc", bindVars,
+ new AqlQueryOptions().streamTransactionId(tx.getId()), BaseDocument.class);
+
+ // assert that the document is not found from within the tx
+ assertThat(cursor.hasNext(), is(false));
+
+ db.abortStreamTransaction(tx.getId());
+ }
+
+ @Test
+ public void nextCursor() {
+ assumeTrue(requireSingleServer());
+ assumeTrue(requireVersion(3, 5));
+ assumeTrue(requireStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb));
+
+ StreamTransactionEntity tx = db.beginStreamTransaction(
+ new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME));
+
+ // insert documents from within the tx
+ List<String> keys = db.collection(COLLECTION_NAME)
+ .insertDocuments(IntStream.range(0, 10).mapToObj(it -> new BaseDocument()).collect(Collectors.toList()),
+ new DocumentCreateOptions().streamTransactionId(tx.getId())).getDocuments().stream()
+ .map(DocumentEntity::getKey).collect(Collectors.toList());
+
+ final Map<String, Object>
bindVars = new HashMap<>();
+ bindVars.put("@collection", COLLECTION_NAME);
+ bindVars.put("keys", keys);
+
+ ArangoCursor<BaseDocument> cursor = db
+ .query("FOR doc IN @@collection FILTER CONTAINS_ARRAY(@keys, doc._key) RETURN doc", bindVars,
+ new AqlQueryOptions().streamTransactionId(tx.getId()).batchSize(2), BaseDocument.class);
+
+ List<BaseDocument> docs = cursor.asListRemaining();
+
+ // assert that all the keys are returned from the query
+ assertThat(docs.stream().map(BaseDocument::getKey).collect(Collectors.toList()),
+ containsInAnyOrder(keys.toArray()));
+
+ db.abortStreamTransaction(tx.getId());
+ }
+
+}