From ed0a0f2956561f269ceb3285a680f8c7040c0a95 Mon Sep 17 00:00:00 2001 From: Brad Schoening Date: Wed, 4 Sep 2024 18:25:09 -0400 Subject: [PATCH 1/3] Fixing spelling and minor whitespace issues in tests --- tests/integration/__init__.py | 2 +- tests/integration/advanced/__init__.py | 4 ++-- .../advanced/graph/test_graph_query.py | 4 ++-- tests/integration/advanced/test_auth.py | 6 ++--- .../integration/advanced/test_cont_paging.py | 10 ++++---- tests/integration/advanced/test_geometry.py | 2 +- tests/integration/advanced/test_spark.py | 4 ++-- tests/integration/cloud/conftest.py | 3 ++- tests/integration/cloud/test_cloud_schema.py | 4 ++-- tests/integration/conftest.py | 3 ++- .../integration/cqlengine/test_connections.py | 4 ++-- tests/integration/cqlengine/test_ifexists.py | 13 +++++----- .../integration/cqlengine/test_ifnotexists.py | 1 - .../cqlengine/test_lwt_conditional.py | 1 - tests/integration/cqlengine/test_timestamp.py | 4 +--- tests/integration/cqlengine/test_ttl.py | 6 ++--- .../column_encryption/test_policies.py | 24 ++++++++++--------- tests/integration/standard/test_concurrent.py | 8 +++---- .../standard/test_custom_payload.py | 2 ++ .../standard/test_custom_protocol_handler.py | 8 +++---- .../standard/test_cython_protocol_handlers.py | 2 +- tests/integration/util.py | 4 ++-- tests/unit/test_concurrent.py | 8 +++---- tests/unit/test_policies.py | 2 +- tests/unit/test_protocol.py | 2 +- tests/unit/test_segment.py | 9 +++---- tests/unit/test_types.py | 18 +++++++------- tests/unit/test_util_types.py | 4 ++-- tests/util.py | 2 +- 29 files changed, 83 insertions(+), 81 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e389742b74..c8c111f211 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -699,7 +699,7 @@ def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): try: execute_with_long_wait_retry(session, "DROP KEYSPACE {0}".format(keyspace_name)) except: - 
log.warning("Error encountered when droping keyspace {0}".format(keyspace_name)) + log.warning("Error encountered when dropping keyspace {0}".format(keyspace_name)) ex_type, ex, tb = sys.exc_info() log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index dffaccd190..1238d2ed72 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -88,7 +88,7 @@ def use_singledc_wth_graph_and_spark(start=True): def use_cluster_with_graph(num_nodes): """ - This is a work around to account for the fact that spark nodes will conflict over master assignment + This is a workaround to account for the fact that spark nodes will conflict over master assignment when started all at once. """ if USE_CASS_EXTERNAL: @@ -125,7 +125,7 @@ def use_cluster_with_graph(num_nodes): class BasicGeometricUnitTestCase(BasicKeyspaceUnitTestCase): """ - This base test class is used by all the geomteric tests. It contains class level teardown and setup + This base test class is used by all the geometric tests. It contains class level teardown and setup methods. It also contains the test fixtures used by those tests """ diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 0c889938d8..d1b3ef62b1 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -244,7 +244,7 @@ def _test_range_query(self, schema, graphson): """ Test to validate range queries are handled correctly. - Creates a very large line graph script and executes it. Then proceeds to to a range + Creates a very large line graph script and executes it. Then proceeds to a range limited query against it, and ensure that the results are formatted correctly and that the result set is properly sized. 
@@ -331,7 +331,7 @@ def _test_large_create_script(self, schema, graphson): @test_category dse graph """ self.execute_graph(schema.fixtures.line(150), graphson) - self.execute_graph(schema.fixtures.line(300), graphson) # This should passed since the queries are splitted + self.execute_graph(schema.fixtures.line(300), graphson) # This should pass since the queries are split self.assertRaises(SyntaxException, self.execute_graph, schema.fixtures.line(300, single_script=True), graphson) # this is not and too big def _test_large_result_set(self, schema, graphson): diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index 3443419ab4..cf8b66df55 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -67,7 +67,7 @@ class BasicDseAuthTest(unittest.TestCase): @classmethod def setUpClass(self): """ - This will setup the necessary infrastructure to run our authentication tests. It requres the ADS_HOME environment variable + This will setup the necessary infrastructure to run our authentication tests. It requires the ADS_HOME environment variable and our custom embedded apache directory server jar in order to run. 
""" if not DSE_VERSION: @@ -86,7 +86,7 @@ def setUpClass(self): self.charlie_keytab = os.path.join(self.conf_file_dir, "charlie.keytab") actual_jar = os.path.join(ADS_HOME, "embedded-ads.jar") - # Create configuration directories if they don't already exists + # Create configuration directories if they don't already exist if not os.path.exists(self.conf_file_dir): os.makedirs(self.conf_file_dir) if not os.path.exists(actual_jar): @@ -175,7 +175,7 @@ def test_should_not_authenticate_with_bad_user_ticket(self): auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider) - def test_should_not_athenticate_without_ticket(self): + def test_should_not_authenticate_without_ticket(self): """ This tests will attempt to authenticate with a user that is valid but has no ticket @since 3.20 diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py index 99de82647d..191d6f1faf 100644 --- a/tests/integration/advanced/test_cont_paging.py +++ b/tests/integration/advanced/test_cont_paging.py @@ -70,12 +70,12 @@ def create_cluster(cls): cls.select_all_statement = "SELECT * FROM {0}.{0}".format(cls.ks_name) - def test_continous_paging(self): + def test_continuous_paging(self): """ Test to ensure that various continuous paging schemes return the full set of results. @since 3.20 @jira_ticket PYTHON-615 - @expected_result various continous paging options should fetch all the results + @expected_result various continuous paging options should fetch all the results @test_category queries """ @@ -131,9 +131,9 @@ def test_paging_cancel(self): self.session_with_profiles.default_fetch_size = 1 # This combination should fetch one result a second. 
We should see a very few results results = self.session_with_profiles.execute_async(self.select_all_statement, execution_profile= "SLOW") - result_set =results.result() + result_set = results.result() result_set.cancel_continuous_paging() - result_lst =list(result_set) + result_lst = list(result_set) self.assertLess(len(result_lst), 2, "Cancel should have aborted fetch immediately") def test_con_paging_verify_writes(self): @@ -183,7 +183,7 @@ def test_con_paging_verify_writes(self): def test_can_get_results_when_no_more_pages(self): """ - Test to validate that the resutls can be fetched when + Test to validate that the results can be fetched when has_more_pages is False @since 3.20 @jira_ticket PYTHON-946 diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py index 6a6737bd50..f40e27bf48 100644 --- a/tests/integration/advanced/test_geometry.py +++ b/tests/integration/advanced/test_geometry.py @@ -35,7 +35,7 @@ class AbstractGeometricTypeTest(): def test_should_insert_simple(self): """ - This tests will attempt to insert a point, polygon, or line, using simple inline formating. + This tests will attempt to insert a point, polygon, or line, using simple inline formatting. @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py index a307913abb..ca37dc6b53 100644 --- a/tests/integration/advanced/test_spark.py +++ b/tests/integration/advanced/test_spark.py @@ -30,7 +30,7 @@ def setup_module(): @requiredse class SparkLBTests(BasicGraphUnitTestCase): """ - Test to validate that analtics query can run in a multi-node enviroment. Also check to to ensure + Test to validate that analytics query can run in a multi-node environment. 
Also check to ensure that the master spark node is correctly targeted when OLAP queries are run @since 3.20 @@ -42,7 +42,7 @@ def test_spark_analytic_query(self): self.session.execute_graph(ClassicGraphFixtures.classic()) spark_master = find_spark_master(self.session) - # Run multipltle times to ensure we don't round robin + # Run multiple times to ensure we don't round-robin for i in range(3): to_run = SimpleGraphStatement("g.V().count()") rs = self.session.execute_graph(to_run, execution_profile=EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT) diff --git a/tests/integration/cloud/conftest.py b/tests/integration/cloud/conftest.py index fb08b04194..6bfda32534 100644 --- a/tests/integration/cloud/conftest.py +++ b/tests/integration/cloud/conftest.py @@ -2,8 +2,9 @@ from tests.integration.cloud import setup_package, teardown_package + @pytest.fixture(scope='session', autouse=True) def setup_and_teardown_packages(): setup_package() yield - teardown_package() \ No newline at end of file + teardown_package() diff --git a/tests/integration/cloud/test_cloud_schema.py b/tests/integration/cloud/test_cloud_schema.py index 1d52e8e428..8dff49508a 100644 --- a/tests/integration/cloud/test_cloud_schema.py +++ b/tests/integration/cloud/test_cloud_schema.py @@ -110,9 +110,9 @@ def test_for_schema_disagreement_attribute(self): self.check_and_wait_for_agreement(session, rs, True) cluster.shutdown() - def check_and_wait_for_agreement(self, session, rs, exepected): + def check_and_wait_for_agreement(self, session, rs, expected): # Wait for RESULT_KIND_SCHEMA_CHANGE message to arrive time.sleep(1) - self.assertEqual(rs.response_future.is_schema_agreed, exepected) + self.assertEqual(rs.response_future.is_schema_agreed, expected) if not rs.response_future.is_schema_agreed: session.cluster.control_connection.wait_for_schema_agreement(wait_time=1000) \ No newline at end of file diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index b2eb9a02fc..e17ac302c8 100644 --- 
a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -2,8 +2,9 @@ from tests.integration import teardown_package + @pytest.fixture(scope='session', autouse=True) def setup_and_teardown_packages(): print('setup') yield - teardown_package() \ No newline at end of file + teardown_package() diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 15adff3380..28a0323f24 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -79,7 +79,7 @@ def test_context_connection_priority(self): """ Tests to ensure the proper connection priority is honored. - Explicit connection should have higest priority, + Explicit connection should have the highest priority, Followed by context query connection Default connection should be honored last. @@ -458,7 +458,7 @@ def test_keyspace(self): @since 3.7 @jira_ticket PYTHON-613 - @expected_result Keyspace segration is honored + @expected_result Keyspace segregation is honored @test_category object_mapper """ diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 1674bbd266..68efe077ba 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -100,7 +100,7 @@ def test_update_if_exists(self): m = TestIfExistsModel.get(id=id) self.assertEqual(m.text, 'changed_again') - m = TestIfExistsModel(id=uuid4(), count=44) # do not exists + m = TestIfExistsModel(id=uuid4(), count=44) # do not exist with self.assertRaises(LWTException) as assertion: m.if_exists().update() @@ -155,7 +155,7 @@ def test_batch_update_if_exists_success(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_mixed_update_if_exists_success(self): """ - Tests that batch update with with one bad query will still fail with LWTException + Tests that batch update with one bad query will 
still fail with LWTException @since 3.1 @jira_ticket PYTHON-432 @@ -177,7 +177,7 @@ def test_batch_mixed_update_if_exists_success(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_delete_if_exists(self): """ - Tests that delete with if_exists work, and throw proper LWT exception when they are are not applied + Tests that delete with if_exists work, and throws proper LWT exception when they are not applied @since 3.1 @jira_ticket PYTHON-432 @@ -193,7 +193,7 @@ def test_delete_if_exists(self): q = TestIfExistsModel.objects(id=id) self.assertEqual(len(q), 0) - m = TestIfExistsModel(id=uuid4(), count=44) # do not exists + m = TestIfExistsModel(id=uuid4(), count=44) # do not exist with self.assertRaises(LWTException) as assertion: m.if_exists().delete() @@ -212,7 +212,7 @@ def test_delete_if_exists(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_delete_if_exists_success(self): """ - Tests that batch deletes with if_exists work, and throw proper LWTException when they are are not applied + Tests that batch deletes with if_exists work, and throws proper LWTException when they are not applied @since 3.1 @jira_ticket PYTHON-432 @@ -243,7 +243,7 @@ def test_batch_delete_if_exists_success(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_delete_mixed(self): """ - Tests that batch deletes with multiple queries and throw proper LWTException when they are are not all applicable + Tests that batch deletes with multiple queries and throws proper LWTException when they are not all applicable @since 3.1 @jira_ticket PYTHON-432 @@ -309,4 +309,3 @@ def test_instance_raise_exception(self): id = uuid4() with self.assertRaises(IfExistsWithCounterColumn): TestIfExistsWithCounterModel.if_exists() - diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 
5b24070690..b2f2fd98c0 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -198,4 +198,3 @@ def test_instance_raise_exception(self): id = uuid4() with self.assertRaises(IfNotExistsWithCounterColumn): TestIfNotExistsWithCounterModel.if_not_exists() - diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 6f2e13361f..06fbf465da 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -158,7 +158,6 @@ def test_batch_update_conditional_several_rows(self): second_row.delete() b.execute() - def test_delete_conditional(self): # DML path t = TestConditionalModel.if_not_exists().create(text='something', count=5) diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index bfed7f9808..6ddce91099 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -199,9 +199,7 @@ def test_delete_in_the_past(self): TestTimestampModel.get(id=uid).should.be.ok - # delete the in past, should not affect the object created above + # delete in the past, should not affect the object created above TestTimestampModel.objects(id=uid).timestamp(timedelta(seconds=-60)).delete() TestTimestampModel.get(id=uid) - - diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index bd0c7f60aa..47359a03e3 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -179,7 +179,7 @@ def test_default_ttl_not_set(self): self.assertEqual(default_ttl, 0) with mock.patch.object(session, 'execute') as m: - TestTTLModel.objects(id=tid).update(text="aligators") + TestTTLModel.objects(id=tid).update(text="alligators") query = m.call_args[0][0].query_string self.assertNotIn("USING TTL", query) @@ -197,7 +197,7 @@ def 
test_default_ttl_set(self): self.assertEqual(default_ttl, 20) with mock.patch.object(session, 'execute') as m: - TestTTLModel.objects(id=tid).update(text="aligators expired") + TestTTLModel.objects(id=tid).update(text="alligators expired") # Should not be set either query = m.call_args[0][0].query_string @@ -228,7 +228,7 @@ def test_override_default_ttl(self): self.assertEqual(o._ttl, 3600) with mock.patch.object(session, 'execute') as m: - TestDefaultTTLModel.objects(id=tid).ttl(None).update(text="aligators expired") + TestDefaultTTLModel.objects(id=tid).ttl(None).update(text="alligators expired") query = m.call_args[0][0].query_string self.assertNotIn("USING TTL", query) diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py index 84c4e76871..b5caeebc96 100644 --- a/tests/integration/standard/column_encryption/test_policies.py +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -22,9 +22,11 @@ from cassandra.column_encryption.policies import AES256ColumnEncryptionPolicy, \ AES256_KEY_SIZE_BYTES, AES256_BLOCK_SIZE_BYTES + def setup_module(): use_singledc() + class ColumnEncryptionPolicyTest(unittest.TestCase): def _recreate_keyspace(self, session): @@ -34,7 +36,7 @@ def _recreate_keyspace(self, session): def _create_policy(self, key, iv = None): cl_policy = AES256ColumnEncryptionPolicy() - col_desc = ColDesc('foo','bar','encrypted') + col_desc = ColDesc('foo', 'bar', 'encrypted') cl_policy.add_column(col_desc, key, "int") return (col_desc, cl_policy) @@ -57,13 +59,13 @@ def test_end_to_end_prepared(self): # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. 
- (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + (encrypted, unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") - (encrypted,unencrypted) = session.execute(prepared, [expected]).one() + (encrypted, unencrypted) = session.execute(prepared, [expected]).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) @@ -78,20 +80,20 @@ def test_end_to_end_simple(self): self._recreate_keyspace(session) # Use encode_and_encrypt helper function to populate date - for i in range(1,100): + for i in range(1, 100): self.assertIsNotNone(i) encrypted = cl_policy.encode_and_encrypt(col_desc, i) session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)", (encrypted, i)) # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. - (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + (encrypted, unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? 
allow filtering") - (encrypted,unencrypted) = session.execute(prepared, [expected]).one() + (encrypted, unencrypted) = session.execute(prepared, [expected]).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) @@ -117,7 +119,7 @@ def test_end_to_end_different_cle_contexts_different_ivs(self): self._recreate_keyspace(session1) # Use encode_and_encrypt helper function to populate date - for i in range(1,100): + for i in range(1, 100): self.assertIsNotNone(i) encrypted = cl_policy1.encode_and_encrypt(col_desc1, i) session1.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)", (encrypted, i)) @@ -134,7 +136,7 @@ def test_end_to_end_different_cle_contexts_different_ivs(self): (_, cl_policy2) = self._create_policy(key, iv=iv2) cluster2 = TestCluster(column_encryption_policy=cl_policy2) session2 = cluster2.connect() - (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + (encrypted, unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) @@ -152,7 +154,7 @@ def test_end_to_end_different_cle_contexts_different_policies(self): self._recreate_keyspace(session) # Use encode_and_encrypt helper function to populate date - session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)",(cl_policy.encode_and_encrypt(col_desc, expected), expected)) + session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)", (cl_policy.encode_and_encrypt(col_desc, expected), expected)) # We now open a new session _without_ the CLE policy specified. We should _not_ be able to read decrypted bits from this session. 
cluster2 = TestCluster() @@ -160,11 +162,11 @@ def test_end_to_end_different_cle_contexts_different_policies(self): # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. - (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + (encrypted, unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEqual(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session2.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") - (encrypted,unencrypted) = session2.execute(prepared, [expected]).one() + (encrypted, unencrypted) = session2.execute(prepared, [expected]).one() self.assertEqual(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index ba891b4bd0..0ab57f79ba 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -107,11 +107,11 @@ def execute_concurrent_base(self, test_fn, validate_fn, zip_args=True): test_fn(self.session, statement, parameters) validate_fn(num_statements, results) - def execute_concurrent_valiate_tuple(self, num_statements, results): + def execute_concurrent_validate_tuple(self, num_statements, results): self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) - def execute_concurrent_valiate_dict(self, num_statements, results): + def execute_concurrent_validate_dict(self, num_statements, results): 
self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [{"v":i}]) for i in range(num_statements)], results) @@ -138,7 +138,7 @@ def test_execute_concurrent_with_args_generator(self): """ Test to validate that generator based results are surfaced correctly - Repeatedly inserts data into a a table and attempts to query it. It then validates that the + Repeatedly inserts data into a table and attempts to query it. It then validates that the results are returned in the order expected @since 2.7.0 @@ -212,7 +212,7 @@ def test_execute_concurrent_paged_result_generator(self): """ Test to validate that generator based results are surfaced correctly when paging is used - Inserts data into a a table and attempts to query it. It then validates that the + Inserts data into a table and attempts to query it. It then validates that the results are returned as expected (no order specified) @since 2.7.0 diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index f33ab4f04f..a97efeaa68 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -25,6 +25,8 @@ def setup_module(): #These test rely on the custom payload being returned but by default C* #ignores all the payloads. 
+ + @local class CustomPayloadTests(unittest.TestCase): diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 16d43bbd65..68ef240795 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -73,7 +73,7 @@ def test_custom_raw_uuid_row_results(self): uuid_type = result[0][0] self.assertEqual(type(uuid_type), uuid.UUID) - # use our custom protocol handlder + # use our custom protocol handler session.client_protocol_handler = CustomTestRawRowType result_set = session.execute("SELECT schema_version FROM system.local") raw_value = result_set[0][0] @@ -271,7 +271,7 @@ def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata, class CustomTestRawRowType(ProtocolHandler): """ - This is the a custom protocol handler that will substitute the the + This is a custom protocol handler that will substitute the customResultMesageRowRaw Result message for our own implementation """ my_opcodes = ProtocolHandler.message_types_by_opcode.copy() @@ -281,7 +281,7 @@ class CustomTestRawRowType(ProtocolHandler): class CustomResultMessageTracked(ResultMessage): """ - This is a custom Result Message that is use to track what primitive types + This is a custom Result Message that is used to track what primitive types have been processed when it receives results """ my_type_codes = ResultMessage.type_codes.copy() @@ -305,7 +305,7 @@ def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata, class CustomProtocolHandlerResultMessageTracked(ProtocolHandler): """ - This is the a custom protocol handler that will substitute the the + This is a custom protocol handler that will substitute the CustomTestRawRowTypeTracked Result message for our own implementation """ my_opcodes = ProtocolHandler.message_types_by_opcode.copy() diff --git 
a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 9cb5914f16..83d39407c4 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -234,7 +234,7 @@ def test_null_types(self): begin_unset = max(s.execute('select primkey from %s' % (table,))[0]['primkey']) + 1 keys_null = range(begin_unset, begin_unset + 10) - # scatter some emptry rows in here + # scatter some empty rows in here insert = "insert into %s (primkey) values (%%s)" % (table,) execute_concurrent_with_args(s, insert, ((k,) for k in keys_null)) diff --git a/tests/integration/util.py b/tests/integration/util.py index bcc4cb829b..64c101d9da 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -41,8 +41,8 @@ def assert_quiescent_pool_state(test_case, cluster, wait=None): test_case.assertFalse(state['shutdown']) test_case.assertGreater(state['open_count'], 0) no_in_flight = all((i == 0 for i in state['in_flights'])) - orphans_and_inflights = zip(state['orphan_requests'],state['in_flights']) - all_orphaned = all((len(orphans) == inflight for (orphans,inflight) in orphans_and_inflights)) + orphans_and_inflights = zip(state['orphan_requests'], state['in_flights']) + all_orphaned = all((len(orphans) == inflight for (orphans, inflight) in orphans_and_inflights)) test_case.assertTrue(no_in_flight or all_orphaned) for holder in cluster.get_connection_holders(): diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index bdfd08126e..db641b66a7 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -40,7 +40,7 @@ class MockResponseResponseFuture(): _col_names = None _col_types = None - # a list pending callbacks, these will be prioritized in reverse or normal orderd + # a list pending callbacks, these will be prioritized in reverse or normal order pending_callbacks = PriorityQueue() def 
__init__(self, reverse): @@ -179,7 +179,7 @@ def insert_and_validate_list_results(self, reverse, slowdown): This utility method will execute submit various statements for execution using the ConcurrentExecutorListResults, then invoke a separate thread to execute the callback associated with the futures registered for those statements. The parameters will toggle various timing, and ordering changes. - Finally it will validate that the results were returned in the order they were submitted + Finally, it will validate that the results were returned in the order they were submitted :param reverse: Execute the callbacks in the opposite order that they were submitted :param slowdown: Cause intermittent queries to perform slowly """ @@ -203,7 +203,7 @@ def insert_and_validate_list_generator(self, reverse, slowdown): This utility method will execute submit various statements for execution using the ConcurrentExecutorGenResults, then invoke a separate thread to execute the callback associated with the futures registered for those statements. The parameters will toggle various timing, and ordering changes. 
- Finally it will validate that the results were returned in the order they were submitted + Finally, it will validate that the results were returned in the order they were submitted :param reverse: Execute the callbacks in the opposite order that they were submitted :param slowdown: Cause intermittent queries to perform slowly """ @@ -232,7 +232,7 @@ def validate_result_ordering(self, results): self.assertTrue(success) current_time_added = list(result)[0] - #Windows clock granularity makes this equal most of the times + # Windows clock granularity makes this equal most of the time if "Windows" in platform.system(): self.assertLessEqual(last_time_added, current_time_added) else: diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index e1bd17a00c..824b34c956 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -202,7 +202,7 @@ def test_with_remotes(self): local_hosts = set(h for h in hosts if h.datacenter == "dc1") remote_hosts = set(h for h in hosts if h.datacenter != "dc1") - # allow all of the remote hosts to be used + # allow all the remote hosts to be used policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2) policy.populate(Mock(), hosts) qplan = list(policy.make_query_plan()) diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 907f62f2bb..03910bb08a 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -95,7 +95,7 @@ def _check_calls(self, io, expected): def test_continuous_paging(self): """ - Test to check continuous paging throws an Exception if it's not supported and the correct valuesa + Test to check continuous paging throws an Exception if it's not supported and the correct values are written to the buffer if the option is enabled. 
@since DSE 2.0b3 GRAPH 1.0b1 diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index 0d0f146c16..a494e64414 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -24,6 +24,7 @@ def to_bits(b): return '{:08b}'.format(b) + class SegmentCodecTest(unittest.TestCase): small_msg = b'b' * 50 @@ -38,12 +39,12 @@ def _header_to_bits(data): data = data[:5] bits = ''.join([to_bits(b) for b in reversed(data)]) # return the compressed payload length, the uncompressed payload length, - # the self contained flag and the padding as bits + # the self-contained flag and the padding as bits return bits[23:40] + bits[6:23] + bits[5:6] + bits[:5] else: # uncompressed data = data[:3] bits = ''.join([to_bits(b) for b in reversed(data)]) - # return the payload length, the self contained flag and + # return the payload length, the self-contained flag and # the padding as bits return bits[7:24] + bits[6:7] + bits[:6] @@ -88,7 +89,7 @@ def test_encode_uncompressed_header_not_self_contained_msg(self): self.assertEqual( self._header_to_bits(buffer.getvalue()), ("11111111111111111" - "0" # not self contained + "0" # not self-contained "000000")) @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') @@ -112,7 +113,7 @@ def test_encode_compressed_header_not_self_contained_msg(self): self._header_to_bits(buffer.getvalue()), ("{:017b}".format(compressed_length) + "11111111111111111" - "0" # not self contained + "0" # not self-contained "00000")) def test_decode_uncompressed_header(self): diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index aba11d4ced..8554d5e356 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -382,12 +382,12 @@ def test_round_trip_basic_types_without_fixed_serialized_size(self): # UTF8 text self._round_trip_test(["abc", "def", "ghi", "jkl"], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.UTF8Type, 4)") - # Time is something of a weird one. 
By rights it should be a fixed size type but C* code marks it as variable + # Time is something of a weird one. By rights, it should be a fixed size type but C* code marks it as variable # size. We're forced to follow the C* code base (since that's who'll be providing the data we're parsing) so # we match what they're doing. self._round_trip_test([datetime.time(1,1,1), datetime.time(2,2,2), datetime.time(3,3,3)], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.TimeType, 3)") - # Duration (containts varints) + # Duration (contains varints) self._round_trip_test([util.Duration(1,1,1), util.Duration(2,2,2), util.Duration(3,3,3)], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.DurationType, 3)") @@ -456,7 +456,7 @@ def test_round_trip_vector_of_vectors(self): "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.VectorType \ (org.apache.cassandra.db.marshal.AsciiType,2), 4)") - # parse_casstype_args() is tested above... we're explicitly concerned about cql_parapmeterized_type() output here + # parse_casstype_args() is tested above... we're explicitly concerned about cql_parameterized_type() output here def test_cql_parameterized_type(self): # Base vector functionality ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") @@ -914,7 +914,7 @@ def test_deserialize_date_range_day(self): 999, lambda original_value, i: original_value + i * 900 * 50 * 60 * 24) - @unittest.skip("This is currently failig, see PYTHON-912") + @unittest.skip("This is currently failing, see PYTHON-912") def test_deserialize_date_range_month(self): """ Test rounding from DateRange for months @@ -931,7 +931,7 @@ def get_upper_bound(seconds): but with the microseconds set to 999999, seconds to 59, minutes to 59, hours to 23 and days 28, 29, 30 or 31 depending on the month. 
The way to do this is to add one month and leave the date at YEAR-MONTH-01 00:00:00 000000. - Then substract one millisecond. + Then subtract one millisecond. """ dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone) dt = dt + datetime.timedelta(days=32) @@ -958,7 +958,7 @@ def get_upper_bound(seconds): but with the microseconds set to 999999, seconds to 59, minutes to 59, hours to 23 days 28, 29, 30 or 31 depending on the month and months to 12. The way to do this is to add one year and leave the date at YEAR-01-01 00:00:00 000000. - Then substract one millisecond. + Then subtract one millisecond. """ dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone) dt = dt + datetime.timedelta(days=370) @@ -980,14 +980,14 @@ def _deserialize_date_range(self, truncate_kwargs, precision, lower_value upper_value which are given as a value that represents seconds since the epoch. We want to make sure the lower_value is correctly rounded down and the upper value is correctly rounded up. In the case of rounding down we verify that the rounded down value - has the appropriate fields set to the minimum they could possible have. That is + has the appropriate fields set to the minimum they could possibly have. That is 1 for months, 1 for days, 0 for hours, 0 for minutes, 0 for seconds, 0 for microseconds. We use the generic function truncate_date which depends on truncate_kwargs for this In the case of rounding up we verify that the rounded up value has the appropriate fields set - to the maximum they could possible have. This is calculated by round_up_truncated_upper_value + to the maximum they could possibly have. This is calculated by round_up_truncated_upper_value which input is the truncated value from before. It is passed as an argument as the way - of calculating this is is different for every precision. + of calculating this is different for every precision. 
:param truncate_kwargs: determine what values to truncate in truncate_date :param precision: :class:`~util.DateRangePrecision` diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 5d6058b394..7afec29372 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -51,7 +51,7 @@ def test_limits(self): max_builtin = Date(datetime.date(9999, 12, 31)) self.assertEqual(Date(min_builtin.days_from_epoch), min_builtin) self.assertEqual(Date(max_builtin.days_from_epoch), max_builtin) - # just proving we can construct with on offset outside buildin range + # just proving we can construct with an offset outside builtin range self.assertEqual(Date(min_builtin.days_from_epoch - 1).days_from_epoch, min_builtin.days_from_epoch - 1) self.assertEqual(Date(max_builtin.days_from_epoch + 1).days_from_epoch, @@ -191,7 +191,7 @@ def test_equality(self): second = Duration(1000, 10000, 2345345) self.assertEqual(first, second) - first = Duration(12, 0 , 100) + first = Duration(12, 0, 100) second = Duration(nanoseconds=100, months=12) self.assertEqual(first, second) diff --git a/tests/util.py b/tests/util.py index 5c7ac2416f..c28a94b1c7 100644 --- a/tests/util.py +++ b/tests/util.py @@ -40,7 +40,7 @@ def wait_until_not_raised(condition, delay, max_attempts): doesn't raise an exception and the amount of attempts < maxAttempts. :param condition: a function :param delay: the delay in second - :param max_attempts: the maximum number of attemps. So the timeout + :param max_attempts: the maximum number of attempts. 
So the timeout of this function will be delay*max_attempts """ def wrapped_condition(): From d1aeffbf8beeaccf057a4b8702ab938b2aeb3aa5 Mon Sep 17 00:00:00 2001 From: Brad Schoening Date: Wed, 25 Sep 2024 00:27:33 -0400 Subject: [PATCH 2/3] added additional spelling corrections for execute_concurrent_validate_tuple and rolled back _create_policy() whitespace correction --- .../standard/column_encryption/test_policies.py | 2 +- tests/integration/standard/test_concurrent.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py index b5caeebc96..7adf1731f7 100644 --- a/tests/integration/standard/column_encryption/test_policies.py +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -36,7 +36,7 @@ def _recreate_keyspace(self, session): def _create_policy(self, key, iv = None): cl_policy = AES256ColumnEncryptionPolicy() - col_desc = ColDesc('foo', ' bar', 'encrypted') + col_desc = ColDesc('foo',' bar','encrypted') cl_policy.add_column(col_desc, key, "int") return (col_desc, cl_policy) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 0ab57f79ba..c076d9f553 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -117,22 +117,22 @@ def execute_concurrent_validate_dict(self, num_statements, results): def test_execute_concurrent(self): self.execute_concurrent_base(self.execute_concurrent_helper, \ - self.execute_concurrent_valiate_tuple) + self.execute_concurrent_validate_tuple) def test_execute_concurrent_with_args(self): self.execute_concurrent_base(self.execute_concurrent_args_helper, \ - self.execute_concurrent_valiate_tuple, \ + self.execute_concurrent_validate_tuple, \ zip_args=False) def test_execute_concurrent_with_execution_profile(self): def run_fn(*args, **kwargs): return 
self.execute_concurrent_helper(*args, execution_profile=EXEC_PROFILE_DICT, **kwargs) - self.execute_concurrent_base(run_fn, self.execute_concurrent_valiate_dict) + self.execute_concurrent_base(run_fn, self.execute_concurrent_validate_dict) def test_execute_concurrent_with_args_and_execution_profile(self): def run_fn(*args, **kwargs): return self.execute_concurrent_args_helper(*args, execution_profile=EXEC_PROFILE_DICT, **kwargs) - self.execute_concurrent_base(run_fn, self.execute_concurrent_valiate_dict, zip_args=False) + self.execute_concurrent_base(run_fn, self.execute_concurrent_validate_dict, zip_args=False) def test_execute_concurrent_with_args_generator(self): """ From 39e92b7f72751b11005bdde360906548d4fee859 Mon Sep 17 00:00:00 2001 From: Brad Schoening Date: Wed, 25 Sep 2024 00:32:25 -0400 Subject: [PATCH 3/3] fixed typo with space inside of string value --- tests/integration/standard/column_encryption/test_policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py index 7adf1731f7..325c19cf3a 100644 --- a/tests/integration/standard/column_encryption/test_policies.py +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -36,7 +36,7 @@ def _recreate_keyspace(self, session): def _create_policy(self, key, iv = None): cl_policy = AES256ColumnEncryptionPolicy() - col_desc = ColDesc('foo',' bar','encrypted') + col_desc = ColDesc('foo','bar','encrypted') cl_policy.add_column(col_desc, key, "int") return (col_desc, cl_policy)