feat: add Explicit bucket schemas API #528
Changes from all commits
b057ca2
9b8e182
6b91b07
9d952df
c963e76
cffd55a
509cccd
b102dc3
193a459
d2ab4d7
38fc35c
62ba5f7
bab51a4
9767ef0
e705ee9
@@ -0,0 +1,95 @@
"""
This example is related to `InfluxDB Cloud <https://docs.influxdata.com/influxdb/cloud/>`_ and is not available
on a local InfluxDB OSS instance.

How to manage explicit bucket schemas to enforce column names, tags, fields, and data types for your data.
"""
import datetime

from influxdb_client import InfluxDBClient, BucketSchemasService, PostBucketRequest, SchemaType, \
    MeasurementSchemaCreateRequest, MeasurementSchemaColumn, ColumnSemanticType, ColumnDataType, \
    MeasurementSchemaUpdateRequest

"""
Define credentials
"""
influx_cloud_url = 'https://us-west-2-1.aws.cloud2.influxdata.com'
influx_cloud_token = '...'
org_name = '...'

with InfluxDBClient(url=influx_cloud_url, token=influx_cloud_token, org=org_name, debug=False) as client:
    uniqueId = str(datetime.datetime.now())
    org_id = client.organizations_api().find_organizations(org=org_name)[0].id
    bucket_schemas_api = BucketSchemasService(api_client=client.api_client)

    """
    Create a bucket with the schema_type flag set to explicit
    """
    print("------- Create Bucket -------\n")
    created_bucket = client \
        .buckets_api() \
        .create_bucket(bucket=PostBucketRequest(name=f"my_schema_bucket_{uniqueId}",
                                                org_id=org_id,
                                                retention_rules=[],
                                                schema_type=SchemaType.EXPLICIT))
    print(created_bucket)

    """
    Set the schema for a measurement: Usage CPU

    [
        {"name": "time", "type": "timestamp"},
        {"name": "service", "type": "tag"},
        {"name": "host", "type": "tag"},
        {"name": "usage_user", "type": "field", "dataType": "float"},
        {"name": "usage_system", "type": "field", "dataType": "float"}
    ]
    """
    print("------- Create Schema -------\n")
    columns = [
        MeasurementSchemaColumn(name="time",
                                type=ColumnSemanticType.TIMESTAMP),
        MeasurementSchemaColumn(name="service",
                                type=ColumnSemanticType.TAG),
        MeasurementSchemaColumn(name="host",
                                type=ColumnSemanticType.TAG),
        MeasurementSchemaColumn(name="usage_user",
                                type=ColumnSemanticType.FIELD,
                                data_type=ColumnDataType.FLOAT),
        MeasurementSchemaColumn(name="usage_system",
                                type=ColumnSemanticType.FIELD,
                                data_type=ColumnDataType.FLOAT)
    ]
    create_request = MeasurementSchemaCreateRequest(name="usage_cpu", columns=columns)
    created_schema = bucket_schemas_api.create_measurement_schema(bucket_id=created_bucket.id,
                                                                  org_id=org_id,
                                                                  measurement_schema_create_request=create_request)
    print(created_schema)

    """
    List the schemas
    """
    print("\n------- List the Schemas -------\n")
    measurement_schemas = bucket_schemas_api.get_measurement_schemas(bucket_id=created_bucket.id).measurement_schemas
    print("\n".join([f"---\n ID: {ms.id}\n Name: {ms.name}\n Columns: {ms.columns}" for ms in measurement_schemas]))
    print("---")

    """
    Update a bucket schema
    """
    print("------- Update a bucket schema -------\n")
    columns.append(MeasurementSchemaColumn(name="usage_total",
                                           type=ColumnSemanticType.FIELD,
                                           data_type=ColumnDataType.FLOAT))
    update_request = MeasurementSchemaUpdateRequest(columns=columns)
    updated_schema = bucket_schemas_api.update_measurement_schema(bucket_id=created_bucket.id,
                                                                  measurement_id=created_schema.id,
                                                                  measurement_schema_update_request=update_request)
    print(updated_schema)

    """
    Delete the previously created bucket
    """
    print("------- Delete Bucket -------\n")
    client.buckets_api().delete_bucket(created_bucket)
    print(f"Successfully deleted bucket: {created_bucket.name}")
@@ -29,3 +29,24 @@ def print_warning(query: str):
  - https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
"""
        warnings.warn(message, MissingPivotFunction)


class CloudOnlyWarning(UserWarning):
    """User warning about availability only on the InfluxDB Cloud."""

    @staticmethod
    def print_warning(api_name: str, doc_url: str):
        """Print warning about availability only on the InfluxDB Cloud."""
        message = f"""The '{api_name}' is available only on the InfluxDB Cloud.

For more info see:
  - {doc_url}
  - https://docs.influxdata.com/influxdb/cloud/

You can disable this warning by:
    import warnings
    from influxdb_client.client.warnings import CloudOnlyWarning

    warnings.simplefilter("ignore", CloudOnlyWarning)
"""
        warnings.warn(message, CloudOnlyWarning)
Review discussion:

This means users who are using this with Cloud will suddenly get additional text for every usage, correct? That seems less than ideal: a user who is using this correctly should not have to ignore a warning.

You are right, I will add a check of the instance type before warning.

The warning implementation has been changed. The warnings are now displayed on individual actions, and only if the InfluxDB instance is not Cloud. For more info see:
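As a standalone illustration of the opt-out embedded in the warning text above, the following sketch triggers and then silences CloudOnlyWarning. The module path and the `print_warning` signature come from the diff; the API name and documentation URL passed in are placeholders taken from elsewhere on this page, and the rest is ordinary use of the standard `warnings` module, not code from this PR.

# Hedged sketch: capture the warning (e.g. in a test), then silence it globally.
import warnings
from influxdb_client.client.warnings import CloudOnlyWarning

with warnings.catch_warnings(record=True) as captured:
    warnings.simplefilter("always", CloudOnlyWarning)
    CloudOnlyWarning.print_warning(api_name="BucketSchemasService",
                                   doc_url="https://docs.influxdata.com/influxdb/cloud/")
    assert issubclass(captured[-1].category, CloudOnlyWarning)

# Opt out, as suggested by the warning message itself:
warnings.simplefilter("ignore", CloudOnlyWarning)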
@@ -0,0 +1,91 @@ | ||
# coding: utf-8 | ||
|
||
""" | ||
InfluxDB OSS API Service. | ||
|
||
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501 | ||
|
||
OpenAPI spec version: 2.0.0 | ||
Generated by: https://openapi-generator.tech | ||
""" | ||
|
||
|
||
import pprint | ||
import re # noqa: F401 | ||
|
||
|
||
class ColumnDataType(object): | ||
"""NOTE: This class is auto generated by OpenAPI Generator. | ||
|
||
Ref: https://openapi-generator.tech | ||
|
||
Do not edit the class manually. | ||
""" | ||
|
||
""" | ||
allowed enum values | ||
""" | ||
INTEGER = "integer" | ||
FLOAT = "float" | ||
BOOLEAN = "boolean" | ||
STRING = "string" | ||
UNSIGNED = "unsigned" | ||
|
||
""" | ||
Attributes: | ||
openapi_types (dict): The key is attribute name | ||
and the value is attribute type. | ||
attribute_map (dict): The key is attribute name | ||
and the value is json key in definition. | ||
""" | ||
openapi_types = { | ||
} | ||
|
||
attribute_map = { | ||
} | ||
|
||
def __init__(self): # noqa: E501,D401,D403 | ||
"""ColumnDataType - a model defined in OpenAPI.""" # noqa: E501 self.discriminator = None | ||
|
||
def to_dict(self): | ||
"""Return the model properties as a dict.""" | ||
result = {} | ||
|
||
for attr, _ in self.openapi_types.items(): | ||
value = getattr(self, attr) | ||
if isinstance(value, list): | ||
result[attr] = list(map( | ||
lambda x: x.to_dict() if hasattr(x, "to_dict") else x, | ||
value | ||
)) | ||
elif hasattr(value, "to_dict"): | ||
result[attr] = value.to_dict() | ||
elif isinstance(value, dict): | ||
result[attr] = dict(map( | ||
lambda item: (item[0], item[1].to_dict()) | ||
if hasattr(item[1], "to_dict") else item, | ||
value.items() | ||
)) | ||
else: | ||
result[attr] = value | ||
|
||
return result | ||
|
||
def to_str(self): | ||
"""Return the string representation of the model.""" | ||
return pprint.pformat(self.to_dict()) | ||
|
||
def __repr__(self): | ||
"""For `print` and `pprint`.""" | ||
return self.to_str() | ||
|
||
def __eq__(self, other): | ||
"""Return true if both objects are equal.""" | ||
if not isinstance(other, ColumnDataType): | ||
return False | ||
|
||
return self.__dict__ == other.__dict__ | ||
|
||
def __ne__(self, other): | ||
"""Return true if both objects are not equal.""" | ||
return not self == other |
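Because the generated class defines only string constants (and no instance attributes), the `dataType` values in the JSON schema shown in the first example map one-to-one onto these constants. A small, hypothetical illustration using names imported exactly as in the example script:

# Hedged illustration: ColumnDataType.FLOAT is literally the string "float",
# i.e. the value serialized as "dataType": "float" in the schema JSON.
from influxdb_client import ColumnDataType, ColumnSemanticType, MeasurementSchemaColumn

column = MeasurementSchemaColumn(name="usage_user",
                                 type=ColumnSemanticType.FIELD,
                                 data_type=ColumnDataType.FLOAT)
print(ColumnDataType.FLOAT)         # -> float
print(column.data_type == "float")  # -> True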