
Commit 73ef188

Auto-generated API code (#2715)
1 parent c988c44 commit 73ef188

File tree: 3 files changed, +18 −206 lines


docs/reference/api-reference.md

Lines changed: 0 additions & 37 deletions
@@ -7552,23 +7552,6 @@ client.inference.get({ ... })
 - **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type
 - **`inference_id` (Optional, string)**: The inference Id
 
-## client.inference.postEisChatCompletion [_inference.post_eis_chat_completion]
-Perform a chat completion task through the Elastic Inference Service (EIS).
-
-Perform a chat completion inference task with the `elastic` service.
-
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion)
-
-```ts
-client.inference.postEisChatCompletion({ eis_inference_id })
-```
-
-### Arguments [_arguments_inference.post_eis_chat_completion]
-
-#### Request (object) [_request_inference.post_eis_chat_completion]
-- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint.
-- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })**
-
 ## client.inference.put [_inference.put]
 Create an inference endpoint.
 When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
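
With the EIS-specific helper removed, the generic inference methods documented above remain the reference point. A minimal sketch of calling `client.inference.get` with the optional `task_type` and `inference_id` parameters listed above, assuming a configured `Client` instance (the node URL and endpoint ID are placeholders, not values from this commit):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node URL

// List all inference endpoints, then narrow to one task type and endpoint ID.
const all = await client.inference.get()
const one = await client.inference.get({
  task_type: 'text_embedding',          // any of the documented enum values
  inference_id: 'my-embedding-endpoint' // hypothetical endpoint ID
})
console.log(all.endpoints.length, one.endpoints[0]?.inference_id)
```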
@@ -7775,26 +7758,6 @@ These settings are specific to the `cohere` service.
 - **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task.
 These settings are specific to the task type you specified.
 
-## client.inference.putEis [_inference.put_eis]
-Create an Elastic Inference Service (EIS) inference endpoint.
-
-Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
-
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis)
-
-```ts
-client.inference.putEis({ task_type, eis_inference_id, service, service_settings })
-```
-
-### Arguments [_arguments_inference.put_eis]
-
-#### Request (object) [_request_inference.put_eis]
-- **`task_type` (Enum("chat_completion"))**: The type of the inference task that the model will perform.
-NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
-- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint.
-- **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `elastic`.
-- **`service_settings` ({ model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `elastic` service.
-
 ## client.inference.putElasticsearch [_inference.put_elasticsearch]
 Create an Elasticsearch inference endpoint.

src/api/api/inference.ts

Lines changed: 0 additions & 125 deletions
@@ -77,15 +77,6 @@ export default class Inference {
       body: [],
       query: []
     },
-    'inference.post_eis_chat_completion': {
-      path: [
-        'eis_inference_id'
-      ],
-      body: [
-        'chat_completion_request'
-      ],
-      query: []
-    },
     'inference.put': {
       path: [
         'task_type',
@@ -174,17 +165,6 @@ export default class Inference {
       ],
       query: []
     },
-    'inference.put_eis': {
-      path: [
-        'task_type',
-        'eis_inference_id'
-      ],
-      body: [
-        'service',
-        'service_settings'
-      ],
-      query: []
-    },
     'inference.put_elasticsearch': {
       path: [
         'task_type',
@@ -583,53 +563,6 @@ export default class Inference {
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
 
-  /**
-    * Perform a chat completion task through the Elastic Inference Service (EIS). Perform a chat completion inference task with the `elastic` service.
-    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion | Elasticsearch API documentation}
-    */
-  async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePostEisChatCompletionResponse>
-  async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePostEisChatCompletionResponse, unknown>>
-  async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise<T.InferencePostEisChatCompletionResponse>
-  async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['inference.post_eis_chat_completion']
-
-    const userQuery = params?.querystring
-    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
-
-    let body: any = params.body ?? undefined
-    for (const key in params) {
-      if (acceptedBody.includes(key)) {
-        // @ts-expect-error
-        body = params[key]
-      } else if (acceptedPath.includes(key)) {
-        continue
-      } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
-      }
-    }
-
-    const method = 'POST'
-    const path = `/_inference/chat_completion/${encodeURIComponent(params.eis_inference_id.toString())}/_stream`
-    const meta: TransportRequestMetadata = {
-      name: 'inference.post_eis_chat_completion',
-      pathParts: {
-        eis_inference_id: params.eis_inference_id
-      }
-    }
-    return await this.transport.request({ path, method, querystring, body, meta }, options)
-  }
-
   /**
    * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation}
@@ -1033,64 +966,6 @@ export default class Inference {
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
 
-  /**
-    * Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
-    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis | Elasticsearch API documentation}
-    */
-  async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutEisResponse>
-  async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutEisResponse, unknown>>
-  async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise<T.InferencePutEisResponse>
-  async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['inference.put_eis']
-
-    const userQuery = params?.querystring
-    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
-
-    let body: Record<string, any> | string | undefined
-    const userBody = params?.body
-    if (userBody != null) {
-      if (typeof userBody === 'string') {
-        body = userBody
-      } else {
-        body = { ...userBody }
-      }
-    }
-
-    for (const key in params) {
-      if (acceptedBody.includes(key)) {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
-        continue
-      } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
-      }
-    }
-
-    const method = 'PUT'
-    const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}`
-    const meta: TransportRequestMetadata = {
-      name: 'inference.put_eis',
-      pathParts: {
-        task_type: params.task_type,
-        eis_inference_id: params.eis_inference_id
-      }
-    }
-    return await this.transport.request({ path, method, querystring, body, meta }, options)
-  }
-
   /**
    * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the endpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation}
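
Both deleted methods were instances of the generator's standard request-assembly pattern visible above: an `acceptedParams` entry declares which request keys belong to the URL path, the body, and the query string, and the method builds the URL with `encodeURIComponent`. A simplified, standalone sketch of that split (names and types here are illustrative, not the client's internals):

```ts
// Illustrative reimplementation of the generated request-assembly pattern:
// split a flat params object into path, body, and query buckets.
interface AcceptedParams { path: string[], body: string[], query: string[] }

function assemble (accepted: AcceptedParams, params: Record<string, any>): {
  pathParts: Record<string, string>
  body: Record<string, any>
  querystring: Record<string, any>
} {
  const pathParts: Record<string, string> = {}
  const body: Record<string, any> = {}
  const querystring: Record<string, any> = {}
  for (const key in params) {
    if (accepted.path.includes(key)) {
      // Path parameters are URL-encoded into the request path.
      pathParts[key] = encodeURIComponent(String(params[key]))
    } else if (accepted.body.includes(key)) {
      body[key] = params[key]
    } else if (accepted.query.includes(key)) {
      querystring[key] = params[key]
    } else {
      body[key] = params[key] // unrecognized keys fall through to the body
    }
  }
  return { pathParts, body, querystring }
}

// The removed 'inference.put_eis' entry declared
// path: ['task_type', 'eis_inference_id'] and body: ['service', 'service_settings'],
// yielding PUT /_inference/{task_type}/{eis_inference_id}.
```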

src/api/types.ts

Lines changed: 18 additions & 44 deletions
@@ -15503,8 +15503,10 @@ export interface ClusterHealthHealthResponseBody {
   active_primary_shards: integer
   /** The total number of active primary and replica shards. */
   active_shards: integer
+  /** The ratio of active shards in the cluster expressed as a string formatted percentage. */
+  active_shards_percent?: string
   /** The ratio of active shards in the cluster expressed as a percentage. */
-  active_shards_percent_as_number: Percentage
+  active_shards_percent_as_number: double
   /** The name of the cluster. */
   cluster_name: Name
   /** The number of shards whose allocation has been delayed by the timeout settings. */
@@ -15566,7 +15568,7 @@ export interface ClusterHealthRequest extends RequestBase {
   /** Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. */
   wait_for_events?: WaitForEvents
   /** The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. */
-  wait_for_nodes?: string | integer
+  wait_for_nodes?: ClusterHealthWaitForNodes
   /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards. */
   wait_for_no_initializing_shards?: boolean
   /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards. */
@@ -15591,6 +15593,8 @@ export interface ClusterHealthShardHealthStats {
   unassigned_primary_shards: integer
 }
 
+export type ClusterHealthWaitForNodes = string | integer
+
 export interface ClusterInfoRequest extends RequestBase {
   /** Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. */
   target: ClusterInfoTargets
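
The new `ClusterHealthWaitForNodes` alias preserves the original `string | integer` union, so both calling styles keep compiling. A minimal sketch, reusing the `client` instance from the earlier example (the thresholds are placeholders):

```ts
// Both forms satisfy ClusterHealthWaitForNodes = string | integer.
const byCount = await client.cluster.health({ wait_for_nodes: 3 })
const byExpr = await client.cluster.health({ wait_for_nodes: '>=3' }) // or ge(3), le(3), gt(3), lt(3)

// active_shards_percent_as_number is now typed as a plain double; the
// string-formatted form moved to the new optional active_shards_percent field.
console.log(byExpr.active_shards_percent_as_number, byExpr.active_shards_percent)
```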
@@ -21652,18 +21656,6 @@ export type InferenceDenseByteVector = byte[]
 
 export type InferenceDenseVector = float[]
 
-export interface InferenceEisServiceSettings {
-  /** The name of the model to use for the inference task. */
-  model_id: string
-  /** This setting helps to minimize the number of rate limit errors returned.
-    * By default, the `elastic` service sets the number of requests allowed per minute to `240` in case of `chat_completion`. */
-  rate_limit?: InferenceRateLimitSetting
-}
-
-export type InferenceEisServiceType = 'elastic'
-
-export type InferenceEisTaskType = 'chat_completion'
-
 export interface InferenceElasticsearchServiceSettings {
   /** Adaptive allocations configuration details.
    * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets.
@@ -22154,18 +22146,6 @@ export interface InferenceGetResponse {
   endpoints: InferenceInferenceEndpointInfo[]
 }
 
-export interface InferencePostEisChatCompletionRequest extends RequestBase {
-  /** The unique identifier of the inference endpoint. */
-  eis_inference_id: Id
-  chat_completion_request?: InferenceRequestChatCompletion
-  /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { eis_inference_id?: never, chat_completion_request?: never }
-  /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { eis_inference_id?: never, chat_completion_request?: never }
-}
-
-export type InferencePostEisChatCompletionResponse = StreamResult
-
 export interface InferencePutRequest extends RequestBase {
   /** The task type */
   task_type?: InferenceTaskType
@@ -22315,24 +22295,6 @@ export interface InferencePutCohereRequest extends RequestBase {
 
 export type InferencePutCohereResponse = InferenceInferenceEndpointInfo
 
-export interface InferencePutEisRequest extends RequestBase {
-  /** The type of the inference task that the model will perform.
-    * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */
-  task_type: InferenceEisTaskType
-  /** The unique identifier of the inference endpoint. */
-  eis_inference_id: Id
-  /** The type of service supported for the specified task type. In this case, `elastic`. */
-  service: InferenceEisServiceType
-  /** Settings used to install the inference model. These settings are specific to the `elastic` service. */
-  service_settings: InferenceEisServiceSettings
-  /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, eis_inference_id?: never, service?: never, service_settings?: never }
-  /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, eis_inference_id?: never, service?: never, service_settings?: never }
-}
-
-export type InferencePutEisResponse = InferenceInferenceEndpointInfo
-
 export interface InferencePutElasticsearchRequest extends RequestBase {
   /** The type of the inference task that the model will perform. */
   task_type: InferenceElasticsearchTaskType
@@ -23044,6 +23006,13 @@ export interface IngestInferenceProcessor extends IngestProcessorBase {
   field_map?: Record<Field, any>
   /** Contains the inference type and its options. */
   inference_config?: IngestInferenceConfig
+  /** Input fields for inference and output (destination) fields for the inference results.
+    * This option is incompatible with the target_field and field_map options. */
+  input_output?: IngestInputConfig | IngestInputConfig[]
+  /** If true and any of the input fields defined in input_output are missing
+    * then those missing fields are quietly ignored, otherwise a missing field causes a failure.
+    * Only applies when using input_output configurations to explicitly list the input fields. */
+  ignore_missing?: boolean
 }
 
 export interface IngestIngest {
@@ -23052,6 +23021,11 @@ export interface IngestIngest {
   pipeline?: Name
 }
 
+export interface IngestInputConfig {
+  input_field: string
+  output_field: string
+}
+
 export interface IngestIpLocationProcessor extends IngestProcessorBase {
   /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */
   database_file?: string
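
The new `IngestInputConfig` type and the `input_output`/`ignore_missing` fields let an inference processor map input and output fields explicitly. A hedged sketch of registering such a pipeline with the `client` from the earlier example (the pipeline ID, model ID, and field names are hypothetical):

```ts
// Hypothetical ingest pipeline using the new input_output mapping.
await client.ingest.putPipeline({
  id: 'my-inference-pipeline', // placeholder pipeline ID
  processors: [
    {
      inference: {
        model_id: 'my-inference-endpoint', // placeholder model/endpoint ID
        // IngestInputConfig: explicit input -> output field mapping,
        // incompatible with target_field and field_map.
        input_output: [
          { input_field: 'body_text', output_field: 'text_embedding' }
        ],
        // With ignore_missing: true, absent input fields are quietly
        // skipped instead of failing the document.
        ignore_missing: true
      }
    }
  ]
})
```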
