diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index b32173f99..01a927372 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -8038,6 +8038,42 @@ client.inference.get({ ... })
 ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type
 ** *`inference_id` (Optional, string)*: The inference Id
 
+[discrete]
+==== inference
+Perform inference on the service.
+
+This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+It returns a response with the results of the tasks.
+The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
+
+For details about using this API with a service, such as Amazon Bedrock, Anthropic, or Hugging Face, refer to the service-specific documentation.
+
+> info
+> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+{ref}/post-inference-api.html[Endpoint documentation]
+[source,ts]
+----
+client.inference.inference({ inference_id, input })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`inference_id` (string)*: The unique identifier for the inference endpoint.
+** *`input` (string | string[])*: The text on which you want to perform the inference task.
+It can be a single string or an array.
+
+> info
+> Inference endpoints for the `completion` task type currently only support a single string as input.
+** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The type of inference task that the model performs.
+** *`query` (Optional, string)*: The query input, which is required only for the `rerank` task.
+It is not required for other tasks.
+** *`task_settings` (Optional, User-defined value)*: Task settings for the individual inference request.
+These settings are specific to the task type you specified and override the task settings specified when initializing the service.
+** *`timeout` (Optional, string | -1 | 0)*: The amount of time to wait for the inference request to complete.
+
 [discrete]
 ==== put
 Create an inference endpoint.
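To make the documented arguments concrete, here is a minimal usage sketch. It assumes the client is already configured and that a `sparse_embedding` endpoint with the hypothetical ID `my-elser-endpoint` was created beforehand with the create inference API.

[source,ts]
----
// Hypothetical endpoint ID; create it first with client.inference.put().
const response = await client.inference.inference({
  task_type: 'sparse_embedding',
  inference_id: 'my-elser-endpoint',
  input: ['The quick brown fox jumps over the lazy dog']
})
// For a sparse_embedding endpoint, the results arrive in response.sparse_embedding.
console.log(response.sparse_embedding)
----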
diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts
index abf953180..abf9b67ae 100644
--- a/src/api/api/inference.ts
+++ b/src/api/api/inference.ts
@@ -209,6 +209,58 @@ export default class Inference {
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
 
+  /**
+    * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. For details about using this API with a service, such as Amazon Bedrock, Anthropic, or Hugging Face, refer to the service-specific documentation. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html | Elasticsearch API documentation}
+    */
+  async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceInferenceResponse>
+  async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceInferenceResponse, unknown>>
+  async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceInferenceResponse>
+  async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['task_type', 'inference_id']
+    const acceptedBody: string[] = ['query', 'input', 'task_settings']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.task_type != null && params.inference_id != null) {
+      method = 'POST'
+      path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}`
+    } else {
+      method = 'POST'
+      path = `/_inference/${encodeURIComponent(params.inference_id.toString())}`
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'inference.inference',
+      pathParts: {
+        task_type: params.task_type,
+        inference_id: params.inference_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
   /**
     * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/put-inference-api.html | Elasticsearch API documentation}
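As a reading aid for the transport logic above, this standalone sketch (not part of the patch; the `buildRequest` helper is hypothetical) shows how the method splits the flat params object: `task_type` and `inference_id` form the URL path, the accepted body keys (`query`, `input`, `task_settings`) become the request body, and remaining keys such as `timeout` fall through to the query string.

[source,ts]
----
// Hypothetical illustration of the routing above; not part of the client.
function buildRequest (params: {
  task_type?: string
  inference_id: string
  input: string | string[]
  query?: string
  task_settings?: unknown
  timeout?: string | number
}): { method: string, path: string, body: object, querystring: object } {
  const { task_type, inference_id, input, query, task_settings, ...rest } = params
  // With task_type the URL is /_inference/{task_type}/{inference_id};
  // without it, Elasticsearch resolves the task type from the endpoint itself.
  const path = task_type != null
    ? `/_inference/${encodeURIComponent(task_type)}/${encodeURIComponent(inference_id)}`
    : `/_inference/${encodeURIComponent(inference_id)}`
  return {
    method: 'POST',
    path,
    body: { input, query, task_settings }, // accepted body keys
    querystring: rest // e.g. timeout
  }
}

// buildRequest({ task_type: 'rerank', inference_id: 'my-endpoint', input: ['a doc'], query: 'terms' })
// → POST /_inference/rerank/my-endpoint with { input, query } in the body
----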
diff --git a/src/api/types.ts b/src/api/types.ts
index 9c73e94b2..352ac8685 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -13099,6 +13099,15 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi
   task_type: InferenceTaskType
 }
 
+export interface InferenceInferenceResult {
+  text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
+  text_embedding_bits?: InferenceTextEmbeddingByteResult[]
+  text_embedding?: InferenceTextEmbeddingResult[]
+  sparse_embedding?: InferenceSparseEmbeddingResult[]
+  completion?: InferenceCompletionResult[]
+  rerank?: InferenceRankedDocument[]
+}
+
 export interface InferenceJinaAIServiceSettings {
   api_key: string
   model_id?: string
@@ -13288,6 +13297,17 @@ export interface InferenceGetResponse {
   endpoints: InferenceInferenceEndpointInfo[]
 }
 
+export interface InferenceInferenceRequest extends RequestBase {
+  task_type?: InferenceTaskType
+  inference_id: Id
+  timeout?: Duration
+  query?: string
+  input: string | string[]
+  task_settings?: InferenceTaskSettings
+}
+
+export type InferenceInferenceResponse = InferenceInferenceResult
+
 export interface InferencePutRequest extends RequestBase {
   task_type?: InferenceTaskType
   inference_id: Id
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index e064a4644..41c4e129b 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -13341,6 +13341,15 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi
   task_type: InferenceTaskType
 }
 
+export interface InferenceInferenceResult {
+  text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
+  text_embedding_bits?: InferenceTextEmbeddingByteResult[]
+  text_embedding?: InferenceTextEmbeddingResult[]
+  sparse_embedding?: InferenceSparseEmbeddingResult[]
+  completion?: InferenceCompletionResult[]
+  rerank?: InferenceRankedDocument[]
+}
+
 export interface InferenceJinaAIServiceSettings {
   api_key: string
   model_id?: string
@@ -13534,6 +13543,20 @@ export interface InferenceGetResponse {
   endpoints: InferenceInferenceEndpointInfo[]
 }
 
+export interface InferenceInferenceRequest extends RequestBase {
+  task_type?: InferenceTaskType
+  inference_id: Id
+  timeout?: Duration
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    query?: string
+    input: string | string[]
+    task_settings?: InferenceTaskSettings
+  }
+}
+
+export type InferenceInferenceResponse = InferenceInferenceResult
+
 export interface InferencePutRequest extends RequestBase {
   task_type?: InferenceTaskType
   inference_id: Id
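The `InferenceInferenceResult` interface declared above carries one populated field per task type, so consumers typically narrow on the field that matches their endpoint. A short sketch follows; the endpoint ID is hypothetical, and the `index`/`relevance_score` fields on `InferenceRankedDocument` are assumed from the usual rerank response shape, so verify them against the generated types.

[source,ts]
----
// Hypothetical rerank example; assumes a rerank endpoint already exists.
const result = await client.inference.inference({
  task_type: 'rerank',
  inference_id: 'my-rerank-endpoint',
  query: 'what is semantic search?',
  input: [
    'Semantic search uses embeddings.',
    'Keyword search matches terms.'
  ]
})

// Exactly one result field is set, matching the task type of the endpoint.
if (result.rerank != null) {
  for (const doc of result.rerank) {
    console.log(doc.index, doc.relevance_score) // assumed field names
  }
}
----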