From 55c05a03925a219a2c91f67b8c371605c556e139 Mon Sep 17 00:00:00 2001
From: Daniel La Rocque
Date: Wed, 23 Apr 2025 11:19:30 -0400
Subject: [PATCH 1/6] Hybrid inference code changes
---
packages/ai/src/api.test.ts | 15 +
packages/ai/src/api.ts | 29 +-
.../ai/src/backwards-compatbility.test.ts | 7 +-
packages/ai/src/methods/chat-session.test.ts | 19 +-
packages/ai/src/methods/chat-session.ts | 4 +
.../ai/src/methods/chrome-adapter.test.ts | 626 ++++++++++++++++++
packages/ai/src/methods/chrome-adapter.ts | 327 +++++++++
packages/ai/src/methods/count-tokens.test.ts | 44 +-
packages/ai/src/methods/count-tokens.ts | 17 +-
.../ai/src/methods/generate-content.test.ts | 66 +-
packages/ai/src/methods/generate-content.ts | 53 +-
.../ai/src/models/generative-model.test.ts | 191 ++++--
packages/ai/src/models/generative-model.ts | 16 +-
packages/ai/src/types/language-model.ts | 83 +++
packages/ai/src/types/requests.ts | 38 ++
15 files changed, 1430 insertions(+), 105 deletions(-)
create mode 100644 packages/ai/src/methods/chrome-adapter.test.ts
create mode 100644 packages/ai/src/methods/chrome-adapter.ts
create mode 100644 packages/ai/src/types/language-model.ts
diff --git a/packages/ai/src/api.test.ts b/packages/ai/src/api.test.ts
index 27237b4edd3..6ce353107ac 100644
--- a/packages/ai/src/api.test.ts
+++ b/packages/ai/src/api.test.ts
@@ -102,6 +102,21 @@ describe('Top level API', () => {
expect(genModel).to.be.an.instanceOf(GenerativeModel);
expect(genModel.model).to.equal('publishers/google/models/my-model');
});
+ it('getGenerativeModel with HybridParams sets a default model', () => {
+ const genModel = getGenerativeModel(fakeAI, {
+ mode: 'only_on_device'
+ });
+ expect(genModel.model).to.equal(
+ `publishers/google/models/${GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL}`
+ );
+ });
+ it('getGenerativeModel with HybridParams honors a model override', () => {
+ const genModel = getGenerativeModel(fakeAI, {
+ mode: 'prefer_on_device',
+ inCloudParams: { model: 'my-model' }
+ });
+ expect(genModel.model).to.equal('publishers/google/models/my-model');
+ });
it('getImagenModel throws if no model is provided', () => {
try {
getImagenModel(fakeAI, {} as ImagenModelParams);
diff --git a/packages/ai/src/api.ts b/packages/ai/src/api.ts
index d2229c067fc..4a27be8786f 100644
--- a/packages/ai/src/api.ts
+++ b/packages/ai/src/api.ts
@@ -23,6 +23,7 @@ import { AIService } from './service';
import { AI, AIOptions, VertexAI, VertexAIOptions } from './public-types';
import {
ImagenModelParams,
+ HybridParams,
ModelParams,
RequestOptions,
AIErrorCode
@@ -31,6 +32,8 @@ import { AIError } from './errors';
import { AIModel, GenerativeModel, ImagenModel } from './models';
import { encodeInstanceIdentifier } from './helpers';
import { GoogleAIBackend, VertexAIBackend } from './backend';
+import { ChromeAdapter } from './methods/chrome-adapter';
+import { LanguageModel } from './types/language-model';
export { ChatSession } from './methods/chat-session';
export * from './requests/schema-builder';
@@ -147,16 +150,36 @@ export function getAI(
*/
export function getGenerativeModel(
ai: AI,
- modelParams: ModelParams,
+ modelParams: ModelParams | HybridParams,
requestOptions?: RequestOptions
): GenerativeModel {
- if (!modelParams.model) {
+ // Uses the existence of HybridParams.mode to clarify the type of the modelParams input.
+ const hybridParams = modelParams as HybridParams;
+ let inCloudParams: ModelParams;
+ if (hybridParams.mode) {
+ inCloudParams = hybridParams.inCloudParams || {
+ model: GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL
+ };
+ } else {
+ inCloudParams = modelParams as ModelParams;
+ }
+
+ if (!inCloudParams.model) {
throw new AIError(
AIErrorCode.NO_MODEL,
`Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`
);
}
- return new GenerativeModel(ai, modelParams, requestOptions);
+ return new GenerativeModel(
+ ai,
+ inCloudParams,
+ new ChromeAdapter(
+ window.LanguageModel as LanguageModel,
+ hybridParams.mode,
+ hybridParams.onDeviceParams
+ ),
+ requestOptions
+ );
}
/**
diff --git a/packages/ai/src/backwards-compatbility.test.ts b/packages/ai/src/backwards-compatbility.test.ts
index 62463009b24..da0b613bf21 100644
--- a/packages/ai/src/backwards-compatbility.test.ts
+++ b/packages/ai/src/backwards-compatbility.test.ts
@@ -28,6 +28,7 @@ import {
} from './api';
import { AI, VertexAI, AIErrorCode } from './public-types';
import { VertexAIBackend } from './backend';
+import { ChromeAdapter } from './methods/chrome-adapter';
function assertAssignable<T, _U extends T>(): void {}
@@ -65,7 +66,11 @@ describe('backwards-compatible types', () => {
it('AIModel is backwards compatible with VertexAIModel', () => {
 assertAssignable<VertexAIModel, AIModel>();
- const model = new GenerativeModel(fakeAI, { model: 'model-name' });
+ const model = new GenerativeModel(
+ fakeAI,
+ { model: 'model-name' },
+ new ChromeAdapter()
+ );
expect(model).to.be.instanceOf(AIModel);
expect(model).to.be.instanceOf(VertexAIModel);
});
diff --git a/packages/ai/src/methods/chat-session.test.ts b/packages/ai/src/methods/chat-session.test.ts
index 0564aa84ed6..ed0b4d4877f 100644
--- a/packages/ai/src/methods/chat-session.test.ts
+++ b/packages/ai/src/methods/chat-session.test.ts
@@ -24,6 +24,7 @@ import { GenerateContentStreamResult } from '../types';
import { ChatSession } from './chat-session';
import { ApiSettings } from '../types/internal';
import { VertexAIBackend } from '../backend';
+import { ChromeAdapter } from './chrome-adapter';
use(sinonChai);
use(chaiAsPromised);
@@ -46,7 +47,11 @@ describe('ChatSession', () => {
generateContentMethods,
'generateContent'
).rejects('generateContent failed');
- const chatSession = new ChatSession(fakeApiSettings, 'a-model');
+ const chatSession = new ChatSession(
+ fakeApiSettings,
+ 'a-model',
+ new ChromeAdapter()
+ );
await expect(chatSession.sendMessage('hello')).to.be.rejected;
expect(generateContentStub).to.be.calledWith(
fakeApiSettings,
@@ -63,7 +68,11 @@ describe('ChatSession', () => {
generateContentMethods,
'generateContentStream'
).rejects('generateContentStream failed');
- const chatSession = new ChatSession(fakeApiSettings, 'a-model');
+ const chatSession = new ChatSession(
+ fakeApiSettings,
+ 'a-model',
+ new ChromeAdapter()
+ );
await expect(chatSession.sendMessageStream('hello')).to.be.rejected;
expect(generateContentStreamStub).to.be.calledWith(
fakeApiSettings,
@@ -82,7 +91,11 @@ describe('ChatSession', () => {
generateContentMethods,
'generateContentStream'
).resolves({} as unknown as GenerateContentStreamResult);
- const chatSession = new ChatSession(fakeApiSettings, 'a-model');
+ const chatSession = new ChatSession(
+ fakeApiSettings,
+ 'a-model',
+ new ChromeAdapter()
+ );
await chatSession.sendMessageStream('hello');
expect(generateContentStreamStub).to.be.calledWith(
fakeApiSettings,
diff --git a/packages/ai/src/methods/chat-session.ts b/packages/ai/src/methods/chat-session.ts
index 60794001e37..112ddf5857e 100644
--- a/packages/ai/src/methods/chat-session.ts
+++ b/packages/ai/src/methods/chat-session.ts
@@ -30,6 +30,7 @@ import { validateChatHistory } from './chat-session-helpers';
import { generateContent, generateContentStream } from './generate-content';
import { ApiSettings } from '../types/internal';
import { logger } from '../logger';
+import { ChromeAdapter } from './chrome-adapter';
/**
* Do not log a message for this error.
@@ -50,6 +51,7 @@ export class ChatSession {
constructor(
apiSettings: ApiSettings,
public model: string,
+ private chromeAdapter: ChromeAdapter,
public params?: StartChatParams,
public requestOptions?: RequestOptions
) {
@@ -95,6 +97,7 @@ export class ChatSession {
this._apiSettings,
this.model,
generateContentRequest,
+ this.chromeAdapter,
this.requestOptions
)
)
@@ -146,6 +149,7 @@ export class ChatSession {
this._apiSettings,
this.model,
generateContentRequest,
+ this.chromeAdapter,
this.requestOptions
);
diff --git a/packages/ai/src/methods/chrome-adapter.test.ts b/packages/ai/src/methods/chrome-adapter.test.ts
new file mode 100644
index 00000000000..fbe7ec1a5c5
--- /dev/null
+++ b/packages/ai/src/methods/chrome-adapter.test.ts
@@ -0,0 +1,626 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { AIError } from '../errors';
+import { expect, use } from 'chai';
+import sinonChai from 'sinon-chai';
+import chaiAsPromised from 'chai-as-promised';
+import { ChromeAdapter } from './chrome-adapter';
+import {
+ Availability,
+ LanguageModel,
+ LanguageModelCreateOptions,
+ LanguageModelMessageContent
+} from '../types/language-model';
+import { match, stub } from 'sinon';
+import { GenerateContentRequest, AIErrorCode } from '../types';
+import { Schema } from '../api';
+
+use(sinonChai);
+use(chaiAsPromised);
+
+/**
+ * Converts the ReadableStream from response.body to an array of strings.
+ */
+async function toStringArray(
+  stream: ReadableStream<Uint8Array>
+): Promise<string[]> {
+ const decoder = new TextDecoder();
+ const actual = [];
+ const reader = stream.getReader();
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
+ }
+ actual.push(decoder.decode(value));
+ }
+ return actual;
+}
+
+describe('ChromeAdapter', () => {
+ describe('constructor', () => {
+ it('sets image as expected input type by default', async () => {
+ const languageModelProvider = {
+ availability: () => Promise.resolve(Availability.available)
+ } as LanguageModel;
+ const availabilityStub = stub(
+ languageModelProvider,
+ 'availability'
+ ).resolves(Availability.available);
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device'
+ );
+ await adapter.isAvailable({
+ contents: [
+ {
+ role: 'user',
+ parts: [{ text: 'hi' }]
+ }
+ ]
+ });
+ expect(availabilityStub).to.have.been.calledWith({
+ expectedInputs: [{ type: 'image' }]
+ });
+ });
+ it('honors explicitly set expected inputs', async () => {
+ const languageModelProvider = {
+ availability: () => Promise.resolve(Availability.available)
+ } as LanguageModel;
+ const availabilityStub = stub(
+ languageModelProvider,
+ 'availability'
+ ).resolves(Availability.available);
+ const createOptions = {
+ // Explicitly sets expected inputs.
+ expectedInputs: [{ type: 'text' }]
+ } as LanguageModelCreateOptions;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ {
+ createOptions
+ }
+ );
+ await adapter.isAvailable({
+ contents: [
+ {
+ role: 'user',
+ parts: [{ text: 'hi' }]
+ }
+ ]
+ });
+ expect(availabilityStub).to.have.been.calledWith(createOptions);
+ });
+ });
+ describe('isAvailable', () => {
+ it('returns false if mode is only cloud', async () => {
+ const adapter = new ChromeAdapter(undefined, 'only_in_cloud');
+ expect(
+ await adapter.isAvailable({
+ contents: []
+ })
+ ).to.be.false;
+ });
+ it('returns false if LanguageModel API is undefined', async () => {
+ const adapter = new ChromeAdapter(undefined, 'prefer_on_device');
+ expect(
+ await adapter.isAvailable({
+ contents: []
+ })
+ ).to.be.false;
+ });
+ it('returns false if request contents empty', async () => {
+ const adapter = new ChromeAdapter(
+ {
+ availability: async () => Availability.available
+ } as LanguageModel,
+ 'prefer_on_device'
+ );
+ expect(
+ await adapter.isAvailable({
+ contents: []
+ })
+ ).to.be.false;
+ });
+ it('returns false if request content has non-user role', async () => {
+ const adapter = new ChromeAdapter(
+ {
+ availability: async () => Availability.available
+ } as LanguageModel,
+ 'prefer_on_device'
+ );
+ expect(
+ await adapter.isAvailable({
+ contents: [
+ {
+ role: 'model',
+ parts: []
+ }
+ ]
+ })
+ ).to.be.false;
+ });
+ it('returns true if request has image with supported mime type', async () => {
+ const adapter = new ChromeAdapter(
+ {
+ availability: async () => Availability.available
+ } as LanguageModel,
+ 'prefer_on_device'
+ );
+ for (const mimeType of ChromeAdapter.SUPPORTED_MIME_TYPES) {
+ expect(
+ await adapter.isAvailable({
+ contents: [
+ {
+ role: 'user',
+ parts: [
+ {
+ inlineData: {
+ mimeType,
+ data: ''
+ }
+ }
+ ]
+ }
+ ]
+ })
+ ).to.be.true;
+ }
+ });
+ it('returns true if model is readily available', async () => {
+ const languageModelProvider = {
+ availability: () => Promise.resolve(Availability.available)
+ } as LanguageModel;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device'
+ );
+ expect(
+ await adapter.isAvailable({
+ contents: [
+ {
+ role: 'user',
+ parts: [
+ { text: 'describe this image' },
+ { inlineData: { mimeType: 'image/jpeg', data: 'asd' } }
+ ]
+ }
+ ]
+ })
+ ).to.be.true;
+ });
+ it('returns false and triggers download when model is available after download', async () => {
+ const languageModelProvider = {
+ availability: () => Promise.resolve(Availability.downloadable),
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+ const createStub = stub(languageModelProvider, 'create').resolves(
+ {} as LanguageModel
+ );
+ const createOptions = {
+ expectedInputs: [{ type: 'image' }]
+ } as LanguageModelCreateOptions;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { createOptions }
+ );
+ expect(
+ await adapter.isAvailable({
+ contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
+ })
+ ).to.be.false;
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
+ });
+ it('avoids redundant downloads', async () => {
+ const languageModelProvider = {
+ availability: () => Promise.resolve(Availability.downloadable),
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+      const downloadPromise = new Promise<LanguageModel>(() => {
+ /* never resolves */
+ });
+ const createStub = stub(languageModelProvider, 'create').returns(
+ downloadPromise
+ );
+ const adapter = new ChromeAdapter(languageModelProvider);
+ await adapter.isAvailable({
+ contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
+ });
+ await adapter.isAvailable({
+ contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
+ });
+ expect(createStub).to.have.been.calledOnce;
+ });
+ it('clears state when download completes', async () => {
+ const languageModelProvider = {
+ availability: () => Promise.resolve(Availability.downloadable),
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+ let resolveDownload;
+      const downloadPromise = new Promise<LanguageModel>(resolveCallback => {
+ resolveDownload = resolveCallback;
+ });
+ const createStub = stub(languageModelProvider, 'create').returns(
+ downloadPromise
+ );
+ const adapter = new ChromeAdapter(languageModelProvider);
+ await adapter.isAvailable({
+ contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
+ });
+ resolveDownload!();
+ await adapter.isAvailable({
+ contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
+ });
+ expect(createStub).to.have.been.calledTwice;
+ });
+ it('returns false when model is never available', async () => {
+ const languageModelProvider = {
+ availability: () => Promise.resolve(Availability.unavailable),
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device'
+ );
+ expect(
+ await adapter.isAvailable({
+ contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
+ })
+ ).to.be.false;
+ });
+ });
+ describe('generateContent', () => {
+ it('throws if Chrome API is undefined', async () => {
+ const adapter = new ChromeAdapter(undefined, 'only_on_device');
+ await expect(
+ adapter.generateContent({
+ contents: []
+ })
+ )
+ .to.eventually.be.rejectedWith(
+ AIError,
+ 'Chrome AI requested for unsupported browser version.'
+ )
+ .and.have.property('code', AIErrorCode.REQUEST_ERROR);
+ });
+ it('generates content', async () => {
+ const languageModelProvider = {
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+ const languageModel = {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('')
+ } as LanguageModel;
+ const createStub = stub(languageModelProvider, 'create').resolves(
+ languageModel
+ );
+ const promptOutput = 'hi';
+ const promptStub = stub(languageModel, 'prompt').resolves(promptOutput);
+ const createOptions = {
+ systemPrompt: 'be yourself',
+ expectedInputs: [{ type: 'image' }]
+ } as LanguageModelCreateOptions;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { createOptions }
+ );
+ const request = {
+ contents: [{ role: 'user', parts: [{ text: 'anything' }] }]
+ } as GenerateContentRequest;
+ const response = await adapter.generateContent(request);
+ // Asserts initialization params are proxied.
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
+ // Asserts Vertex input type is mapped to Chrome type.
+ expect(promptStub).to.have.been.calledOnceWith([
+ {
+ type: 'text',
+ content: request.contents[0].parts[0].text
+ }
+ ]);
+ // Asserts expected output.
+ expect(await response.json()).to.deep.equal({
+ candidates: [
+ {
+ content: {
+ parts: [{ text: promptOutput }]
+ }
+ }
+ ]
+ });
+ });
+ it('generates content using image type input', async () => {
+ const languageModelProvider = {
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+ const languageModel = {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('')
+ } as LanguageModel;
+ const createStub = stub(languageModelProvider, 'create').resolves(
+ languageModel
+ );
+ const promptOutput = 'hi';
+ const promptStub = stub(languageModel, 'prompt').resolves(promptOutput);
+ const createOptions = {
+ systemPrompt: 'be yourself',
+ expectedInputs: [{ type: 'image' }]
+ } as LanguageModelCreateOptions;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { createOptions }
+ );
+ const request = {
+ contents: [
+ {
+ role: 'user',
+ parts: [
+ { text: 'anything' },
+ {
+ inlineData: {
+ data: sampleBase64EncodedImage,
+ mimeType: 'image/jpeg'
+ }
+ }
+ ]
+ }
+ ]
+ } as GenerateContentRequest;
+ const response = await adapter.generateContent(request);
+ // Asserts initialization params are proxied.
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
+ // Asserts Vertex input type is mapped to Chrome type.
+ expect(promptStub).to.have.been.calledOnceWith([
+ {
+ type: 'text',
+ content: request.contents[0].parts[0].text
+ },
+ {
+ type: 'image',
+ content: match.instanceOf(ImageBitmap)
+ }
+ ]);
+ // Asserts expected output.
+ expect(await response.json()).to.deep.equal({
+ candidates: [
+ {
+ content: {
+ parts: [{ text: promptOutput }]
+ }
+ }
+ ]
+ });
+ });
+ it('honors prompt options', async () => {
+ const languageModel = {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('')
+ } as LanguageModel;
+ const languageModelProvider = {
+ create: () => Promise.resolve(languageModel)
+ } as LanguageModel;
+ const promptOutput = '{}';
+ const promptStub = stub(languageModel, 'prompt').resolves(promptOutput);
+ const promptOptions = {
+ responseConstraint: Schema.object({
+ properties: {}
+ })
+ };
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { promptOptions }
+ );
+ const request = {
+ contents: [{ role: 'user', parts: [{ text: 'anything' }] }]
+ } as GenerateContentRequest;
+ await adapter.generateContent(request);
+ expect(promptStub).to.have.been.calledOnceWith(
+ [
+ {
+ type: 'text',
+ content: request.contents[0].parts[0].text
+ }
+ ],
+ promptOptions
+ );
+ });
+ });
+ describe('countTokens', () => {
+ it('counts tokens is not yet available', async () => {
+ const inputText = 'first';
+ // setting up stubs
+ const languageModelProvider = {
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+ const languageModel = {
+ measureInputUsage: _i => Promise.resolve(123)
+ } as LanguageModel;
+ const createStub = stub(languageModelProvider, 'create').resolves(
+ languageModel
+ );
+
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device'
+ );
+
+ const countTokenRequest = {
+ contents: [{ role: 'user', parts: [{ text: inputText }] }]
+ } as GenerateContentRequest;
+
+ try {
+ await adapter.countTokens(countTokenRequest);
+ } catch (e) {
+ // the call to countToken should be rejected with Error
+ expect((e as AIError).code).to.equal(AIErrorCode.REQUEST_ERROR);
+ expect((e as AIError).message).includes('not yet available');
+ }
+
+ // Asserts that no language model was initialized
+ expect(createStub).not.called;
+ });
+ });
+ describe('generateContentStream', () => {
+ it('generates content stream', async () => {
+ const languageModelProvider = {
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+ const languageModel = {
+ promptStreaming: _i => new ReadableStream()
+ } as LanguageModel;
+ const createStub = stub(languageModelProvider, 'create').resolves(
+ languageModel
+ );
+ const part = 'hi';
+ const promptStub = stub(languageModel, 'promptStreaming').returns(
+ new ReadableStream({
+ start(controller) {
+ controller.enqueue([part]);
+ controller.close();
+ }
+ })
+ );
+ const createOptions = {
+ expectedInputs: [{ type: 'image' }]
+ } as LanguageModelCreateOptions;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { createOptions }
+ );
+ const request = {
+ contents: [{ role: 'user', parts: [{ text: 'anything' }] }]
+ } as GenerateContentRequest;
+ const response = await adapter.generateContentStream(request);
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
+ expect(promptStub).to.have.been.calledOnceWith([
+ {
+ type: 'text',
+ content: request.contents[0].parts[0].text
+ }
+ ]);
+ const actual = await toStringArray(response.body!);
+ expect(actual).to.deep.equal([
+ `data: {"candidates":[{"content":{"role":"model","parts":[{"text":["${part}"]}]}}]}\n\n`
+ ]);
+ });
+ it('generates content stream with image input', async () => {
+ const languageModelProvider = {
+ create: () => Promise.resolve({})
+ } as LanguageModel;
+ const languageModel = {
+ promptStreaming: _i => new ReadableStream()
+ } as LanguageModel;
+ const createStub = stub(languageModelProvider, 'create').resolves(
+ languageModel
+ );
+ const part = 'hi';
+ const promptStub = stub(languageModel, 'promptStreaming').returns(
+ new ReadableStream({
+ start(controller) {
+ controller.enqueue([part]);
+ controller.close();
+ }
+ })
+ );
+ const createOptions = {
+ expectedInputs: [{ type: 'image' }]
+ } as LanguageModelCreateOptions;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { createOptions }
+ );
+ const request = {
+ contents: [
+ {
+ role: 'user',
+ parts: [
+ { text: 'anything' },
+ {
+ inlineData: {
+ data: sampleBase64EncodedImage,
+ mimeType: 'image/jpeg'
+ }
+ }
+ ]
+ }
+ ]
+ } as GenerateContentRequest;
+ const response = await adapter.generateContentStream(request);
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
+ expect(promptStub).to.have.been.calledOnceWith([
+ {
+ type: 'text',
+ content: request.contents[0].parts[0].text
+ },
+ {
+ type: 'image',
+ content: match.instanceOf(ImageBitmap)
+ }
+ ]);
+ const actual = await toStringArray(response.body!);
+ expect(actual).to.deep.equal([
+ `data: {"candidates":[{"content":{"role":"model","parts":[{"text":["${part}"]}]}}]}\n\n`
+ ]);
+ });
+ it('honors prompt options', async () => {
+ const languageModel = {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ promptStreaming: p => new ReadableStream()
+ } as LanguageModel;
+ const languageModelProvider = {
+ create: () => Promise.resolve(languageModel)
+ } as LanguageModel;
+ const promptStub = stub(languageModel, 'promptStreaming').returns(
+ new ReadableStream()
+ );
+ const promptOptions = {
+ responseConstraint: Schema.object({
+ properties: {}
+ })
+ };
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { promptOptions }
+ );
+ const request = {
+ contents: [{ role: 'user', parts: [{ text: 'anything' }] }]
+ } as GenerateContentRequest;
+ await adapter.generateContentStream(request);
+ expect(promptStub).to.have.been.calledOnceWith(
+ [
+ {
+ type: 'text',
+ content: request.contents[0].parts[0].text
+ }
+ ],
+ promptOptions
+ );
+ });
+ });
+});
+
+// TODO: Move to using image from test-utils.
+const sampleBase64EncodedImage =
+ '/9j/4QDeRXhpZgAASUkqAAgAAAAGABIBAwABAAAAAQAAABoBBQABAAAAVgAAABsBBQABAAAAXgAAACgBAwABAAAAAgAAABMCAwABAAAAAQAAAGmHBAABAAAAZgAAAAAAAABIAAAAAQAAAEgAAAABAAAABwAAkAcABAAAADAyMTABkQcABAAAAAECAwCGkgcAFgAAAMAAAAAAoAcABAAAADAxMDABoAMAAQAAAP//AAACoAQAAQAAAMgAAAADoAQAAQAAACwBAAAAAAAAQVNDSUkAAABQaWNzdW0gSUQ6IDM5MP/bAEMACAYGBwYFCAcHBwkJCAoMFA0MCwsMGRITDxQdGh8eHRocHCAkLicgIiwjHBwoNyksMDE0NDQfJzk9ODI8LjM0Mv/bAEMBCQkJDAsMGA0NGDIhHCEyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMv/CABEIASwAyAMBIgACEQEDEQH/xAAbAAABBQEBAAAAAAAAAAAAAAAAAQIDBAUGB//EABgBAQEBAQEAAAAAAAAAAAAAAAABAgME/9oADAMBAAIQAxAAAAHfA7ZFFgBQAAUUBQFBFABSUBQBQBZQUiqC7wAoigooQKACgCigKIoAosIKSigABWBdZAUAUAUQUUUAFIBQAWAFAUVFABSKoLqAKAKAKJVt4BvrFLAqKooArHgoQAoKiqDyKKoaiqhSqhCqgLFKHKdBiZmbodX5n2MbWHkdZS2kWhUBQIVUBwgUucv8Oad7nUzey3vPO5q4UrlOEWjzT0vhssDpea9Gy03BsqooKhCgCgCgHIcd0fN5DnuWHseY0Ureh+ZelLIqFq+f+gQJ5f6V5r6pE4i2ioDhCFVAVWrCiBxvJdlzFzVc56GjFoy4/a8d2q2TmpN3V1OF2MWp1/NrL0hzinRnO5Sdwc+L0Jz5HQLzyy9AYQYmDrZfXkyxVs5m4yVt3F0/M7l1YotpQnScdumqsFSb0yElm4zf5hjvV56bOtteViXq3ecRMbJgG+L4tzGqNyTDJNqMx5rfSHGRdpAcidPqLyFbuBeWrdmyONg7TJTBTrqZg3b6GGzbSzILYW8uSuF2hPG9l6uFdbPQRxzU8M2Lc62fpUJZNGC5TXAseNuVc2abO0pSKUsjdI+OdNoTzYc3fIANzF1LVTalK9KU72e1coa1TOqe3naA8inKGZ0QV5ZGzSywKWVrSAUROTjuno8lSLQbFq5kNrXsYAvQu5xmW9y18l0tjmrFu8ZM66C0nLabEsPGrT3xOlnIyXjkzC8tSxh2zRbWlsVNZtY6a9SKq1ZCd0rLHS17SPlgUtvpvatrVetlYJJZRpNcOOfmRaEN+s3Vctl0qCWs+PLljs19iWw+RdZEcU1VBFVUR6Kr5a6rplEzvnH5krF9Y33LnNFkqWIynAqZ3Zno3U03xO1mVY1HrGDxgOREpURkjiMXDUXOlsVpjRIJ0RXhix3KbUuzn6DLla6nK1RwFAKKK+GNsuigXReXW6mpRS2yWu6Zgr64Rq90abqclllYVJiJxIrAkI1JXRvJZoJJqUcY1yzmrvLnMLJX1QngWQrF9hTW01IZmwlt1F5bWtMTPruLc+fYltSVo83SKpnX/8QALRAAAQQCAQMDBAIBBQAAAAAAAQACAwQREgUQExQgITAVIjEyI0AkJTM0QXD/2gAIAQEAAQUC/wDH5Z2wu/scrHmBjg+P0hzXf0pGCSPjpnwT2bDa0LOWe6dEgCW06yYIWwRf0uVrbNdf79Grg2ZeUrxkMsco+CFleP4uRuyQvPITOjdyLzS4yy+Znqts7dtcbSZOgAB8V6Yw1nlziCE39obclR8EzZ4YrUM7vRy2PLVBpbT+Plv+Nn0RPZU42jJpc9HIwOhtqk8yU/j5dxMq+1YbrVaH2eUd/lsDpJG516zRMnjLSHRt0i+PlYss613Fli5OLBhOkwv1ShNG4PlDIqdzy
unjd/l/k5NwFWu0dw/gMLlXhfFyHLD+SpGZbTq8GIR3Y7NCGKvRrd9fT5F4VgLxboXZ5ALXkgs8mFZt3I5vIvLzLYXnzL6lhfVYwvq9dfVqy5IEpzTG93618me0P9S5T96GPNQDWm+f8HifZuVlZWVlZXJnPILKysoytXsuUe0y27LHxzS92Y/ca72xzmWOW1cMcklSSKIMkbIzzYNrs8b6dO1HXYLsBaHAqS0yOTKyvLb37crZOQm5Bkcw5GFykuyqZ81iJ0mru9JgJ8bmHoGly1ds+KSNMikkXZsAduVo+5HKBwmW5mFzy5z70r43WJXEyuKz9ywjs8wzSQPdkuwUAcch/u9InavA0s2maqnMYpC1rmtjAV1zvHpVi1hiiQghz4cC8SsnUqxX0+svDrix9KgzLxeHHiiG/SX4+lyI8ZMFLVmgFz9nY2UELioNnqSRz5KEa/6AUpe0Miyrf8Dadnug6uQwOjgSyKye+WyIbAEgLuRoSxORwVLU2tTyOfJj2QlkY3ua8dGN0MhO2LmkK3bkgn7Ykjk4+KQ14BXj67YNkydqtE/VahagLVqwFo3f0PHlwe4NOSWRrh7agqxUEyZmGF9+IKG/G53Q7YPfaou9amEzV+wAI9BkY0k5PWtHOwy1d3V4zC38oKaq6WQfiw+FrIIqxXutiPRlfatWLVi0YvZTU4bDnVV4zkKpRrvUbS1F3tG4hbhbhbhS2WxtmmM0nHt0gysrZZWfR7rPXKysrZbFblblbruFZ990Nc7BCYpsxXdXcWy2WyysrPXuxrvMK7sa1ytF212120RqMZGFhY6BAoFArZZWVlZWfTC1zi+0c15y9+q1WgT4F33KOUl+0a7jMtfl2PTn4K+S0xPDoIe2srKyrE2vSGPuP7LF22/EEFq5dtybDlMAYMrZbLdOsgJ7t3KJj4xn4crK2QkKDgfTnpMThmNU1jXMbNogc/DlZWVno1+FsAvz6H5x0/KhZ7/GR0wgPd7tjD1x0f8Auoxs/wCHCwtemOuUx4ag8FZHV8bcqu33+LKysArt5WpWq1WOmShIQnSZBTBs4eyz1z8AKygvZaharC1RYsdQcESLcL8rJWVn0Z6gdG9MrKys9CAUWLtuWvUEhCRbDp7rZbLKCCygvx6s9AUCisBYRCPTKyUPQ0ooOKBK/8QAIhEAAwACAgIBBQAAAAAAAAAAAAEREBIgIQIwURMiMUBQ/9oACAEDAQE/Af5k9E9yWITC9S7RCCIQhCEGuyEcPFMTYrCYsxTrDYmVQTKhPouPJ9GyNj6iG7mEIRkZGPxZGR8aTofiRkZGM6OjY/OahNFp38lZWX5NkXxPtxuzZlNjZm5ubmxc01RqakIak4XhSl9NJxf6cJxvNCxCelMp/8QAIhEAAwACAgIBBQAAAAAAAAAAAAERECASMAIhIjFAQVBx/9oACAECAQE/Af1d6LumXZs5MTLhn51pR5WlKUulz5JLFLrR/XH8ITEIQhCCHld3IbRUesez2Px0jI8PERxIz5HyPZxRxWkIQmvI5FLil6Z137C9NJ2XFL0MhD//xAA2EAABAwEFBQcDBAEFAAAAAAABAAIRIQMQEjFBEyAiMlEEMDNSYXGRQIGhIzRCklAUQ1Nwcv/aAAgBAQAGPwL/AKfYHfyMfUttf+M1TXNyIpvHCQY+icw5OEI9ktdKBbR3sAmjZDZkxnW6TQI2HZK+a00CDG/Ri3Zm3mjonWNtGMZOTJgCdTCIaS8+ixOOCyCDLMU7sWVnQxJKaHEyMy2kqWyLSYxJwtHS5u/atiOK5z7USGmIQAHdktMONAsTnEn1WQKnojgjCdE21FAUW2b5I3aHStzZ1r3jP/d5uDbV1XyWgKzrAy3Xn+L+IXWTj5e8s2aRN2SOhVm1woXLDo1oQazmOSGLOK7hY9shYdckxvQDvGWvQxuMeBiIOSbNjs36kpjvKZXihSHhOfnhE0TuDDHrdaECGMdLu9w6khYncrBiKlBozJhWT
HiHAqyd6Qms+VJsmfCwhh9k97C8EDqn/quZHlVO2Wi4e2OVO2KnamrxbIr/AGimi0OA9GL9qFXsZVeyPVezWirY2qq20H2Wbv6qy+E5hzFEFZgecKwI1Vh91bOGmV1B6K1Vr9t9vsN3mCqAm7N7SOjdE0NqQZTrTrc1ztCrJ4PC3VWDcQnF+FbvLhzfhYmmicMfKuF04skQ+eI6LFtBms0xhNXH4v2MVWIHhELCDiGvoqHWE6rWwadUHTJb5dQuE16ojaEjOt0OEX0ErDBk6IF7YnqjgYTGcLw3wpwOj2WqqFTNE4qnOViJWCaR0VXnKKKr/wAKTfJMlTEjVsolZXNoAIzRuBmEHWwaGnJzRRbTZ8PnCLZaGn0WS5KrCLM1WK0xD0OS8Jhn0RH+nZ/VeC1eC1eEFyflYHWsTkAuZ/yoZaf2Xij7hTtW/YLnb+Vzs+VLsvRybaEV6SjhENu2kNwN8yfbFoMcrf4p1o9pwikTQIl1nXQkXVXCGhYiYJ8rl+4tGTlAR5nR/IthQVS4j4WztHEnQlgVLX5YtFUwvFHyqWjflcy2r3WZZ5SjifiAyXpdha8hvRCGzwprA0kzWEABT3XCQPcKpCwsIy6IY/xRTjeD7ysAM+u5ov07LaHoVithx9JyvoB8LIfCyU7Ie+60sPG3MXHEeEZIVr7qoaUDQP6obR0x0CptPhBhDhN9Ci9xDoya0IutHusmt/iFBIXDakey8QlZ31c0fdTuY2wAeqxC0OI5yoxk+l+MWpb6XfrAV0WOyAprcOAn23ch8LLcxPxfK4XfKzCqVkhxqhquMrNZrNTzegWM0U6uP00rJThF2ar3WfdSPo5mAFDcuqwu3JYYN3EQAuZRKw4e+e3QhYYWI825hGt0aLJZd5kslxKBu5IuN2hnvc+4gIzdzQVhNfX6CqpuZX0VR39d83D6ckG7F/kafT0/xf8A/8QAKhABAAIBAwMDBAIDAQAAAAAAAQARITFBURBhcSCBkTChscHR8EBQ4fH/2gAIAQEAAT8h/wAiv8iof60/24fSvm0naH+R2aUdppQR8PVerRTWafXUA+lrvlRRsJt2f+xcK5o6rMHN0LZb9Fagaq0EyEPYezzAGwavL67l+jb1sex1ucH2lNKQvo1+4DXUq1qO8JQuOPmZPNWNPbllNUa93l+m+Nx3niXqZkfLEtIvwwS75Bt1qXL9H43mjIKjs5hxLIxhtWEwAKAMH07uBuNpYwtVXCGs7xLQcmZjdZmpBJoLnaFJ1hXpOcFSE2YaxxFP5/qcz+iXToFmTpK7yt+RC1GWVyrPaHXZjILVX8kNe0A+l+w+psg/PfTViLG0CD8QCO8wRgYDiC7aYcs8evd6Brtt3jBCFweZUJVb7fUI7W74YEcS8LFVhJzjk4dy8SodQh3BdmyEXRzd7TFspRGYByYeUzF14jPPEuXLly5cuX1voJWze2sQ9Q9zg+amaprCQ2IEoCSuY63Ir4MUahd+BmIVIZuUJECnsXWXLxBDX26+XmU6Xz/7B6iXK05n8hGGqPmbfyP/ACbwnQ2SxsPmU6p4Z+gVlGn8XL6L7f8AJtJ7Q/KUi17sMo5YxypaCW4JWPpGGnmOw2v8iFmYsfKLYjkdZeDFDDg0nxh+YLPL+3rAovb+8vPUvzA65saxNfuiJo4RLXF13F2lmFXuvaKkPabIc4ZYEFrumMtNnH9E5U7Xd/MEFXvNB7FuMe0c02mB3mVhstCBhU0/pNAtCaNTXRMJW6svWpfUs6vbSB84N+NZSDuiCsttdle72mPNFBy4gHLLvAbbzAzStbf3M1+rqfeaZZioic9GqZcBKxw6mYehtWyxgJ6A0l8UrYI2w+TpmbVfCc8e01A7G4Am8NmW9XzxHqqqOF68w02AWwwaR0UXXYymRduZhOHzFc3L8ydyHa660DiXiJbc7qbQ68TJeQN5lUp3IxjxlldJXAGhvzGQDjQla/mO1nlbX8SpaWtplxI3wfuMXhYM1gea6
UwzwhqIoFb6IX3dfboerh4s/c7Ku7jYbcZBKfAP4hEIvg/xCqWcYJrnusF0L2ilrPtY/UeCdwsCgzQq1kzPaNZXE8vB0QuFCtP2R/SzWKmP5lZq66aINj8zdH3JY2L3b/EUWNVZT7SgKpYEv6iCaNkipsd5QBFfMK7/ADLhKuriEWio7PmWrwcAzdF4xALHlbKs4Z1wsK+kLuRnGtlWvBMmobbEsBvLa4Ra2bGWPmIdgfeWyhbQxMealG6ViFVJbmACj/e8MOBdG1M5KoWzlPfQP2TdqXYgVMbhBCOIfJjqCjWwEDunsDxEaxiLGc+YGofiC6/tph0fEbq08FzOOphG5asjVVFSkYRPapngwWxcu0vBdTFabfWF2AxjqRcMdpCHIuhjHRaq1shjR+YLyRaBfeDFw3B95hI3XGcc98n5iGQXeCM9ykB5sGtyXMwjvSacC9j0UgA0epLcxoY1vwIuGsVEyJgECgfuUxBo3SqX0bqmOle5Fwz9XSSp7y5TclPW+DjyysaQ2D7yoIZQUVASNWtGaMDyJZG1bMueKBkF4emONKdQe8fmlpZKmGwDaCjdRVzyl+r5RZctlwODPeW5l5eWnej0a07kyste7Cuz4iOp+IbRXiF0fvmcLfaBgGB59RCuYRi1grWpmq3zACxuMsW4ipmHSFCF5eEAxPoFO6HfPOX6g+h0Hr241UgcciUSu9EJR2iYsUkpMCjTWLHiCiA7Cd0TDl5ljaUzMJfQMGEBfQvMZ3mqnuQnZf4ej09wdMswMrA4BbDfiY6VK6VAgQ6e2d5Ei4qWqn5s+itCbuWLqhlWkq2LKEXLOty5cvqlICFMPQZcHouVl00QXXQwuRGdtTZDAmnruX12bcwwxnnJGlohhFSuj0Ybtvo6KU/mKNxw06XL6X6UuLMxjxEbIUS+eOldNT7zpWodT1r8S0So9Fsy1mBrWLawbfpjeawPRVbNOteu6hB2RJpKbpkjKiWOgWj0pKSXuUpKCg6bJfRcuX1GX0CxLzOdyKnhMtou0sa9L5JmoXcg2sE0PQOcoy+lstCp7dIO81QWXhJAJh0Zhme2lG0EaxxLeickGmHRljeW3gYGMiJWUqDT0rLS24nU3GkrAgLhBQ5orOopHhhHWKMs/9oADAMBAAIAAwAAABASIMVBgAVIggAJsGy6fNBiyj4Y5ptsnyTbFtvCz9pNNPGuqMCNo42YQIEExL6CRYMEGT8YCBzUGdVEHKQHraFgCRaW/wDNpnycuGNdceiyLtY4mcgOiOu29EEGuHlAnRrvBwEb0uqOJE43dRwqzkz2egbGwwUOslkwzPIcsSwSNhRUkWEw1v62L+JMcNPr2AmjywACL2YgqfCuq0/Cz+/jqnaGEcefx1OE4WV4cia8oyMQ8U8lMsIgsWO//8QAHREAAwACAwEBAAAAAAAAAAAAAAERECEgMVFBMP/aAAgBAwEBPxBc1+a/BIhCcITMI8QhCYQhCEJkvMQmYQhMwSNeZGhNUhCEIQb2JLs6VO48HoK5+AEVawVlRxOosomXwd8GnZFXhBRoo6jcWhEUOTSFpEsbUKcC6hquh+Q9qiTHo2Gy+i7hlYQVKEyMkG6xMadEsQVNWsKSdaxKa3svsSIaTUmSLsaJEyxoR7dxN2w294KG1dcCJhIQvQkXwVG3IpKLNtFFEf038E3ME6JsbQ4LKEhtzEIQgmkJBlpkEt46D4xkZcREF0PMJiix8T5k1yH+A//EAB4RAAMBAQADAQEBAAAAAAAAAAABERAhIDFBMFFh/9oACAECAQE/EPwf5PaPLlKXwo8u0pSlHxtGUpcdGmMo/RWlC6rOhZS5zhwLrp0UmC+CpFGXTp0aFzo0Khvgvd8QpR+8Uo8UY3hhO7WUKvQfs9qhB/Q1cMLofRRZwoyLzYIjmNwtyoqx5BNoX9YkbbejnwfUEgxiqXWPwCf4cfBQoKFzOCBKesbMOHCLwvBFnCFFE4bIRBUylKUqIyEEGxKimUpcj
wmijeLKUuVFHlekUospdpk/Fii0nkmn/8QAJhABAAICAgICAgIDAQAAAAAAAQARITFBURBhcYGRobHBINHw4f/aAAgBAQABPxDweDX+J4P8jfk14NeVQJUNf4G/J4NeKleKh4JQyvDDwHipXivFQJUJUrxUrxUDuVK8ceArxUJUqVA8HioeK8VAzKglSoVUqVDLKhiV4rzUCoFwxKlSpXgPBAuVK8VKrwF+K8VApm5UCV4rxmVCVA81KlngPAY8V4qV1L8DfCB7N8RCCVTnDfgMeK8G5UJXgPJhh5NeefBszFrbCQytzUeUao/D74+vBr/AgAyf4TDfk8BC0HvMPJrzz5Du/sDX4afqAmGh09Z6tZ8y6HhnL0DxVZuAzNHW4FtX6iIo7J/LlggsaQei6lY9npH/AFNo2ptfvweTUuoeUhnWfias6ur9zmvJvwbOtJ6ixUpjK35UfuXT0sbc6a5cGnnUL5mcCXrzLchY3eC3HuH3Uh0/D9mofTOTtN9iw35PBr/Ac8U7vqA+qD5uBejEvV1kHSBKE5R22G1rFxXpUFJYPmYeA58heEtci8c45jURYWjAr6YsPtTBr6p1QtXvZiUhnAA9EqG/BL8GvF+HPAhZtt/Ep6IEFjWWXZEyZxhjcAsIVY6kJuM7G4jJYFaxpL6xBJXdgs7L3DZCXPuskrndJk1KfdVNat1CRLa/LF/QQxLhuX4PA/4VRxeHLBSZcWf99S27qvcugnIGo2dXu2sS82b2g/GU/MunLN0XKR9RXnZipcJeTeMnCR4FO+1/In8VEYLeinvEoIwVXoGXnxcJcGpfi/Fy21LB7I/QfuXRjHXqK8gK5zKKcge5qpOkLtH81MXGMwG1V9/qBRMNPJuMY1SJ6Zg5lwzDEepTJTCOyvUSXhBnJM/khigpQ1Qv9+L8DDEuGZcuXLmJy595j8JEMc8nuC1NlOYZQwYgoYo0vrHxDJYqMeAChgzKA1gouBzr1iKCjyip+TcPydMB03LYrV5B7uOogpwsP/EaDsTkPzzK6RwxgYYzbLC2ZleUPuA7/crA3mse/AtMIMvwuKgIR/JSndEl3GvmUJdIWrx7blVdY7bq36i1x4YU2iJHJpkW20V/ZNdWx0Fv1REywUgayt8QlCxGmUPVal73duXYUnWY+VQ5Vkvp1Ag0hWzxDsCsXKtreYa0/wDbifph/wDkpH0qKek5slT+CIaofwlXT1a/9MP+GH5h/wB0PqaXb0oftGVjP1D/ALmeGP0e9zIIYbq2kjuNCnKUn9MAvw3aQZgIXxSv8XKN2Iv0f+yWSW7IOyCu8DX+CATBIHSMWMyI3ofUAs5L8mJc6D+IMN6h7ePz/cKYvEpSSoVxhPc7rmPMHW38zcW1eWqOWAiW1MVH4jixHSNPq63CEMEwbVAtddYleJbjRl+6qUt1UOMD8x6hdbNH3OdTEKNn3uYnWIotw22VL6i1l282Y3BCipGSWhRzahznsOD76iAbC4lVV25rqG3MRWFkeviCur66Mct/MICcbEf7V7ghVYEpzTpqFMewB7H7lg2lxHBUByqDApdpbLOHlsg7m7CgEPbvqc3VboZs7UcmYEolD8gcGV/UE4ubQVrDspUiXl23DrBwRa6lX2IrB2HTqLvOkKi3pemJetOKgvvC7GOIgruagHj22wp4akoviWsDVT8BmYYyWD9LnBBXAfoYpCBtFdrgibPAo/mGxbGKaEFBQIhVs1BrbVCoYrPUGI40OBqpS3BgF9lwUjdg5be4fSpbgAbN6lmQ2Jw5hzC5q1qIuyH3/uYsKtqcFEDqLQa8BadkDjGVt7gxY52EBmfsodOLYW6TiLZmtcnpllt3zKfRULQeUNkDIQVQ9Ff5lSnC/dWRunxDrAWE/T/CKLUlTl81iG04NeTdNFhBjiqVjdUX+Suos14DB3m7/UOlfVaPshiMBuGIXw1mWaer/wCkSLT+T/2Jf936ilV+I/7iREraYdFtsuA2+RGbJMKx8lJYI
dJ/YV/UCVpV0n+iYILiy/qU5FqApirNIF6v1dxZbfwGYPzAryVXA85iHAPqGrsbZbeqMsKUJysHNv7I/FtkKAdFZwOIWOYw1Zsbz+IgC2um/lhhRL7yfqGKZ7xXaBmJzVNxbsY+KgZZbSfOFX3AboByDpRcx0HPYk/gIWAGjp9wJXC+oGmdIVbhE/uPyjmUfUb9WRDCBz+3CRAtrtSX6iStHACJ00uQJG30oN/zKAObBH5ghoDQbNAZh0hYGwesRpxTYNn3M8XUvGTdAbhRDqWQ5RfxLD8hS2NZ0IWX0ypT1Yqgdo3KBm0HyWMsIkDDQv7QutMrDgjS9trKAWqfiVhQ0OEdVHLE4pVKutai4IfbcRaHwVMBT9kIKi7Mv43KuOoPkbgk66BXXANRgEnuq/qUdpdmQ/1HgPoCBsd/B+poNfRSMQzT7Vxof3CgoFBxqV1DBEmURG919Ra5zFyNa+O4EC9qA4O+YLAIWyXNPMVlScBr5qcc8llH2wMABLUvYO/cGGRtbVwVnqYQBQ1/lg49ExPtDEHJvqC8nyxGE4ZV9wS4xFo6tbFUaFKj1/b+ojAGFMH1RhzbxQv7shIe6Av4JyvmEsVZAvISkembc1pl36c0Hmqz+5VygUUjd0R6OEhZTwJxHTZzQpPUpWRUKrftCMsCANFcymG0C8uqmp7kBXsgC3pZW4zFwW+kJkYmEfZbK8MpBpD8za0H5LYpgE5HmLL4S6a/E4AHRiLberLAAIU3doNi6JaY16Kl3gMYQQpHqXCTGK7iiHAEfctwAMl1ACDZGZIjAHhP9gmxYd0uZuDgbf8AyJllcAPVzMwCAqjBDDZgm385nymeL8C93FMbMMoyZIXZLu/zBTUZr2mXdxLcTNsaNvzO1Ms51/cA1T5ifvUIfUIUCO6GYMBDWH8SyIsutf4gQfGEPKHVDNpOYIr0gO7gJRge4B5I+k+5R4RBU1OiEBXdSdBaaYgwASymJ0xOmNu0DxLy8HMxgR5IdcC4IhiA9koep6SYdwzbCrCJ8qWgo3cHRiW6i1t8uplil/Gm+EDlhl7+IQriMAIlZgIkN1wwlhiFNqmbEbag5Z+WVoNtRWRiYR/HxADMInphBTljsbtmU1Z/gbzMPSuJWSeADDBlpK9R844ZlatMdyuLdW9S1tSrb3KFEVL9Eq0s0bgUsaYAOAPipUv1LmagX4Lwxu4kjlTQJqPVKbt6jpQ8BuZKUtrtcE6f3BHMwzcvFNF7iaBOiwmzwsOjqWBytSlBIVYSImoGtQTiAMqnDiEA6geoV4hhglzidqIWLEpFPq4I5H7lBiHJntZbuDhMI21AlSVV7uN2K5gwnXtqV7OxsqN3aLINwxATklvqX8RQiHuNdXFDzHOdDEsiibDDMuKdysqyYxKoqwgiWhZDUs7auJaGZbGLNcNRmwMZ4mIAqoKcwvLy3uWlstiyyDpAe40mHDcNKMM4mrBo9Rql+0o0V4q6xLhQY9w1j6eBRspuziNNtwcwblPH35CF9ZnqSnZHWZbiUjAm7j7cIfkQo4s4nLrTcUFojCAm0WJlBumAvA0YCENztcMQS5Y+BCDbCzczZgiXYl6wgbC/MM1MTBZNUS1kgJOBItSqTRheZaluO2c2/Ex/A6gOYM4Z8LlvH4wctYPgKMrrNz0kaSFfBcQMbTjNkVebSsAZEYVpqUXFUIMTOEVEzSZaSS9QXSoEwwdZSWPNSnWYcxGiy1hd7QEtxE6VC8oBhFOZbOXuCXgQz1JRZhEsa8GAimGoqB4BcGhixA8DEQc3Fc1LW7gsweg3Lo024ah5Q0wDmHMZ3IicQl3RmGShHATpwWJEjhZUcytCWLOYRDCktgtnuAFhmYO5vRP/2Q==';
diff --git a/packages/ai/src/methods/chrome-adapter.ts b/packages/ai/src/methods/chrome-adapter.ts
new file mode 100644
index 00000000000..aa3709048a2
--- /dev/null
+++ b/packages/ai/src/methods/chrome-adapter.ts
@@ -0,0 +1,327 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { AIError } from '../errors';
+import { logger } from '../logger';
+import {
+ CountTokensRequest,
+ GenerateContentRequest,
+ InferenceMode,
+ Part,
+ AIErrorCode,
+ OnDeviceParams
+} from '../types';
+import {
+ Availability,
+ LanguageModel,
+ LanguageModelMessageContent
+} from '../types/language-model';
+
+/**
+ * Defines an inference "backend" that uses Chrome's on-device model,
+ * and encapsulates logic for detecting when on-device is possible.
+ */
+export class ChromeAdapter {
+  // Visible for testing
+  static SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];
+  // True once a model download has been requested, so it is only requested once.
+  private isDownloading = false;
+  // Pending download; resolves when Chrome finishes fetching the model.
+  private downloadPromise: Promise<void> | undefined;
+  // Previous session, retained so Chrome keeps the model loaded in memory.
+  private oldSession: LanguageModel | undefined;
+  constructor(
+    private languageModelProvider?: LanguageModel,
+    private mode?: InferenceMode,
+    private onDeviceParams: OnDeviceParams = {
+      createOptions: {
+        // Defaults to support image inputs for convenience.
+        expectedInputs: [{ type: 'image' }]
+      }
+    }
+  ) {}
+
+  /**
+   * Checks if a given request can be made on-device.
+   *
+   * Encapsulates a few concerns:
+   * - the mode
+   * - API existence
+   * - prompt formatting
+   * - model availability, including triggering download if necessary
+   *
+   * Pros: callers needn't be concerned with details of on-device availability.
+   * Cons: this method spans a few concerns and splits request validation from usage.
+   * If instance variables weren't already part of the API, we could consider a better
+   * separation of concerns.
+   */
+  async isAvailable(request: GenerateContentRequest): Promise<boolean> {
+    if (this.mode === 'only_in_cloud') {
+      logger.debug(
+        `On-device inference unavailable because mode is "only_in_cloud".`
+      );
+      return false;
+    }
+
+    // Triggers out-of-band download so model will eventually become available.
+    const availability = await this.downloadIfAvailable();
+
+    if (this.mode === 'only_on_device') {
+      return true;
+    }
+
+    // Applies prefer_on_device logic.
+    if (availability !== Availability.available) {
+      logger.debug(
+        `On-device inference unavailable because availability is "${availability}".`
+      );
+      return false;
+    }
+    if (!ChromeAdapter.isOnDeviceRequest(request)) {
+      logger.debug(
+        `On-device inference unavailable because request is incompatible.`
+      );
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
+   * Generates content on device.
+   *
+   * This is comparable to {@link GenerativeModel.generateContent} for generating content in
+   * Cloud.
+   * @param request a standard Vertex {@link GenerateContentRequest}
+   * @returns {@link Response}, so we can reuse common response formatting.
+   */
+  async generateContent(request: GenerateContentRequest): Promise<Response> {
+    const session = await this.createSession();
+    // TODO: support multiple content objects when Chrome supports
+    // sequence<LanguageModelMessage>
+    const contents = await Promise.all(
+      request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent)
+    );
+    const text = await session.prompt(
+      contents,
+      this.onDeviceParams.promptOptions
+    );
+    return ChromeAdapter.toResponse(text);
+  }
+
+  /**
+   * Generates content stream on device.
+   *
+   * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
+   * Cloud.
+   * @param request a standard Vertex {@link GenerateContentRequest}
+   * @returns {@link Response}, so we can reuse common response formatting.
+   */
+  async generateContentStream(
+    request: GenerateContentRequest
+  ): Promise<Response> {
+    const session = await this.createSession();
+    // TODO: support multiple content objects when Chrome supports
+    // sequence<LanguageModelMessage>
+    const contents = await Promise.all(
+      request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent)
+    );
+    const stream = await session.promptStreaming(
+      contents,
+      this.onDeviceParams.promptOptions
+    );
+    return ChromeAdapter.toStreamResponse(stream);
+  }
+
+  /**
+   * Counting tokens is not yet supported by Chrome's on-device model.
+   * @throws an {@link AIError} unconditionally.
+   */
+  async countTokens(_request: CountTokensRequest): Promise<Response> {
+    throw new AIError(
+      AIErrorCode.REQUEST_ERROR,
+      'Count Tokens is not yet available for on-device model.'
+    );
+  }
+
+  /**
+   * Asserts inference for the given request can be performed by an on-device model.
+   */
+  private static isOnDeviceRequest(request: GenerateContentRequest): boolean {
+    // Returns false if the prompt is empty.
+    if (request.contents.length === 0) {
+      logger.debug('Empty prompt rejected for on-device inference.');
+      return false;
+    }
+
+    for (const content of request.contents) {
+      // Returns false if the request contains multiple roles, eg a chat history.
+      // TODO: remove this guard once LanguageModelMessage is supported.
+      if (content.role !== 'user') {
+        logger.debug(
+          `Non-user role "${content.role}" rejected for on-device inference.`
+        );
+        return false;
+      }
+
+      // Returns false if request contains an image with an unsupported mime type.
+      for (const part of content.parts) {
+        if (
+          part.inlineData &&
+          !ChromeAdapter.SUPPORTED_MIME_TYPES.includes(part.inlineData.mimeType)
+        ) {
+          logger.debug(
+            `Unsupported mime type "${part.inlineData.mimeType}" rejected for on-device inference.`
+          );
+          return false;
+        }
+      }
+    }
+
+    return true;
+  }
+
+  /**
+   * Encapsulates logic to get availability and download a model if one is downloadable.
+   *
+   * @returns the model's availability, or undefined when no language model
+   * provider was supplied to the constructor.
+   */
+  private async downloadIfAvailable(): Promise<Availability | undefined> {
+    const availability = await this.languageModelProvider?.availability(
+      this.onDeviceParams.createOptions
+    );
+
+    if (availability === Availability.downloadable) {
+      this.download();
+    }
+
+    return availability;
+  }
+
+  /**
+   * Triggers out-of-band download of an on-device model.
+   *
+   * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
+   * LanguageModel.create.
+   *
+   * Since Chrome manages the download, the SDK can only avoid redundant download requests by
+   * tracking if a download has previously been requested.
+   */
+  private download(): void {
+    if (this.isDownloading) {
+      return;
+    }
+    this.isDownloading = true;
+    // NOTE(review): a rejected create() leaves isDownloading true and the
+    // rejection unhandled — confirm this is intended.
+    this.downloadPromise = this.languageModelProvider
+      ?.create(this.onDeviceParams.createOptions)
+      .then(() => {
+        this.isDownloading = false;
+      });
+  }
+
+  /**
+   * Converts a Vertex Part object to a Chrome LanguageModelMessageContent object.
+   */
+  private static async toLanguageModelMessageContent(
+    part: Part
+  ): Promise<LanguageModelMessageContent> {
+    if (part.text) {
+      return {
+        type: 'text',
+        content: part.text
+      };
+    } else if (part.inlineData) {
+      // Round-trips the base64 payload through fetch to obtain a Blob,
+      // then decodes it into an ImageBitmap Chrome can consume.
+      const formattedImageContent = await fetch(
+        `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`
+      );
+      const imageBlob = await formattedImageContent.blob();
+      const imageBitmap = await createImageBitmap(imageBlob);
+      return {
+        type: 'image',
+        content: imageBitmap
+      };
+    }
+    // Assumes contents have been verified to contain only a single TextPart.
+    // TODO: support other input types
+    throw new Error('Not yet implemented');
+  }
+
+  /**
+   * Abstracts Chrome session creation.
+   *
+   * Chrome uses a multi-turn session for all inference. Vertex uses single-turn for all
+   * inference. To map the Vertex API to Chrome's API, the SDK creates a new session for all
+   * inference.
+   *
+   * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
+   * new session is created before an old session is destroyed.
+   */
+  private async createSession(): Promise<LanguageModel> {
+    if (!this.languageModelProvider) {
+      throw new AIError(
+        AIErrorCode.REQUEST_ERROR,
+        'Chrome AI requested for unsupported browser version.'
+      );
+    }
+    const newSession = await this.languageModelProvider.create(
+      this.onDeviceParams.createOptions
+    );
+    if (this.oldSession) {
+      this.oldSession.destroy();
+    }
+    // Holds session reference, so model isn't unloaded from memory.
+    this.oldSession = newSession;
+    return newSession;
+  }
+
+  /**
+   * Formats string returned by Chrome as a {@link Response} returned by Vertex.
+   */
+  private static toResponse(text: string): Response {
+    return {
+      json: async () => ({
+        candidates: [
+          {
+            content: {
+              parts: [{ text }]
+            }
+          }
+        ]
+      })
+    } as Response;
+  }
+
+  /**
+   * Formats string stream returned by Chrome as SSE returned by Vertex.
+   */
+  private static toStreamResponse(stream: ReadableStream<string>): Response {
+    const encoder = new TextEncoder();
+    return {
+      body: stream.pipeThrough(
+        new TransformStream({
+          transform(chunk, controller) {
+            const json = JSON.stringify({
+              candidates: [
+                {
+                  content: {
+                    role: 'model',
+                    parts: [{ text: chunk }]
+                  }
+                }
+              ]
+            });
+            controller.enqueue(encoder.encode(`data: ${json}\n\n`));
+          }
+        })
+      )
+    } as Response;
+  }
+}
diff --git a/packages/ai/src/methods/count-tokens.test.ts b/packages/ai/src/methods/count-tokens.test.ts
index 7e04ddb3561..78c51d3f5b7 100644
--- a/packages/ai/src/methods/count-tokens.test.ts
+++ b/packages/ai/src/methods/count-tokens.test.ts
@@ -27,6 +27,7 @@ import { ApiSettings } from '../types/internal';
import { Task } from '../requests/request';
import { mapCountTokensRequest } from '../googleai-mappers';
import { GoogleAIBackend, VertexAIBackend } from '../backend';
+import { ChromeAdapter } from './chrome-adapter';
use(sinonChai);
use(chaiAsPromised);
@@ -66,7 +67,8 @@ describe('countTokens()', () => {
const result = await countTokens(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.totalTokens).to.equal(6);
expect(result.totalBillableCharacters).to.equal(16);
@@ -92,7 +94,8 @@ describe('countTokens()', () => {
const result = await countTokens(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.totalTokens).to.equal(1837);
expect(result.totalBillableCharacters).to.equal(117);
@@ -120,7 +123,8 @@ describe('countTokens()', () => {
const result = await countTokens(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.totalTokens).to.equal(258);
expect(result).to.not.have.property('totalBillableCharacters');
@@ -146,7 +150,12 @@ describe('countTokens()', () => {
json: mockResponse.json
} as Response);
await expect(
- countTokens(fakeApiSettings, 'model', fakeRequestParams)
+ countTokens(
+ fakeApiSettings,
+ 'model',
+ fakeRequestParams,
+ new ChromeAdapter()
+ )
).to.be.rejectedWith(/404.*not found/);
expect(mockFetch).to.be.called;
});
@@ -164,7 +173,12 @@ describe('countTokens()', () => {
it('maps request to GoogleAI format', async () => {
makeRequestStub.resolves({ ok: true, json: () => {} } as Response); // Unused
- await countTokens(fakeGoogleAIApiSettings, 'model', fakeRequestParams);
+ await countTokens(
+ fakeGoogleAIApiSettings,
+ 'model',
+ fakeRequestParams,
+ new ChromeAdapter()
+ );
expect(makeRequestStub).to.be.calledWith(
'model',
@@ -176,4 +190,24 @@ describe('countTokens()', () => {
);
});
});
+ it('on-device', async () => {
+ const chromeAdapter = new ChromeAdapter();
+ const isAvailableStub = stub(chromeAdapter, 'isAvailable').resolves(true);
+ const mockResponse = getMockResponse(
+ 'vertexAI',
+ 'unary-success-total-tokens.json'
+ );
+ const countTokensStub = stub(chromeAdapter, 'countTokens').resolves(
+ mockResponse as Response
+ );
+ const result = await countTokens(
+ fakeApiSettings,
+ 'model',
+ fakeRequestParams,
+ chromeAdapter
+ );
+ expect(result.totalTokens).eq(6);
+ expect(isAvailableStub).to.be.called;
+ expect(countTokensStub).to.be.calledWith(fakeRequestParams);
+ });
});
diff --git a/packages/ai/src/methods/count-tokens.ts b/packages/ai/src/methods/count-tokens.ts
index b1e60e3a182..81fb3ad061d 100644
--- a/packages/ai/src/methods/count-tokens.ts
+++ b/packages/ai/src/methods/count-tokens.ts
@@ -24,8 +24,9 @@ import { Task, makeRequest } from '../requests/request';
import { ApiSettings } from '../types/internal';
import * as GoogleAIMapper from '../googleai-mappers';
import { BackendType } from '../public-types';
+import { ChromeAdapter } from './chrome-adapter';
-export async function countTokens(
+export async function countTokensOnCloud(
apiSettings: ApiSettings,
model: string,
params: CountTokensRequest,
@@ -48,3 +49,17 @@ export async function countTokens(
);
return response.json();
}
+
+export async function countTokens(
+ apiSettings: ApiSettings,
+ model: string,
+ params: CountTokensRequest,
+ chromeAdapter: ChromeAdapter,
+ requestOptions?: RequestOptions
+): Promise<CountTokensResponse> {
+ if (await chromeAdapter.isAvailable(params)) {
+ return (await chromeAdapter.countTokens(params)).json();
+ }
+
+ return countTokensOnCloud(apiSettings, model, params, requestOptions);
+}
diff --git a/packages/ai/src/methods/generate-content.test.ts b/packages/ai/src/methods/generate-content.test.ts
index 13250fd83dd..16a48f473ad 100644
--- a/packages/ai/src/methods/generate-content.test.ts
+++ b/packages/ai/src/methods/generate-content.test.ts
@@ -34,6 +34,7 @@ import { Task } from '../requests/request';
import { AIError } from '../api';
import { mapGenerateContentRequest } from '../googleai-mappers';
import { GoogleAIBackend, VertexAIBackend } from '../backend';
+import { ChromeAdapter } from './chrome-adapter';
use(sinonChai);
use(chaiAsPromised);
@@ -96,7 +97,8 @@ describe('generateContent()', () => {
const result = await generateContent(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.response.text()).to.include('Mountain View, California');
expect(makeRequestStub).to.be.calledWith(
@@ -119,7 +121,8 @@ describe('generateContent()', () => {
const result = await generateContent(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.response.text()).to.include('Use Freshly Ground Coffee');
expect(result.response.text()).to.include('30 minutes of brewing');
@@ -142,7 +145,8 @@ describe('generateContent()', () => {
const result = await generateContent(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.response.usageMetadata?.totalTokenCount).to.equal(1913);
expect(result.response.usageMetadata?.candidatesTokenCount).to.equal(76);
@@ -177,7 +181,8 @@ describe('generateContent()', () => {
const result = await generateContent(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.response.text()).to.include(
'Some information cited from an external source'
@@ -204,7 +209,8 @@ describe('generateContent()', () => {
const result = await generateContent(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.response.text).to.throw('SAFETY');
expect(makeRequestStub).to.be.calledWith(
@@ -226,7 +232,8 @@ describe('generateContent()', () => {
const result = await generateContent(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.response.text).to.throw('SAFETY');
expect(makeRequestStub).to.be.calledWith(
@@ -248,7 +255,8 @@ describe('generateContent()', () => {
const result = await generateContent(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.response.text()).to.equal('');
expect(makeRequestStub).to.be.calledWith(
@@ -270,7 +278,8 @@ describe('generateContent()', () => {
const result = await generateContent(
fakeApiSettings,
'model',
- fakeRequestParams
+ fakeRequestParams,
+ new ChromeAdapter()
);
expect(result.response.text()).to.include('Some text');
expect(makeRequestStub).to.be.calledWith(
@@ -292,7 +301,12 @@ describe('generateContent()', () => {
json: mockResponse.json
} as Response);
await expect(
- generateContent(fakeApiSettings, 'model', fakeRequestParams)
+ generateContent(
+ fakeApiSettings,
+ 'model',
+ fakeRequestParams,
+ new ChromeAdapter()
+ )
).to.be.rejectedWith(/400.*invalid argument/);
expect(mockFetch).to.be.called;
});
@@ -307,7 +321,12 @@ describe('generateContent()', () => {
json: mockResponse.json
} as Response);
await expect(
- generateContent(fakeApiSettings, 'model', fakeRequestParams)
+ generateContent(
+ fakeApiSettings,
+ 'model',
+ fakeRequestParams,
+ new ChromeAdapter()
+ )
).to.be.rejectedWith(
/firebasevertexai\.googleapis[\s\S]*my-project[\s\S]*api-not-enabled/
);
@@ -347,7 +366,8 @@ describe('generateContent()', () => {
generateContent(
fakeGoogleAIApiSettings,
'model',
- requestParamsWithMethod
+ requestParamsWithMethod,
+ new ChromeAdapter()
)
).to.be.rejectedWith(AIError, AIErrorCode.UNSUPPORTED);
expect(makeRequestStub).to.not.be.called;
@@ -362,7 +382,8 @@ describe('generateContent()', () => {
await generateContent(
fakeGoogleAIApiSettings,
'model',
- fakeGoogleAIRequestParams
+ fakeGoogleAIRequestParams,
+ new ChromeAdapter()
);
expect(makeRequestStub).to.be.calledWith(
@@ -375,4 +396,25 @@ describe('generateContent()', () => {
);
});
});
+ // TODO: define a similar test for generateContentStream
+ it('on-device', async () => {
+ const chromeAdapter = new ChromeAdapter();
+ const isAvailableStub = stub(chromeAdapter, 'isAvailable').resolves(true);
+ const mockResponse = getMockResponse(
+ 'vertexAI',
+ 'unary-success-basic-reply-short.json'
+ );
+ const generateContentStub = stub(chromeAdapter, 'generateContent').resolves(
+ mockResponse as Response
+ );
+ const result = await generateContent(
+ fakeApiSettings,
+ 'model',
+ fakeRequestParams,
+ chromeAdapter
+ );
+ expect(result.response.text()).to.include('Mountain View, California');
+ expect(isAvailableStub).to.be.called;
+ expect(generateContentStub).to.be.calledWith(fakeRequestParams);
+ });
});
diff --git a/packages/ai/src/methods/generate-content.ts b/packages/ai/src/methods/generate-content.ts
index 5f7902f5954..ff99b306855 100644
--- a/packages/ai/src/methods/generate-content.ts
+++ b/packages/ai/src/methods/generate-content.ts
@@ -28,17 +28,18 @@ import { processStream } from '../requests/stream-reader';
import { ApiSettings } from '../types/internal';
import * as GoogleAIMapper from '../googleai-mappers';
import { BackendType } from '../public-types';
+import { ChromeAdapter } from './chrome-adapter';
-export async function generateContentStream(
+async function generateContentStreamOnCloud(
apiSettings: ApiSettings,
model: string,
params: GenerateContentRequest,
requestOptions?: RequestOptions
-): Promise<GenerateContentStreamResult> {
+): Promise<Response> {
if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
params = GoogleAIMapper.mapGenerateContentRequest(params);
}
- const response = await makeRequest(
+ return makeRequest(
model,
Task.STREAM_GENERATE_CONTENT,
apiSettings,
@@ -46,19 +47,39 @@ export async function generateContentStream(
JSON.stringify(params),
requestOptions
);
+}
+
+export async function generateContentStream(
+ apiSettings: ApiSettings,
+ model: string,
+ params: GenerateContentRequest,
+ chromeAdapter: ChromeAdapter,
+ requestOptions?: RequestOptions
+): Promise<GenerateContentStreamResult> {
+ let response;
+ if (await chromeAdapter.isAvailable(params)) {
+ response = await chromeAdapter.generateContentStream(params);
+ } else {
+ response = await generateContentStreamOnCloud(
+ apiSettings,
+ model,
+ params,
+ requestOptions
+ );
+ }
return processStream(response, apiSettings); // TODO: Map streaming responses
}
-export async function generateContent(
+async function generateContentOnCloud(
apiSettings: ApiSettings,
model: string,
params: GenerateContentRequest,
requestOptions?: RequestOptions
-): Promise<GenerateContentResult> {
+): Promise<Response> {
if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
params = GoogleAIMapper.mapGenerateContentRequest(params);
}
- const response = await makeRequest(
+ return makeRequest(
model,
Task.GENERATE_CONTENT,
apiSettings,
@@ -66,6 +87,26 @@ export async function generateContent(
JSON.stringify(params),
requestOptions
);
+}
+
+export async function generateContent(
+ apiSettings: ApiSettings,
+ model: string,
+ params: GenerateContentRequest,
+ chromeAdapter: ChromeAdapter,
+ requestOptions?: RequestOptions
+): Promise<GenerateContentResult> {
+ let response;
+ if (await chromeAdapter.isAvailable(params)) {
+ response = await chromeAdapter.generateContent(params);
+ } else {
+ response = await generateContentOnCloud(
+ apiSettings,
+ model,
+ params,
+ requestOptions
+ );
+ }
const generateContentResponse = await processGenerateContentResponse(
response,
apiSettings
diff --git a/packages/ai/src/models/generative-model.test.ts b/packages/ai/src/models/generative-model.test.ts
index d055b82b1be..e3d8f7fe011 100644
--- a/packages/ai/src/models/generative-model.test.ts
+++ b/packages/ai/src/models/generative-model.test.ts
@@ -22,6 +22,7 @@ import { match, restore, stub } from 'sinon';
import { getMockResponse } from '../../test-utils/mock-response';
import sinonChai from 'sinon-chai';
import { VertexAIBackend } from '../backend';
+import { ChromeAdapter } from '../methods/chrome-adapter';
use(sinonChai);
@@ -41,21 +42,27 @@ const fakeAI: AI = {
describe('GenerativeModel', () => {
it('passes params through to generateContent', async () => {
- const genModel = new GenerativeModel(fakeAI, {
- model: 'my-model',
- tools: [
- {
- functionDeclarations: [
- {
- name: 'myfunc',
- description: 'mydesc'
- }
- ]
- }
- ],
- toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } },
- systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] }
- });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ {
+ model: 'my-model',
+ tools: [
+ {
+ functionDeclarations: [
+ {
+ name: 'myfunc',
+ description: 'mydesc'
+ }
+ ]
+ }
+ ],
+ toolConfig: {
+ functionCallingConfig: { mode: FunctionCallingMode.NONE }
+ },
+ systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] }
+ },
+ new ChromeAdapter()
+ );
expect(genModel.tools?.length).to.equal(1);
expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal(
FunctionCallingMode.NONE
@@ -86,10 +93,14 @@ describe('GenerativeModel', () => {
restore();
});
it('passes text-only systemInstruction through to generateContent', async () => {
- const genModel = new GenerativeModel(fakeAI, {
- model: 'my-model',
- systemInstruction: 'be friendly'
- });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ {
+ model: 'my-model',
+ systemInstruction: 'be friendly'
+ },
+ new ChromeAdapter()
+ );
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
const mockResponse = getMockResponse(
'vertexAI',
@@ -112,21 +123,27 @@ describe('GenerativeModel', () => {
restore();
});
it('generateContent overrides model values', async () => {
- const genModel = new GenerativeModel(fakeAI, {
- model: 'my-model',
- tools: [
- {
- functionDeclarations: [
- {
- name: 'myfunc',
- description: 'mydesc'
- }
- ]
- }
- ],
- toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } },
- systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] }
- });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ {
+ model: 'my-model',
+ tools: [
+ {
+ functionDeclarations: [
+ {
+ name: 'myfunc',
+ description: 'mydesc'
+ }
+ ]
+ }
+ ],
+ toolConfig: {
+ functionCallingConfig: { mode: FunctionCallingMode.NONE }
+ },
+ systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] }
+ },
+ new ChromeAdapter()
+ );
expect(genModel.tools?.length).to.equal(1);
expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal(
FunctionCallingMode.NONE
@@ -168,12 +185,16 @@ describe('GenerativeModel', () => {
restore();
});
it('passes base model params through to ChatSession when there are no startChatParams', async () => {
- const genModel = new GenerativeModel(fakeAI, {
- model: 'my-model',
- generationConfig: {
- topK: 1
- }
- });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ {
+ model: 'my-model',
+ generationConfig: {
+ topK: 1
+ }
+ },
+ new ChromeAdapter()
+ );
const chatSession = genModel.startChat();
expect(chatSession.params?.generationConfig).to.deep.equal({
topK: 1
@@ -181,12 +202,16 @@ describe('GenerativeModel', () => {
restore();
});
it('overrides base model params with startChatParams', () => {
- const genModel = new GenerativeModel(fakeAI, {
- model: 'my-model',
- generationConfig: {
- topK: 1
- }
- });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ {
+ model: 'my-model',
+ generationConfig: {
+ topK: 1
+ }
+ },
+ new ChromeAdapter()
+ );
const chatSession = genModel.startChat({
generationConfig: {
topK: 2
@@ -197,17 +222,23 @@ describe('GenerativeModel', () => {
});
});
it('passes params through to chat.sendMessage', async () => {
- const genModel = new GenerativeModel(fakeAI, {
- model: 'my-model',
- tools: [
- { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] }
- ],
- toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } },
- systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] },
- generationConfig: {
- topK: 1
- }
- });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ {
+ model: 'my-model',
+ tools: [
+ { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] }
+ ],
+ toolConfig: {
+ functionCallingConfig: { mode: FunctionCallingMode.NONE }
+ },
+ systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] },
+ generationConfig: {
+ topK: 1
+ }
+ },
+ new ChromeAdapter()
+ );
expect(genModel.tools?.length).to.equal(1);
expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal(
FunctionCallingMode.NONE
@@ -239,10 +270,14 @@ describe('GenerativeModel', () => {
restore();
});
it('passes text-only systemInstruction through to chat.sendMessage', async () => {
- const genModel = new GenerativeModel(fakeAI, {
- model: 'my-model',
- systemInstruction: 'be friendly'
- });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ {
+ model: 'my-model',
+ systemInstruction: 'be friendly'
+ },
+ new ChromeAdapter()
+ );
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
const mockResponse = getMockResponse(
'vertexAI',
@@ -265,17 +300,23 @@ describe('GenerativeModel', () => {
restore();
});
it('startChat overrides model values', async () => {
- const genModel = new GenerativeModel(fakeAI, {
- model: 'my-model',
- tools: [
- { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] }
- ],
- toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } },
- systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] },
- generationConfig: {
- responseMimeType: 'image/jpeg'
- }
- });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ {
+ model: 'my-model',
+ tools: [
+ { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] }
+ ],
+ toolConfig: {
+ functionCallingConfig: { mode: FunctionCallingMode.NONE }
+ },
+ systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] },
+ generationConfig: {
+ responseMimeType: 'image/jpeg'
+ }
+ },
+ new ChromeAdapter()
+ );
expect(genModel.tools?.length).to.equal(1);
expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal(
FunctionCallingMode.NONE
@@ -325,7 +366,11 @@ describe('GenerativeModel', () => {
restore();
});
it('calls countTokens', async () => {
- const genModel = new GenerativeModel(fakeAI, { model: 'my-model' });
+ const genModel = new GenerativeModel(
+ fakeAI,
+ { model: 'my-model' },
+ new ChromeAdapter()
+ );
const mockResponse = getMockResponse(
'vertexAI',
'unary-success-total-tokens.json'
diff --git a/packages/ai/src/models/generative-model.ts b/packages/ai/src/models/generative-model.ts
index b09a9290aa4..98b662ebdb9 100644
--- a/packages/ai/src/models/generative-model.ts
+++ b/packages/ai/src/models/generative-model.ts
@@ -43,12 +43,17 @@ import {
} from '../requests/request-helpers';
import { AI } from '../public-types';
import { AIModel } from './ai-model';
+import { ChromeAdapter } from '../methods/chrome-adapter';
/**
* Class for generative model APIs.
* @public
*/
export class GenerativeModel extends AIModel {
+ /**
+ * Defines the name of the default in-cloud model to use for hybrid inference.
+ */
+ static DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';
generationConfig: GenerationConfig;
safetySettings: SafetySetting[];
requestOptions?: RequestOptions;
@@ -59,6 +64,7 @@ export class GenerativeModel extends AIModel {
constructor(
ai: AI,
modelParams: ModelParams,
+ private chromeAdapter: ChromeAdapter,
requestOptions?: RequestOptions
) {
super(ai, modelParams.model);
@@ -91,6 +97,7 @@ export class GenerativeModel extends AIModel {
systemInstruction: this.systemInstruction,
...formattedParams
},
+ this.chromeAdapter,
this.requestOptions
);
}
@@ -116,6 +123,7 @@ export class GenerativeModel extends AIModel {
systemInstruction: this.systemInstruction,
...formattedParams
},
+ this.chromeAdapter,
this.requestOptions
);
}
@@ -128,6 +136,7 @@ export class GenerativeModel extends AIModel {
return new ChatSession(
this._apiSettings,
this.model,
+ this.chromeAdapter,
{
tools: this.tools,
toolConfig: this.toolConfig,
@@ -152,6 +161,11 @@ export class GenerativeModel extends AIModel {
     request: CountTokensRequest | string | Array<string | Part>
   ): Promise<CountTokensResponse> {
const formattedParams = formatGenerateContentInput(request);
- return countTokens(this._apiSettings, this.model, formattedParams);
+ return countTokens(
+ this._apiSettings,
+ this.model,
+ formattedParams,
+ this.chromeAdapter
+ );
}
}
diff --git a/packages/ai/src/types/language-model.ts b/packages/ai/src/types/language-model.ts
new file mode 100644
index 00000000000..22916e7ff96
--- /dev/null
+++ b/packages/ai/src/types/language-model.ts
@@ -0,0 +1,83 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface LanguageModel extends EventTarget {
+  create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
+  availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
+  prompt(
+    input: LanguageModelPrompt,
+    options?: LanguageModelPromptOptions
+  ): Promise<string>;
+  promptStreaming(
+    input: LanguageModelPrompt,
+    options?: LanguageModelPromptOptions
+  ): ReadableStream<string>;
+  measureInputUsage(
+    input: LanguageModelPrompt,
+    options?: LanguageModelPromptOptions
+  ): Promise<number>;
+  destroy(): undefined;
+}
+export enum Availability {
+ 'unavailable' = 'unavailable',
+ 'downloadable' = 'downloadable',
+ 'downloading' = 'downloading',
+ 'available' = 'available'
+}
+export interface LanguageModelCreateCoreOptions {
+ topK?: number;
+ temperature?: number;
+ expectedInputs?: LanguageModelExpectedInput[];
+}
+export interface LanguageModelCreateOptions
+ extends LanguageModelCreateCoreOptions {
+ signal?: AbortSignal;
+ systemPrompt?: string;
+ initialPrompts?: LanguageModelInitialPrompts;
+}
+export interface LanguageModelPromptOptions {
+ responseConstraint?: object;
+ // TODO: Restore AbortSignal once the API is defined.
+}
+interface LanguageModelExpectedInput {
+ type: LanguageModelMessageType;
+ languages?: string[];
+}
+// TODO: revert to type from Prompt API explainer once it's supported.
+export type LanguageModelPrompt = LanguageModelMessageContent[];
+type LanguageModelInitialPrompts =
+ | LanguageModelMessage[]
+ | LanguageModelMessageShorthand[];
+interface LanguageModelMessage {
+ role: LanguageModelMessageRole;
+ content: LanguageModelMessageContent[];
+}
+interface LanguageModelMessageShorthand {
+ role: LanguageModelMessageRole;
+ content: string;
+}
+export interface LanguageModelMessageContent {
+ type: LanguageModelMessageType;
+ content: LanguageModelMessageContentValue;
+}
+type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+type LanguageModelMessageType = 'text' | 'image' | 'audio';
+type LanguageModelMessageContentValue =
+ | ImageBitmapSource
+ | AudioBuffer
+ | BufferSource
+ | string;
diff --git a/packages/ai/src/types/requests.ts b/packages/ai/src/types/requests.ts
index 67f45095c2a..f7d0cc558b9 100644
--- a/packages/ai/src/types/requests.ts
+++ b/packages/ai/src/types/requests.ts
@@ -17,6 +17,10 @@
import { TypedSchema } from '../requests/schema-builder';
import { Content, Part } from './content';
+import {
+ LanguageModelCreateOptions,
+ LanguageModelPromptOptions
+} from './language-model';
import {
FunctionCallingMode,
HarmBlockMethod,
@@ -231,3 +235,37 @@ export interface FunctionCallingConfig {
mode?: FunctionCallingMode;
allowedFunctionNames?: string[];
}
+
+/**
+ * Encapsulates configuration for on-device inference.
+ */
+export interface OnDeviceParams {
+ createOptions?: LanguageModelCreateOptions;
+ promptOptions?: LanguageModelPromptOptions;
+}
+
+/**
+ * Toggles hybrid inference.
+ */
+export interface HybridParams {
+ /**
+ * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+ */
+ mode: InferenceMode;
+ /**
+ * Optional. Specifies advanced params for on-device inference.
+ */
+ onDeviceParams?: OnDeviceParams;
+ /**
+ * Optional. Specifies advanced params for in-cloud inference.
+ */
+ inCloudParams?: ModelParams;
+}
+
+/**
+ * Determines whether inference happens on-device or in-cloud.
+ */
+export type InferenceMode =
+ | 'prefer_on_device'
+ | 'only_on_device'
+ | 'only_in_cloud';
From c16cbf1a31416216c6067a39d4bfc6770c731fa0 Mon Sep 17 00:00:00 2001
From: Siddharth Gupta
Date: Tue, 13 May 2025 09:15:08 -0700
Subject: [PATCH 2/6] Adding docs
---
common/api-review/ai.api.md | 30 ++++++++++++++--
docs-devsite/_toc.yaml | 4 +++
docs-devsite/ai.chatsession.md | 5 +--
docs-devsite/ai.generativemodel.md | 16 +++++++--
docs-devsite/ai.hybridparams.md | 57 ++++++++++++++++++++++++++++++
docs-devsite/ai.md | 25 +++++++++----
docs-devsite/ai.modelparams.md | 2 +-
docs-devsite/ai.ondeviceparams.md | 42 ++++++++++++++++++++++
docs-devsite/ai.requestoptions.md | 2 +-
9 files changed, 168 insertions(+), 15 deletions(-)
create mode 100644 docs-devsite/ai.hybridparams.md
create mode 100644 docs-devsite/ai.ondeviceparams.md
diff --git a/common/api-review/ai.api.md b/common/api-review/ai.api.md
index d096d4c27f6..a603a531358 100644
--- a/common/api-review/ai.api.md
+++ b/common/api-review/ai.api.md
@@ -112,7 +112,8 @@ export class BooleanSchema extends Schema {
// @public
export class ChatSession {
- constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
+ // Warning: (ae-forgotten-export) The symbol "ChromeAdapter" needs to be exported by the entry point index.d.ts
+ constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
     getHistory(): Promise<Content[]>;
// (undocumented)
model: string;
@@ -395,8 +396,9 @@ export interface GenerativeContentBlob {
// @public
export class GenerativeModel extends AIModel {
- constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
+ constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions);
     countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>;
+ static DEFAULT_HYBRID_IN_CLOUD_MODEL: string;
     generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>;
     generateContentStream(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentStreamResult>;
// (undocumented)
@@ -418,7 +420,7 @@ export class GenerativeModel extends AIModel {
export function getAI(app?: FirebaseApp, options?: AIOptions): AI;
// @public
-export function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
+export function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
// @beta
export function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;
@@ -550,6 +552,13 @@ export enum HarmSeverity {
HARM_SEVERITY_UNSUPPORTED = "HARM_SEVERITY_UNSUPPORTED"
}
+// @public
+export interface HybridParams {
+ inCloudParams?: ModelParams;
+ mode: InferenceMode;
+ onDeviceParams?: OnDeviceParams;
+}
+
// @beta
export enum ImagenAspectRatio {
LANDSCAPE_16x9 = "16:9",
@@ -634,6 +643,9 @@ export interface ImagenSafetySettings {
safetyFilterLevel?: ImagenSafetyFilterLevel;
}
+// @public
+export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
+
// @public
export interface InlineDataPart {
// (undocumented)
@@ -708,6 +720,18 @@ export interface ObjectSchemaInterface extends SchemaInterface {
type: SchemaType.OBJECT;
}
+// @public
+export interface OnDeviceParams {
+ // Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts
+ //
+ // (undocumented)
+ createOptions?: LanguageModelCreateOptions;
+ // Warning: (ae-forgotten-export) The symbol "LanguageModelPromptOptions" needs to be exported by the entry point index.d.ts
+ //
+ // (undocumented)
+ promptOptions?: LanguageModelPromptOptions;
+}
+
// @public
export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart;
diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml
index b77a6b5910e..4076c443ddc 100644
--- a/docs-devsite/_toc.yaml
+++ b/docs-devsite/_toc.yaml
@@ -80,6 +80,8 @@ toc:
path: /docs/reference/js/ai.groundingattribution.md
- title: GroundingMetadata
path: /docs/reference/js/ai.groundingmetadata.md
+ - title: HybridParams
+ path: /docs/reference/js/ai.hybridparams.md
- title: ImagenGCSImage
path: /docs/reference/js/ai.imagengcsimage.md
- title: ImagenGenerationConfig
@@ -110,6 +112,8 @@ toc:
path: /docs/reference/js/ai.objectschema.md
- title: ObjectSchemaInterface
path: /docs/reference/js/ai.objectschemainterface.md
+ - title: OnDeviceParams
+ path: /docs/reference/js/ai.ondeviceparams.md
- title: PromptFeedback
path: /docs/reference/js/ai.promptfeedback.md
- title: RequestOptions
diff --git a/docs-devsite/ai.chatsession.md b/docs-devsite/ai.chatsession.md
index 1d6e403b6a8..610fb2274dd 100644
--- a/docs-devsite/ai.chatsession.md
+++ b/docs-devsite/ai.chatsession.md
@@ -22,7 +22,7 @@ export declare class ChatSession
| Constructor | Modifiers | Description |
| --- | --- | --- |
-| [(constructor)(apiSettings, model, params, requestOptions)](./ai.chatsession.md#chatsessionconstructor) | | Constructs a new instance of the ChatSession
class |
+| [(constructor)(apiSettings, model, chromeAdapter, params, requestOptions)](./ai.chatsession.md#chatsessionconstructor) | | Constructs a new instance of the ChatSession
class |
## Properties
@@ -47,7 +47,7 @@ Constructs a new instance of the `ChatSession` class
Signature:
```typescript
-constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
+constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
```
#### Parameters
@@ -56,6 +56,7 @@ constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams |
| --- | --- | --- |
| apiSettings | ApiSettings | |
| model | string | |
+| chromeAdapter | ChromeAdapter | |
| params | [StartChatParams](./ai.startchatparams.md#startchatparams_interface) \| undefined | |
| requestOptions | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) \| undefined | |
diff --git a/docs-devsite/ai.generativemodel.md b/docs-devsite/ai.generativemodel.md
index d91cf80e881..17c9d3c0863 100644
--- a/docs-devsite/ai.generativemodel.md
+++ b/docs-devsite/ai.generativemodel.md
@@ -23,12 +23,13 @@ export declare class GenerativeModel extends AIModel
| Constructor | Modifiers | Description |
| --- | --- | --- |
-| [(constructor)(ai, modelParams, requestOptions)](./ai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel
class |
+| [(constructor)(ai, modelParams, chromeAdapter, requestOptions)](./ai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel
class |
## Properties
| Property | Modifiers | Type | Description |
| --- | --- | --- | --- |
+| [DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL](./ai.generativemodel.md#generativemodeldefault_hybrid_in_cloud_model) | static
| string | Defines the name of the default in-cloud model to use for hybrid inference. |
| [generationConfig](./ai.generativemodel.md#generativemodelgenerationconfig) | | [GenerationConfig](./ai.generationconfig.md#generationconfig_interface) | |
| [requestOptions](./ai.generativemodel.md#generativemodelrequestoptions) | | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
| [safetySettings](./ai.generativemodel.md#generativemodelsafetysettings) | | [SafetySetting](./ai.safetysetting.md#safetysetting_interface)\[\] | |
@@ -52,7 +53,7 @@ Constructs a new instance of the `GenerativeModel` class
Signature:
```typescript
-constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
+constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions);
```
#### Parameters
@@ -61,8 +62,19 @@ constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
| --- | --- | --- |
| ai | [AI](./ai.ai.md#ai_interface) | |
| modelParams | [ModelParams](./ai.modelparams.md#modelparams_interface) | |
+| chromeAdapter | ChromeAdapter | |
| requestOptions | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
+## GenerativeModel.DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL
+
+Defines the name of the default in-cloud model to use for hybrid inference.
+
+Signature:
+
+```typescript
+static DEFAULT_HYBRID_IN_CLOUD_MODEL: string;
+```
+
## GenerativeModel.generationConfig
Signature:
diff --git a/docs-devsite/ai.hybridparams.md b/docs-devsite/ai.hybridparams.md
new file mode 100644
index 00000000000..b2b3b1030fe
--- /dev/null
+++ b/docs-devsite/ai.hybridparams.md
@@ -0,0 +1,57 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# HybridParams interface
+Toggles hybrid inference.
+
+Signature:
+
+```typescript
+export interface HybridParams
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [inCloudParams](./ai.hybridparams.md#hybridparamsincloudparams) | [ModelParams](./ai.modelparams.md#modelparams_interface) | Optional. Specifies advanced params for in-cloud inference. |
+| [mode](./ai.hybridparams.md#hybridparamsmode) | [InferenceMode](./ai.md#inferencemode) | Specifies on-device or in-cloud inference. Defaults to prefer on-device. |
+| [onDeviceParams](./ai.hybridparams.md#hybridparamsondeviceparams) | [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | Optional. Specifies advanced params for on-device inference. |
+
+## HybridParams.inCloudParams
+
+Optional. Specifies advanced params for in-cloud inference.
+
+Signature:
+
+```typescript
+inCloudParams?: ModelParams;
+```
+
+## HybridParams.mode
+
+Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+
+Signature:
+
+```typescript
+mode: InferenceMode;
+```
+
+## HybridParams.onDeviceParams
+
+Optional. Specifies advanced params for on-device inference.
+
+Signature:
+
+```typescript
+onDeviceParams?: OnDeviceParams;
+```
diff --git a/docs-devsite/ai.md b/docs-devsite/ai.md
index c43c0391ba4..01b3a455682 100644
--- a/docs-devsite/ai.md
+++ b/docs-devsite/ai.md
@@ -20,7 +20,7 @@ The Firebase AI Web SDK.
| [getAI(app, options)](./ai.md#getai_a94a413) | Returns the default [AI](./ai.ai.md#ai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. |
| [getVertexAI(app, options)](./ai.md#getvertexai_04094cf) | |
| function(ai, ...) |
-| [getGenerativeModel(ai, modelParams, requestOptions)](./ai.md#getgenerativemodel_80bd839) | Returns a [GenerativeModel](./ai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. |
+| [getGenerativeModel(ai, modelParams, requestOptions)](./ai.md#getgenerativemodel_c63f46a) | Returns a [GenerativeModel](./ai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. |
| [getImagenModel(ai, modelParams, requestOptions)](./ai.md#getimagenmodel_e1f6645) | (Public Preview) Returns an [ImagenModel](./ai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.Only Imagen 3 models (named imagen-3.0-*
) are supported. |
## Classes
@@ -97,6 +97,7 @@ The Firebase AI Web SDK.
| [GenerativeContentBlob](./ai.generativecontentblob.md#generativecontentblob_interface) | Interface for sending an image. |
| [GroundingAttribution](./ai.groundingattribution.md#groundingattribution_interface) | |
| [GroundingMetadata](./ai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned to client when grounding is enabled. |
+| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | Toggles hybrid inference. |
| [ImagenGCSImage](./ai.imagengcsimage.md#imagengcsimage_interface) | An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. |
| [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface) | (Public Preview) Configuration options for generating images with Imagen.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. |
| [ImagenGenerationResponse](./ai.imagengenerationresponse.md#imagengenerationresponse_interface) | (Public Preview) The response from a request to generate images with Imagen. |
@@ -105,10 +106,11 @@ The Firebase AI Web SDK.
| [ImagenSafetySettings](./ai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. |
| [InlineDataPart](./ai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. |
| [ModalityTokenCount](./ai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
-| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839). |
+| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [ObjectSchemaInterface](./ai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./ai.objectschema.md#objectschema_class) class. |
+| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | Encapsulates configuration for on-device inference. |
| [PromptFeedback](./ai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason
and the relevant safetyRatings
. |
-| [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839). |
+| [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [RetrievedContextAttribution](./ai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | |
| [SafetyRating](./ai.safetyrating.md#safetyrating_interface) | A safety rating associated with a [GenerateContentCandidate](./ai.generatecontentcandidate.md#generatecontentcandidate_interface) |
| [SafetySetting](./ai.safetysetting.md#safetysetting_interface) | Safety setting that can be sent as part of request parameters. |
@@ -140,6 +142,7 @@ The Firebase AI Web SDK.
| Type Alias | Description |
| --- | --- |
| [BackendType](./ai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI'
or 'GOOGLE_AI'
. |
+| [InferenceMode](./ai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
| [Part](./ai.md#part) | Content part - includes text, image/video, or function call/response part types. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
| [Role](./ai.md#role) | Role is the producer of the content. |
@@ -226,14 +229,14 @@ export declare function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions
## function(ai, ...)
-### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_80bd839}
+### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_c63f46a}
Returns a [GenerativeModel](./ai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality.
Signature:
```typescript
-export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
+export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
```
#### Parameters
@@ -241,7 +244,7 @@ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, req
| Parameter | Type | Description |
| --- | --- | --- |
| ai | [AI](./ai.ai.md#ai_interface) | |
-| modelParams | [ModelParams](./ai.modelparams.md#modelparams_interface) | |
+| modelParams | [ModelParams](./ai.modelparams.md#modelparams_interface) \| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | |
| requestOptions | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
Returns:
@@ -360,6 +363,16 @@ Type alias representing valid backend types. It can be either `'VERTEX_AI'` or `
export type BackendType = (typeof BackendType)[keyof typeof BackendType];
```
+## InferenceMode
+
+Determines whether inference happens on-device or in-cloud.
+
+Signature:
+
+```typescript
+export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
+```
+
## Part
Content part - includes text, image/video, or function call/response part types.
diff --git a/docs-devsite/ai.modelparams.md b/docs-devsite/ai.modelparams.md
index a92b2e9035d..a5722e7d69d 100644
--- a/docs-devsite/ai.modelparams.md
+++ b/docs-devsite/ai.modelparams.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# ModelParams interface
-Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839).
+Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a).
Signature:
diff --git a/docs-devsite/ai.ondeviceparams.md b/docs-devsite/ai.ondeviceparams.md
new file mode 100644
index 00000000000..f4bfcbb5cff
--- /dev/null
+++ b/docs-devsite/ai.ondeviceparams.md
@@ -0,0 +1,42 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# OnDeviceParams interface
+Encapsulates configuration for on-device inference.
+
+Signature:
+
+```typescript
+export interface OnDeviceParams
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | LanguageModelCreateOptions | |
+| [promptOptions](./ai.ondeviceparams.md#ondeviceparamspromptoptions) | LanguageModelPromptOptions | |
+
+## OnDeviceParams.createOptions
+
+Signature:
+
+```typescript
+createOptions?: LanguageModelCreateOptions;
+```
+
+## OnDeviceParams.promptOptions
+
+Signature:
+
+```typescript
+promptOptions?: LanguageModelPromptOptions;
+```
diff --git a/docs-devsite/ai.requestoptions.md b/docs-devsite/ai.requestoptions.md
index 73aa03c1d25..8178ef5b696 100644
--- a/docs-devsite/ai.requestoptions.md
+++ b/docs-devsite/ai.requestoptions.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# RequestOptions interface
-Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839).
+Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a).
Signature:
From 50f142a9805d316c9606f0520351e910aadefd83 Mon Sep 17 00:00:00 2001
From: Erik Eldridge
Date: Wed, 14 May 2025 13:32:50 -0700
Subject: [PATCH 3/6] VinF Hybrid Inference: Document exported LanguageModel
types (#9035)
---
common/api-review/ai.api.md | 66 ++++++++++++++++++-
docs-devsite/_toc.yaml | 12 ++++
.../ai.languagemodelcreatecoreoptions.md | 49 ++++++++++++++
docs-devsite/ai.languagemodelcreateoptions.md | 50 ++++++++++++++
docs-devsite/ai.languagemodelexpectedinput.md | 40 +++++++++++
docs-devsite/ai.languagemodelmessage.md | 40 +++++++++++
.../ai.languagemodelmessagecontent.md | 40 +++++++++++
.../ai.languagemodelmessageshorthand.md | 40 +++++++++++
docs-devsite/ai.md | 42 ++++++++++++
docs-devsite/ai.ondeviceparams.md | 2 +-
packages/ai/src/methods/chrome-adapter.ts | 4 +-
packages/ai/src/types/index.ts | 12 ++++
packages/ai/src/types/language-model.ts | 14 ++--
13 files changed, 399 insertions(+), 12 deletions(-)
create mode 100644 docs-devsite/ai.languagemodelcreatecoreoptions.md
create mode 100644 docs-devsite/ai.languagemodelcreateoptions.md
create mode 100644 docs-devsite/ai.languagemodelexpectedinput.md
create mode 100644 docs-devsite/ai.languagemodelmessage.md
create mode 100644 docs-devsite/ai.languagemodelmessagecontent.md
create mode 100644 docs-devsite/ai.languagemodelmessageshorthand.md
diff --git a/common/api-review/ai.api.md b/common/api-review/ai.api.md
index a603a531358..a7da6210ada 100644
--- a/common/api-review/ai.api.md
+++ b/common/api-review/ai.api.md
@@ -664,6 +664,70 @@ export class IntegerSchema extends Schema {
constructor(schemaParams?: SchemaParams);
}
+// @public (undocumented)
+export interface LanguageModelCreateCoreOptions {
+ // (undocumented)
+ expectedInputs?: LanguageModelExpectedInput[];
+ // (undocumented)
+ temperature?: number;
+ // (undocumented)
+ topK?: number;
+}
+
+// @public (undocumented)
+export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+ // (undocumented)
+ initialPrompts?: LanguageModelInitialPrompts;
+ // (undocumented)
+ signal?: AbortSignal;
+ // (undocumented)
+ systemPrompt?: string;
+}
+
+// @public (undocumented)
+export interface LanguageModelExpectedInput {
+ // (undocumented)
+ languages?: string[];
+ // (undocumented)
+ type: LanguageModelMessageType;
+}
+
+// @public (undocumented)
+export type LanguageModelInitialPrompts = LanguageModelMessage[] | LanguageModelMessageShorthand[];
+
+// @public (undocumented)
+export interface LanguageModelMessage {
+ // (undocumented)
+ content: LanguageModelMessageContent[];
+ // (undocumented)
+ role: LanguageModelMessageRole;
+}
+
+// @public (undocumented)
+export interface LanguageModelMessageContent {
+ // (undocumented)
+ content: LanguageModelMessageContentValue;
+ // (undocumented)
+ type: LanguageModelMessageType;
+}
+
+// @public (undocumented)
+export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+
+// @public (undocumented)
+export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+
+// @public (undocumented)
+export interface LanguageModelMessageShorthand {
+ // (undocumented)
+ content: string;
+ // (undocumented)
+ role: LanguageModelMessageRole;
+}
+
+// @public (undocumented)
+export type LanguageModelMessageType = 'text' | 'image' | 'audio';
+
// @public
export enum Modality {
AUDIO = "AUDIO",
@@ -722,8 +786,6 @@ export interface ObjectSchemaInterface extends SchemaInterface {
// @public
export interface OnDeviceParams {
- // Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts
- //
// (undocumented)
createOptions?: LanguageModelCreateOptions;
// Warning: (ae-forgotten-export) The symbol "LanguageModelPromptOptions" needs to be exported by the entry point index.d.ts
diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml
index 4076c443ddc..e8359727cda 100644
--- a/docs-devsite/_toc.yaml
+++ b/docs-devsite/_toc.yaml
@@ -102,6 +102,18 @@ toc:
path: /docs/reference/js/ai.inlinedatapart.md
- title: IntegerSchema
path: /docs/reference/js/ai.integerschema.md
+ - title: LanguageModelCreateCoreOptions
+ path: /docs/reference/js/ai.languagemodelcreatecoreoptions.md
+ - title: LanguageModelCreateOptions
+ path: /docs/reference/js/ai.languagemodelcreateoptions.md
+ - title: LanguageModelExpectedInput
+ path: /docs/reference/js/ai.languagemodelexpectedinput.md
+ - title: LanguageModelMessage
+ path: /docs/reference/js/ai.languagemodelmessage.md
+ - title: LanguageModelMessageContent
+ path: /docs/reference/js/ai.languagemodelmessagecontent.md
+ - title: LanguageModelMessageShorthand
+ path: /docs/reference/js/ai.languagemodelmessageshorthand.md
- title: ModalityTokenCount
path: /docs/reference/js/ai.modalitytokencount.md
- title: ModelParams
diff --git a/docs-devsite/ai.languagemodelcreatecoreoptions.md b/docs-devsite/ai.languagemodelcreatecoreoptions.md
new file mode 100644
index 00000000000..2c9f61b149f
--- /dev/null
+++ b/docs-devsite/ai.languagemodelcreatecoreoptions.md
@@ -0,0 +1,49 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# LanguageModelCreateCoreOptions interface
+Signature:
+
+```typescript
+export interface LanguageModelCreateCoreOptions
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [expectedInputs](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionsexpectedinputs) | [LanguageModelExpectedInput](./ai.languagemodelexpectedinput.md#languagemodelexpectedinput_interface)\[\] | |
+| [temperature](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionstemperature) | number | |
+| [topK](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionstopk) | number | |
+
+## LanguageModelCreateCoreOptions.expectedInputs
+
+Signature:
+
+```typescript
+expectedInputs?: LanguageModelExpectedInput[];
+```
+
+## LanguageModelCreateCoreOptions.temperature
+
+Signature:
+
+```typescript
+temperature?: number;
+```
+
+## LanguageModelCreateCoreOptions.topK
+
+Signature:
+
+```typescript
+topK?: number;
+```
diff --git a/docs-devsite/ai.languagemodelcreateoptions.md b/docs-devsite/ai.languagemodelcreateoptions.md
new file mode 100644
index 00000000000..44edcf7e221
--- /dev/null
+++ b/docs-devsite/ai.languagemodelcreateoptions.md
@@ -0,0 +1,50 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# LanguageModelCreateOptions interface
+Signature:
+
+```typescript
+export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions
+```
+Extends: [LanguageModelCreateCoreOptions](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptions_interface)
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [initialPrompts](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionsinitialprompts) | [LanguageModelInitialPrompts](./ai.md#languagemodelinitialprompts) | |
+| [signal](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionssignal) | AbortSignal | |
+| [systemPrompt](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionssystemprompt) | string | |
+
+## LanguageModelCreateOptions.initialPrompts
+
+Signature:
+
+```typescript
+initialPrompts?: LanguageModelInitialPrompts;
+```
+
+## LanguageModelCreateOptions.signal
+
+Signature:
+
+```typescript
+signal?: AbortSignal;
+```
+
+## LanguageModelCreateOptions.systemPrompt
+
+Signature:
+
+```typescript
+systemPrompt?: string;
+```
diff --git a/docs-devsite/ai.languagemodelexpectedinput.md b/docs-devsite/ai.languagemodelexpectedinput.md
new file mode 100644
index 00000000000..d6cbe028fc1
--- /dev/null
+++ b/docs-devsite/ai.languagemodelexpectedinput.md
@@ -0,0 +1,40 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# LanguageModelExpectedInput interface
+Signature:
+
+```typescript
+export interface LanguageModelExpectedInput
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [languages](./ai.languagemodelexpectedinput.md#languagemodelexpectedinputlanguages) | string\[\] | |
+| [type](./ai.languagemodelexpectedinput.md#languagemodelexpectedinputtype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
+
+## LanguageModelExpectedInput.languages
+
+Signature:
+
+```typescript
+languages?: string[];
+```
+
+## LanguageModelExpectedInput.type
+
+Signature:
+
+```typescript
+type: LanguageModelMessageType;
+```
diff --git a/docs-devsite/ai.languagemodelmessage.md b/docs-devsite/ai.languagemodelmessage.md
new file mode 100644
index 00000000000..420059e4892
--- /dev/null
+++ b/docs-devsite/ai.languagemodelmessage.md
@@ -0,0 +1,40 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# LanguageModelMessage interface
+Signature:
+
+```typescript
+export interface LanguageModelMessage
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [content](./ai.languagemodelmessage.md#languagemodelmessagecontent) | [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface)\[\] | |
+| [role](./ai.languagemodelmessage.md#languagemodelmessagerole) | [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
+
+## LanguageModelMessage.content
+
+Signature:
+
+```typescript
+content: LanguageModelMessageContent[];
+```
+
+## LanguageModelMessage.role
+
+Signature:
+
+```typescript
+role: LanguageModelMessageRole;
+```
diff --git a/docs-devsite/ai.languagemodelmessagecontent.md b/docs-devsite/ai.languagemodelmessagecontent.md
new file mode 100644
index 00000000000..06830ace272
--- /dev/null
+++ b/docs-devsite/ai.languagemodelmessagecontent.md
@@ -0,0 +1,40 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# LanguageModelMessageContent interface
+Signature:
+
+```typescript
+export interface LanguageModelMessageContent
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [content](./ai.languagemodelmessagecontent.md#languagemodelmessagecontentcontent) | [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
+| [type](./ai.languagemodelmessagecontent.md#languagemodelmessagecontenttype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
+
+## LanguageModelMessageContent.content
+
+Signature:
+
+```typescript
+content: LanguageModelMessageContentValue;
+```
+
+## LanguageModelMessageContent.type
+
+Signature:
+
+```typescript
+type: LanguageModelMessageType;
+```
diff --git a/docs-devsite/ai.languagemodelmessageshorthand.md b/docs-devsite/ai.languagemodelmessageshorthand.md
new file mode 100644
index 00000000000..bf821b31d52
--- /dev/null
+++ b/docs-devsite/ai.languagemodelmessageshorthand.md
@@ -0,0 +1,40 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# LanguageModelMessageShorthand interface
+Signature:
+
+```typescript
+export interface LanguageModelMessageShorthand
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [content](./ai.languagemodelmessageshorthand.md#languagemodelmessageshorthandcontent) | string | |
+| [role](./ai.languagemodelmessageshorthand.md#languagemodelmessageshorthandrole) | [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
+
+## LanguageModelMessageShorthand.content
+
+Signature:
+
+```typescript
+content: string;
+```
+
+## LanguageModelMessageShorthand.role
+
+Signature:
+
+```typescript
+role: LanguageModelMessageRole;
+```
diff --git a/docs-devsite/ai.md b/docs-devsite/ai.md
index 01b3a455682..699d3a83cd6 100644
--- a/docs-devsite/ai.md
+++ b/docs-devsite/ai.md
@@ -105,6 +105,12 @@ The Firebase AI Web SDK.
| [ImagenModelParams](./ai.imagenmodelparams.md#imagenmodelparams_interface) | (Public Preview) Parameters for configuring an [ImagenModel](./ai.imagenmodel.md#imagenmodel_class). |
| [ImagenSafetySettings](./ai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. |
| [InlineDataPart](./ai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. |
+| [LanguageModelCreateCoreOptions](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptions_interface) | |
+| [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | |
+| [LanguageModelExpectedInput](./ai.languagemodelexpectedinput.md#languagemodelexpectedinput_interface) | |
+| [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface) | |
+| [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) | |
+| [LanguageModelMessageShorthand](./ai.languagemodelmessageshorthand.md#languagemodelmessageshorthand_interface) | |
| [ModalityTokenCount](./ai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [ObjectSchemaInterface](./ai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./ai.objectschema.md#objectschema_class) class. |
@@ -143,6 +149,10 @@ The Firebase AI Web SDK.
| --- | --- |
| [BackendType](./ai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI'
or 'GOOGLE_AI'
. |
| [InferenceMode](./ai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
+| [LanguageModelInitialPrompts](./ai.md#languagemodelinitialprompts) | |
+| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
+| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
+| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
| [Part](./ai.md#part) | Content part - includes text, image/video, or function call/response part types. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
| [Role](./ai.md#role) | Role is the producer of the content. |
@@ -373,6 +383,38 @@ Determines whether inference happens on-device or in-cloud.
export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
```
+## LanguageModelInitialPrompts
+
+Signature:
+
+```typescript
+export type LanguageModelInitialPrompts = LanguageModelMessage[] | LanguageModelMessageShorthand[];
+```
+
+## LanguageModelMessageContentValue
+
+Signature:
+
+```typescript
+export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+```
+
+## LanguageModelMessageRole
+
+Signature:
+
+```typescript
+export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+```
+
+## LanguageModelMessageType
+
+Signature:
+
+```typescript
+export type LanguageModelMessageType = 'text' | 'image' | 'audio';
+```
+
## Part
Content part - includes text, image/video, or function call/response part types.
diff --git a/docs-devsite/ai.ondeviceparams.md b/docs-devsite/ai.ondeviceparams.md
index f4bfcbb5cff..16fed65560d 100644
--- a/docs-devsite/ai.ondeviceparams.md
+++ b/docs-devsite/ai.ondeviceparams.md
@@ -22,7 +22,7 @@ export interface OnDeviceParams
| Property | Type | Description |
| --- | --- | --- |
-| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | LanguageModelCreateOptions | |
+| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | |
| [promptOptions](./ai.ondeviceparams.md#ondeviceparamspromptoptions) | LanguageModelPromptOptions | |
## OnDeviceParams.createOptions
diff --git a/packages/ai/src/methods/chrome-adapter.ts b/packages/ai/src/methods/chrome-adapter.ts
index aa3709048a2..d6de108668d 100644
--- a/packages/ai/src/methods/chrome-adapter.ts
+++ b/packages/ai/src/methods/chrome-adapter.ts
@@ -104,7 +104,7 @@ export class ChromeAdapter {
*
* This is comparable to {@link GenerativeModel.generateContent} for generating content in
* Cloud.
- * @param request a standard Vertex {@link GenerateContentRequest}
+ * @param request - a standard Vertex {@link GenerateContentRequest}
* @returns {@link Response}, so we can reuse common response formatting.
*/
async generateContent(request: GenerateContentRequest): Promise {
@@ -126,7 +126,7 @@ export class ChromeAdapter {
*
* This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
* Cloud.
- * @param request a standard Vertex {@link GenerateContentRequest}
+ * @param request - a standard Vertex {@link GenerateContentRequest}
* @returns {@link Response}, so we can reuse common response formatting.
*/
async generateContentStream(
diff --git a/packages/ai/src/types/index.ts b/packages/ai/src/types/index.ts
index 01f3e7a701a..698f15b8aea 100644
--- a/packages/ai/src/types/index.ts
+++ b/packages/ai/src/types/index.ts
@@ -23,3 +23,15 @@ export * from './error';
export * from './schema';
export * from './imagen';
export * from './googleai';
+export {
+ LanguageModelCreateOptions,
+ LanguageModelCreateCoreOptions,
+ LanguageModelExpectedInput,
+ LanguageModelInitialPrompts,
+ LanguageModelMessage,
+ LanguageModelMessageContent,
+ LanguageModelMessageContentValue,
+ LanguageModelMessageRole,
+ LanguageModelMessageShorthand,
+ LanguageModelMessageType
+} from './language-model';
diff --git a/packages/ai/src/types/language-model.ts b/packages/ai/src/types/language-model.ts
index 22916e7ff96..de4020f66bf 100644
--- a/packages/ai/src/types/language-model.ts
+++ b/packages/ai/src/types/language-model.ts
@@ -53,20 +53,20 @@ export interface LanguageModelPromptOptions {
responseConstraint?: object;
// TODO: Restore AbortSignal once the API is defined.
}
-interface LanguageModelExpectedInput {
+export interface LanguageModelExpectedInput {
type: LanguageModelMessageType;
languages?: string[];
}
// TODO: revert to type from Prompt API explainer once it's supported.
export type LanguageModelPrompt = LanguageModelMessageContent[];
-type LanguageModelInitialPrompts =
+export type LanguageModelInitialPrompts =
| LanguageModelMessage[]
| LanguageModelMessageShorthand[];
-interface LanguageModelMessage {
+export interface LanguageModelMessage {
role: LanguageModelMessageRole;
content: LanguageModelMessageContent[];
}
-interface LanguageModelMessageShorthand {
+export interface LanguageModelMessageShorthand {
role: LanguageModelMessageRole;
content: string;
}
@@ -74,9 +74,9 @@ export interface LanguageModelMessageContent {
type: LanguageModelMessageType;
content: LanguageModelMessageContentValue;
}
-type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
-type LanguageModelMessageType = 'text' | 'image' | 'audio';
-type LanguageModelMessageContentValue =
+export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+export type LanguageModelMessageType = 'text' | 'image' | 'audio';
+export type LanguageModelMessageContentValue =
| ImageBitmapSource
| AudioBuffer
| BufferSource
From 72cd62665d0cbf10c3879cc27f5977acc4ad74ee Mon Sep 17 00:00:00 2001
From: Erik Eldridge
Date: Thu, 15 May 2025 10:08:10 -0700
Subject: [PATCH 4/6] AI Hybrid Inference: guard against undefined mode (#9045)
---
packages/ai/src/methods/chrome-adapter.test.ts | 18 ++++++++++++++++--
packages/ai/src/methods/chrome-adapter.ts | 6 ++++++
2 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/packages/ai/src/methods/chrome-adapter.test.ts b/packages/ai/src/methods/chrome-adapter.test.ts
index fbe7ec1a5c5..adb9ae47d87 100644
--- a/packages/ai/src/methods/chrome-adapter.test.ts
+++ b/packages/ai/src/methods/chrome-adapter.test.ts
@@ -109,6 +109,14 @@ describe('ChromeAdapter', () => {
});
});
describe('isAvailable', () => {
+ it('returns false if mode is undefined', async () => {
+ const adapter = new ChromeAdapter();
+ expect(
+ await adapter.isAvailable({
+ contents: []
+ })
+ ).to.be.false;
+ });
it('returns false if mode is only cloud', async () => {
const adapter = new ChromeAdapter(undefined, 'only_in_cloud');
expect(
@@ -239,7 +247,10 @@ describe('ChromeAdapter', () => {
const createStub = stub(languageModelProvider, 'create').returns(
downloadPromise
);
- const adapter = new ChromeAdapter(languageModelProvider);
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device'
+ );
await adapter.isAvailable({
contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
});
@@ -260,7 +271,10 @@ describe('ChromeAdapter', () => {
const createStub = stub(languageModelProvider, 'create').returns(
downloadPromise
);
- const adapter = new ChromeAdapter(languageModelProvider);
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device'
+ );
await adapter.isAvailable({
contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
});
diff --git a/packages/ai/src/methods/chrome-adapter.ts b/packages/ai/src/methods/chrome-adapter.ts
index d6de108668d..b61ad9b5f09 100644
--- a/packages/ai/src/methods/chrome-adapter.ts
+++ b/packages/ai/src/methods/chrome-adapter.ts
@@ -68,6 +68,12 @@ export class ChromeAdapter {
* separation of concerns.
*/
async isAvailable(request: GenerateContentRequest): Promise {
+ if (!this.mode) {
+ logger.debug(
+ `On-device inference unavailable because mode is undefined.`
+ );
+ return false;
+ }
if (this.mode === 'only_in_cloud') {
logger.debug(
`On-device inference unavailable because mode is "only_in_cloud".`
From 177f546912ee1ce5726b4ea3807886b607291197 Mon Sep 17 00:00:00 2001
From: Erik Eldridge
Date: Wed, 28 May 2025 14:25:43 -0700
Subject: [PATCH 5/6] AI Hybrid Inference: migrate to LanguageModelMessage
(#9027)
---
common/api-review/ai.api.md | 4 +-
.../ai/src/methods/chrome-adapter.test.ts | 144 ++++++++++++++----
packages/ai/src/methods/chrome-adapter.ts | 53 +++++--
packages/ai/src/types/language-model.ts | 12 +-
4 files changed, 165 insertions(+), 48 deletions(-)
diff --git a/common/api-review/ai.api.md b/common/api-review/ai.api.md
index a7da6210ada..97d25b9e03d 100644
--- a/common/api-review/ai.api.md
+++ b/common/api-review/ai.api.md
@@ -705,10 +705,10 @@ export interface LanguageModelMessage {
// @public (undocumented)
export interface LanguageModelMessageContent {
- // (undocumented)
- content: LanguageModelMessageContentValue;
// (undocumented)
type: LanguageModelMessageType;
+ // (undocumented)
+ value: LanguageModelMessageContentValue;
}
// @public (undocumented)
diff --git a/packages/ai/src/methods/chrome-adapter.test.ts b/packages/ai/src/methods/chrome-adapter.test.ts
index adb9ae47d87..f8ea80b0e09 100644
--- a/packages/ai/src/methods/chrome-adapter.test.ts
+++ b/packages/ai/src/methods/chrome-adapter.test.ts
@@ -24,7 +24,7 @@ import {
Availability,
LanguageModel,
LanguageModelCreateOptions,
- LanguageModelMessageContent
+ LanguageModelMessage
} from '../types/language-model';
import { match, stub } from 'sinon';
import { GenerateContentRequest, AIErrorCode } from '../types';
@@ -146,7 +146,7 @@ describe('ChromeAdapter', () => {
})
).to.be.false;
});
- it('returns false if request content has non-user role', async () => {
+ it('returns false if request content has "function" role', async () => {
const adapter = new ChromeAdapter(
{
availability: async () => Availability.available
@@ -157,7 +157,7 @@ describe('ChromeAdapter', () => {
await adapter.isAvailable({
contents: [
{
- role: 'model',
+ role: 'function',
parts: []
}
]
@@ -320,7 +320,7 @@ describe('ChromeAdapter', () => {
} as LanguageModel;
const languageModel = {
// eslint-disable-next-line @typescript-eslint/no-unused-vars
- prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('')
+ prompt: (p: LanguageModelMessage[]) => Promise.resolve('')
} as LanguageModel;
const createStub = stub(languageModelProvider, 'create').resolves(
languageModel
@@ -345,8 +345,13 @@ describe('ChromeAdapter', () => {
// Asserts Vertex input type is mapped to Chrome type.
expect(promptStub).to.have.been.calledOnceWith([
{
- type: 'text',
- content: request.contents[0].parts[0].text
+ role: request.contents[0].role,
+ content: [
+ {
+ type: 'text',
+ value: request.contents[0].parts[0].text
+ }
+ ]
}
]);
// Asserts expected output.
@@ -366,7 +371,7 @@ describe('ChromeAdapter', () => {
} as LanguageModel;
const languageModel = {
// eslint-disable-next-line @typescript-eslint/no-unused-vars
- prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('')
+ prompt: (p: LanguageModelMessage[]) => Promise.resolve('')
} as LanguageModel;
const createStub = stub(languageModelProvider, 'create').resolves(
languageModel
@@ -404,12 +409,17 @@ describe('ChromeAdapter', () => {
// Asserts Vertex input type is mapped to Chrome type.
expect(promptStub).to.have.been.calledOnceWith([
{
- type: 'text',
- content: request.contents[0].parts[0].text
- },
- {
- type: 'image',
- content: match.instanceOf(ImageBitmap)
+ role: request.contents[0].role,
+ content: [
+ {
+ type: 'text',
+ value: request.contents[0].parts[0].text
+ },
+ {
+ type: 'image',
+ value: match.instanceOf(ImageBitmap)
+ }
+ ]
}
]);
// Asserts expected output.
@@ -426,7 +436,7 @@ describe('ChromeAdapter', () => {
it('honors prompt options', async () => {
const languageModel = {
// eslint-disable-next-line @typescript-eslint/no-unused-vars
- prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('')
+ prompt: (p: LanguageModelMessage[]) => Promise.resolve('')
} as LanguageModel;
const languageModelProvider = {
create: () => Promise.resolve(languageModel)
@@ -450,13 +460,48 @@ describe('ChromeAdapter', () => {
expect(promptStub).to.have.been.calledOnceWith(
[
{
- type: 'text',
- content: request.contents[0].parts[0].text
+ role: request.contents[0].role,
+ content: [
+ {
+ type: 'text',
+ value: request.contents[0].parts[0].text
+ }
+ ]
}
],
promptOptions
);
});
+ it('normalizes roles', async () => {
+ const languageModel = {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ prompt: (p: LanguageModelMessage[]) => Promise.resolve('unused')
+ } as LanguageModel;
+ const promptStub = stub(languageModel, 'prompt').resolves('unused');
+ const languageModelProvider = {
+ create: () => Promise.resolve(languageModel)
+ } as LanguageModel;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device'
+ );
+ const request = {
+ contents: [{ role: 'model', parts: [{ text: 'unused' }] }]
+ } as GenerateContentRequest;
+ await adapter.generateContent(request);
+ expect(promptStub).to.have.been.calledOnceWith([
+ {
+ // Asserts Vertex's "model" role normalized to Chrome's "assistant" role.
+ role: 'assistant',
+ content: [
+ {
+ type: 'text',
+ value: request.contents[0].parts[0].text
+ }
+ ]
+ }
+ ]);
+ });
});
describe('countTokens', () => {
it('counts tokens is not yet available', async () => {
@@ -528,8 +573,13 @@ describe('ChromeAdapter', () => {
expect(createStub).to.have.been.calledOnceWith(createOptions);
expect(promptStub).to.have.been.calledOnceWith([
{
- type: 'text',
- content: request.contents[0].parts[0].text
+ role: request.contents[0].role,
+ content: [
+ {
+ type: 'text',
+ value: request.contents[0].parts[0].text
+ }
+ ]
}
]);
const actual = await toStringArray(response.body!);
@@ -584,12 +634,17 @@ describe('ChromeAdapter', () => {
expect(createStub).to.have.been.calledOnceWith(createOptions);
expect(promptStub).to.have.been.calledOnceWith([
{
- type: 'text',
- content: request.contents[0].parts[0].text
- },
- {
- type: 'image',
- content: match.instanceOf(ImageBitmap)
+ role: request.contents[0].role,
+ content: [
+ {
+ type: 'text',
+ value: request.contents[0].parts[0].text
+ },
+ {
+ type: 'image',
+ value: match.instanceOf(ImageBitmap)
+ }
+ ]
}
]);
const actual = await toStringArray(response.body!);
@@ -625,13 +680,50 @@ describe('ChromeAdapter', () => {
expect(promptStub).to.have.been.calledOnceWith(
[
{
- type: 'text',
- content: request.contents[0].parts[0].text
+ role: request.contents[0].role,
+ content: [
+ {
+ type: 'text',
+ value: request.contents[0].parts[0].text
+ }
+ ]
}
],
promptOptions
);
});
+ it('normalizes roles', async () => {
+ const languageModel = {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ promptStreaming: p => new ReadableStream()
+ } as LanguageModel;
+ const promptStub = stub(languageModel, 'promptStreaming').returns(
+ new ReadableStream()
+ );
+ const languageModelProvider = {
+ create: () => Promise.resolve(languageModel)
+ } as LanguageModel;
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device'
+ );
+ const request = {
+ contents: [{ role: 'model', parts: [{ text: 'unused' }] }]
+ } as GenerateContentRequest;
+ await adapter.generateContentStream(request);
+ expect(promptStub).to.have.been.calledOnceWith([
+ {
+ // Asserts Vertex's "model" role normalized to Chrome's "assistant" role.
+ role: 'assistant',
+ content: [
+ {
+ type: 'text',
+ value: request.contents[0].parts[0].text
+ }
+ ]
+ }
+ ]);
+ });
});
});
diff --git a/packages/ai/src/methods/chrome-adapter.ts b/packages/ai/src/methods/chrome-adapter.ts
index b61ad9b5f09..e7bb39c34c8 100644
--- a/packages/ai/src/methods/chrome-adapter.ts
+++ b/packages/ai/src/methods/chrome-adapter.ts
@@ -23,12 +23,16 @@ import {
InferenceMode,
Part,
AIErrorCode,
- OnDeviceParams
+ OnDeviceParams,
+ Content,
+ Role
} from '../types';
import {
Availability,
LanguageModel,
- LanguageModelMessageContent
+ LanguageModelMessage,
+ LanguageModelMessageContent,
+ LanguageModelMessageRole
} from '../types/language-model';
/**
@@ -115,10 +119,8 @@ export class ChromeAdapter {
*/
async generateContent(request: GenerateContentRequest): Promise {
const session = await this.createSession();
- // TODO: support multiple content objects when Chrome supports
- // sequence
const contents = await Promise.all(
- request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent)
+ request.contents.map(ChromeAdapter.toLanguageModelMessage)
);
const text = await session.prompt(
contents,
@@ -139,10 +141,8 @@ export class ChromeAdapter {
request: GenerateContentRequest
): Promise {
const session = await this.createSession();
- // TODO: support multiple content objects when Chrome supports
- // sequence
const contents = await Promise.all(
- request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent)
+ request.contents.map(ChromeAdapter.toLanguageModelMessage)
);
const stream = await session.promptStreaming(
contents,
@@ -169,12 +169,8 @@ export class ChromeAdapter {
}
for (const content of request.contents) {
- // Returns false if the request contains multiple roles, eg a chat history.
- // TODO: remove this guard once LanguageModelMessage is supported.
- if (content.role !== 'user') {
- logger.debug(
- `Non-user role "${content.role}" rejected for on-device inference.`
- );
+ if (content.role === 'function') {
+ logger.debug(`"Function" role rejected for on-device inference.`);
return false;
}
@@ -233,6 +229,21 @@ export class ChromeAdapter {
});
}
+ /**
+ * Converts Vertex {@link Content} object to a Chrome {@link LanguageModelMessage} object.
+ */
+ private static async toLanguageModelMessage(
+ content: Content
+ ): Promise {
+ const languageModelMessageContents = await Promise.all(
+ content.parts.map(ChromeAdapter.toLanguageModelMessageContent)
+ );
+ return {
+ role: ChromeAdapter.toLanguageModelMessageRole(content.role),
+ content: languageModelMessageContents
+ };
+ }
+
/**
* Converts a Vertex Part object to a Chrome LanguageModelMessageContent object.
*/
@@ -242,7 +253,7 @@ export class ChromeAdapter {
if (part.text) {
return {
type: 'text',
- content: part.text
+ value: part.text
};
} else if (part.inlineData) {
const formattedImageContent = await fetch(
@@ -252,7 +263,7 @@ export class ChromeAdapter {
const imageBitmap = await createImageBitmap(imageBlob);
return {
type: 'image',
- content: imageBitmap
+ value: imageBitmap
};
}
// Assumes contents have been verified to contain only a single TextPart.
@@ -260,6 +271,16 @@ export class ChromeAdapter {
throw new Error('Not yet implemented');
}
+ /**
+ * Converts a Vertex {@link Role} string to a {@link LanguageModelMessageRole} string.
+ */
+ private static toLanguageModelMessageRole(
+ role: Role
+ ): LanguageModelMessageRole {
+ // Assumes 'function' role has been filtered by isOnDeviceRequest
+ return role === 'model' ? 'assistant' : 'user';
+ }
+
/**
* Abstracts Chrome session creation.
*
diff --git a/packages/ai/src/types/language-model.ts b/packages/ai/src/types/language-model.ts
index de4020f66bf..503f3d49d05 100644
--- a/packages/ai/src/types/language-model.ts
+++ b/packages/ai/src/types/language-model.ts
@@ -14,7 +14,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
+/**
+ * {@see https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl}
+ */
export interface LanguageModel extends EventTarget {
create(options?: LanguageModelCreateOptions): Promise;
availability(options?: LanguageModelCreateCoreOptions): Promise;
@@ -57,8 +59,10 @@ export interface LanguageModelExpectedInput {
type: LanguageModelMessageType;
languages?: string[];
}
-// TODO: revert to type from Prompt API explainer once it's supported.
-export type LanguageModelPrompt = LanguageModelMessageContent[];
+export type LanguageModelPrompt =
+ | LanguageModelMessage[]
+ | LanguageModelMessageShorthand[]
+ | string;
export type LanguageModelInitialPrompts =
| LanguageModelMessage[]
| LanguageModelMessageShorthand[];
@@ -72,7 +76,7 @@ export interface LanguageModelMessageShorthand {
}
export interface LanguageModelMessageContent {
type: LanguageModelMessageType;
- content: LanguageModelMessageContentValue;
+ value: LanguageModelMessageContentValue;
}
export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
export type LanguageModelMessageType = 'text' | 'image' | 'audio';
From 58d92df3314c77f132002652cbe00b676eb3a00f Mon Sep 17 00:00:00 2001
From: Erik Eldridge
Date: Wed, 28 May 2025 15:02:18 -0700
Subject: [PATCH 6/6] AI Hybrid Inference: flatten initial prompts type (#9066)
---
common/api-review/ai.api.md | 19 ++-------
docs-devsite/_toc.yaml | 6 +--
.../ai.languagemodelcreatecoreoptions.md | 4 +-
docs-devsite/ai.languagemodelcreateoptions.md | 13 +-----
...edinput.md => ai.languagemodelexpected.md} | 12 +++---
.../ai.languagemodelmessagecontent.md | 10 ++---
.../ai.languagemodelmessageshorthand.md | 40 -------------------
docs-devsite/ai.md | 12 +-----
packages/ai/src/types/index.ts | 4 +-
packages/ai/src/types/language-model.ts | 23 ++++-------
10 files changed, 29 insertions(+), 114 deletions(-)
rename docs-devsite/{ai.languagemodelexpectedinput.md => ai.languagemodelexpected.md} (57%)
delete mode 100644 docs-devsite/ai.languagemodelmessageshorthand.md
diff --git a/common/api-review/ai.api.md b/common/api-review/ai.api.md
index 97d25b9e03d..76e0b48dda8 100644
--- a/common/api-review/ai.api.md
+++ b/common/api-review/ai.api.md
@@ -667,7 +667,7 @@ export class IntegerSchema extends Schema {
// @public (undocumented)
export interface LanguageModelCreateCoreOptions {
// (undocumented)
- expectedInputs?: LanguageModelExpectedInput[];
+ expectedInputs?: LanguageModelExpected[];
// (undocumented)
temperature?: number;
// (undocumented)
@@ -677,24 +677,19 @@ export interface LanguageModelCreateCoreOptions {
// @public (undocumented)
export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
// (undocumented)
- initialPrompts?: LanguageModelInitialPrompts;
+ initialPrompts?: LanguageModelMessage[];
// (undocumented)
signal?: AbortSignal;
- // (undocumented)
- systemPrompt?: string;
}
// @public (undocumented)
-export interface LanguageModelExpectedInput {
+export interface LanguageModelExpected {
// (undocumented)
languages?: string[];
// (undocumented)
type: LanguageModelMessageType;
}
-// @public (undocumented)
-export type LanguageModelInitialPrompts = LanguageModelMessage[] | LanguageModelMessageShorthand[];
-
// @public (undocumented)
export interface LanguageModelMessage {
// (undocumented)
@@ -717,14 +712,6 @@ export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer |
// @public (undocumented)
export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
-// @public (undocumented)
-export interface LanguageModelMessageShorthand {
- // (undocumented)
- content: string;
- // (undocumented)
- role: LanguageModelMessageRole;
-}
-
// @public (undocumented)
export type LanguageModelMessageType = 'text' | 'image' | 'audio';
diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml
index e8359727cda..621c6af9e91 100644
--- a/docs-devsite/_toc.yaml
+++ b/docs-devsite/_toc.yaml
@@ -106,14 +106,12 @@ toc:
path: /docs/reference/js/ai.languagemodelcreatecoreoptions.md
- title: LanguageModelCreateOptions
path: /docs/reference/js/ai.languagemodelcreateoptions.md
- - title: LanguageModelExpectedInput
- path: /docs/reference/js/ai.languagemodelexpectedinput.md
+ - title: LanguageModelExpected
+ path: /docs/reference/js/ai.languagemodelexpected.md
- title: LanguageModelMessage
path: /docs/reference/js/ai.languagemodelmessage.md
- title: LanguageModelMessageContent
path: /docs/reference/js/ai.languagemodelmessagecontent.md
- - title: LanguageModelMessageShorthand
- path: /docs/reference/js/ai.languagemodelmessageshorthand.md
- title: ModalityTokenCount
path: /docs/reference/js/ai.modalitytokencount.md
- title: ModelParams
diff --git a/docs-devsite/ai.languagemodelcreatecoreoptions.md b/docs-devsite/ai.languagemodelcreatecoreoptions.md
index 2c9f61b149f..45c2e7f5db4 100644
--- a/docs-devsite/ai.languagemodelcreatecoreoptions.md
+++ b/docs-devsite/ai.languagemodelcreatecoreoptions.md
@@ -20,7 +20,7 @@ export interface LanguageModelCreateCoreOptions
| Property | Type | Description |
| --- | --- | --- |
-| [expectedInputs](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionsexpectedinputs) | [LanguageModelExpectedInput](./ai.languagemodelexpectedinput.md#languagemodelexpectedinput_interface)\[\] | |
+| [expectedInputs](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionsexpectedinputs) | [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface)\[\] | |
| [temperature](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionstemperature) | number | |
| [topK](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionstopk) | number | |
@@ -29,7 +29,7 @@ export interface LanguageModelCreateCoreOptions
Signature:
```typescript
-expectedInputs?: LanguageModelExpectedInput[];
+expectedInputs?: LanguageModelExpected[];
```
## LanguageModelCreateCoreOptions.temperature
diff --git a/docs-devsite/ai.languagemodelcreateoptions.md b/docs-devsite/ai.languagemodelcreateoptions.md
index 44edcf7e221..417519a54b6 100644
--- a/docs-devsite/ai.languagemodelcreateoptions.md
+++ b/docs-devsite/ai.languagemodelcreateoptions.md
@@ -21,16 +21,15 @@ export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptio
| Property | Type | Description |
| --- | --- | --- |
-| [initialPrompts](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionsinitialprompts) | [LanguageModelInitialPrompts](./ai.md#languagemodelinitialprompts) | |
+| [initialPrompts](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionsinitialprompts) | [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface)\[\] | |
| [signal](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionssignal) | AbortSignal | |
-| [systemPrompt](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionssystemprompt) | string | |
## LanguageModelCreateOptions.initialPrompts
Signature:
```typescript
-initialPrompts?: LanguageModelInitialPrompts;
+initialPrompts?: LanguageModelMessage[];
```
## LanguageModelCreateOptions.signal
@@ -40,11 +39,3 @@ initialPrompts?: LanguageModelInitialPrompts;
```typescript
signal?: AbortSignal;
```
-
-## LanguageModelCreateOptions.systemPrompt
-
-Signature:
-
-```typescript
-systemPrompt?: string;
-```
diff --git a/docs-devsite/ai.languagemodelexpectedinput.md b/docs-devsite/ai.languagemodelexpected.md
similarity index 57%
rename from docs-devsite/ai.languagemodelexpectedinput.md
rename to docs-devsite/ai.languagemodelexpected.md
index d6cbe028fc1..26ed28b741e 100644
--- a/docs-devsite/ai.languagemodelexpectedinput.md
+++ b/docs-devsite/ai.languagemodelexpected.md
@@ -9,21 +9,21 @@ overwritten. Changes should be made in the source code at
https://github.com/firebase/firebase-js-sdk
{% endcomment %}
-# LanguageModelExpectedInput interface
+# LanguageModelExpected interface
Signature:
```typescript
-export interface LanguageModelExpectedInput
+export interface LanguageModelExpected
```
## Properties
| Property | Type | Description |
| --- | --- | --- |
-| [languages](./ai.languagemodelexpectedinput.md#languagemodelexpectedinputlanguages) | string\[\] | |
-| [type](./ai.languagemodelexpectedinput.md#languagemodelexpectedinputtype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
+| [languages](./ai.languagemodelexpected.md#languagemodelexpectedlanguages) | string\[\] | |
+| [type](./ai.languagemodelexpected.md#languagemodelexpectedtype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
-## LanguageModelExpectedInput.languages
+## LanguageModelExpected.languages
Signature:
@@ -31,7 +31,7 @@ export interface LanguageModelExpectedInput
languages?: string[];
```
-## LanguageModelExpectedInput.type
+## LanguageModelExpected.type
Signature:
diff --git a/docs-devsite/ai.languagemodelmessagecontent.md b/docs-devsite/ai.languagemodelmessagecontent.md
index 06830ace272..40b4cc16bce 100644
--- a/docs-devsite/ai.languagemodelmessagecontent.md
+++ b/docs-devsite/ai.languagemodelmessagecontent.md
@@ -20,21 +20,21 @@ export interface LanguageModelMessageContent
| Property | Type | Description |
| --- | --- | --- |
-| [content](./ai.languagemodelmessagecontent.md#languagemodelmessagecontentcontent) | [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
| [type](./ai.languagemodelmessagecontent.md#languagemodelmessagecontenttype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
+| [value](./ai.languagemodelmessagecontent.md#languagemodelmessagecontentvalue) | [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
-## LanguageModelMessageContent.content
+## LanguageModelMessageContent.type
Signature:
```typescript
-content: LanguageModelMessageContentValue;
+type: LanguageModelMessageType;
```
-## LanguageModelMessageContent.type
+## LanguageModelMessageContent.value
Signature:
```typescript
-type: LanguageModelMessageType;
+value: LanguageModelMessageContentValue;
```
diff --git a/docs-devsite/ai.languagemodelmessageshorthand.md b/docs-devsite/ai.languagemodelmessageshorthand.md
deleted file mode 100644
index bf821b31d52..00000000000
--- a/docs-devsite/ai.languagemodelmessageshorthand.md
+++ /dev/null
@@ -1,40 +0,0 @@
-Project: /docs/reference/js/_project.yaml
-Book: /docs/reference/_book.yaml
-page_type: reference
-
-{% comment %}
-DO NOT EDIT THIS FILE!
-This is generated by the JS SDK team, and any local changes will be
-overwritten. Changes should be made in the source code at
-https://github.com/firebase/firebase-js-sdk
-{% endcomment %}
-
-# LanguageModelMessageShorthand interface
-Signature:
-
-```typescript
-export interface LanguageModelMessageShorthand
-```
-
-## Properties
-
-| Property | Type | Description |
-| --- | --- | --- |
-| [content](./ai.languagemodelmessageshorthand.md#languagemodelmessageshorthandcontent) | string | |
-| [role](./ai.languagemodelmessageshorthand.md#languagemodelmessageshorthandrole) | [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
-
-## LanguageModelMessageShorthand.content
-
-Signature:
-
-```typescript
-content: string;
-```
-
-## LanguageModelMessageShorthand.role
-
-Signature:
-
-```typescript
-role: LanguageModelMessageRole;
-```
diff --git a/docs-devsite/ai.md b/docs-devsite/ai.md
index 699d3a83cd6..b087d7037e6 100644
--- a/docs-devsite/ai.md
+++ b/docs-devsite/ai.md
@@ -107,10 +107,9 @@ The Firebase AI Web SDK.
| [InlineDataPart](./ai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. |
| [LanguageModelCreateCoreOptions](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptions_interface) | |
| [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | |
-| [LanguageModelExpectedInput](./ai.languagemodelexpectedinput.md#languagemodelexpectedinput_interface) | |
+| [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface) | |
| [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface) | |
| [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) | |
-| [LanguageModelMessageShorthand](./ai.languagemodelmessageshorthand.md#languagemodelmessageshorthand_interface) | |
| [ModalityTokenCount](./ai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [ObjectSchemaInterface](./ai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./ai.objectschema.md#objectschema_class) class. |
@@ -149,7 +148,6 @@ The Firebase AI Web SDK.
| --- | --- |
| [BackendType](./ai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI'
or 'GOOGLE_AI'
. |
| [InferenceMode](./ai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
-| [LanguageModelInitialPrompts](./ai.md#languagemodelinitialprompts) | |
| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
@@ -383,14 +381,6 @@ Determines whether inference happens on-device or in-cloud.
export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
```
-## LanguageModelInitialPrompts
-
-Signature:
-
-```typescript
-export type LanguageModelInitialPrompts = LanguageModelMessage[] | LanguageModelMessageShorthand[];
-```
-
## LanguageModelMessageContentValue
Signature:
diff --git a/packages/ai/src/types/index.ts b/packages/ai/src/types/index.ts
index 698f15b8aea..bd13140566f 100644
--- a/packages/ai/src/types/index.ts
+++ b/packages/ai/src/types/index.ts
@@ -26,12 +26,10 @@ export * from './googleai';
export {
LanguageModelCreateOptions,
LanguageModelCreateCoreOptions,
- LanguageModelExpectedInput,
- LanguageModelInitialPrompts,
+ LanguageModelExpected,
LanguageModelMessage,
LanguageModelMessageContent,
LanguageModelMessageContentValue,
LanguageModelMessageRole,
- LanguageModelMessageShorthand,
LanguageModelMessageType
} from './language-model';
diff --git a/packages/ai/src/types/language-model.ts b/packages/ai/src/types/language-model.ts
index 503f3d49d05..83a728dc3be 100644
--- a/packages/ai/src/types/language-model.ts
+++ b/packages/ai/src/types/language-model.ts
@@ -15,7 +15,9 @@
* limitations under the License.
*/
/**
- * {@see https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl}
+ * The subset of the Prompt API
+ * ({@see https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl})
+ * required for hybrid functionality.
*/
export interface LanguageModel extends EventTarget {
create(options?: LanguageModelCreateOptions): Promise;
@@ -43,37 +45,26 @@ export enum Availability {
export interface LanguageModelCreateCoreOptions {
topK?: number;
temperature?: number;
- expectedInputs?: LanguageModelExpectedInput[];
+ expectedInputs?: LanguageModelExpected[];
}
export interface LanguageModelCreateOptions
extends LanguageModelCreateCoreOptions {
signal?: AbortSignal;
- systemPrompt?: string;
- initialPrompts?: LanguageModelInitialPrompts;
+ initialPrompts?: LanguageModelMessage[];
}
export interface LanguageModelPromptOptions {
responseConstraint?: object;
// TODO: Restore AbortSignal once the API is defined.
}
-export interface LanguageModelExpectedInput {
+export interface LanguageModelExpected {
type: LanguageModelMessageType;
languages?: string[];
}
-export type LanguageModelPrompt =
- | LanguageModelMessage[]
- | LanguageModelMessageShorthand[]
- | string;
-export type LanguageModelInitialPrompts =
- | LanguageModelMessage[]
- | LanguageModelMessageShorthand[];
+export type LanguageModelPrompt = LanguageModelMessage[];
export interface LanguageModelMessage {
role: LanguageModelMessageRole;
content: LanguageModelMessageContent[];
}
-export interface LanguageModelMessageShorthand {
- role: LanguageModelMessageRole;
- content: string;
-}
export interface LanguageModelMessageContent {
type: LanguageModelMessageType;
value: LanguageModelMessageContentValue;