@@ -32,24 +32,13 @@ class Snippets {
     let vertex = VertexAI.vertexAI()

     // Initialize the generative model with a model that supports your use case
-    // Gemini 1.5 Pro is versatile and can accept both text-only or multimodal prompt inputs
-    let model = vertex.generativeModel(modelName: "gemini-1.5-pro-preview-0409")
+    // Gemini 1.5 models are versatile and can be used with all API capabilities
+    let model = vertex.generativeModel(modelName: "{{generic_model_name_initialization}}")

     // [END initialize_model]

     self.model = model
   }

-  func templateInitializeModel() {
-    // [START template_initialize_model]
-    // Initialize the Vertex AI service
-    let vertex = VertexAI.vertexAI()
-
-    // Initialize the generative model with a model that supports your use case
-    // Gemini 1.5 Pro is versatile and can accept both text-only or multimodal prompt inputs
-    let model = vertex.generativeModel(modelName: "{{generic_model_name_initialization}}")
-    // [END template_initialize_model]
-  }
-
   func configureModel() {
     let vertex = VertexAI.vertexAI()

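For reference, a minimal sketch of how the renamed initialization snippet reads once the {{generic_model_name_initialization}} placeholder is rendered. The FirebaseVertexAI import and the "gemini-1.5-flash" model name are illustrative assumptions, not values taken from this diff.

import FirebaseVertexAI

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Initialize the generative model with a model that supports your use case
// Gemini 1.5 models are versatile and can be used with all API capabilities
// "gemini-1.5-flash" is an assumed example value for the template placeholder
let model = vertex.generativeModel(modelName: "gemini-1.5-flash")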
@@ -63,7 +52,7 @@ class Snippets {
     )

     let model = vertex.generativeModel(
-      modelName: "{{ '<var>MODEL_NAME</var>' }}",
+      modelName: "{{generic_model_name_initialization}}",
       generationConfig: config
     )
     // [END configure_model]
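The hunk above passes a config value whose definition sits outside the diff context. A sketch of what it could contain, assuming the SDK's GenerationConfig initializer takes the labels below; the specific numbers are placeholders only.

// Assumed GenerationConfig parameter labels; the values are illustrative
let config = GenerationConfig(
  temperature: 0.9,
  topP: 0.1,
  topK: 16,
  maxOutputTokens: 200,
  stopSequences: ["red"]
)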
@@ -74,7 +63,7 @@ class Snippets {

     // [START safety_settings]
     let model = vertex.generativeModel(
-      modelName: "{{ '<var>MODEL_NAME</var>' }}",
+      modelName: "{{generic_model_name_initialization}}",
       safetySettings: [
         SafetySetting(harmCategory: .harassment, threshold: .blockOnlyHigh)
       ]
@@ -90,42 +79,14 @@ class Snippets {
     let hateSpeechSafety = SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove)

     let model = vertex.generativeModel(
-      modelName: "{{ '<var>MODEL_NAME</var>' }}",
+      modelName: "{{generic_model_name_initialization}}",
       safetySettings: [harassmentSafety, hateSpeechSafety]
     )
     // [END multi_safety_settings]
   }

-  func callGemini() async throws {
-    // [START call_gemini]
-    // Provide a prompt that contains text
-    let prompt = "Write a story about a magic backpack."
-
-    // To generate text output, call generateContent with the text input
-    let response = try await model.generateContent(prompt)
-    if let text = response.text {
-      print(text)
-    }
-    // [END call_gemini]
-  }
-
-  func callGeminiStreaming() async throws {
-    // [START call_gemini_streaming]
-    // Provide a prompt that contains text
-    let prompt = "Write a story about a magic backpack."
-
-    // To stream generated text output, call generateContentStream with the text input
-    let contentStream = model.generateContentStream(prompt)
-    for try await chunk in contentStream {
-      if let text = chunk.text {
-        print(text)
-      }
-    }
-    // [END call_gemini_streaming]
-  }
-
   func sendTextOnlyPromptStreaming() async throws {
-    // [START text_only_prompt_streaming]
+    // [START text_gen_text_only_prompt_streaming]
     // Provide a prompt that contains text
     let prompt = "Write a story about a magic backpack."

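As a usage note for the streaming snippet above, the same generateContentStream loop can accumulate the chunks into a single string rather than printing each one. A small sketch, assuming the model property initialized earlier in the class; the helper name is hypothetical.

// Sketch: collect the streamed text into one string (assumes the class's `model` property)
func collectStreamedText() async throws -> String {
  var fullText = ""
  let contentStream = model.generateContentStream("Write a story about a magic backpack.")
  for try await chunk in contentStream {
    if let text = chunk.text {
      fullText += text
    }
  }
  return fullText
}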
@@ -136,12 +97,11 @@ class Snippets {
         print(text)
       }
     }
-    // [END text_only_prompt_streaming]
+    // [END text_gen_text_only_prompt_streaming]
   }

-  // Note: This is the same as the call gemini prompt, but may change in the future.
   func sendTextOnlyPromt() async throws {
-    // [START text_only_prompt]
+    // [START text_gen_text_only_prompt]
     // Provide a prompt that contains text
     let prompt = "Write a story about a magic backpack."

@@ -150,11 +110,11 @@ class Snippets {
     if let text = response.text {
       print(text)
     }
-    // [END text_only_prompt]
+    // [END text_gen_text_only_prompt]
   }

   func sendMultimodalPromptStreaming() async throws {
-    // [START multimodal_prompt_streaming]
+    // [START text_gen_multimodal_one_image_prompt_streaming]
     #if canImport(UIKit)
       guard let image = UIImage(named: "image") else { fatalError() }
     #else
@@ -171,11 +131,11 @@ class Snippets {
         print(text)
       }
     }
-    // [END multimodal_prompt_streaming]
+    // [END text_gen_multimodal_one_image_prompt_streaming]
   }

   func sendMultimodalPrompt() async throws {
-    // [START multimodal_prompt]
+    // [START text_gen_multimodal_one_image_prompt]
     // Provide a text prompt to include with the image
     #if canImport(UIKit)
       guard let image = UIImage(named: "image") else { fatalError() }
@@ -190,11 +150,11 @@ class Snippets {
     if let text = response.text {
       print(text)
     }
-    // [END multimodal_prompt]
+    // [END text_gen_multimodal_one_image_prompt]
   }

   func multiImagePromptStreaming() async throws {
-    // [START two_image_prompt_streaming]
+    // [START text_gen_multimodal_multi_image_prompt_streaming]
     #if canImport(UIKit)
       guard let image1 = UIImage(named: "image1") else { fatalError() }
       guard let image2 = UIImage(named: "image2") else { fatalError() }
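The two images loaded just above get combined with a text prompt later in the function body, which falls outside the diff context. A hedged sketch of such a call, where the prompt string is illustrative and the variadic generateContentStream(image1, image2, prompt) form is an assumption about the SDK.

// Sketch: stream a response for two images plus text; the variadic call form is assumed
let prompt = "What's different between these pictures?"
let contentStream = model.generateContentStream(image1, image2, prompt)
for try await chunk in contentStream {
  if let text = chunk.text {
    print(text)
  }
}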
@@ -213,11 +173,11 @@ class Snippets {
         print(text)
       }
     }
-    // [END two_image_prompt_streaming]
+    // [END text_gen_multimodal_multi_image_prompt_streaming]
   }

   func multiImagePrompt() async throws {
-    // [START two_image_prompt]
+    // [START text_gen_multimodal_multi_image_prompt]
     #if canImport(UIKit)
       guard let image1 = UIImage(named: "image1") else { fatalError() }
       guard let image2 = UIImage(named: "image2") else { fatalError() }
@@ -234,12 +194,12 @@ class Snippets {
     if let text = response.text {
       print(text)
     }
-    // [END two_image_prompt]
+    // [END text_gen_multimodal_multi_image_prompt]
   }

   func textAndVideoPrompt() async throws {
-    // [START text_video_prompt]
-    guard let fileURL = Bundle.main.url(forResource: "sample",
+    // [START text_gen_multimodal_video_prompt]
+    guard let fileURL = Bundle.main.url(forResource: "sample",
                                         withExtension: "mp4") else { fatalError() }
     let video = try Data(contentsOf: fileURL)
     let prompt = "What's in this video?"
@@ -250,11 +210,11 @@ class Snippets {
     if let text = response.text {
       print(text)
     }
-    // [END text_video_prompt]
+    // [END text_gen_multimodal_video_prompt]
   }

   func textAndVideoPromptStreaming() async throws {
-    // [START text_video_prompt_streaming]
+    // [START text_gen_multimodal_video_prompt_streaming]
     guard let fileURL = Bundle.main.url(forResource: "sample",
                                         withExtension: "mp4") else { fatalError() }
     let video = try Data(contentsOf: fileURL)
@@ -268,7 +228,7 @@ class Snippets {
         print(text)
       }
     }
-    // [END text_video_prompt_streaming]
+    // [END text_gen_multimodal_video_prompt_streaming]
   }

   func chatStreaming() async throws {
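The chat function bodies themselves fall outside the hunks shown here. For orientation, a sketch of a streaming chat exchange, assuming the SDK exposes startChat() and sendMessageStream(_:); the message text is illustrative.

// Sketch only: startChat() and sendMessageStream(_:) are assumed SDK entry points
let chat = model.startChat()
let responseStream = chat.sendMessageStream("Write the first line of a story about a magic backpack.")
for try await chunk in responseStream {
  if let text = chunk.text {
    print(text)
  }
}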
@@ -360,26 +320,26 @@ class Snippets {
   }

   func setSafetySetting() {
-    // [START set_safety_setting]
+    // [START set_one_safety_setting]
     let model = VertexAI.vertexAI().generativeModel(
-      modelName: "MODEL_NAME",
+      modelName: "{{generic_model_name_initialization}}",
       safetySettings: [
         SafetySetting(harmCategory: .harassment, threshold: .blockOnlyHigh)
       ]
     )
-    // [END set_safety_setting]
+    // [END set_one_safety_setting]
   }

   func setMultipleSafetySettings() {
-    // [START set_safety_settings]
+    // [START set_multi_safety_settings]
     let harassmentSafety = SafetySetting(harmCategory: .harassment, threshold: .blockOnlyHigh)
     let hateSpeechSafety = SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove)

     let model = VertexAI.vertexAI().generativeModel(
-      modelName: "MODEL_NAME",
+      modelName: "{{generic_model_name_initialization}}",
       safetySettings: [harassmentSafety, hateSpeechSafety]
     )
-    // [END set_safety_settings]
+    // [END set_multi_safety_settings]
   }

   // MARK: - Function Calling
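The renamed safety-setting snippets above configure the model the same way as the generation-config snippet earlier, so the two kinds of options can be combined in one initializer call. A sketch, assuming generativeModel accepts generationConfig and safetySettings together; the config values are illustrative.

// Sketch: combine generation config and safety settings in one initializer call
// (assumes both parameters are accepted together; values are illustrative)
let config = GenerationConfig(temperature: 0.9, maxOutputTokens: 200)
let model = VertexAI.vertexAI().generativeModel(
  modelName: "{{generic_model_name_initialization}}",
  generationConfig: config,
  safetySettings: [
    SafetySetting(harmCategory: .harassment, threshold: .blockOnlyHigh),
    SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove)
  ]
)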
@@ -421,7 +381,7 @@ class Snippets {
     // Initialize the generative model
     // Use a model that supports function calling, like Gemini 1.0 Pro.
     let model = vertex.generativeModel(
-      modelName: "gemini-1.0-pro",
+      modelName: "{{generic_model_name_initialization}}",
       // Specify the function declaration.
       tools: [Tool(functionDeclarations: [getExchangeRate])]
     )
@@ -483,7 +443,7 @@ class Snippets {
     // [START function_modes]
     let model = VertexAI.vertexAI().generativeModel(
       // Setting a function calling mode is only available in Gemini 1.5 Pro
-      modelName: "gemini-1.5-pro-preview-0409",
+      modelName: "{{generic_model_name_initialization}}",
       // Pass the function declaration
       tools: [Tool(functionDeclarations: [getExchangeRate])],
       toolConfig: ToolConfig(