@@ -1328,7 +1328,9 @@ def load_lora_into_transformer(
1328
1328
adapter_name (`str`, *optional*):
1329
1329
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
1330
1330
`default_{i}` where i is the total number of adapters being loaded.
1331
- metadata: TODO
1331
+ metadata (`dict`, *optional*):
1332
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
1333
+ from the state dict.
1332
1334
low_cpu_mem_usage (`bool`, *optional*):
1333
1335
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
1334
1336
weights.
@@ -1762,7 +1764,9 @@ def load_lora_into_transformer(
1762
1764
adapter_name (`str`, *optional*):
1763
1765
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
1764
1766
`default_{i}` where i is the total number of adapters being loaded.
1765
- metadata: TODO
1767
+ metadata (`dict`, *optional*):
1768
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
1769
+ from the state dict.
1766
1770
low_cpu_mem_usage (`bool`, *optional*):
1767
1771
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
1768
1772
weights.
@@ -2215,7 +2219,9 @@ def load_lora_into_transformer(
2215
2219
adapter_name (`str`, *optional*):
2216
2220
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
2217
2221
`default_{i}` where i is the total number of adapters being loaded.
2218
- metadata: TODO
2222
+ metadata (`dict`, *optional*):
2223
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
2224
+ from the state dict.
2219
2225
low_cpu_mem_usage (`bool`, *optional*):
2220
2226
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
2221
2227
weights.
@@ -2812,7 +2818,9 @@ def load_lora_into_transformer(
2812
2818
adapter_name (`str`, *optional*):
2813
2819
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
2814
2820
`default_{i}` where i is the total number of adapters being loaded.
2815
- metadata: TODO
2821
+ metadata (`dict`, *optional*):
2822
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
2823
+ from the state dict.
2816
2824
low_cpu_mem_usage (`bool`, *optional*):
2817
2825
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
2818
2826
weights.
@@ -3141,7 +3149,9 @@ def load_lora_into_transformer(
3141
3149
adapter_name (`str`, *optional*):
3142
3150
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
3143
3151
`default_{i}` where i is the total number of adapters being loaded.
3144
- metadata: TODO
3152
+ metadata (`dict`, *optional*):
3153
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
3154
+ from the state dict.
3145
3155
low_cpu_mem_usage (`bool`, *optional*):
3146
3156
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
3147
3157
weights.
@@ -3479,7 +3489,9 @@ def load_lora_into_transformer(
3479
3489
adapter_name (`str`, *optional*):
3480
3490
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
3481
3491
`default_{i}` where i is the total number of adapters being loaded.
3482
- metadata: TODO
3492
+ metadata (`dict`, *optional*):
3493
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
3494
+ from the state dict.
3483
3495
low_cpu_mem_usage (`bool`, *optional*):
3484
3496
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
3485
3497
weights.
@@ -3821,7 +3833,9 @@ def load_lora_into_transformer(
3821
3833
adapter_name (`str`, *optional*):
3822
3834
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
3823
3835
`default_{i}` where i is the total number of adapters being loaded.
3824
- metadata: TODO
3836
+ metadata (`dict`, *optional*):
3837
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
3838
+ from the state dict.
3825
3839
low_cpu_mem_usage (`bool`, *optional*):
3826
3840
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
3827
3841
weights.
@@ -4161,7 +4175,9 @@ def load_lora_into_transformer(
4161
4175
adapter_name (`str`, *optional*):
4162
4176
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
4163
4177
`default_{i}` where i is the total number of adapters being loaded.
4164
- metadata: TODO
4178
+ metadata (`dict`, *optional*):
4179
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
4180
+ from the state dict.
4165
4181
low_cpu_mem_usage (`bool`, *optional*):
4166
4182
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
4167
4183
weights.
@@ -4503,7 +4519,9 @@ def load_lora_into_transformer(
4503
4519
adapter_name (`str`, *optional*):
4504
4520
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
4505
4521
`default_{i}` where i is the total number of adapters being loaded.
4506
- metadata: TODO
4522
+ metadata (`dict`, *optional*):
4523
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
4524
+ from the state dict.
4507
4525
low_cpu_mem_usage (`bool`, *optional*):
4508
4526
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
4509
4527
weights.
@@ -4846,7 +4864,9 @@ def load_lora_into_transformer(
4846
4864
adapter_name (`str`, *optional*):
4847
4865
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
4848
4866
`default_{i}` where i is the total number of adapters being loaded.
4849
- metadata: TODO
4867
+ metadata (`dict`, *optional*):
4868
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
4869
+ from the state dict.
4850
4870
low_cpu_mem_usage (`bool`, *optional*):
4851
4871
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
4852
4872
weights.
@@ -5239,7 +5259,9 @@ def load_lora_into_transformer(
5239
5259
adapter_name (`str`, *optional*):
5240
5260
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
5241
5261
`default_{i}` where i is the total number of adapters being loaded.
5242
- metadata: TODO
5262
+ metadata (`dict`, *optional*):
5263
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
5264
+ from the state dict.
5243
5265
low_cpu_mem_usage (`bool`, *optional*):
5244
5266
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
5245
5267
weights.
@@ -5579,7 +5601,9 @@ def load_lora_into_transformer(
5579
5601
adapter_name (`str`, *optional*):
5580
5602
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
5581
5603
`default_{i}` where i is the total number of adapters being loaded.
5582
- metadata: TODO
5604
+ metadata (`dict`, *optional*):
5605
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
5606
+ from the state dict.
5583
5607
low_cpu_mem_usage (`bool`, *optional*):
5584
5608
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
5585
5609
weights.
@@ -5921,7 +5945,9 @@ def load_lora_into_transformer(
5921
5945
adapter_name (`str`, *optional*):
5922
5946
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
5923
5947
`default_{i}` where i is the total number of adapters being loaded.
5924
- metadata: TODO
5948
+ metadata (`dict`):
5949
+ Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
5950
+ from the state dict.
5925
5951
low_cpu_mem_usage (`bool`, *optional*):
5926
5952
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
5927
5953
weights.
0 commit comments