33 | 33 |
34 | 34 | _import_structure = {
35 | 35 |     "configuration_utils": ["ConfigMixin"],
| 36 | +     "guiders": [],
36 | 37 |     "hooks": [],
37 | 38 |     "loaders": ["FromOriginalModelMixin"],
38 | 39 |     "models": [],
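
For context, `_import_structure` drives the package's lazy-import mechanism: each key names a submodule and each value lists the public names re-exported from it, so the empty `"guiders"` entry added here reserves the slot that the torch-only branch below fills in. The tail of `__init__.py` (not shown in this diff) hands the mapping to `_LazyModule`; the sketch below reflects the usual Hugging Face wiring, and its exact arguments are assumptions rather than part of this change.

```python
# Sketch of how _import_structure is typically consumed at the end of
# diffusers/__init__.py (assumed from the common Hugging Face lazy-import
# pattern; not shown in this diff).
import sys

from .utils import _LazyModule

sys.modules[__name__] = _LazyModule(
    __name__,
    globals()["__file__"],
    _import_structure,
    module_spec=__spec__,
    extra_objects={"__version__": __version__},
)
```
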
129 | 130 | _import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")]
130 | 131 |
131 | 132 | else:
| 133 | +     _import_structure["guiders"].extend(
| 134 | +         [
| 135 | +             "AdaptiveProjectedGuidance",
| 136 | +             "AutoGuidance",
| 137 | +             "ClassifierFreeGuidance",
| 138 | +             "ClassifierFreeZeroStarGuidance",
| 139 | +             "SkipLayerGuidance",
| 140 | +             "SmoothedEnergyGuidance",
| 141 | +             "TangentialClassifierFreeGuidance",
| 142 | +         ]
| 143 | +     )
132 | 144 |     _import_structure["hooks"].extend(
133 | 145 |         [
134 | 146 |             "FasterCacheConfig",
135 | 147 |             "HookRegistry",
| 148 | +             "LayerSkipConfig",
136 | 149 |             "PyramidAttentionBroadcastConfig",
| 150 | +             "SmoothedEnergyGuidanceConfig",
137 | 151 |             "apply_faster_cache",
| 152 | +             "apply_layer_skip",
138 | 153 |             "apply_pyramid_attention_broadcast",
139 | 154 |         ]
140 | 155 |     )
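
Registering the class names under `"guiders"` only tells the lazy loader where to find them; nothing from `.guiders` is imported until one of the names is first accessed, and this branch only runs when torch is available. A quick check of that behaviour, assuming a torch install:

```python
# Minimal check that the lazily registered names resolve from the package
# root (assumes torch is installed, so the `else:` branch above ran).
import diffusers

guider_cls = diffusers.ClassifierFreeGuidance  # triggers the lazy import
print(guider_cls.__module__)  # expected to live under diffusers.guiders
```
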
711 | 726 | except OptionalDependencyNotAvailable:
712 | 727 |     from .utils.dummy_pt_objects import *  # noqa F403
713 | 728 | else:
| 729 | +     from .guiders import (
| 730 | +         AdaptiveProjectedGuidance,
| 731 | +         AutoGuidance,
| 732 | +         ClassifierFreeGuidance,
| 733 | +         ClassifierFreeZeroStarGuidance,
| 734 | +         SkipLayerGuidance,
| 735 | +         SmoothedEnergyGuidance,
| 736 | +         TangentialClassifierFreeGuidance,
| 737 | +     )
714 | 738 |     from .hooks import (
715 | 739 |         FasterCacheConfig,
716 | 740 |         HookRegistry,
| 741 | +         LayerSkipConfig,
717 | 742 |         PyramidAttentionBroadcastConfig,
| 743 | +         SmoothedEnergyGuidanceConfig,
718 | 744 |         apply_faster_cache,
| 745 | +         apply_layer_skip,
719 | 746 |         apply_pyramid_attention_broadcast,
720 | 747 |     )
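
The eager-import branch mirrors the lazy registration above, so the same public names are exposed when the module is imported directly. A minimal construction sketch follows; the `guidance_scale` argument is an assumption about the constructor and is not established by this diff.

```python
# Hypothetical usage of one of the newly exported guidance classes; the
# constructor argument (guidance_scale) is an assumption for illustration
# and is not taken from this diff.
from diffusers import ClassifierFreeGuidance

guider = ClassifierFreeGuidance(guidance_scale=7.5)  # assumed signature
# The guider object is meant to be handed to a pipeline/denoising loop that
# understands guiders; how it is consumed is outside the scope of this diff.
```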