@@ -6,7 +6,7 @@
 import comfy.sd
 import folder_paths
 import nodes
-from comfy_api.v3 import io, resources
+from comfy_api.v3 import io
 from comfy_extras.v3.nodes_slg import SkipLayerGuidanceDiT
 
 
@@ -104,7 +104,7 @@ def define_schema(cls):
 
     @classmethod
     def execute(cls, model, layers: str, scale: float, start_percent: float, end_percent: float):
-        return SkipLayerGuidanceDiT.execute(
+        return super().execute(
             model=model, scale=scale, start_percent=start_percent, end_percent=end_percent, double_layers=layers
         )
 
@@ -128,16 +128,14 @@ def define_schema(cls):
 
     @classmethod
     def execute(cls, clip_name1: str, clip_name2: str, clip_name3: str):
-        clip_data = [
-            cls.resources.get(resources.TorchDictFolderFilename("text_encoders", clip_name1)),
-            cls.resources.get(resources.TorchDictFolderFilename("text_encoders", clip_name2)),
-            cls.resources.get(resources.TorchDictFolderFilename("text_encoders", clip_name3)),
-        ]
-        return io.NodeOutput(
-            comfy.sd.load_text_encoder_state_dicts(
-                clip_data, embedding_directory=folder_paths.get_folder_paths("embeddings")
-            )
+        clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
+        clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
+        clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3)
+        clip = comfy.sd.load_clip(
+            ckpt_paths=[clip_path1, clip_path2, clip_path3],
+            embedding_directory=folder_paths.get_folder_paths("embeddings"),
         )
+        return io.NodeOutput(clip)
 
 NODES_LIST = [
     CLIPTextEncodeSD3,
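
For reference, a minimal standalone sketch of the loading pattern the second hunk switches to, using placeholder filenames (the real node takes these from its clip_name1..clip_name3 inputs); only the calls visible in the added lines are relied on here:

    import comfy.sd
    import folder_paths

    # Placeholder filenames for illustration only.
    names = ["clip_g.safetensors", "clip_l.safetensors", "t5xxl.safetensors"]

    # Resolve each filename under the "text_encoders" model folder; raises if a file is missing.
    paths = [folder_paths.get_full_path_or_raise("text_encoders", n) for n in names]

    # Load the three encoders into a single CLIP object, with the embeddings folders passed
    # along so textual-inversion embeddings stay resolvable.
    clip = comfy.sd.load_clip(
        ckpt_paths=paths,
        embedding_directory=folder_paths.get_folder_paths("embeddings"),
    )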