From 3b832231bb81024d80bbe31b7d7e51e07b633beb Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 15 Jan 2026 07:33:15 -0800
Subject: [PATCH 1/2] Flux2 Klein support. (#11890)
---
 comfy/sd.py                  | 15 +++++++--
 comfy/text_encoders/flux.py  | 59 +++++++++++++++++++++++++++++++++++-
 comfy/text_encoders/llama.py | 31 +++++++++++++++++++
 3 files changed, 102 insertions(+), 3 deletions(-)

diff --git a/comfy/sd.py b/comfy/sd.py
index b689c0dfcbab..77700dfd3b50 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -1014,6 +1014,7 @@ class CLIPType(Enum):
KANDINSKY5 = 22
KANDINSKY5_IMAGE = 23
NEWBIE = 24
+ FLUX2 = 25
def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}):
@@ -1046,6 +1047,7 @@ class TEModel(Enum):
QWEN3_2B = 17
GEMMA_3_12B = 18
JINA_CLIP_2 = 19
+ QWEN3_8B = 20
def detect_te_model(sd):
@@ -1089,6 +1091,8 @@ def detect_te_model(sd):
return TEModel.QWEN3_4B
elif weight.shape[0] == 2048:
return TEModel.QWEN3_2B
+ elif weight.shape[0] == 4096:
+ return TEModel.QWEN3_8B
if weight.shape[0] == 5120:
if "model.layers.39.post_attention_layernorm.weight" in sd:
return TEModel.MISTRAL3_24B
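
Note (not part of the patch): detect_te_model keys off tensor shapes alone.
A minimal standalone sketch of the idea, assuming the checked weight is a
1-D norm vector whose length equals the model's hidden size; the helper name
guess_qwen3_variant is hypothetical, not a ComfyUI API:

    import torch

    def guess_qwen3_variant(norm_weight):
        # Qwen3 checkpoints differ only in hidden size at this check:
        # 2B -> 2048, 4B -> 2560, and the new 8B branch -> 4096.
        sizes = {2048: "qwen3_2b", 2560: "qwen3_4b", 4096: "qwen3_8b"}
        return sizes.get(norm_weight.shape[0])

    print(guess_qwen3_variant(torch.empty(4096)))  # qwen3_8b
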
@@ -1214,11 +1218,18 @@ class EmptyClass:
clip_target.tokenizer = comfy.text_encoders.flux.Flux2Tokenizer
tokenizer_data["tekken_model"] = clip_data[0].get("tekken_model", None)
elif te_model == TEModel.QWEN3_4B:
- clip_target.clip = comfy.text_encoders.z_image.te(**llama_detect(clip_data))
- clip_target.tokenizer = comfy.text_encoders.z_image.ZImageTokenizer
+ if clip_type == CLIPType.FLUX or clip_type == CLIPType.FLUX2:
+ clip_target.clip = comfy.text_encoders.flux.klein_te(**llama_detect(clip_data), model_type="qwen3_4b")
+ clip_target.tokenizer = comfy.text_encoders.flux.KleinTokenizer
+ else:
+ clip_target.clip = comfy.text_encoders.z_image.te(**llama_detect(clip_data))
+ clip_target.tokenizer = comfy.text_encoders.z_image.ZImageTokenizer
elif te_model == TEModel.QWEN3_2B:
clip_target.clip = comfy.text_encoders.ovis.te(**llama_detect(clip_data))
clip_target.tokenizer = comfy.text_encoders.ovis.OvisTokenizer
+ elif te_model == TEModel.QWEN3_8B:
+ clip_target.clip = comfy.text_encoders.flux.klein_te(**llama_detect(clip_data), model_type="qwen3_8b")
+ clip_target.tokenizer = comfy.text_encoders.flux.KleinTokenizer8B
elif te_model == TEModel.JINA_CLIP_2:
clip_target.clip = comfy.text_encoders.jina_clip_2.JinaClip2TextModelWrapper
clip_target.tokenizer = comfy.text_encoders.jina_clip_2.JinaClip2TokenizerWrapper
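
Note (not part of the patch): with the hunk above, the same detected TE
model can map to different encoder stacks depending on the requested
CLIPType. A simplified sketch of the routing, with string stand-ins for the
real classes:

    def pick_flux_klein_target(te_model, clip_type):
        # Qwen3-4B checkpoints fork on clip_type: FLUX/FLUX2 gets the new
        # Klein encoder, everything else keeps the Z-Image path.
        if te_model == "qwen3_4b":
            if clip_type in ("flux", "flux2"):
                return "flux.klein_te(model_type='qwen3_4b')", "flux.KleinTokenizer"
            return "z_image.te()", "z_image.ZImageTokenizer"
        # Qwen3-8B only appears here for Klein, so no clip_type fork is needed.
        if te_model == "qwen3_8b":
            return "flux.klein_te(model_type='qwen3_8b')", "flux.KleinTokenizer8B"

    print(pick_flux_klein_target("qwen3_4b", "flux2"))
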
diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py
index 21d93d757603..4075afca452b 100644
--- a/comfy/text_encoders/flux.py
+++ b/comfy/text_encoders/flux.py
@@ -3,7 +3,7 @@
import comfy.text_encoders.sd3_clip
import comfy.text_encoders.llama
import comfy.model_management
-from transformers import T5TokenizerFast, LlamaTokenizerFast
+from transformers import T5TokenizerFast, LlamaTokenizerFast, Qwen2Tokenizer
import torch
import os
import json
@@ -172,3 +172,60 @@ def __init__(self, device="cpu", dtype=None, model_options={}):
model_options["num_layers"] = 30
super().__init__(device=device, dtype=dtype, model_options=model_options)
return Flux2TEModel_
+
+class Qwen3Tokenizer(sd1_clip.SDTokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}):
+ tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
+ super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2560, embedding_key='qwen3_4b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=151643, tokenizer_data=tokenizer_data)
+
+class Qwen3Tokenizer8B(sd1_clip.SDTokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}):
+ tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
+ super().__init__(tokenizer_path, pad_with_end=False, embedding_size=4096, embedding_key='qwen3_8b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=151643, tokenizer_data=tokenizer_data)
+
+class KleinTokenizer(sd1_clip.SD1Tokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}, name="qwen3_4b"):
+ if name == "qwen3_4b":
+ tokenizer = Qwen3Tokenizer
+ elif name == "qwen3_8b":
+ tokenizer = Qwen3Tokenizer8B
+
+ super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name=name, tokenizer=tokenizer)
+ self.llama_template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n\n\n\n\n"
+
+ def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs):
+ if llama_template is None:
+ llama_text = self.llama_template.format(text)
+ else:
+ llama_text = llama_template.format(text)
+
+ tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs)
+ return tokens
+
+class KleinTokenizer8B(KleinTokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}, name="qwen3_8b"):
+ super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name=name)
+
+class Qwen3_4BModel(sd1_clip.SDClipModel):
+ def __init__(self, device="cpu", layer=[9, 18, 27], layer_idx=None, dtype=None, attention_mask=True, model_options={}):
+ super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+
+class Qwen3_8BModel(sd1_clip.SDClipModel):
+ def __init__(self, device="cpu", layer=[9, 18, 27], layer_idx=None, dtype=None, attention_mask=True, model_options={}):
+ super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_8B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+
+def klein_te(dtype_llama=None, llama_quantization_metadata=None, model_type="qwen3_4b"):
+ if model_type == "qwen3_4b":
+ model = Qwen3_4BModel
+ elif model_type == "qwen3_8b":
+ model = Qwen3_8BModel
+
+ class Flux2TEModel_(Flux2TEModel):
+ def __init__(self, device="cpu", dtype=None, model_options={}):
+ if llama_quantization_metadata is not None:
+ model_options = model_options.copy()
+ model_options["quantization_metadata"] = llama_quantization_metadata
+ if dtype_llama is not None:
+ dtype = dtype_llama
+ super().__init__(device=device, dtype=dtype, name=model_type, model_options=model_options, clip_model=model)
+ return Flux2TEModel_
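
Note (not part of the patch): KleinTokenizer runs the prompt through the
Qwen chat template before tokenizing, and passes disable_weights=True,
which, going by the flag name, skips the usual prompt-weighting pass. The
formatting step in isolation, template copied verbatim from above:

    template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n\n\n\n\n"
    print(template.format("a watercolor fox"))
    # <|im_start|>user
    # a watercolor fox<|im_end|>
    # <|im_start|>assistant
    # (plus trailing blank lines for the empty assistant turn)
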
diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py
index 76731576b461..331a30f610a3 100644
--- a/comfy/text_encoders/llama.py
+++ b/comfy/text_encoders/llama.py
@@ -99,6 +99,28 @@ class Qwen3_4BConfig:
rope_scale = None
final_norm: bool = True
+@dataclass
+class Qwen3_8BConfig:
+ vocab_size: int = 151936
+ hidden_size: int = 4096
+ intermediate_size: int = 12288
+ num_hidden_layers: int = 36
+ num_attention_heads: int = 32
+ num_key_value_heads: int = 8
+ max_position_embeddings: int = 40960
+ rms_norm_eps: float = 1e-6
+ rope_theta: float = 1000000.0
+ transformer_type: str = "llama"
+ head_dim = 128
+ rms_norm_add = False
+ mlp_activation = "silu"
+ qkv_bias = False
+ rope_dims = None
+ q_norm = "gemma3"
+ k_norm = "gemma3"
+ rope_scale = None
+ final_norm: bool = True
+
@dataclass
class Ovis25_2BConfig:
vocab_size: int = 151936
@@ -628,6 +650,15 @@ def __init__(self, config_dict, dtype, device, operations):
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype
+class Qwen3_8B(BaseLlama, torch.nn.Module):
+ def __init__(self, config_dict, dtype, device, operations):
+ super().__init__()
+ config = Qwen3_8BConfig(**config_dict)
+ self.num_layers = config.num_hidden_layers
+
+ self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
+ self.dtype = dtype
+
class Ovis25_2B(BaseLlama, torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
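
Note (not part of the patch): a quick consistency check on the grouped-query
attention numbers in Qwen3_8BConfig, using only values from the dataclass
above:

    num_attention_heads = 32   # query heads
    num_key_value_heads = 8    # shared KV heads (GQA)
    head_dim = 128
    hidden_size = 4096

    assert num_attention_heads * head_dim == hidden_size  # Q proj: 4096 -> 4096
    print(num_key_value_heads * head_dim)                 # K/V proj width: 1024
    print(num_attention_heads // num_key_value_heads)     # 4 query heads per KV head
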
From 8f40b43e0204d5b9780f3e9618e140e929e80594 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 15 Jan 2026 10:57:35 -0500
Subject: [PATCH 2/2] ComfyUI v0.9.2
---
 comfyui_version.py | 2 +-
 pyproject.toml     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_version.py b/comfyui_version.py
index 0c9871e3560c..dbb57b4e55a2 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
-__version__ = "0.9.1"
+__version__ = "0.9.2"
diff --git a/pyproject.toml b/pyproject.toml
index dc52218b4df8..9ea73da05c3b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
-version = "0.9.1"
+version = "0.9.2"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"