From 2c1d06a4e32900c260bcb3d0888f20edc1e3e5ab Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Mon, 16 Feb 2026 03:22:30 +0200
Subject: [PATCH 1/4] feat(api-nodes): add Bria RMBG nodes (#12465)

Co-authored-by: Jedrzej Kosinski
---
 comfy_api_nodes/apis/bria.py        |  44 +++++-
 comfy_api_nodes/nodes_bria.py       | 220 ++++++++++++++++++++++------
 comfy_api_nodes/util/conversions.py |   2 +-
 3 files changed, 218 insertions(+), 48 deletions(-)

diff --git a/comfy_api_nodes/apis/bria.py b/comfy_api_nodes/apis/bria.py
index 9119cacc6faa..8c496b56c2de 100644
--- a/comfy_api_nodes/apis/bria.py
+++ b/comfy_api_nodes/apis/bria.py
@@ -45,17 +45,55 @@ class BriaEditImageRequest(BaseModel):
     )
 
 
+class BriaRemoveBackgroundRequest(BaseModel):
+    image: str = Field(...)
+    sync: bool = Field(False)
+    visual_input_content_moderation: bool = Field(
+        False, description="If true, returns 422 on input image moderation failure."
+    )
+    visual_output_content_moderation: bool = Field(
+        False, description="If true, returns 422 on visual output moderation failure."
+    )
+    seed: int = Field(...)
+
+
 class BriaStatusResponse(BaseModel):
     request_id: str = Field(...)
     status_url: str = Field(...)
     warning: str | None = Field(None)
 
 
-class BriaResult(BaseModel):
+class BriaRemoveBackgroundResult(BaseModel):
+    image_url: str = Field(...)
+
+
+class BriaRemoveBackgroundResponse(BaseModel):
+    status: str = Field(...)
+    result: BriaRemoveBackgroundResult | None = Field(None)
+
+
+class BriaImageEditResult(BaseModel):
     structured_prompt: str = Field(...)
     image_url: str = Field(...)
 
 
-class BriaResponse(BaseModel):
+class BriaImageEditResponse(BaseModel):
+    status: str = Field(...)
+    result: BriaImageEditResult | None = Field(None)
+
+
+class BriaRemoveVideoBackgroundRequest(BaseModel):
+    video: str = Field(...)
+    background_color: str = Field(default="transparent", description="Background color for the output video.")
+    output_container_and_codec: str = Field(...)
+    preserve_audio: bool = Field(True)
+    seed: int = Field(...)
+
+
+class BriaRemoveVideoBackgroundResult(BaseModel):
+    video_url: str = Field(...)
+
+
+class BriaRemoveVideoBackgroundResponse(BaseModel):
     status: str = Field(...)
-    result: BriaResult | None = Field(None)
+    result: BriaRemoveVideoBackgroundResult | None = Field(None)
diff --git a/comfy_api_nodes/nodes_bria.py b/comfy_api_nodes/nodes_bria.py
index d3a52bc1ba4d..4044ee3ead0c 100644
--- a/comfy_api_nodes/nodes_bria.py
+++ b/comfy_api_nodes/nodes_bria.py
@@ -3,7 +3,11 @@
 from comfy_api.latest import IO, ComfyExtension, Input
 from comfy_api_nodes.apis.bria import (
     BriaEditImageRequest,
-    BriaResponse,
+    BriaRemoveBackgroundRequest,
+    BriaRemoveBackgroundResponse,
+    BriaRemoveVideoBackgroundRequest,
+    BriaRemoveVideoBackgroundResponse,
+    BriaImageEditResponse,
     BriaStatusResponse,
     InputModerationSettings,
 )
@@ -11,10 +15,12 @@
     ApiEndpoint,
     convert_mask_to_image,
     download_url_to_image_tensor,
-    get_number_of_images,
+    download_url_to_video_output,
     poll_op,
     sync_op,
-    upload_images_to_comfyapi,
+    upload_image_to_comfyapi,
+    upload_video_to_comfyapi,
+    validate_video_duration,
 )
@@ -73,21 +79,15 @@ def define_schema(cls):
                 IO.DynamicCombo.Input(
                     "moderation",
                     options=[
+                        IO.DynamicCombo.Option("false", []),
                         IO.DynamicCombo.Option(
                             "true",
                             [
-                                IO.Boolean.Input(
-                                    "prompt_content_moderation", default=False
-                                ),
-                                IO.Boolean.Input(
-                                    "visual_input_moderation", default=False
-                                ),
-                                IO.Boolean.Input(
-                                    "visual_output_moderation", default=True
-                                ),
+                                IO.Boolean.Input("prompt_content_moderation", default=False),
+                                IO.Boolean.Input("visual_input_moderation", default=False),
+                                IO.Boolean.Input("visual_output_moderation", default=True),
                             ],
                         ),
-                        IO.DynamicCombo.Option("false", []),
                     ],
                     tooltip="Moderation settings",
                 ),
@@ -127,50 +127,26 @@ async def execute(
         mask: Input.Image | None = None,
     ) -> IO.NodeOutput:
         if not prompt and not structured_prompt:
-            raise ValueError(
-                "One of prompt or structured_prompt is required to be non-empty."
-            )
-        if get_number_of_images(image) != 1:
-            raise ValueError("Exactly one input image is required.")
+            raise ValueError("One of prompt or structured_prompt is required to be non-empty.")
         mask_url = None
         if mask is not None:
-            mask_url = (
-                await upload_images_to_comfyapi(
-                    cls,
-                    convert_mask_to_image(mask),
-                    max_images=1,
-                    mime_type="image/png",
-                    wait_label="Uploading mask",
-                )
-            )[0]
+            mask_url = await upload_image_to_comfyapi(cls, convert_mask_to_image(mask), wait_label="Uploading mask")
         response = await sync_op(
             cls,
             ApiEndpoint(path="proxy/bria/v2/image/edit", method="POST"),
             data=BriaEditImageRequest(
                 instruction=prompt if prompt else None,
                 structured_instruction=structured_prompt if structured_prompt else None,
-                images=await upload_images_to_comfyapi(
-                    cls,
-                    image,
-                    max_images=1,
-                    mime_type="image/png",
-                    wait_label="Uploading image",
-                ),
+                images=[await upload_image_to_comfyapi(cls, image, wait_label="Uploading image")],
                 mask=mask_url,
                 negative_prompt=negative_prompt if negative_prompt else None,
                 guidance_scale=guidance_scale,
                 seed=seed,
                 model_version=model,
                 steps_num=steps,
-                prompt_content_moderation=moderation.get(
-                    "prompt_content_moderation", False
-                ),
-                visual_input_content_moderation=moderation.get(
-                    "visual_input_moderation", False
-                ),
-                visual_output_content_moderation=moderation.get(
-                    "visual_output_moderation", False
-                ),
+                prompt_content_moderation=moderation.get("prompt_content_moderation", False),
+                visual_input_content_moderation=moderation.get("visual_input_moderation", False),
+                visual_output_content_moderation=moderation.get("visual_output_moderation", False),
             ),
             response_model=BriaStatusResponse,
         )
@@ -178,7 +154,7 @@ async def execute(
             cls,
             ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
             status_extractor=lambda r: r.status,
-            response_model=BriaResponse,
+            response_model=BriaImageEditResponse,
         )
         return IO.NodeOutput(
             await download_url_to_image_tensor(response.result.image_url),
         )
@@ -186,11 +162,167 @@
 
 
+class BriaRemoveImageBackground(IO.ComfyNode):
+
+    @classmethod
+    def define_schema(cls):
+        return IO.Schema(
+            node_id="BriaRemoveImageBackground",
+            display_name="Bria Remove Image Background",
+            category="api node/image/Bria",
+            description="Remove the background from an image using Bria RMBG 2.0.",
+            inputs=[
+                IO.Image.Input("image"),
+                IO.DynamicCombo.Input(
+                    "moderation",
+                    options=[
+                        IO.DynamicCombo.Option("false", []),
+                        IO.DynamicCombo.Option(
+                            "true",
+                            [
+                                IO.Boolean.Input("visual_input_moderation", default=False),
+                                IO.Boolean.Input("visual_output_moderation", default=True),
+                            ],
+                        ),
+                    ],
+                    tooltip="Moderation settings",
+                ),
+                IO.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    display_mode=IO.NumberDisplay.number,
+                    control_after_generate=True,
+                    tooltip="Seed controls whether the node should re-run; "
+                    "results are non-deterministic regardless of seed.",
+                ),
+            ],
+            outputs=[IO.Image.Output()],
+            hidden=[
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
+            ],
+            is_api_node=True,
+            price_badge=IO.PriceBadge(
+                expr="""{"type":"usd","usd":0.018}""",
+            ),
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        image: Input.Image,
+        moderation: dict,
+        seed: int,
+    ) -> IO.NodeOutput:
+        response = await sync_op(
+            cls,
+            ApiEndpoint(path="/proxy/bria/v2/image/edit/remove_background", method="POST"),
+            data=BriaRemoveBackgroundRequest(
+                image=await upload_image_to_comfyapi(cls, image, wait_label="Uploading image"),
+                sync=False,
+                visual_input_content_moderation=moderation.get("visual_input_moderation", False),
+                visual_output_content_moderation=moderation.get("visual_output_moderation", False),
+                seed=seed,
+            ),
+            response_model=BriaStatusResponse,
+        )
+        response = await poll_op(
+            cls,
+            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
+            status_extractor=lambda r: r.status,
+            response_model=BriaRemoveBackgroundResponse,
+        )
+        return IO.NodeOutput(await download_url_to_image_tensor(response.result.image_url))
+
+
+class BriaRemoveVideoBackground(IO.ComfyNode):
+
+    @classmethod
+    def define_schema(cls):
+        return IO.Schema(
+            node_id="BriaRemoveVideoBackground",
+            display_name="Bria Remove Video Background",
+            category="api node/video/Bria",
+            description="Remove the background from a video using Bria.",
+            inputs=[
+                IO.Video.Input("video"),
+                IO.Combo.Input(
+                    "background_color",
+                    options=[
+                        "Black",
+                        "White",
+                        "Gray",
+                        "Red",
+                        "Green",
+                        "Blue",
+                        "Yellow",
+                        "Cyan",
+                        "Magenta",
+                        "Orange",
+                    ],
+                    tooltip="Background color for the output video.",
+                ),
+                IO.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    display_mode=IO.NumberDisplay.number,
+                    control_after_generate=True,
+                    tooltip="Seed controls whether the node should re-run; "
+                    "results are non-deterministic regardless of seed.",
+                ),
+            ],
+            outputs=[IO.Video.Output()],
+            hidden=[
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
+            ],
+            is_api_node=True,
+            price_badge=IO.PriceBadge(
+                expr="""{"type":"usd","usd":0.14,"format":{"suffix":"/second"}}""",
+            ),
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        video: Input.Video,
+        background_color: str,
+        seed: int,
+    ) -> IO.NodeOutput:
+        validate_video_duration(video, max_duration=60.0)
+        response = await sync_op(
+            cls,
+            ApiEndpoint(path="/proxy/bria/v2/video/edit/remove_background", method="POST"),
+            data=BriaRemoveVideoBackgroundRequest(
+                video=await upload_video_to_comfyapi(cls, video),
+                background_color=background_color,
+                output_container_and_codec="mp4_h264",
+                seed=seed,
+            ),
+            response_model=BriaStatusResponse,
+        )
+        response = await poll_op(
+            cls,
+            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
+            status_extractor=lambda r: r.status,
+            response_model=BriaRemoveVideoBackgroundResponse,
+        )
+        return IO.NodeOutput(await download_url_to_video_output(response.result.video_url))
+
+
 class BriaExtension(ComfyExtension):
     @override
     async def get_node_list(self) -> list[type[IO.ComfyNode]]:
         return [
             BriaImageEditNode,
+            BriaRemoveImageBackground,
+            BriaRemoveVideoBackground,
         ]
diff --git a/comfy_api_nodes/util/conversions.py b/comfy_api_nodes/util/conversions.py
index 3e37e8a8c491..82b6d22a513d 100644
--- a/comfy_api_nodes/util/conversions.py
+++ b/comfy_api_nodes/util/conversions.py
@@ -57,7 +57,7 @@ def tensor_to_bytesio(
     image: torch.Tensor,
     *,
     total_pixels: int | None = 2048 * 2048,
-    mime_type: str = "image/png",
+    mime_type: str | None = "image/png",
 ) -> BytesIO:
     """Converts a torch.Tensor image to a named BytesIO object.

From ecd2a19661ecccd96e26f111af21781f3e613f59 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Sun, 15 Feb 2026 17:28:51 -0800
Subject: [PATCH 2/4] Fix lora extraction in offload conditions (+ dynamic_vram mode) (#12479)

* lora_extract: Add a trange

If you bite off more than your GPU can chew, this kinda just hangs.
Give a rough indication of progress by counting the weights in a trange.
* lora_extract: Support on-the-fly patching

Use the on-the-fly approach from the regular model saving logic for lora
extraction too. Switch off force_patch_weights accordingly.

This gets extraction working in dynamic_vram mode while also supporting
extraction when the model is GPU offloaded.
---
 comfy_extras/nodes_lora_extract.py | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py
index fb89e03f46ec..1542d0a882ff 100644
--- a/comfy_extras/nodes_lora_extract.py
+++ b/comfy_extras/nodes_lora_extract.py
@@ -7,6 +7,7 @@
 from enum import Enum
 from typing_extensions import override
 from comfy_api.latest import ComfyExtension, io
+from tqdm.auto import trange
 
 CLAMP_QUANTILE = 0.99
 
@@ -49,12 +50,22 @@ class LORAType(Enum):
               "full_diff": LORAType.FULL_DIFF}
 
 def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, bias_diff=False):
-    comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True)
+    comfy.model_management.load_models_gpu([model_diff])
     sd = model_diff.model_state_dict(filter_prefix=prefix_model)
 
-    for k in sd:
-        if k.endswith(".weight"):
+    sd_keys = list(sd.keys())
+    for index in trange(len(sd_keys), unit="weight"):
+        k = sd_keys[index]
+        op_keys = sd_keys[index].rsplit('.', 1)
+        if len(op_keys) < 2 or op_keys[1] not in ["weight", "bias"] or (op_keys[1] == "bias" and not bias_diff):
+            continue
+        op = comfy.utils.get_attr(model_diff.model, op_keys[0])
+        if hasattr(op, "comfy_cast_weights") and not getattr(op, "comfy_patched_weights", False):
+            weight_diff = model_diff.patch_weight_to_device(k, model_diff.load_device, return_weight=True)
+        else:
             weight_diff = sd[k]
+
+        if op_keys[1] == "weight":
             if lora_type == LORAType.STANDARD:
                 if weight_diff.ndim < 2:
                     if bias_diff:
@@ -69,8 +80,8 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
             elif lora_type == LORAType.FULL_DIFF:
                 output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()
 
-        elif bias_diff and k.endswith(".bias"):
-            output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu()
+        elif bias_diff and op_keys[1] == "bias":
+            output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = weight_diff.contiguous().half().cpu()
     return output_sd
 
 class LoraSave(io.ComfyNode):

From c0370044cd467b92f4db63b88029ebc700388d36 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Sun, 15 Feb 2026 17:30:09 -0800
Subject: [PATCH 3/4] MPDynamic: force load flux img_in weight (Fixes flux1 canny+depth lora crash) (#12446)

* lora: add weight shape calculations.

This lets the loader know if a lora will change the shape of a weight so
it can take appropriate action.

* MPDynamic: force load flux img_in weight

This weight is a bit special, in that the lora changes its geometry. This
is rather unique: it is not handled by the existing estimates and doesn't
work for either offloading or dynamic_vram.

Fix it for dynamic_vram as a special case. Ideally we can fully
precalculate these lora geometry changes at load time, but just get these
models working first.
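A rough sketch of how the new shape calculation is meant to be used by the
loader (simplified; needs_force_load is an illustrative helper, not the
actual ModelPatcher code, while comfy.lora.calculate_shape is the function
added below):

    import comfy.lora

    def needs_force_load(patches, weight, key):
        # calculate_shape() walks the same patch list as calculate_weight(),
        # but only tracks the resulting geometry instead of applying the
        # patch math, so it is cheap to call while planning VRAM allocation.
        new_shape = comfy.lora.calculate_shape(patches, weight, key)
        return tuple(new_shape) != tuple(weight.shape)

If this returns True for a module (e.g. flux img_in with the canny/depth
control loras), the weight is patched and loaded up front instead of being
patched on the fly.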
---
 comfy/lora.py                | 25 +++++++++++++++++++++++++
 comfy/model_patcher.py       | 35 +++++++++++++++++++++++++--------
 comfy/weight_adapter/base.py |  6 ++++++
 comfy/weight_adapter/lora.py |  7 +++++++
 4 files changed, 65 insertions(+), 8 deletions(-)

diff --git a/comfy/lora.py b/comfy/lora.py
index 44030bcab6bd..279cf38bbb5c 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -374,6 +374,31 @@ def pad_tensor_to_shape(tensor: torch.Tensor, new_shape: list[int]) -> torch.Ten
     return padded_tensor
 
+def calculate_shape(patches, weight, key, original_weights=None):
+    current_shape = weight.shape
+
+    for p in patches:
+        v = p[1]
+        offset = p[3]
+
+        # Offsets restore the old shape; lists force a diff without metadata
+        if offset is not None or isinstance(v, list):
+            continue
+
+        if isinstance(v, weight_adapter.WeightAdapterBase):
+            adapter_shape = v.calculate_shape(key)
+            if adapter_shape is not None:
+                current_shape = adapter_shape
+            continue
+
+        # Standard diff logic with padding
+        if len(v) == 2:
+            patch_type, patch_data = v[0], v[1]
+            if patch_type == "diff" and len(patch_data) > 1 and patch_data[1]['pad_weight']:
+                current_shape = patch_data[0].shape
+
+    return current_shape
+
 def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, original_weights=None):
     for p in patches:
         strength = p[0]
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 67dce088ebe5..f01818f50c2d 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -1514,8 +1514,10 @@ def setup_param(self, m, n, param_key):
             weight, _, _ = get_key_weight(self.model, key)
             if weight is None:
-                return 0
+                return (False, 0)
 
             if key in self.patches:
+                if comfy.lora.calculate_shape(self.patches[key], weight, key) != weight.shape:
+                    return (True, 0)
                 setattr(m, param_key + "_lowvram_function", LowVramPatch(key, self.patches))
                 num_patches += 1
             else:
@@ -1529,7 +1531,13 @@ def setup_param(self, m, n, param_key):
             model_dtype = getattr(m, param_key + "_comfy_model_dtype", None) or weight.dtype
             weight._model_dtype = model_dtype
             geometry = comfy.memory_management.TensorGeometry(shape=weight.shape, dtype=model_dtype)
-            return comfy.memory_management.vram_aligned_size(geometry)
+            return (False, comfy.memory_management.vram_aligned_size(geometry))
+
+        def force_load_param(self, param_key, device_to):
+            key = key_param_name_to_key(n, param_key)
+            if key in self.backup:
+                comfy.utils.set_attr_param(self.model, key, self.backup[key].weight)
+            self.patch_weight_to_device(key, device_to=device_to)
 
         if hasattr(m, "comfy_cast_weights"):
             m.comfy_cast_weights = True
@@ -1537,13 +1545,19 @@ def setup_param(self, m, n, param_key):
             m.seed_key = n
             set_dirty(m, dirty)
 
-            v_weight_size = 0
-            v_weight_size += setup_param(self, m, n, "weight")
-            v_weight_size += setup_param(self, m, n, "bias")
+            force_load, v_weight_size = setup_param(self, m, n, "weight")
+            force_load_bias, v_weight_bias = setup_param(self, m, n, "bias")
+            force_load = force_load or force_load_bias
+            v_weight_size += v_weight_bias
 
-            if vbar is not None and not hasattr(m, "_v"):
-                m._v = vbar.alloc(v_weight_size)
-                allocated_size += v_weight_size
+            if force_load:
+                logging.info(f"Module {n} has resizing Lora - force loading")
+                force_load_param(self, "weight", device_to)
+                force_load_param(self, "bias", device_to)
+            else:
+                if vbar is not None and not hasattr(m, "_v"):
+                    m._v = vbar.alloc(v_weight_size)
+                    allocated_size += v_weight_size
 
         else:
             for param in params:
@@ -1606,6 +1620,11 @@ def unpatch_model(self, device_to=None, unpatch_weights=True):
             for m in self.model.modules():
                 move_weight_functions(m, device_to)
 
+            keys = list(self.backup.keys())
+            for k in keys:
+                bk = self.backup[k]
+                comfy.utils.set_attr_param(self.model, k, bk.weight)
+
     def partially_load(self, device_to, extra_memory=0, force_patch_weights=False):
         assert not force_patch_weights #See above
         with self.use_ejected(skip_and_inject_on_exit_only=True):
diff --git a/comfy/weight_adapter/base.py b/comfy/weight_adapter/base.py
index bce89a0e23e9..d352e066ba8e 100644
--- a/comfy/weight_adapter/base.py
+++ b/comfy/weight_adapter/base.py
@@ -49,6 +49,12 @@ def create_train(cls, weight, *args) -> "WeightAdapterTrainBase":
         """
         raise NotImplementedError
 
+    def calculate_shape(
+        self,
+        key
+    ):
+        return None
+
     def calculate_weight(
         self,
         weight,
diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py
index bc4260a8f8bb..8e1261a123d5 100644
--- a/comfy/weight_adapter/lora.py
+++ b/comfy/weight_adapter/lora.py
@@ -214,6 +214,13 @@ def load(
         else:
             return None
 
+    def calculate_shape(
+        self,
+        key
+    ):
+        reshape = self.weights[5]
+        return tuple(reshape) if reshape is not None else None
+
     def calculate_weight(
         self,
         weight,

From 88e6370527dbd602851de07d957a8f17b3ca9447 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sun, 15 Feb 2026 17:43:53 -0800
Subject: [PATCH 4/4] Remove workaround for old pytorch. (#12480)

---
 comfy/ldm/modules/diffusionmodules/model.py | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 5a22ef030e35..805592aa5c01 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -102,19 +102,7 @@ def forward(self, x):
         return self.conv(x)
 
 def interpolate_up(x, scale_factor):
-    try:
-        return torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="nearest")
-    except: #operation not implemented for bf16
-        orig_shape = list(x.shape)
-        out_shape = orig_shape[:2]
-        for i in range(len(orig_shape) - 2):
-            out_shape.append(round(orig_shape[i + 2] * scale_factor[i]))
-        out = torch.empty(out_shape, dtype=x.dtype, layout=x.layout, device=x.device)
-        split = 8
-        l = out.shape[1] // split
-        for i in range(0, out.shape[1], l):
-            out[:,i:i+l] = torch.nn.functional.interpolate(x[:,i:i+l].to(torch.float32), scale_factor=scale_factor, mode="nearest").to(x.dtype)
-        return out
+    return torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="nearest")
 
 class Upsample(nn.Module):
     def __init__(self, in_channels, with_conv, conv_op=ops.Conv2d, scale_factor=2.0):
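A standalone snippet (not part of the patch) to sanity-check the assumption
behind this cleanup, namely that nearest-neighbour interpolation now works
directly on bfloat16 tensors with the PyTorch versions ComfyUI supports:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 4, 8, 8, dtype=torch.bfloat16)
    # Older PyTorch raised "not implemented for BFloat16" here, which is what
    # the removed channel-chunking fallback worked around.
    y = F.interpolate(x, scale_factor=2.0, mode="nearest")
    print(y.shape, y.dtype)  # expect torch.Size([1, 4, 16, 16]) torch.bfloat16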