diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py
index 29a86b23e..b30f31c0c 100755
--- a/backend/python/diffusers/backend.py
+++ b/backend/python/diffusers/backend.py
@@ -40,7 +40,21 @@ from compel import Compel, ReturnedEmbeddingsType
 from optimum.quanto import freeze, qfloat8, quantize
 from transformers import T5EncoderModel
 from safetensors.torch import load_file
-from sd_embed.embedding_funcs import get_weighted_text_embeddings_sd15, get_weighted_text_embeddings_sdxl, get_weighted_text_embeddings_sd3, get_weighted_text_embeddings_flux1
+# Try to import sd_embed - it might not always be available
+try:
+    from sd_embed.embedding_funcs import (
+        get_weighted_text_embeddings_sd15,
+        get_weighted_text_embeddings_sdxl,
+        get_weighted_text_embeddings_sd3,
+        get_weighted_text_embeddings_flux1,
+    )
+    SD_EMBED_AVAILABLE = True
+except ImportError:
+    get_weighted_text_embeddings_sd15 = None
+    get_weighted_text_embeddings_sdxl = None
+    get_weighted_text_embeddings_sd3 = None
+    get_weighted_text_embeddings_flux1 = None
+    SD_EMBED_AVAILABLE = False
 
 # Import LTX-2 specific utilities
 from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
@@ -49,6 +63,9 @@ from diffusers import LTX2VideoTransformer3DModel, GGUFQuantizationConfig
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
 COMPEL = os.environ.get("COMPEL", "0") == "1"
 SD_EMBED = os.environ.get("SD_EMBED", "0") == "1"
+# Warn if SD_EMBED is enabled but the module is not available
+if SD_EMBED and not SD_EMBED_AVAILABLE:
+    print("WARNING: SD_EMBED is enabled but sd_embed module is not available. Falling back to standard prompt processing.", file=sys.stderr)
 XPU = os.environ.get("XPU", "0") == "1"
 CLIPSKIP = os.environ.get("CLIPSKIP", "1") == "1"
 SAFETENSORS = os.environ.get("SAFETENSORS", "1") == "1"
@@ -743,7 +760,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
                 guidance_scale=self.cfg_scale,
                 **kwargs
             ).images[0]
-        elif SD_EMBED:
+        elif SD_EMBED and SD_EMBED_AVAILABLE:
             if self.PipelineType == "StableDiffusionPipeline":
                 (
                     kwargs["prompt_embeds"],
diff --git a/backend/python/diffusers/requirements-l4t12.txt b/backend/python/diffusers/requirements-l4t12.txt
index 814a22dff..9f77a9d09 100644
--- a/backend/python/diffusers/requirements-l4t12.txt
+++ b/backend/python/diffusers/requirements-l4t12.txt
@@ -4,7 +4,6 @@ git+https://github.com/huggingface/diffusers
 transformers
 accelerate
 compel
-git+https://github.com/xhinker/sd_embed
 peft
 optimum-quanto
 numpy<2
diff --git a/backend/python/diffusers/requirements-l4t13.txt b/backend/python/diffusers/requirements-l4t13.txt
index 3eb79ecd2..560858e35 100644
--- a/backend/python/diffusers/requirements-l4t13.txt
+++ b/backend/python/diffusers/requirements-l4t13.txt
@@ -4,7 +4,6 @@ git+https://github.com/huggingface/diffusers
 transformers
 accelerate
 compel
-git+https://github.com/xhinker/sd_embed
 peft
 optimum-quanto
 numpy<2
diff --git a/backend/python/diffusers/requirements-mps.txt b/backend/python/diffusers/requirements-mps.txt
index 984cb4dc7..8b7c2413b 100644
--- a/backend/python/diffusers/requirements-mps.txt
+++ b/backend/python/diffusers/requirements-mps.txt
@@ -5,7 +5,6 @@ opencv-python
 transformers
 accelerate
 compel
-git+https://github.com/xhinker/sd_embed
 peft
 sentencepiece
 optimum-quanto