diff --git a/.env b/.env
index 852d3dac6..ca2d149fe 100644
--- a/.env
+++ b/.env
@@ -26,6 +26,9 @@
 ## Disables COMPEL (Diffusers)
 # COMPEL=0
 
+## Disables SD_EMBED (Diffusers)
+# SD_EMBED=0
+
 ## Enable/Disable single backend (useful if only one GPU is available)
 # LOCALAI_SINGLE_ACTIVE_BACKEND=true
 
diff --git a/backend/python/diffusers/README.md b/backend/python/diffusers/README.md
index 91fff3127..70fddb80f 100644
--- a/backend/python/diffusers/README.md
+++ b/backend/python/diffusers/README.md
@@ -115,6 +115,7 @@ Available pipelines: AnimateDiffPipeline, AnimateDiffVideoToVideoPipeline, ...
 | Variable | Default | Description |
 |----------|---------|-------------|
 | `COMPEL` | `0` | Enable Compel for prompt weighting |
+| `SD_EMBED` | `0` | Enable sd_embed for prompt weighting |
 | `XPU` | `0` | Enable Intel XPU support |
 | `CLIPSKIP` | `1` | Enable CLIP skip support |
 | `SAFETENSORS` | `1` | Use safetensors format |
diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py
index 032af60c4..29a86b23e 100755
--- a/backend/python/diffusers/backend.py
+++ b/backend/python/diffusers/backend.py
@@ -40,6 +40,7 @@ from compel import Compel, ReturnedEmbeddingsType
 from optimum.quanto import freeze, qfloat8, quantize
 from transformers import T5EncoderModel
 from safetensors.torch import load_file
+from sd_embed.embedding_funcs import get_weighted_text_embeddings_sd15, get_weighted_text_embeddings_sdxl, get_weighted_text_embeddings_sd3, get_weighted_text_embeddings_flux1
 
 # Import LTX-2 specific utilities
 from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
@@ -47,6 +48,7 @@ from diffusers import LTX2VideoTransformer3DModel, GGUFQuantizationConfig
 
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
 COMPEL = os.environ.get("COMPEL", "0") == "1"
+SD_EMBED = os.environ.get("SD_EMBED", "0") == "1"
 XPU = os.environ.get("XPU", "0") == "1"
 CLIPSKIP = os.environ.get("CLIPSKIP", "1") == "1"
 SAFETENSORS = os.environ.get("SAFETENSORS", "1") == "1"
@@ -737,6 +739,51 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             kwargs["prompt_embeds"] = conditioning
             kwargs["pooled_prompt_embeds"] = pooled
             # pass the kwargs dictionary to the self.pipe method
+            image = self.pipe(
+                guidance_scale=self.cfg_scale,
+                **kwargs
+            ).images[0]
+        elif SD_EMBED:
+            if self.PipelineType == "StableDiffusionPipeline":
+                (
+                    kwargs["prompt_embeds"],
+                    kwargs["negative_prompt_embeds"],
+                ) = get_weighted_text_embeddings_sd15(
+                    pipe = self.pipe,
+                    prompt = prompt,
+                    neg_prompt = request.negative_prompt if hasattr(request, 'negative_prompt') else None,
+                )
+            if self.PipelineType == "StableDiffusionXLPipeline":
+                (
+                    kwargs["prompt_embeds"],
+                    kwargs["negative_prompt_embeds"],
+                    kwargs["pooled_prompt_embeds"],
+                    kwargs["negative_pooled_prompt_embeds"],
+                ) = get_weighted_text_embeddings_sdxl(
+                    pipe = self.pipe,
+                    prompt = prompt,
+                    neg_prompt = request.negative_prompt if hasattr(request, 'negative_prompt') else None
+                )
+            if self.PipelineType == "StableDiffusion3Pipeline":
+                (
+                    kwargs["prompt_embeds"],
+                    kwargs["negative_prompt_embeds"],
+                    kwargs["pooled_prompt_embeds"],
+                    kwargs["negative_pooled_prompt_embeds"],
+                ) = get_weighted_text_embeddings_sd3(
+                    pipe = self.pipe,
+                    prompt = prompt,
+                    neg_prompt = request.negative_prompt if hasattr(request, 'negative_prompt') else None
+                )
+            if self.PipelineType == "FluxTransformer2DModel":
+                (
+                    kwargs["prompt_embeds"],
+                    kwargs["pooled_prompt_embeds"],
+                ) = get_weighted_text_embeddings_flux1(
+                    pipe = self.pipe,
+                    prompt = prompt,
+                )
+
             image = self.pipe(
                 guidance_scale=self.cfg_scale,
                 **kwargs
diff --git a/backend/python/diffusers/requirements-cpu.txt b/backend/python/diffusers/requirements-cpu.txt
index fceda06d2..2b76224d9 100644
--- a/backend/python/diffusers/requirements-cpu.txt
+++ b/backend/python/diffusers/requirements-cpu.txt
@@ -5,6 +5,7 @@ transformers
 torchvision==0.22.1
 accelerate
 compel
+git+https://github.com/xhinker/sd_embed
 peft
 sentencepiece
 torch==2.7.1
diff --git a/backend/python/diffusers/requirements-cublas12.txt b/backend/python/diffusers/requirements-cublas12.txt
index 632e9421f..5a1e947f2 100644
--- a/backend/python/diffusers/requirements-cublas12.txt
+++ b/backend/python/diffusers/requirements-cublas12.txt
@@ -5,6 +5,7 @@ transformers
 torchvision
 accelerate
 compel
+git+https://github.com/xhinker/sd_embed
 peft
 sentencepiece
 torch
diff --git a/backend/python/diffusers/requirements-cublas13.txt b/backend/python/diffusers/requirements-cublas13.txt
index 4867a85cd..354c6df07 100644
--- a/backend/python/diffusers/requirements-cublas13.txt
+++ b/backend/python/diffusers/requirements-cublas13.txt
@@ -5,6 +5,7 @@ transformers
 torchvision
 accelerate
 compel
+git+https://github.com/xhinker/sd_embed
 peft
 sentencepiece
 torch
diff --git a/backend/python/diffusers/requirements-intel.txt b/backend/python/diffusers/requirements-intel.txt
index e0fa69fb0..3fd3cde74 100644
--- a/backend/python/diffusers/requirements-intel.txt
+++ b/backend/python/diffusers/requirements-intel.txt
@@ -8,6 +8,7 @@ opencv-python
 transformers
 accelerate
 compel
+git+https://github.com/xhinker/sd_embed
 peft
 sentencepiece
 optimum-quanto
diff --git a/backend/python/diffusers/requirements-l4t12.txt b/backend/python/diffusers/requirements-l4t12.txt
index 9f77a9d09..814a22dff 100644
--- a/backend/python/diffusers/requirements-l4t12.txt
+++ b/backend/python/diffusers/requirements-l4t12.txt
@@ -4,6 +4,7 @@ git+https://github.com/huggingface/diffusers
 transformers
 accelerate
 compel
+git+https://github.com/xhinker/sd_embed
 peft
 optimum-quanto
 numpy<2
diff --git a/backend/python/diffusers/requirements-l4t13.txt b/backend/python/diffusers/requirements-l4t13.txt
index 560858e35..3eb79ecd2 100644
--- a/backend/python/diffusers/requirements-l4t13.txt
+++ b/backend/python/diffusers/requirements-l4t13.txt
@@ -4,6 +4,7 @@ git+https://github.com/huggingface/diffusers
 transformers
 accelerate
 compel
+git+https://github.com/xhinker/sd_embed
 peft
 optimum-quanto
 numpy<2
diff --git a/backend/python/diffusers/requirements-mps.txt b/backend/python/diffusers/requirements-mps.txt
index 8b7c2413b..984cb4dc7 100644
--- a/backend/python/diffusers/requirements-mps.txt
+++ b/backend/python/diffusers/requirements-mps.txt
@@ -5,6 +5,7 @@ opencv-python
 transformers
 accelerate
 compel
+git+https://github.com/xhinker/sd_embed
 peft
 sentencepiece
 optimum-quanto