fix(diffusers): sd_embed is not always available (#8602)
sd_embed doesn't seem to play well with MPS and L4T, so make the import optional: if the module cannot be imported, the backend warns and falls back to standard prompt processing.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
commit dadc7158fb (parent 68c7077491), committed by GitHub
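The change applies the usual optional-dependency pattern: try the import, record whether it succeeded, and only take the sd_embed code path when both the SD_EMBED flag and the import check pass. A minimal standalone sketch of that pattern follows; the names SD_EMBED, SD_EMBED_AVAILABLE and the warning text come from the diff below, the rest is illustrative and runs even without sd_embed installed.

import os
import sys

try:
    from sd_embed.embedding_funcs import get_weighted_text_embeddings_sd15
    SD_EMBED_AVAILABLE = True
except ImportError:
    # sd_embed may be missing, e.g. on MPS or L4T builds; degrade gracefully.
    get_weighted_text_embeddings_sd15 = None
    SD_EMBED_AVAILABLE = False

# Opt-in flag, as in backend.py.
SD_EMBED = os.environ.get("SD_EMBED", "0") == "1"

if SD_EMBED and not SD_EMBED_AVAILABLE:
    print("WARNING: SD_EMBED is enabled but sd_embed module is not available. Falling back to standard prompt processing.", file=sys.stderr)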
@@ -40,7 +40,21 @@ from compel import Compel, ReturnedEmbeddingsType
 from optimum.quanto import freeze, qfloat8, quantize
 from transformers import T5EncoderModel
 from safetensors.torch import load_file
-from sd_embed.embedding_funcs import get_weighted_text_embeddings_sd15, get_weighted_text_embeddings_sdxl, get_weighted_text_embeddings_sd3, get_weighted_text_embeddings_flux1
+# Try to import sd_embed - it might not always be available
+try:
+    from sd_embed.embedding_funcs import (
+        get_weighted_text_embeddings_sd15,
+        get_weighted_text_embeddings_sdxl,
+        get_weighted_text_embeddings_sd3,
+        get_weighted_text_embeddings_flux1,
+    )
+    SD_EMBED_AVAILABLE = True
+except ImportError:
+    get_weighted_text_embeddings_sd15 = None
+    get_weighted_text_embeddings_sdxl = None
+    get_weighted_text_embeddings_sd3 = None
+    get_weighted_text_embeddings_flux1 = None
+    SD_EMBED_AVAILABLE = False
 
 # Import LTX-2 specific utilities
 from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
@@ -49,6 +63,9 @@ from diffusers import LTX2VideoTransformer3DModel, GGUFQuantizationConfig
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
 COMPEL = os.environ.get("COMPEL", "0") == "1"
 SD_EMBED = os.environ.get("SD_EMBED", "0") == "1"
+# Warn if SD_EMBED is enabled but the module is not available
+if SD_EMBED and not SD_EMBED_AVAILABLE:
+    print("WARNING: SD_EMBED is enabled but sd_embed module is not available. Falling back to standard prompt processing.", file=sys.stderr)
 XPU = os.environ.get("XPU", "0") == "1"
 CLIPSKIP = os.environ.get("CLIPSKIP", "1") == "1"
 SAFETENSORS = os.environ.get("SAFETENSORS", "1") == "1"
@@ -743,7 +760,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
                     guidance_scale=self.cfg_scale,
                     **kwargs
                 ).images[0]
-            elif SD_EMBED:
+            elif SD_EMBED and SD_EMBED_AVAILABLE:
                 if self.PipelineType == "StableDiffusionPipeline":
                     (
                         kwargs["prompt_embeds"],
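For context, the SD_EMBED branch builds weighted prompt embeddings with sd_embed and passes them to the diffusers pipeline; when the import failed those helper names are None, so the added "and SD_EMBED_AVAILABLE" guard routes such requests to the plain-prompt path instead of raising at call time. Below is a hedged sketch of the SD 1.5 case using the call signature shown in the sd_embed README; the model id, device, and prompts are illustrative, and the actual wiring in backend.py differs.

import torch
from diffusers import StableDiffusionPipeline
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sd15

# Assumes sd_embed is installed; model id and device are illustrative.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# sd_embed resolves weighted-prompt syntax such as "(word:1.3)" into embeddings,
# returning (prompt_embeds, negative_prompt_embeds) for SD 1.5 pipelines.
prompt_embeds, negative_prompt_embeds = get_weighted_text_embeddings_sd15(
    pipe,
    prompt="a (red:1.3) fox in the snow",
    neg_prompt="blurry, low quality",
)

image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    guidance_scale=7.5,
).images[0]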
@@ -4,7 +4,6 @@ git+https://github.com/huggingface/diffusers
 transformers
 accelerate
 compel
-git+https://github.com/xhinker/sd_embed
 peft
 optimum-quanto
 numpy<2

@@ -4,7 +4,6 @@ git+https://github.com/huggingface/diffusers
 transformers
 accelerate
 compel
-git+https://github.com/xhinker/sd_embed
 peft
 optimum-quanto
 numpy<2

@@ -5,7 +5,6 @@ opencv-python
 transformers
 accelerate
 compel
-git+https://github.com/xhinker/sd_embed
 peft
 sentencepiece
 optimum-quanto
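Note that sd_embed is only dropped from the pinned requirements; the code path itself remains. Users who want weighted prompts presumably need to install the package manually (the removed entries point at git+https://github.com/xhinker/sd_embed) and start the backend with SD_EMBED=1; without the module, the backend now warns and falls back to standard prompt processing instead of failing at import time.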