fix(quantize): dynamic quantisation for int8 and int4

Only set the tokenizer default when the quantisation method is GPTQ.

Signed-off-by: aarnphm-ec2-dev <29749331+aarnphm@users.noreply.github.com>
commit 8173cb09a5 (parent dd120d8516)
Author: aarnphm-ec2-dev
Date: 2023-09-07 01:48:45 +00:00

3 changed files with 4 additions and 4 deletions

@@ -108,7 +108,7 @@ full = [
 ggml = ["ctransformers"]
 gptq = ["auto-gptq[triton]>=0.4.2", "optimum>=1.12.0"]
 grpc = ["openllm-client[grpc]"]
-llama = ["fairscale", "sentencepiece"]
+llama = ["fairscale", "sentencepiece", "scipy"]
 mpt = ["triton", "einops"]
 openai = ["openai", "tiktoken"]
 opt = ["flax>=0.7", "jax", "jaxlib", "tensorflow", "keras"]
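
The "scipy" pin added to the llama extra presumably backs the bitsandbytes int8/int4 paths this commit targets (bitsandbytes builds its dynamic/quantile quantisation maps via scipy.stats). As a hedged sketch, the configs infer_quantisation_config would return for these paths are plausibly of the following shape, using the real transformers BitsAndBytesConfig API (the exact options OpenLLM sets are an assumption):

from transformers import BitsAndBytesConfig

# Sketch only: assumed int8/int4 config shapes, not OpenLLM's exact output.
int8_config = BitsAndBytesConfig(load_in_8bit=True)
int4_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4')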

@@ -444,8 +444,8 @@ class LLM(LLMInterface[M, T], ReprMixin):
     raise ValueError("'quantization_config' and 'quantize' are mutually exclusive. Either customise your quantization_config or use the 'quantize' argument.")
   if quantization_config is None and quantize is not None:
     # in case users input `tokenizer` to __init__, default to the _model_id
-    _gptq_tokenizer = attrs.pop('tokenizer', _model_id)
-    quantization_config, attrs = infer_quantisation_config(cls, quantize, tokenizer=_gptq_tokenizer, **attrs)
+    if quantize == 'gptq': attrs.setdefault('tokenizer', _model_id)
+    quantization_config, attrs = infer_quantisation_config(cls, quantize, **attrs)
   if quantize == 'gptq': serialisation = 'safetensors'
   elif cls.__llm_backend__ == 'vllm': serialisation = 'legacy'  # Currently working-in-progress
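
For clarity, a minimal sketch of the behaviour the diff above introduces (_quant_attrs is a hypothetical helper, not OpenLLM code): a tokenizer default is injected only for GPTQ, which needs a tokenizer for calibration, while the int8/int4 bitsandbytes paths no longer receive a stray tokenizer kwarg.

def _quant_attrs(quantize: str, model_id: str, **attrs):
    # Hypothetical helper mirroring the new branch: only GPTQ gets a tokenizer default.
    if quantize == 'gptq':
        attrs.setdefault('tokenizer', model_id)
    return attrs

assert _quant_attrs('gptq', 'facebook/opt-1.3b') == {'tokenizer': 'facebook/opt-1.3b'}
assert _quant_attrs('int8', 'facebook/opt-1.3b') == {}  # no tokenizer leaks into the int8/int4 kwargs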