From d92b136780c35ff12ed99fd6e2cb906e1e4b1554 Mon Sep 17 00:00:00 2001 From: Aaron <29749331+aarnphm@users.noreply.github.com> Date: Wed, 19 Jul 2023 18:58:04 -0400 Subject: [PATCH] chore(llama): remove decapoda variants Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com> --- README.md | 10 +--------- src/openllm/models/llama/configuration_llama.py | 8 -------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/README.md b/README.md index af75783e..bb4b5178 100644 --- a/README.md +++ b/README.md @@ -261,21 +261,13 @@ pip install "openllm[llama]"
  • meta-llama/llama-2-70b-hf
  • meta-llama/llama-2-13b-hf
  • meta-llama/llama-2-7b-hf
  • -
  • decapoda-research/llama-65b-hf
  • -
  • decapoda-research/llama-30b-hf
  • -
  • decapoda-research/llama-13b-hf
  • -
  • decapoda-research/llama-7b-hf-int8
  • -
  • decapoda-research/llama-7b-hf
  • openlm-research/open_llama_7b_v2
  • openlm-research/open_llama_3b_v2
  • openlm-research/open_llama_13b
  • -
  • openlm-research/open_llama_7b
  • -
  • openlm-research/open_llama_3b
  • huggyllama/llama-65b
  • huggyllama/llama-30b
  • huggyllama/llama-13b
  • -
  • huggyllama/llama-7b
  • -
  • syzymon/long_llama_3b
  • +
  • huggyllama/llama-7b
  • diff --git a/src/openllm/models/llama/configuration_llama.py b/src/openllm/models/llama/configuration_llama.py index d0b01e62..fcd98415 100644 --- a/src/openllm/models/llama/configuration_llama.py +++ b/src/openllm/models/llama/configuration_llama.py @@ -45,21 +45,13 @@ class LlaMAConfig(openllm.LLMConfig): "meta-llama/llama-2-70b-hf", "meta-llama/llama-2-13b-hf", "meta-llama/llama-2-7b-hf", - "decapoda-research/llama-65b-hf", - "decapoda-research/llama-30b-hf", - "decapoda-research/llama-13b-hf", - "decapoda-research/llama-7b-hf-int8", - "decapoda-research/llama-7b-hf", "openlm-research/open_llama_7b_v2", "openlm-research/open_llama_3b_v2", "openlm-research/open_llama_13b", - "openlm-research/open_llama_7b", - "openlm-research/open_llama_3b", "huggyllama/llama-65b", "huggyllama/llama-30b", "huggyllama/llama-13b", "huggyllama/llama-7b", - "syzymon/long_llama_3b", # NOTE: use ``openllm.LongLLaMA`` to load this variant. Otherwise it will be limited to context length of 2048 ], "tokenizer_class": "LlamaTokenizerFast", }