mirror of
https://github.com/bentoml/OpenLLM.git
synced 2026-05-07 15:22:46 -04:00
chore(llama): remove decapoda variants
Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
This commit is contained in:
10
README.md
10
README.md
@@ -261,21 +261,13 @@ pip install "openllm[llama]"
|
||||
<li><a href=https://huggingface.co/meta-llama/llama-2-70b-hf><code>meta-llama/llama-2-70b-hf</code></a></li>
|
||||
<li><a href=https://huggingface.co/meta-llama/llama-2-13b-hf><code>meta-llama/llama-2-13b-hf</code></a></li>
|
||||
<li><a href=https://huggingface.co/meta-llama/llama-2-7b-hf><code>meta-llama/llama-2-7b-hf</code></a></li>
|
||||
<li><a href=https://huggingface.co/decapoda-research/llama-65b-hf><code>decapoda-research/llama-65b-hf</code></a></li>
|
||||
<li><a href=https://huggingface.co/decapoda-research/llama-30b-hf><code>decapoda-research/llama-30b-hf</code></a></li>
|
||||
<li><a href=https://huggingface.co/decapoda-research/llama-13b-hf><code>decapoda-research/llama-13b-hf</code></a></li>
|
||||
<li><a href=https://huggingface.co/decapoda-research/llama-7b-hf-int8><code>decapoda-research/llama-7b-hf-int8</code></a></li>
|
||||
<li><a href=https://huggingface.co/decapoda-research/llama-7b-hf><code>decapoda-research/llama-7b-hf</code></a></li>
|
||||
<li><a href=https://huggingface.co/openlm-research/open_llama_7b_v2><code>openlm-research/open_llama_7b_v2</code></a></li>
|
||||
<li><a href=https://huggingface.co/openlm-research/open_llama_3b_v2><code>openlm-research/open_llama_3b_v2</code></a></li>
|
||||
<li><a href=https://huggingface.co/openlm-research/open_llama_13b><code>openlm-research/open_llama_13b</code></a></li>
|
||||
<li><a href=https://huggingface.co/openlm-research/open_llama_7b><code>openlm-research/open_llama_7b</code></a></li>
|
||||
<li><a href=https://huggingface.co/openlm-research/open_llama_3b><code>openlm-research/open_llama_3b</code></a></li>
|
||||
<li><a href=https://huggingface.co/huggyllama/llama-65b><code>huggyllama/llama-65b</code></a></li>
|
||||
<li><a href=https://huggingface.co/huggyllama/llama-30b><code>huggyllama/llama-30b</code></a></li>
|
||||
<li><a href=https://huggingface.co/huggyllama/llama-13b><code>huggyllama/llama-13b</code></a></li>
|
||||
<li><a href=https://huggingface.co/huggyllama/llama-7b><code>huggyllama/llama-7b</code></a></li>
|
||||
<li><a href=https://huggingface.co/syzymon/long_llama_3b><code>syzymon/long_llama_3b</code></a></li></ul>
|
||||
<li><a href=https://huggingface.co/huggyllama/llama-7b><code>huggyllama/llama-7b</code></a></li></ul>
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
@@ -45,21 +45,13 @@ class LlaMAConfig(openllm.LLMConfig):
|
||||
"meta-llama/llama-2-70b-hf",
|
||||
"meta-llama/llama-2-13b-hf",
|
||||
"meta-llama/llama-2-7b-hf",
|
||||
"decapoda-research/llama-65b-hf",
|
||||
"decapoda-research/llama-30b-hf",
|
||||
"decapoda-research/llama-13b-hf",
|
||||
"decapoda-research/llama-7b-hf-int8",
|
||||
"decapoda-research/llama-7b-hf",
|
||||
"openlm-research/open_llama_7b_v2",
|
||||
"openlm-research/open_llama_3b_v2",
|
||||
"openlm-research/open_llama_13b",
|
||||
"openlm-research/open_llama_7b",
|
||||
"openlm-research/open_llama_3b",
|
||||
"huggyllama/llama-65b",
|
||||
"huggyllama/llama-30b",
|
||||
"huggyllama/llama-13b",
|
||||
"huggyllama/llama-7b",
|
||||
"syzymon/long_llama_3b", # NOTE: use ``openllm.LongLLaMA`` to load this variant. Otherwise it will be limited to context length of 2048
|
||||
],
|
||||
"tokenizer_class": "LlamaTokenizerFast",
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user