diff --git a/README.md b/README.md index e1a96b2f..3e614fe7 100644 --- a/README.md +++ b/README.md @@ -226,12 +226,10 @@ openllm query 'What are large language models?' You can specify any of the following Baichuan models via `openllm start`: -- [baichuan-inc/baichuan-7b](https://huggingface.co/baichuan-inc/baichuan-7b) -- [baichuan-inc/baichuan-13b-base](https://huggingface.co/baichuan-inc/baichuan-13b-base) -- [baichuan-inc/baichuan-13b-chat](https://huggingface.co/baichuan-inc/baichuan-13b-chat) -- [fireballoon/baichuan-vicuna-chinese-7b](https://huggingface.co/fireballoon/baichuan-vicuna-chinese-7b) -- [fireballoon/baichuan-vicuna-7b](https://huggingface.co/fireballoon/baichuan-vicuna-7b) -- [hiyouga/baichuan-7b-sft](https://huggingface.co/hiyouga/baichuan-7b-sft) +- [baichuan-inc/baichuan2-7b-base](https://huggingface.co/baichuan-inc/baichuan2-7b-base) +- [baichuan-inc/baichuan2-7b-chat](https://huggingface.co/baichuan-inc/baichuan2-7b-chat) +- [baichuan-inc/baichuan2-13b-base](https://huggingface.co/baichuan-inc/baichuan2-13b-base) +- [baichuan-inc/baichuan2-13b-chat](https://huggingface.co/baichuan-inc/baichuan2-13b-chat) ### Supported backends @@ -249,7 +247,7 @@ OpenLLM will support vLLM and PyTorch as default backend. By default, it will us To install vLLM, run `pip install "openllm[vllm]"` ```bash -TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan-7b --backend vllm +TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend vllm ``` @@ -264,7 +262,7 @@ TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan-7b --backend vllm ```bash -TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan-7b --backend pt +TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend pt ``` diff --git a/changelog.d/728.change.md b/changelog.d/728.change.md new file mode 100644 index 00000000..c41db537 --- /dev/null +++ b/changelog.d/728.change.md @@ -0,0 +1 @@ +Only baichuan2 is supported. 
We dropped baichuan 1 support diff --git a/openllm-core/src/openllm_core/config/configuration_auto.py b/openllm-core/src/openllm_core/config/configuration_auto.py index 6ff0098a..431d6691 100644 --- a/openllm-core/src/openllm_core/config/configuration_auto.py +++ b/openllm-core/src/openllm_core/config/configuration_auto.py @@ -42,7 +42,6 @@ CONFIG_MAPPING_NAMES = OrderedDict( ) ) - class _LazyConfigMapping(OrderedDictType, ReprMixin): def __init__(self, mapping: OrderedDict[LiteralString, LiteralString]): self._mapping = mapping diff --git a/openllm-core/src/openllm_core/config/configuration_baichuan.py b/openllm-core/src/openllm_core/config/configuration_baichuan.py index d1c23272..4265deb9 100644 --- a/openllm-core/src/openllm_core/config/configuration_baichuan.py +++ b/openllm-core/src/openllm_core/config/configuration_baichuan.py @@ -21,19 +21,17 @@ class BaichuanConfig(openllm_core.LLMConfig): 'url': 'https://github.com/baichuan-inc/Baichuan-7B', 'requirements': ['cpm-kernels'], 'backend': ('pt', 'vllm'), - 'architecture': 'BaiChuanForCausalLM', + 'architecture': 'BaichuanForCausalLM', # NOTE: See the following # https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555 # https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/main/generation_config.json # https://github.com/baichuan-inc/Baichuan-13B/issues/25 'default_id': 'baichuan-inc/baichuan-7b', 'model_ids': [ - 'baichuan-inc/baichuan-7b', - 'baichuan-inc/baichuan-13b-base', - 'baichuan-inc/baichuan-13b-chat', - 'fireballoon/baichuan-vicuna-chinese-7b', - 'fireballoon/baichuan-vicuna-7b', - 'hiyouga/baichuan-7b-sft', + 'baichuan-inc/baichuan2-7b-base', + 'baichuan-inc/baichuan2-7b-chat', + 'baichuan-inc/baichuan2-13b-base', + 'baichuan-inc/baichuan2-13b-chat', ], } diff --git a/openllm-python/README.md b/openllm-python/README.md index e1a96b2f..3e614fe7 100644 --- a/openllm-python/README.md +++ b/openllm-python/README.md @@ -226,12 
+226,10 @@ openllm query 'What are large language models?' You can specify any of the following Baichuan models via `openllm start`: -- [baichuan-inc/baichuan-7b](https://huggingface.co/baichuan-inc/baichuan-7b) -- [baichuan-inc/baichuan-13b-base](https://huggingface.co/baichuan-inc/baichuan-13b-base) -- [baichuan-inc/baichuan-13b-chat](https://huggingface.co/baichuan-inc/baichuan-13b-chat) -- [fireballoon/baichuan-vicuna-chinese-7b](https://huggingface.co/fireballoon/baichuan-vicuna-chinese-7b) -- [fireballoon/baichuan-vicuna-7b](https://huggingface.co/fireballoon/baichuan-vicuna-7b) -- [hiyouga/baichuan-7b-sft](https://huggingface.co/hiyouga/baichuan-7b-sft) +- [baichuan-inc/baichuan2-7b-base](https://huggingface.co/baichuan-inc/baichuan2-7b-base) +- [baichuan-inc/baichuan2-7b-chat](https://huggingface.co/baichuan-inc/baichuan2-7b-chat) +- [baichuan-inc/baichuan2-13b-base](https://huggingface.co/baichuan-inc/baichuan2-13b-base) +- [baichuan-inc/baichuan2-13b-chat](https://huggingface.co/baichuan-inc/baichuan2-13b-chat) ### Supported backends @@ -249,7 +247,7 @@ OpenLLM will support vLLM and PyTorch as default backend. By default, it will us To install vLLM, run `pip install "openllm[vllm]"` ```bash -TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan-7b --backend vllm +TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend vllm ``` @@ -264,7 +262,7 @@ TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan-7b --backend vllm ```bash -TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan-7b --backend pt +TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend pt ```