Mirror of https://github.com/bentoml/OpenLLM.git (synced 2026-04-26 10:01:30 -04:00)
chore: update requirements in README.md (#659)
chore: update requirements

Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
README.md (+64 lines)
```diff
@@ -173,6 +173,14 @@ OpenLLM currently supports the following models. By default, OpenLLM doesn't inc
 
 ### Quickstart
 
+> **Note:** Baichuan requires installing with:
+> ```bash
+> pip install "openllm[baichuan]"
+> ```
+
 Run the following command to quickly spin up a Baichuan server:
 
 ```bash
@@ -244,6 +252,14 @@ openllm start baichuan-inc/baichuan-7b --backend pt
 
 ### Quickstart
 
+> **Note:** ChatGLM requires installing with:
+> ```bash
+> pip install "openllm[chatglm]"
+> ```
+
 Run the following command to quickly spin up a ChatGLM server:
 
 ```bash
@@ -365,6 +381,14 @@ openllm start databricks/dolly-v2-3b --backend pt
 
 ### Quickstart
 
+> **Note:** Falcon requires installing with:
+> ```bash
+> pip install "openllm[falcon]"
+> ```
+
 Run the following command to quickly spin up a Falcon server:
 
 ```bash
@@ -434,6 +458,14 @@ openllm start tiiuae/falcon-7b --backend pt
 
 ### Quickstart
 
+> **Note:** FlanT5 requires installing with:
+> ```bash
+> pip install "openllm[flan-t5]"
+> ```
+
 Run the following command to quickly spin up a FlanT5 server:
 
 ```bash
@@ -553,6 +585,14 @@ openllm start eleutherai/gpt-neox-20b --backend pt
 
 ### Quickstart
 
+> **Note:** Llama requires installing with:
+> ```bash
+> pip install "openllm[llama]"
+> ```
+
 Run the following command to quickly spin up a Llama server:
 
 ```bash
@@ -699,6 +739,14 @@ openllm start HuggingFaceH4/zephyr-7b-alpha --backend pt
 
 ### Quickstart
 
+> **Note:** MPT requires installing with:
+> ```bash
+> pip install "openllm[mpt]"
+> ```
+
 Run the following command to quickly spin up an MPT server:
 
 ```bash
@@ -771,6 +819,14 @@ openllm start mosaicml/mpt-7b --backend pt
 
 ### Quickstart
 
+> **Note:** OPT requires installing with:
+> ```bash
+> pip install "openllm[opt]"
+> ```
+
 Run the following command to quickly spin up an OPT server:
 
 ```bash
@@ -911,6 +967,14 @@ openllm start stabilityai/stablelm-tuned-alpha-3b --backend pt
 
 ### Quickstart
 
+> **Note:** StarCoder requires installing with:
+> ```bash
+> pip install "openllm[starcoder]"
+> ```
+
 Run the following command to quickly spin up a StarCoder server:
 
 ```bash
```
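Each added note pairs a `pip install "openllm[<extra>]"` step with the existing `openllm start` quickstart command for that model. As a rough illustration only, the two steps can be scripted from Python; the `launch` helper below is hypothetical (not part of OpenLLM) and simply shells out to the same commands shown in the diff, using the Baichuan example:

```python
# Hypothetical convenience wrapper around the commands shown in the diff above.
# It runs `pip install "openllm[<extra>]"` followed by
# `openllm start <model_id> --backend <backend>` via subprocess.
import subprocess
import sys


def launch(model_id: str, extra: str, backend: str = "pt") -> None:
    # Install the model-specific extra, e.g. openllm[baichuan].
    subprocess.run([sys.executable, "-m", "pip", "install", f"openllm[{extra}]"], check=True)
    # Spin up the server, e.g. `openllm start baichuan-inc/baichuan-7b --backend pt`.
    subprocess.run(["openllm", "start", model_id, "--backend", backend], check=True)


if __name__ == "__main__":
    launch("baichuan-inc/baichuan-7b", "baichuan")
```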
openllm-python/README.md (generated, +64 lines)
The hunks in this generated file are identical to those applied to README.md above.
```diff
@@ -33,12 +33,13 @@ def main() -> int:
   for it in CONFIG_MAPPING.values():
     it = it()
-    details_block = ['<details>\n']
     architecture_name = it.__class__.__name__[:-6]
+    details_block = ['<details>\n', f'<summary>{architecture_name}</summary>\n\n', '### Quickstart\n']
+    if it['start_name'] in deps:
+      instruction = f'> ```bash\n> pip install "openllm[{it["start_name"]}]"\n> ```'
+      details_block.extend(markdown_noteblock(f'{architecture_name} requires installing with:\n{instruction}\n'))
     details_block.extend(
       [
-        f'<summary>{architecture_name}</summary>\n\n',
-        '### Quickstart\n',
         f'Run the following command to quickly spin up a {architecture_name} server:\n',
         f"""\
 ```bash
```
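In short, the change moves the `<summary>` and `### Quickstart` lines into the initial `details_block` list and, when a model's `start_name` appears in the tool's dependency map, prepends the installation note before the quickstart command. The standalone sketch below mirrors that assembly; `markdown_noteblock` is stubbed with an assumed blockquote format, and `CONFIG_MAPPING`/`deps` are replaced by plain arguments, so treat it as illustrative rather than the real generator:

```python
# Minimal sketch of the README-section assembly shown in the diff above.
# Assumptions: markdown_noteblock is stubbed; CONFIG_MAPPING/deps become arguments.
def markdown_noteblock(text: str) -> list[str]:
    # Hypothetical stand-in: wrap text in a GitHub-style "Note" blockquote.
    return ["\n", f"> **Note:** {text}\n"]


def details_section(architecture_name: str, start_name: str, needs_extra: bool) -> str:
    # Open the collapsible block, mirroring the new initial details_block list.
    details_block = [
        "<details>\n",
        f"<summary>{architecture_name}</summary>\n\n",
        "### Quickstart\n",
    ]
    if needs_extra:
        # Same installation note the diff adds for models with a pip extra.
        instruction = f'> ```bash\n> pip install "openllm[{start_name}]"\n> ```'
        details_block.extend(
            markdown_noteblock(f"{architecture_name} requires installing with:\n{instruction}\n")
        )
    details_block.append(
        f"Run the following command to quickly spin up a {architecture_name} server:\n"
    )
    return "".join(details_block)


if __name__ == "__main__":
    print(details_section("Baichuan", "baichuan", needs_extra=True))
```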