mirror of
https://github.com/bentoml/OpenLLM.git
synced 2026-03-06 08:08:03 -05:00
chore: cleanup pre-commit jobs and update usage
Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
This commit is contained in:
984
openllm-python/README.md
generated
984
openllm-python/README.md
generated
File diff suppressed because it is too large
Load Diff
# PEP 517 build configuration: hatchling backend with VCS-derived version
# and a dynamically generated PyPI readme (see `dynamic` under [project]).
[build-system]
build-backend = "hatchling.build"
requires = [
  "hatchling==1.18.0",
  "hatch-vcs==0.3.0",
  "hatch-fancy-pypi-readme==23.1.0",
]
# PEP 621 project metadata. Keys are kept in alphabetical order
# (authors, classifiers, dependencies, description, dynamic, keywords,
# license, name) to match the file's existing convention.
# NOTE(review): `version` and `readme` are declared dynamic and are
# produced by hatch-vcs / hatch-fancy-pypi-readme at build time.
[project]
authors = [
  { name = "Aaron Pham", email = "aarnphm@bentoml.com" },
  { name = "BentoML Team", email = "contact@bentoml.com" },
]
classifiers = [
  "Development Status :: 5 - Production/Stable",
  "Environment :: GPU :: NVIDIA CUDA",
  "Environment :: GPU :: NVIDIA CUDA :: 12",
  "Environment :: GPU :: NVIDIA CUDA :: 11.8",
  "Environment :: GPU :: NVIDIA CUDA :: 11.7",
  "License :: OSI Approved :: Apache Software License",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
  "Topic :: Software Development :: Libraries",
  "Operating System :: OS Independent",
  "Intended Audience :: Developers",
  "Intended Audience :: Science/Research",
  "Intended Audience :: System Administrators",
  "Typing :: Typed",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3 :: Only",
  "Programming Language :: Python :: 3.8",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  "Programming Language :: Python :: Implementation :: CPython",
  "Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = [
  "bentoml[io]>=1.1.2",
  "transformers[torch,tokenizers,accelerate]>=4.32.1",
  "openllm-client",
  "safetensors",
  "optimum>=1.12.0",
  "accelerate",
  "ghapi",
  "tabulate[widechars]>=0.9.0",
  "click>=8.1.3",
  # Literal string avoids escaping the quotes in the PEP 508 marker.
  'cuda-python;platform_system!="Darwin"',
  "bitsandbytes<0.42",
]
description = "OpenLLM: Operating LLMs in production"
dynamic = ["version", "readme"]
keywords = [
  "MLOps",
  "AI",
  "BentoML",
  "Model Serving",
  "Model Deployment",
  "LLMOps",
  "Falcon",
  "Vicuna",
  "Llama 2",
  "Fine tuning",
  "Serverless",
  "Large Language Model",
  "Generative AI",
  "StableLM",
  "Alpaca",
  "PyTorch",
  "Transformers",
]
license = "Apache-2.0"
name = "openllm"
Reference in New Issue
Block a user