# openllm-python/pyproject.toml
# NOTE: PEP 517 metadata is managed via ./tools/dependencies.py
[build-system]
build-backend = "hatchling.build"
requires = [
  "hatchling==1.18.0",
  "hatch-vcs==0.3.0",
  "hatch-fancy-pypi-readme==23.1.0",
]
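# Beyond the core hatchling backend, the two plugins above serve the dynamic
# metadata declared in [project]: hatch-vcs derives "version" from git tags
# (configured under [tool.hatch.version]) and hatch-fancy-pypi-readme assembles
# "readme" from the fragments at the bottom of this file.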
[project]
authors = [
  { name = "Aaron Pham", email = "aarnphm@bentoml.com" },
  { name = "BentoML Team", email = "contact@bentoml.com" },
]
classifiers = [
  "Development Status :: 5 - Production/Stable",
  "Environment :: GPU :: NVIDIA CUDA",
  "Environment :: GPU :: NVIDIA CUDA :: 12",
  "Environment :: GPU :: NVIDIA CUDA :: 11.8",
  "Environment :: GPU :: NVIDIA CUDA :: 11.7",
  "License :: OSI Approved :: Apache Software License",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
  "Topic :: Software Development :: Libraries",
  "Operating System :: OS Independent",
  "Intended Audience :: Developers",
  "Intended Audience :: Science/Research",
  "Intended Audience :: System Administrators",
  "Typing :: Typed",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3 :: Only",
  "Programming Language :: Python :: 3.8",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  "Programming Language :: Python :: Implementation :: CPython",
  "Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = [
  "bentoml[io]>=1.1.2",
  "transformers[torch,tokenizers]>=4.35.0",
  "openllm-client",
  "openllm-core",
  "safetensors",
  "optimum>=1.12.0",
  "accelerate",
  "ghapi",
  "tabulate[widechars]>=0.9.0",
  "click>=8.1.3",
  "cuda-python;platform_system!=\"Darwin\"",
  "bitsandbytes<0.42",
]
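# The ';platform_system!="Darwin"' environment marker skips cuda-python on
# macOS, where CUDA is unavailable; bitsandbytes is deliberately capped below 0.42.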
description = "OpenLLM: Operating LLMs in production"
dynamic = ["version", "readme"]
keywords = [
  "MLOps",
  "AI",
  "BentoML",
  "Model Serving",
  "Model Deployment",
  "LLMOps",
  "Falcon",
  "Vicuna",
  "Llama 2",
  "Fine tuning",
  "Serverless",
  "Large Language Model",
  "Generative AI",
  "StableLM",
  "Alpaca",
  "PyTorch",
  "Transformers",
]
license = "Apache-2.0"
name = "openllm"
requires-python = ">=3.8"
[project.scripts]
openllm = "openllm.cli.entrypoint:cli"
openllm-build-base-container = "openllm.cli.extension.build_base_container:cli"
openllm-dive-bentos = "openllm.cli.extension.dive_bentos:cli"
openllm-get-containerfile = "openllm.cli.extension.get_containerfile:cli"
openllm-get-prompt = "openllm.cli.extension.get_prompt:cli"
openllm-list-bentos = "openllm.cli.extension.list_bentos:cli"
openllm-list-models = "openllm.cli.extension.list_models:cli"
openllm-playground = "openllm.cli.extension.playground:cli"
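# Each entry above maps a console command to a Click entrypoint, so after
# `pip install openllm` the main `openllm` command and the `openllm-*`
# extension commands (e.g. `openllm-list-models`) land on PATH.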
[project.urls]
Blog = "https://modelserving.com"
Chat = "https://discord.gg/openllm"
Documentation = "https://github.com/bentoml/openllm#readme"
GitHub = "https://github.com/bentoml/OpenLLM"
History = "https://github.com/bentoml/OpenLLM/blob/main/CHANGELOG.md"
Homepage = "https://bentoml.com"
Tracker = "https://github.com/bentoml/OpenLLM/issues"
Twitter = "https://twitter.com/bentomlai"
[project.optional-dependencies]
agents = ["transformers[agents]>=4.34.0", "diffusers", "soundfile"]
all = ["openllm[full]"]
awq = ["autoawq"]
baichuan = ["cpm-kernels", "sentencepiece"]
chatglm = ["cpm-kernels", "sentencepiece"]
falcon = ["einops", "xformers"]
fine-tune = ["peft>=0.6.0", "bitsandbytes", "datasets", "accelerate", "trl", "scipy"]
flan-t5 = ["transformers>=4.34.0"]
full = [
  "openllm[agents,awq,baichuan,chatglm,falcon,fine-tune,flan-t5,ggml,gptq,grpc,llama,mpt,openai,opt,playground,starcoder,vllm]",
]
ggml = ["ctransformers"]
gptq = ["auto-gptq[triton]>=0.4.2", "optimum>=1.12.0"]
grpc = ["openllm-client[grpc]"]
llama = ["fairscale", "sentencepiece", "scipy"]
mpt = ["triton", "einops"]
openai = ["openai[datalib]>=1", "tiktoken"]
opt = ["transformers>=4.34.0"]
playground = ["jupyter", "notebook", "ipython", "jupytext", "nbformat"]
starcoder = ["bitsandbytes"]
vllm = ["vllm>=0.2.1.post1", "ray"]
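# Extras compose at install time; a few illustrative invocations (sketches,
# not an exhaustive list):
#
#   pip install "openllm[vllm]"       # vLLM backend plus ray
#   pip install "openllm[fine-tune]"  # peft/trl/datasets fine-tuning stack
#   pip install "openllm[all]"        # alias for openllm[full], i.e. every extra above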
[tool.hatch.version]
fallback-version = "0.0.0"
source = "vcs"
[tool.hatch.build.hooks.vcs]
version-file = "src/openllm/_version.py"
[tool.hatch.version.raw-options]
git_describe_command = [
  "git",
  "describe",
  "--dirty",
  "--tags",
  "--long",
  "--first-parent",
]
local_scheme = "no-local-version"
root = ".."
[tool.hatch.metadata]
allow-direct-references = true
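# Permits PEP 508 direct references (URL/path) in dependency specifiers;
# presumably this is how the sibling openllm-client and openllm-core packages
# are resolved from the monorepo during development (an assumption about the workflow).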
[tool.hatch.build.targets.wheel]
only-include = ["src/openllm"]
sources = ["src"]
[tool.hatch.build.targets.sdist]
exclude = [
  "/.git_archival.txt",
  "tests",
  "/.python-version-default",
  "ADDING_NEW_MODEL.md",
]
[tool.hatch.build.targets.wheel.hooks.mypyc]
dependencies = [
  "hatch-mypyc==0.16.0",
  "mypy==1.5.1",
  # avoid https://github.com/pallets/click/issues/2558
  "click==8.1.3",
  "bentoml==1.1.2",
  "transformers>=4.32.1",
  "pandas-stubs",
  "types-psutil",
  "types-tabulate",
  "types-PyYAML",
  "types-protobuf",
]
enable-by-default = false
exclude = ["src/openllm/_service.py", "src/openllm/utils/__init__.py"]
include = [
  "src/openllm/__init__.py",
  "src/openllm/_quantisation.py",
  "src/openllm/_generation.py",
  "src/openllm/exceptions.py",
  "src/openllm/testing.py",
  "src/openllm/utils",
]
# NOTE: keep these flags in sync with the mypy configuration in the root pyproject.toml
mypy-args = [
  "--strict",
  # transitive libraries generally don't ship type information
  "--follow-imports=skip",
  "--allow-subclassing-any",
  "--check-untyped-defs",
  "--ignore-missing-imports",
  "--no-warn-return-any",
  "--warn-unreachable",
  "--no-warn-no-return",
  "--no-warn-unused-ignores",
  "--exclude='/src\\/openllm\\/playground\\/**'",
]
options = { verbose = true, strip_asserts = true, debug_level = "2", opt_level = "3", include_runtime_files = true }
require-runtime-dependencies = true
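# Because enable-by-default is false, a regular build produces a pure-Python
# wheel. A compiled build can be opted into via hatchling's standard escape
# hatch for disabled build hooks, e.g. (sketch):
#
#   HATCH_BUILD_HOOK_ENABLE_MYPYC=1 pip wheel .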
[tool.hatch.metadata.hooks.fancy-pypi-readme]
content-type = "text/markdown"
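# The fragments below are concatenated in declaration order to form the PyPI
# readme: `text` fragments are emitted verbatim, while `path` fragments slice
# the named file between their start-after/end-before markers (or, for the
# changelog, by regex `pattern`).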
# PyPI doesn't support the <picture> tag.
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
text = """
<p align="center">
<a href="https://github.com/bentoml/openllm">
<img src="https://raw.githubusercontent.com/bentoml/openllm/main/.github/assets/main-banner.png" alt="Banner for OpenLLM" />
</a>
</p>
"""
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
end-before = "\n<!-- hatch-fancy-pypi-readme intro stop -->"
path = "README.md"
start-after = "<!-- hatch-fancy-pypi-readme intro start -->\n"
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
text = """
<p align="center">
<img src="https://raw.githubusercontent.com/bentoml/openllm/main/.github/assets/output.gif" alt="Gif showing OpenLLM Intro" />
</p>
"""
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
end-before = "\n<!-- hatch-fancy-pypi-readme interim stop -->"
path = "README.md"
start-after = "<!-- hatch-fancy-pypi-readme interim start -->\n"
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
text = """
<p align="center">
<img src="https://raw.githubusercontent.com/bentoml/openllm/main/.github/assets/agent.gif" alt="Gif showing Agent integration" />
</p>
"""
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
end-before = "\n<!-- hatch-fancy-pypi-readme meta stop -->"
path = "README.md"
start-after = "<!-- hatch-fancy-pypi-readme meta start -->\n"
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
text = """
## Release Information
"""
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
path = "CHANGELOG.md"
pattern = "\n(###.+?\n)## "
start-after = "<!-- towncrier release notes start -->"
[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
text = """
---
[Click me for full changelog](https://github.com/bentoml/openllm/blob/main/CHANGELOG.md)
"""