# pyproject.toml for OpenLLM. Layout follows PEP 621 convention:
# [build-system] first, then [project] and its sub-tables, then [tool.*].

[build-system]
requires = ["hatchling==1.27.0", "hatch-vcs==0.5.0"]
build-backend = "hatchling.build"

[project]
name = "openllm"
description = "OpenLLM: Self-hosting LLMs Made Easy."
readme = { file = "README.md", content-type = "text/markdown" }
authors = [{ name = "BentoML Team", email = "contact@bentoml.com" }]
license = "Apache-2.0"
requires-python = ">=3.9"
# Version is derived from git tags via hatch-vcs (see [tool.hatch.version]).
dynamic = ["version"]
# Python version classifiers track requires-python: 3.8 is not supported.
classifiers = [
  "Development Status :: 5 - Production/Stable",
  "Environment :: GPU :: NVIDIA CUDA",
  "Environment :: GPU :: NVIDIA CUDA :: 12",
  "Environment :: GPU :: NVIDIA CUDA :: 11.8",
  "Environment :: GPU :: NVIDIA CUDA :: 11.7",
  "License :: OSI Approved :: Apache Software License",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
  "Topic :: Software Development :: Libraries",
  "Operating System :: OS Independent",
  "Intended Audience :: Developers",
  "Intended Audience :: Science/Research",
  "Intended Audience :: System Administrators",
  "Typing :: Typed",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3 :: Only",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  "Programming Language :: Python :: Implementation :: CPython",
  "Programming Language :: Python :: Implementation :: PyPy",
]
keywords = [
  "AI",
  "Alpaca",
  "BentoML",
  "Falcon",
  "Fine tuning",
  "Generative AI",
  "LLMOps",
  "Large Language Model",
  "Llama 2",
  "MLOps",
  "Mistral",
  "Model Deployment",
  "Model Serving",
  "PyTorch",
  "Serverless",
  "StableLM",
  "Transformers",
  "Vicuna",
  "vLLM",
]
# PEP 508 requirement strings, one per line, sorted alphabetically.
dependencies = [
  "attrs",
  "bentoml==1.4.23",
  "dulwich",
  "hf-xet",
  "huggingface-hub",
  "nvidia-ml-py",
  "openai==1.90.0",
  "pip_requirements_parser",
  "psutil",
  "pyaml",
  "questionary",
  "tabulate",
  "typer",
  "typing-extensions>=4.12.2",
  "uv",
]

[project.scripts]
openllm = "openllm.__main__:app"

[project.urls]
Blog = "https://modelserving.com"
Documentation = "https://github.com/bentoml/OpenLLM#readme"
GitHub = "https://github.com/bentoml/OpenLLM"
Homepage = "https://bentoml.com"
Tracker = "https://github.com/bentoml/OpenLLM/issues"
Twitter = "https://twitter.com/bentomlai"

# PEP 735 dependency groups (dev-only; not published as extras).
[dependency-groups]
tests = ["pexpect>=4.9.0", "pytest>=8.3.5"]

[tool.hatch.version]
source = "vcs"
fallback-version = "0.0.0"

# Options forwarded verbatim to setuptools-scm by hatch-vcs.
[tool.hatch.version.raw-options]
git_describe_command = [
  "git",
  "describe",
  "--dirty",
  "--tags",
  "--long",
  "--first-parent",
]
version_scheme = "post-release"
fallback_version = "0.0.0"

# Write the resolved version into the package so runtime code can read it.
[tool.hatch.build.hooks.vcs]
version-file = "src/openllm/_version.py"

[tool.hatch.metadata]
allow-direct-references = true

[tool.hatch.build.targets.sdist]
exclude = ["/.git_archival.txt", "/.python-version-default"]

[tool.hatch.build.targets.wheel]
only-include = ["src/openllm"]
sources = ["src"]

[tool.typer]
src-dir = "src/openllm"