# OpenLLM/pyproject.toml
[build-system]
build-backend = "hatchling.build"
requires = [
"hatchling==1.18.0",
"hatch-vcs==0.3.0",
"hatch-fancy-pypi-readme==23.1.0",
]
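# hatch-vcs derives the package version from git metadata and
# hatch-fancy-pypi-readme assembles the long description at build time,
# which is why "version" and "readme" are declared dynamic under [project].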
[project]
authors = [
{ name = "Aaron Pham", email = "aarnphm@bentoml.com" },
{ name = "BentoML Team", email = "contact@bentoml.com" },
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: GPU :: NVIDIA CUDA",
"Environment :: GPU :: NVIDIA CUDA :: 12",
"Environment :: GPU :: NVIDIA CUDA :: 11.8",
"Environment :: GPU :: NVIDIA CUDA :: 11.7",
"License :: OSI Approved :: Apache Software License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Typing :: Typed",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
description = "OpenLLM monorepo"
dynamic = ["version", "readme", "dependencies"]
keywords = [
"MLOps",
"AI",
"BentoML",
"Model Serving",
"Model Deployment",
"LLMOps",
"Falcon",
"Vicuna",
"Llama 2",
"Fine tuning",
"Serverless",
"Large Language Model",
"Generative AI",
"StableLM",
"Alpaca",
"PyTorch",
"Transformers",
]
license = "Apache-2.0"
name = "openllm-monorepo"
requires-python = ">=3.8"
[project.urls]
Blog = "https://modelserving.com"
Chat = "https://discord.gg/openllm"
Documentation = "https://github.com/bentoml/openllm#readme"
GitHub = "https://github.com/bentoml/OpenLLM"
History = "https://github.com/bentoml/OpenLLM/blob/main/CHANGELOG.md"
Homepage = "https://bentoml.com"
Tracker = "https://github.com/bentoml/OpenLLM/issues"
Twitter = "https://twitter.com/bentomlai"
[tool.hatch.version]
fallback-version = "0.0.0"
source = "vcs"
[tool.hatch.version.raw-options]
git_describe_command = [
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--first-parent",
]
local_scheme = "no-local-version"
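# With source = "vcs", the version is computed from the output of the
# git describe invocation above. As a rough sketch (tags are illustrative,
# not taken from this repository's history):
#   git describe --dirty --tags --long --first-parent  ->  v0.3.9-0-gabc1234
#   resulting version                                  ->  0.3.9
# Commits past a tag produce a .devN pre-release, and local_scheme =
# "no-local-version" drops the +g<sha> local suffix so builds remain
# uploadable to PyPI. Without git metadata, fallback-version = "0.0.0" applies.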
[tool.hatch.metadata]
allow-direct-references = true
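# allow-direct-references lets project metadata use PEP 508 direct URL
# requirements (e.g. "pkg @ file:///path" or VCS URLs), which standard
# metadata validation would otherwise reject; handy in a monorepo where
# sibling packages may be pinned to local paths during development.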
[tool.cibuildwheel]
build-verbosity = 1
# The following environments are targeted for compiled wheels:
# - Python: CPython 3.8-3.11
# - Architecture (64-bit only): amd64 / x86_64, universal2, and arm64
# - OS: Linux (no musl) and macOS
build = "cp3*-*"
skip = ["*-manylinux_i686", "*-musllinux_*", "*-win32", "pp*", "cp312-*"]
[tool.cibuildwheel.environment]
HATCH_BUILD_HOOKS_ENABLE = "1"
MYPYC_DEBUG_LEVEL = "1"
MYPYC_OPT_LEVEL = "3"
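# HATCH_BUILD_HOOKS_ENABLE turns on hatch build hooks inside the wheel build;
# the MYPYC_* variables are presumably read by a mypyc build hook in this
# repository to select the optimization and debug levels when compiling
# modules to C extensions. (The hook itself is not shown in this file.)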
[tool.cibuildwheel.linux]
before-build = ["yum install -y clang gcc"]
[tool.cibuildwheel.macos]
before-build = ["rustup target add aarch64-apple-darwin"]
[tool.cibuildwheel.linux.environment]
HATCH_BUILD_HOOKS_ENABLE = "1"
MYPYC_DEBUG_LEVEL = "1"
MYPYC_OPT_LEVEL = "3"
# Use clang to compile successfully on Linux.
CC = "clang"
[tool.towncrier]
directory = "changelog.d"
filename = "CHANGELOG.md"
issue_format = "[#{issue}](https://github.com/bentoml/openllm/issues/{issue})"
name = "openllm"
start_string = "<!-- towncrier release notes start -->\n"
template = "changelog.d/template.md.jinja"
title_format = ""
underlines = ["", "", ""]
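# towncrier renders news fragments from changelog.d/ into CHANGELOG.md at the
# start_string marker. A typical workflow (issue number is illustrative):
#   towncrier create 1234.feature.md   # add a fragment for issue #1234
#   towncrier build --version 0.4.0    # compile fragments into CHANGELOG.md
# The fragment suffixes correspond to the [[tool.towncrier.type]] directories
# declared below.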
[[tool.towncrier.section]]
path = ""
[[tool.towncrier.type]]
directory = "breaking"
name = "Backwards-incompatible Changes"
showcontent = true
[[tool.towncrier.type]]
directory = "deprecation"
name = "Deprecations"
showcontent = true
[[tool.towncrier.type]]
directory = "change"
name = "Changes"
showcontent = true
[[tool.towncrier.type]]
directory = "refactor"
name = "Refactor"
showcontent = true
[[tool.towncrier.type]]
directory = "feature"
name = "Features"
showcontent = true
[[tool.towncrier.type]]
directory = "fix"
name = "Bug fix"
showcontent = true
[tool.interrogate]
fail-under = 100
verbose = 2
whitelist-regex = ["test_.*"]
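# interrogate measures docstring coverage: fail-under = 100 fails the check
# unless every eligible object is documented, and whitelist-regex additionally
# includes test_* functions that would otherwise be ignored.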
[tool.check-wheel-contents]
toplevel = ["openllm"]
[tool.pytest.ini_options]
addopts = ["-rfEX", "-pno:warnings", "--snapshot-warn-unused"]
python_files = ["test_*.py", "*_test.py"]
testpaths = ["openllm-python/tests"]
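# The suite can be run from the repository root with, e.g.:
#   pytest
# which picks up testpaths automatically. "-p no:warnings" disables pytest's
# warnings plugin, and --snapshot-warn-unused is provided by the snapshot
# plugin the tests use (presumably syrupy).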
[tool.coverage.paths]
openllm = [
"openllm-python/src/openllm",
"*/openllm-python/src/openllm",
"openllm-client/src/openllm_client",
"*/openllm-client/src/openllm_client",
"openllm-core/src/openllm_core",
"*/openllm-core/src/openllm_core",
]
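# [tool.coverage.paths] declares each path group above as equivalent when
# combining coverage data, so files measured under different checkout
# prefixes (e.g. tox or CI workdirs) map back to the canonical
# openllm-*/src/* locations in the report.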
[tool.coverage.run]
branch = true
omit = [
"__pypackages__/*",
"openllm-python/src/openllm/_version.py",
"openllm-python/src/openllm/__init__.py",
"openllm-python/src/openllm/__main__.py",
"openllm-core/src/openllm_core/_typing_compat.py",
"openllm-client/src/openllm_client/pb/**",
]
source_pkgs = ["openllm", "openllm_core", "openllm_client"]
[tool.coverage.report]
exclude_lines = [
"no cov",
"pragma: no cover",
"if __name__ == .__main__.:",
"if t.TYPE_CHECKING:",
"if _t.TYPE_CHECKING:",
'if TYPE_CHECKING:',
'if typing.TYPE_CHECKING:',
'if DEBUG:',
'if utils.DEBUG',
'if openllm.utils.DEBUG',
'if openllm_core.utils.DEBUG',
'@_overload',
'@overload',
'@t.overload',
'@typing.overload',
'raise NotImplementedError',
'raise NotImplemented',
'except MissingDependencyError:',
]
omit = [
"__pypackages__/*",
"openllm-python/src/openllm/_version.py",
"openllm-python/src/openllm/__init__.py",
"openllm-python/src/openllm/__main__.py",
"openllm-core/src/openllm_core/_typing_compat.py",
"openllm-client/src/openllm_client/pb/**",
]
precision = 2
show_missing = true
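# A typical local measurement under these settings:
#   coverage run -m pytest
#   coverage combine
#   coverage report
# exclude_lines strips typing/overload/debug-only branches from the report,
# and the omit list skips generated or pass-through modules (version stubs,
# protobuf output, typing compat shims).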