From 103156cd71f5663c250b1607d10d12c5872bceb3 Mon Sep 17 00:00:00 2001
From: Aaron Pham <29749331+aarnphm@users.noreply.github.com>
Date: Tue, 14 Nov 2023 23:20:50 -0500
Subject: [PATCH] chore(cli): move playground to CLI components (#655)

Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
---
 openllm-python/pyproject.toml                 | 132 +++++++++---
 openllm-python/src/openllm/__init__.py        |   3 -
 openllm-python/src/openllm/__init__.pyi       |   1 -
 .../src/openllm_cli/extension/playground.py   |   3 +-
 .../playground/README.md                      |   0
 .../playground/__init__.py                    |   0
 .../playground/_meta.yml                      |   0
 .../playground/falcon_tuned.py                |   0
 .../playground/features.py                    |   2 +-
 .../playground/llama2_qlora.py                |   0
 .../playground/opt_tuned.py                   |   0
 pyproject.toml                                |   9 --
 ruff.toml                                     |   2 +-
 13 files changed, 70 insertions(+), 82 deletions(-)
 rename openllm-python/src/{openllm => openllm_cli}/playground/README.md (100%)
 rename openllm-python/src/{openllm => openllm_cli}/playground/__init__.py (100%)
 rename openllm-python/src/{openllm => openllm_cli}/playground/_meta.yml (100%)
 rename openllm-python/src/{openllm => openllm_cli}/playground/falcon_tuned.py (100%)
 rename openllm-python/src/{openllm => openllm_cli}/playground/features.py (100%)
 rename openllm-python/src/{openllm => openllm_cli}/playground/llama2_qlora.py (100%)
 rename openllm-python/src/{openllm => openllm_cli}/playground/opt_tuned.py (100%)

diff --git a/openllm-python/pyproject.toml b/openllm-python/pyproject.toml
index 656cce58..0995afd8 100644
--- a/openllm-python/pyproject.toml
+++ b/openllm-python/pyproject.toml
@@ -2,75 +2,75 @@
 [build-system]
 build-backend = "hatchling.build"
 requires = [
-    "hatchling==1.18.0",
-    "hatch-vcs==0.3.0",
-    "hatch-fancy-pypi-readme==23.1.0",
+  "hatchling==1.18.0",
+  "hatch-vcs==0.3.0",
+  "hatch-fancy-pypi-readme==23.1.0",
 ]

 [project]
 authors = [
-    {name = "Aaron Pham",email = "aarnphm@bentoml.com"},
-    {name = "BentoML Team",email = "contact@bentoml.com"},
+  { name = "Aaron Pham", email = "aarnphm@bentoml.com" },
+  { name = "BentoML Team", email = "contact@bentoml.com" },
 ]
 classifiers = [
-    "Development Status :: 5 - Production/Stable",
-    "Environment :: GPU :: NVIDIA CUDA",
-    "Environment :: GPU :: NVIDIA CUDA :: 12",
-    "Environment :: GPU :: NVIDIA CUDA :: 11.8",
-    "Environment :: GPU :: NVIDIA CUDA :: 11.7",
-    "License :: OSI Approved :: Apache Software License",
-    "Topic :: Scientific/Engineering :: Artificial Intelligence",
-    "Topic :: Software Development :: Libraries",
-    "Operating System :: OS Independent",
-    "Intended Audience :: Developers",
-    "Intended Audience :: Science/Research",
-    "Intended Audience :: System Administrators",
-    "Typing :: Typed",
-    "Programming Language :: Python",
-    "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3 :: Only",
-    "Programming Language :: Python :: 3.8",
-    "Programming Language :: Python :: 3.9",
-    "Programming Language :: Python :: 3.10",
-    "Programming Language :: Python :: 3.11",
-    "Programming Language :: Python :: 3.12",
-    "Programming Language :: Python :: Implementation :: CPython",
-    "Programming Language :: Python :: Implementation :: PyPy",
+  "Development Status :: 5 - Production/Stable",
+  "Environment :: GPU :: NVIDIA CUDA",
+  "Environment :: GPU :: NVIDIA CUDA :: 12",
+  "Environment :: GPU :: NVIDIA CUDA :: 11.8",
+  "Environment :: GPU :: NVIDIA CUDA :: 11.7",
+  "License :: OSI Approved :: Apache Software License",
+  "Topic :: Scientific/Engineering :: Artificial Intelligence",
+  "Topic :: Software Development :: Libraries",
+  "Operating System :: OS Independent",
+  "Intended Audience :: Developers",
+  "Intended Audience :: Science/Research",
+  "Intended Audience :: System Administrators",
+  "Typing :: Typed",
+  "Programming Language :: Python",
+  "Programming Language :: Python :: 3",
+  "Programming Language :: Python :: 3 :: Only",
+  "Programming Language :: Python :: 3.8",
+  "Programming Language :: Python :: 3.9",
+  "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
+  "Programming Language :: Python :: 3.12",
+  "Programming Language :: Python :: Implementation :: CPython",
+  "Programming Language :: Python :: Implementation :: PyPy",
 ]
 dependencies = [
-    "bentoml[io]>=1.1.2",
-    "transformers[torch,tokenizers]>=4.35.0",
-    "openllm-client",
-    "openllm-core",
-    "safetensors",
-    "optimum>=1.12.0",
-    "accelerate",
-    "ghapi",
-    "build[virtualenv]<1",
-    "click>=8.1.3",
-    "cuda-python;platform_system!=\"Darwin\"",
-    "bitsandbytes<0.42",
+  "bentoml[io]>=1.1.2",
+  "transformers[torch,tokenizers]>=4.35.0",
+  "openllm-client",
+  "openllm-core",
+  "safetensors",
+  "optimum>=1.12.0",
+  "accelerate",
+  "ghapi",
+  "build[virtualenv]<1",
+  "click>=8.1.3",
+  "cuda-python;platform_system!=\"Darwin\"",
+  "bitsandbytes<0.42",
 ]
 description = "OpenLLM: Operating LLMs in production"
 dynamic = ["version", "readme"]
 keywords = [
-    "MLOps",
-    "AI",
-    "BentoML",
-    "Model Serving",
-    "Model Deployment",
-    "LLMOps",
-    "Falcon",
-    "Vicuna",
-    "Llama 2",
-    "Fine tuning",
-    "Serverless",
-    "Large Language Model",
-    "Generative AI",
-    "StableLM",
-    "Alpaca",
-    "PyTorch",
-    "Transformers",
+  "MLOps",
+  "AI",
+  "BentoML",
+  "Model Serving",
+  "Model Deployment",
+  "LLMOps",
+  "Falcon",
+  "Vicuna",
+  "Llama 2",
+  "Fine tuning",
+  "Serverless",
+  "Large Language Model",
+  "Generative AI",
+  "StableLM",
+  "Alpaca",
+  "PyTorch",
+  "Transformers",
 ]
 license = "Apache-2.0"
 name = "openllm"
@@ -102,10 +102,17 @@ awq = ["autoawq"]
 baichuan = ["cpm-kernels", "sentencepiece"]
 chatglm = ["cpm-kernels", "sentencepiece"]
 falcon = ["einops", "xformers"]
-fine-tune = ["peft>=0.6.0", "bitsandbytes", "datasets", "accelerate", "trl", "scipy"]
+fine-tune = [
+  "peft>=0.6.0",
+  "bitsandbytes",
+  "datasets",
+  "accelerate",
+  "trl",
+  "scipy",
+]
 flan-t5 = ["transformers>=4.34.0"]
 full = [
-    "openllm[agents,awq,baichuan,chatglm,falcon,fine-tune,flan-t5,ggml,gptq,grpc,llama,mpt,openai,opt,playground,starcoder,vllm]",
+  "openllm[agents,awq,baichuan,chatglm,falcon,fine-tune,flan-t5,ggml,gptq,grpc,llama,mpt,openai,opt,playground,starcoder,vllm]",
 ]
 ggml = ["ctransformers"]
 gptq = ["auto-gptq[triton]>=0.4.2", "optimum>=1.12.0"]
@@ -140,11 +147,7 @@ allow-direct-references = true
 only-include = ["src/openllm", "src/openllm_cli"]
 sources = ["src"]
 [tool.hatch.build.targets.sdist]
-exclude = [
-    "/.git_archival.txt",
-    "tests",
-    "/.python-version-default",
-]
+exclude = ["/.git_archival.txt", "tests", "/.python-version-default"]
 [tool.hatch.build.targets.wheel.hooks.mypyc]
 dependencies = [
   "hatch-mypyc==0.16.0",
@@ -181,7 +184,6 @@ mypy-args = [
   "--warn-unreachable",
   "--no-warn-no-return",
   "--no-warn-unused-ignores",
-  "--exclude='/src\\/openllm\\/playground\\/**'",
 ]
 options = { verbose = true, strip_asserts = true, debug_level = "2", opt_level = "3", include_runtime_files = true }
 require-runtime-dependencies = true
diff --git a/openllm-python/src/openllm/__init__.py b/openllm-python/src/openllm/__init__.py
index 272ee8d0..3d48570b 100644
--- a/openllm-python/src/openllm/__init__.py
+++ b/openllm-python/src/openllm/__init__.py
@@ -15,12 +15,10 @@
 import pathlib as _pathlib
 import warnings as _warnings

 import openllm_cli as _cli
-
 from openllm_cli import _sdk
 from . import utils as utils
-
 if utils.DEBUG:
   utils.set_debug_mode(True)
   utils.set_quiet_mode(False)

@@ -51,7 +49,6 @@ __lazy = utils.LazyModule(
     'exceptions': [],
     'client': ['HTTPClient', 'AsyncHTTPClient'],
     'bundle': [],
-    'playground': [],
     'testing': [],
     'protocol': [],
     'utils': [],
diff --git a/openllm-python/src/openllm/__init__.pyi b/openllm-python/src/openllm/__init__.pyi
index 420299ad..531ee8b7 100644
--- a/openllm-python/src/openllm/__init__.pyi
+++ b/openllm-python/src/openllm/__init__.pyi
@@ -38,7 +38,6 @@ from . import (
   bundle as bundle,
   client as client,
   exceptions as exceptions,
-  playground as playground,
   serialisation as serialisation,
   testing as testing,
   utils as utils,
diff --git a/openllm-python/src/openllm_cli/extension/playground.py b/openllm-python/src/openllm_cli/extension/playground.py
index 23d89249..40ed9831 100644
--- a/openllm-python/src/openllm_cli/extension/playground.py
+++ b/openllm-python/src/openllm_cli/extension/playground.py
@@ -13,8 +13,7 @@
 import jupytext
 import nbformat
 import yaml
-from openllm import playground
-from openllm_cli import termui
+from openllm_cli import playground, termui
 from openllm_core.utils import is_jupyter_available, is_jupytext_available, is_notebook_available

 if t.TYPE_CHECKING:
diff --git a/openllm-python/src/openllm/playground/README.md b/openllm-python/src/openllm_cli/playground/README.md
similarity index 100%
rename from openllm-python/src/openllm/playground/README.md
rename to openllm-python/src/openllm_cli/playground/README.md
diff --git a/openllm-python/src/openllm/playground/__init__.py b/openllm-python/src/openllm_cli/playground/__init__.py
similarity index 100%
rename from openllm-python/src/openllm/playground/__init__.py
rename to openllm-python/src/openllm_cli/playground/__init__.py
diff --git a/openllm-python/src/openllm/playground/_meta.yml b/openllm-python/src/openllm_cli/playground/_meta.yml
similarity index 100%
rename from openllm-python/src/openllm/playground/_meta.yml
rename to openllm-python/src/openllm_cli/playground/_meta.yml
diff --git a/openllm-python/src/openllm/playground/falcon_tuned.py b/openllm-python/src/openllm_cli/playground/falcon_tuned.py
similarity index 100%
rename from openllm-python/src/openllm/playground/falcon_tuned.py
rename to openllm-python/src/openllm_cli/playground/falcon_tuned.py
diff --git a/openllm-python/src/openllm/playground/features.py b/openllm-python/src/openllm_cli/playground/features.py
similarity index 100%
rename from openllm-python/src/openllm/playground/features.py
rename to openllm-python/src/openllm_cli/playground/features.py
index 62a4a19a..6233a836 100644
--- a/openllm-python/src/openllm/playground/features.py
+++ b/openllm-python/src/openllm_cli/playground/features.py
@@ -1,9 +1,9 @@
 from __future__ import annotations
 import argparse
+import asyncio
 import logging
 import typing as t
-import asyncio

 import openllm

 openllm.utils.configure_logging()
diff --git a/openllm-python/src/openllm/playground/llama2_qlora.py b/openllm-python/src/openllm_cli/playground/llama2_qlora.py
similarity index 100%
rename from openllm-python/src/openllm/playground/llama2_qlora.py
rename to openllm-python/src/openllm_cli/playground/llama2_qlora.py
diff --git a/openllm-python/src/openllm/playground/opt_tuned.py b/openllm-python/src/openllm_cli/playground/opt_tuned.py
similarity index 100%
rename from openllm-python/src/openllm/playground/opt_tuned.py
rename to openllm-python/src/openllm_cli/playground/opt_tuned.py
diff --git a/pyproject.toml b/pyproject.toml
index 0bd49179..1d569a71 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -155,13 +155,6 @@ whitelist-regex = ["test_.*"]
 [tool.check-wheel-contents]
 toplevel = ["openllm"]

-[tool.yapfignore]
-ignore_patterns = [
-  "openllm-python/src/openllm/playground",
-  "openllm-python/src/openllm/models/__init__.py",
-  "openllm-client/src/openllm_client/pb/**",
-]
-
 [tool.pytest.ini_options]
 addopts = ["-rfEX", "-pno:warnings", "--snapshot-warn-unused"]

@@ -182,7 +175,6 @@ branch = true
 omit = [
   "__pypackages__/*",
   "openllm-python/src/openllm/_version.py",
-  "openllm-python/src/openllm/playground/",
   "openllm-python/src/openllm/__init__.py",
   "openllm-python/src/openllm/__main__.py",
   "openllm-core/src/openllm_core/_typing_compat.py",
@@ -214,7 +206,6 @@ exclude_lines = [
 omit = [
   "__pypackages__/*",
   "openllm-python/src/openllm/_version.py",
-  "openllm-python/src/openllm/playground/",
   "openllm-python/src/openllm/__init__.py",
   "openllm-python/src/openllm/__main__.py",
   "openllm-core/src/openllm_core/_typing_compat.py",
diff --git a/ruff.toml b/ruff.toml
index 88a9883b..00e5ceb7 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -1,10 +1,10 @@
 extend-exclude = [
   "tools",
   "examples",
-  "openllm-python/src/openllm/playground",
   "openllm-python/src/openllm/__init__.py",
   "openllm-python/src/openllm/_version.py",
   "openllm-python/src/openllm/models/__init__.py",
+  "openllm-python/src/openllm_cli/playground",
   "openllm-client/src/openllm_client/pb/**",
 ]
 extend-include = ["*.ipynb"]
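
Usage note: after this patch, the playground package resolves against openllm_cli rather
than openllm, as the extension/playground.py hunk above shows. A minimal sketch of the new
import path follows; the list_playground_scripts helper is hypothetical (not part of this
patch) and assumes openllm_cli is installed with the renamed layout introduced here:

    import os

    # New home of the playground package as of this patch;
    # previously: from openllm import playground
    from openllm_cli import playground

    def list_playground_scripts() -> list[str]:
      # The playground package ships its tutorials as plain .py files next to
      # _meta.yml, so they can be located relative to the package's __init__.py.
      root = os.path.dirname(playground.__file__)
      return sorted(f for f in os.listdir(root) if f.endswith('.py') and f != '__init__.py')

    if __name__ == '__main__':
      print(list_playground_scripts())  # e.g. ['falcon_tuned.py', 'features.py', ...]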