From 31a799ff6133d2cf7e5400c27f21e025b9166e7c Mon Sep 17 00:00:00 2001 From: Aaron Pham <29749331+aarnphm@users.noreply.github.com> Date: Tue, 14 Nov 2023 01:39:58 -0500 Subject: [PATCH] refactor: use DEBUG env-var instead of OPENLLMDEVDEBUG (#647) Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com> --- DEVELOPMENT.md | 2 +- openllm-core/src/openllm_core/utils/__init__.py | 4 ++-- openllm-python/src/openllm/bundle/oci/__init__.py | 2 +- openllm-python/src/openllm/entrypoints/cohere.py | 4 ++-- openllm-python/src/openllm/playground/falcon_tuned.py | 1 - openllm-python/src/openllm/playground/llama2_qlora.py | 1 - openllm-python/src/openllm/playground/opt_tuned.py | 1 - 7 files changed, 6 insertions(+), 9 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 006cc33a..3cba191d 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -29,7 +29,7 @@ Before you can start developing, you'll need to set up your environment: > Therefore any tools that understand `.python-version` will use the correct Python version. > [!NOTE] -> When in doubt, set `OPENLLMDEVDEBUG=5` to see all generation debug logs and outputs +> When in doubt, set `DEBUG=5` to see all generation debug logs and outputs 1. Ensure you have [Git](https://git-scm.com/), and [Python3.8+](https://www.python.org/downloads/) installed. 
diff --git a/openllm-core/src/openllm_core/utils/__init__.py b/openllm-core/src/openllm_core/utils/__init__.py index 734a1496..a3eda552 100644 --- a/openllm-core/src/openllm_core/utils/__init__.py +++ b/openllm-core/src/openllm_core/utils/__init__.py @@ -41,7 +41,7 @@ else: # _GenericAlias is the actual GenericAlias implementation _WithArgsTypes: t.Any = (t._GenericAlias, types.GenericAlias, types.UnionType) # type: ignore -DEV_DEBUG_VAR = 'OPENLLMDEVDEBUG' +DEV_DEBUG_VAR = 'DEBUG' def resolve_user_filepath(filepath: str, ctx: str | None) -> str: @@ -157,7 +157,7 @@ def field_env_key(key: str, suffix: str | None = None) -> str: return '_'.join(filter(None, map(str.upper, ['OPENLLM', suffix.strip('_') if suffix else '', key]))) -# Special debug flag controled via OPENLLMDEVDEBUG +# Special debug flag controlled via DEBUG DEBUG: bool = sys.flags.dev_mode or (not sys.flags.ignore_environment and check_bool_env(DEV_DEBUG_VAR, default=False)) # Whether to show the codenge for debug purposes SHOW_CODEGEN: bool = DEBUG and ( diff --git a/openllm-python/src/openllm/bundle/oci/__init__.py b/openllm-python/src/openllm/bundle/oci/__init__.py index fdb348d5..1755ecba 100644 --- a/openllm-python/src/openllm/bundle/oci/__init__.py +++ b/openllm-python/src/openllm/bundle/oci/__init__.py @@ -213,7 +213,7 @@ def build_container( tags['image_sha'] = outputs.decode('utf-8').strip() except Exception as err: raise openllm.exceptions.OpenLLMException( - f'Failed to containerize base container images (Scroll up to see error above, or set OPENLLMDEVDEBUG=True for more traceback):\n{err}' + f'Failed to containerize base container images (Scroll up to see error above, or set DEBUG=5 for more traceback):\n{err}' ) from err return tags diff --git a/openllm-python/src/openllm/entrypoints/cohere.py b/openllm-python/src/openllm/entrypoints/cohere.py index ebd5bb1f..e79ffc77 100644 --- a/openllm-python/src/openllm/entrypoints/cohere.py +++ b/openllm-python/src/openllm/entrypoints/cohere.py @@ 
-147,8 +147,8 @@ async def cohere_generate(req: Request, llm: openllm.LLM[M, T]) -> Response: return StreamingResponse(generate_stream_generator(), media_type='text/event-stream') # None-streaming case final_result: GenerationOutput | None = None - texts: list[list[str]] = [[]] * config['num_generations'] - token_ids: list[list[int]] = [[]] * config['num_generations'] + texts: list[list[str]] = [[]] * config['n'] + token_ids: list[list[int]] = [[]] * config['n'] async for res in result_generator: if await req.is_disconnected(): return error_response(HTTPStatus.BAD_REQUEST, 'Client disconnected.') diff --git a/openllm-python/src/openllm/playground/falcon_tuned.py b/openllm-python/src/openllm/playground/falcon_tuned.py index e36680f4..ff603267 100644 --- a/openllm-python/src/openllm/playground/falcon_tuned.py +++ b/openllm-python/src/openllm/playground/falcon_tuned.py @@ -8,7 +8,6 @@ import typing as t import torch import transformers -# import openllm here for OPENLLMDEVDEBUG import openllm # Make sure to have at least one GPU to run this script diff --git a/openllm-python/src/openllm/playground/llama2_qlora.py b/openllm-python/src/openllm/playground/llama2_qlora.py index 31c2ee1c..6c33e759 100644 --- a/openllm-python/src/openllm/playground/llama2_qlora.py +++ b/openllm-python/src/openllm/playground/llama2_qlora.py @@ -8,7 +8,6 @@ import typing as t import torch import transformers -# import openllm here for OPENLLMDEVDEBUG import openllm if t.TYPE_CHECKING: diff --git a/openllm-python/src/openllm/playground/opt_tuned.py b/openllm-python/src/openllm/playground/opt_tuned.py index ded2b08f..dbeb8ffb 100644 --- a/openllm-python/src/openllm/playground/opt_tuned.py +++ b/openllm-python/src/openllm/playground/opt_tuned.py @@ -7,7 +7,6 @@ import typing as t import transformers -# import openllm here for OPENLLMDEVDEBUG import openllm # Make sure to have at least one GPU to run this script