Files
OpenLLM/typings/vllm/sampling_params.pyi
aarnphm-ec2-dev 820b4991fa chore(stubs): add generated stubs for auto-gptq and vllm [skip ci]
This is to help with working on a CPU machine

Signed-off-by: aarnphm-ec2-dev <29749331+aarnphm@users.noreply.github.com>
2023-08-03 02:28:24 +00:00

21 lines
802 B
Python
Generated

from typing import List
from typing import Optional
from typing import Union
from _typeshed import Incomplete
class SamplingParams:
    """Type stub for vLLM's ``SamplingParams``.

    Attribute annotations mirror the parameter types declared in the
    ``__init__`` signature below, replacing the auto-generated
    ``Incomplete`` placeholders so type checkers get real information.
    Semantics of each field are defined by the vLLM runtime class —
    only the types are asserted here.
    """

    n: int
    best_of: Optional[int]
    presence_penalty: float
    frequency_penalty: float
    temperature: float
    top_p: float
    top_k: int
    use_beam_search: bool
    # NOTE(review): ``stop`` accepts a single string or a list of stop
    # strings; ``None`` presumably disables stop-string matching — confirm
    # against the vLLM implementation.
    stop: Union[None, str, List[str]]
    ignore_eos: bool
    max_tokens: int
    logprobs: Optional[int]

    def __init__(self, n: int = ..., best_of: Optional[int] = ..., presence_penalty: float = ..., frequency_penalty: float = ..., temperature: float = ..., top_p: float = ..., top_k: int = ..., use_beam_search: bool = ..., stop: Union[None, str, List[str]] = ..., ignore_eos: bool = ..., max_tokens: int = ..., logprobs: Optional[int] = ...) -> None: ...