Consolidate!!! Fixes

rltakashige
2025-12-03 12:19:25 +00:00
committed by GitHub
parent 10c905c8dd
commit 2b243bd80e
17 changed files with 245 additions and 70 deletions

View File

@@ -26,12 +26,11 @@ dependencies = [
"greenlet>=3.2.4",
"huggingface-hub>=0.33.4",
"psutil>=7.0.0",
"transformers>=4.55.2",
"cobs>=1.2.2",
"loguru>=0.7.3",
"textual>=5.3.0",
"exo_pyo3_bindings", # rust bindings
"anyio>=4.11.0",
"anyio==4.11.0",
"bidict>=0.23.1",
"mlx>=0.29.3",
"mlx-lm>=0.28.3",

View File

@@ -96,6 +96,13 @@ def get_instance_placements_after_create(
instance_id = InstanceId()
target_instances = dict(deepcopy(current_instances))
if len(selected_cycle) == 1:
logger.warning(
"You have likely selected ibv for a single node instance; falling back to MlxRing"
)
command.instance_meta = InstanceMeta.MlxRing
# TODO: Single node instances
match command.instance_meta:
case InstanceMeta.MlxIbv:

View File

@@ -51,11 +51,8 @@ def get_smallest_cycles(cycles: list[list[NodeInfo]]) -> list[list[NodeInfo]]:
def get_shard_assignments_for_pipeline_parallel(
model_meta: ModelMetadata,
selected_cycle: list[NodeInfo],
selected_cycle: list[NodeWithProfile],
):
if not narrow_all_nodes(selected_cycle):
raise ValueError("All nodes must have profiles to create shard assignments")
cycle_memory = sum(
(node.node_profile.memory.ram_available for node in selected_cycle),
start=Memory(),
@@ -105,11 +102,8 @@ def get_shard_assignments_for_pipeline_parallel(
def get_shard_assignments_for_tensor_parallel(
model_meta: ModelMetadata,
selected_cycle: list[NodeInfo],
selected_cycle: list[NodeWithProfile],
):
if not narrow_all_nodes(selected_cycle):
raise ValueError("All nodes must have profiles to create shard assignments")
total_layers = model_meta.n_layers
world_size = len(selected_cycle)
runner_to_shard: dict[RunnerId, ShardMetadata] = {}
@@ -144,6 +138,8 @@ def get_shard_assignments(
selected_cycle: list[NodeInfo],
sharding: Sharding,
) -> ShardAssignments:
if not narrow_all_nodes(selected_cycle):
raise ValueError("All nodes must have profiles to create shard assignments")
match sharding:
case Sharding.Pipeline:
return get_shard_assignments_for_pipeline_parallel(
@@ -159,13 +155,21 @@ def get_shard_assignments(
def get_hosts_from_subgraph(cycle_digraph: Topology) -> list[Host]:
cycles = cycle_digraph.get_cycles()
expected_length = len(list(cycle_digraph.list_nodes()))
cycles = [cycle for cycle in cycles if len(cycle) == expected_length]
if not cycles:
if expected_length > 1:
logger.warning(
f"No cycles of length {expected_length} found even though chosen subgraph contained {expected_length} nodes"
)
return []
get_thunderbolt = False
if cycle_digraph.is_thunderbolt_cycle(cycles[0]):
get_thunderbolt = True
logger.info(f"Using thunderbolt cycle: {get_thunderbolt}")
cycle = cycles[0]
hosts: list[Host] = []
for i in range(len(cycle)):

View File

@@ -409,6 +409,7 @@ def test_tensor_rdma_backend_connectivity_matrix(
instance_meta=InstanceMeta.MlxIbv,
command_id=CommandId(),
model_meta=model_meta,
min_nodes=1,
)
placements = get_instance_placements_after_create(cic, topology, {})

View File

@@ -83,17 +83,27 @@ def apply(state: State, event: IndexedEvent) -> State:
def apply_node_download_progress(event: NodeDownloadProgress, state: State) -> State:
new_node_downloads: Sequence[DownloadProgress] = [
event.download_progress
if dp.shard_metadata == event.download_progress.shard_metadata
else dp
for dp in state.downloads.get(
event.download_progress.node_id, [event.download_progress]
)
]
"""
Update or add a node download progress to state.
"""
dp = event.download_progress
node_id = dp.node_id
current = list(state.downloads.get(node_id, ()))
replaced = False
for i, existing_dp in enumerate(current):
if existing_dp.shard_metadata == dp.shard_metadata:
current[i] = dp
replaced = True
break
if not replaced:
current.append(dp)
new_downloads: Mapping[NodeId, Sequence[DownloadProgress]] = {
**state.downloads,
event.download_progress.node_id: new_node_downloads,
node_id: current,
}
return state.model_copy(update={"downloads": new_downloads})
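
A minimal sketch of the update-or-append semantics introduced above, with plain dataclasses standing in for the repo's `DownloadProgress` and node/shard types (the names and shapes here are invented purely for illustration):

```python
# Stand-in types for illustration only; the real DownloadProgress/ShardMetadata
# live in this repo and carry much more information.
from dataclasses import dataclass

@dataclass
class FakeProgress:
    node_id: str
    shard_metadata: str

def upsert(downloads: dict[str, list[FakeProgress]], dp: FakeProgress) -> dict[str, list[FakeProgress]]:
    current = list(downloads.get(dp.node_id, ()))
    for i, existing in enumerate(current):
        if existing.shard_metadata == dp.shard_metadata:
            current[i] = dp  # replace progress for a shard we already track
            break
    else:
        current.append(dp)  # first progress event for this shard on this node
    return {**downloads, dp.node_id: current}

state = {"node-1": [FakeProgress("node-1", "shard-A")]}
state = upsert(state, FakeProgress("node-1", "shard-B"))
# The old list comprehension only replaced matching entries, so a first event
# for shard-B on a node that already tracked shard-A was silently dropped.
assert [p.shard_metadata for p in state["node-1"]] == ["shard-A", "shard-B"]
```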

View File

@@ -169,7 +169,9 @@ class Election:
await anyio.sleep(0.2)
rest = connection_messages.collect()
logger.debug(f"Connection messages received: {first} followed by {rest}")
logger.debug(
f"Connection messages received: {first} followed by {rest}"
)
logger.debug(f"Current clock: {self.clock}")
# These messages are strictly peer to peer
self.clock += 1

View File

@@ -560,8 +560,9 @@ async def download_shard(
all_start_time = time.time()
# TODO: currently not recursive. Some models might require subdirectories - thus this will need to be changed.
# Update: <- This does not seem to be the case. Yay?
file_list = await fetch_file_list_with_cache(
str(shard.model_meta.model_id), revision, recursive=False
str(shard.model_meta.model_id), revision, recursive=True
)
filtered_file_list = list(
filter_repo_objects(

View File

@@ -94,7 +94,9 @@ def extract_layer_num(tensor_name: str) -> int | None:
def get_allow_patterns(weight_map: dict[str, str], shard: ShardMetadata) -> list[str]:
default_patterns = set(["*.json", "*.py", "tokenizer.model", "*.tiktoken", "*.txt"])
default_patterns = set(
["*.json", "*.py", "tokenizer.model", "*.tiktoken", "*.txt", "*.jinja"]
)
shard_specific_patterns: set[str] = set()
if weight_map:
for tensor_name, filename in weight_map.items():
@@ -104,14 +106,11 @@ def get_allow_patterns(weight_map: dict[str, str], shard: ShardMetadata) -> list
and shard.start_layer <= layer_num <= shard.end_layer
):
shard_specific_patterns.add(filename)
sorted_file_names = sorted(weight_map.values())
# TODO: if the model needs any "layer-independent" parameters,
# we might want to always add files that correspond to them
# e.g. lm_head
if shard.is_first_layer:
shard_specific_patterns.add(sorted_file_names[0])
elif shard.is_last_layer:
shard_specific_patterns.add(sorted_file_names[-1])
layer_independent_files = set(
[v for k, v in weight_map.items() if extract_layer_num(k) is None]
)
shard_specific_patterns.update(layer_independent_files)
logger.debug(f"get_allow_patterns {shard=} {layer_independent_files=}")
else:
shard_specific_patterns = set(["*.safetensors"])
logger.info(f"get_allow_patterns {shard=} {shard_specific_patterns=}")

View File

@@ -1,7 +1,7 @@
from abc import ABC, abstractmethod
from functools import partial
from inspect import signature
from typing import TYPE_CHECKING, Callable, Protocol, cast, override
from typing import TYPE_CHECKING, Callable, Protocol, cast
import mlx.core as mx
import mlx.nn as nn
@@ -66,7 +66,6 @@ class PipelineFirstLayer(CustomMlxLayer):
self.r: int = r
self.group = group
@override
def __call__(self, x: mx.array, *args: object, **kwargs: object) -> mx.array:
if self.r != 0:
x = mx.distributed.recv_like(x, (self.r - 1), group=self.group)
@@ -87,7 +86,6 @@ class PipelineLastLayer(CustomMlxLayer):
self.group = group
self.original_layer_signature = signature(self.original_layer.__call__)
@override
def __call__(self, x: mx.array, *args: object, **kwargs: object) -> mx.array:
cache = self.original_layer_signature.bind_partial(
x, *args, **kwargs
@@ -109,6 +107,31 @@ class PipelineLastLayer(CustomMlxLayer):
return output
def _inner_model(model: nn.Module) -> nn.Module:
inner = getattr(model, "model", None)
if isinstance(inner, nn.Module):
return inner
inner = getattr(model, "transformer", None)
if isinstance(inner, nn.Module):
return inner
raise ValueError("Model must either have a 'model' or 'transformer' attribute")
def _get_layers(inner_model_instance: nn.Module) -> list[_LayerCallable]:
# Handle both model.layers and model.h cases
layers: list[_LayerCallable]
if hasattr(inner_model_instance, "layers"):
layers = cast(list[_LayerCallable], inner_model_instance.layers)
elif hasattr(inner_model_instance, "h"):
layers = cast(list[_LayerCallable], inner_model_instance.h)
else:
raise ValueError("Model must have either a 'layers' or 'h' attribute")
return layers
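
As a rough illustration of what these helpers resolve, here is a toy wrapper (modules invented here; real models come from mlx-lm) that nests its decoder under `.model` and exposes its blocks as `.layers`:

```python
# Toy modules for illustration only.
import mlx.nn as nn

class Inner(nn.Module):
    def __init__(self):
        super().__init__()
        # Some architectures call this list `layers`, others `h`.
        self.layers = [nn.Linear(8, 8) for _ in range(4)]

class Wrapper(nn.Module):
    def __init__(self):
        super().__init__()
        # Some architectures nest the decoder under `model`, others under `transformer`.
        self.model = Inner()

model = Wrapper()
inner = getattr(model, "model", None)
if inner is None:  # fall back, as _inner_model does
    inner = getattr(model, "transformer", None)
layers = getattr(inner, "layers", None)
if layers is None:  # fall back, as _get_layers does
    layers = getattr(inner, "h", None)
print(len(layers))  # 4; pipeline_auto_parallel then slices this list by shard start/end layer
```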
def _set_layers(model: nn.Module, layers: list[_LayerCallable]) -> None:
inner_model_instance = _inner_model(model)
if hasattr(inner_model_instance, "layers"):
@@ -143,20 +166,17 @@ def pipeline_auto_parallel(
inner_model_instance: nn.Module = _inner_model(model)
# Handle both model.layers and model.h cases
layers: list[_LayerCallable]
if hasattr(inner_model_instance, "layers"):
layers = cast(list[_LayerCallable], inner_model_instance.layers)
elif hasattr(inner_model_instance, "h"):
layers = cast(list[_LayerCallable], inner_model_instance.h)
else:
raise ValueError("Model must have either a 'layers' or 'h' attribute")
layers: list[_LayerCallable] = _get_layers(inner_model_instance)
layers = layers[model_shard_meta.start_layer : model_shard_meta.end_layer]
layers[0] = PipelineFirstLayer(layers[0], model_shard_meta.device_rank, group=group)
start_layer, end_layer = model_shard_meta.start_layer, model_shard_meta.end_layer
device_rank, world_size = model_shard_meta.device_rank, model_shard_meta.world_size
layers = layers[start_layer:end_layer]
layers[0] = PipelineFirstLayer(layers[0], device_rank, group=group)
layers[-1] = PipelineLastLayer(
layers[-1],
model_shard_meta.device_rank,
model_shard_meta.world_size,
device_rank,
world_size,
group=group,
)
@@ -169,18 +189,6 @@ def pipeline_auto_parallel(
return model
def _inner_model(model: nn.Module) -> nn.Module:
inner = getattr(model, "model", None)
if isinstance(inner, nn.Module):
return inner
inner = getattr(model, "transformer", None)
if isinstance(inner, nn.Module):
return inner
raise ValueError("Model must either have a 'model' or 'transformer' attribute")
def tensor_auto_parallel(
model: nn.Module,
group: mx.distributed.Group,

View File

@@ -43,6 +43,8 @@ def warmup_inference(
tokenizer: TokenizerWrapper,
sampler: Callable[[mx.array], mx.array],
) -> int:
content = "Prompt to warm up the inference engine. Repeat this."
warmup_prompt = apply_chat_template(
tokenizer=tokenizer,
chat_task_data=ChatCompletionTaskParams(
@@ -50,7 +52,7 @@ def warmup_inference(
messages=[
ChatCompletionMessage(
role="user",
content="Prompt to warm up the inference engine. Repeat this.",
content=content,
)
],
),
@@ -126,3 +128,6 @@ def mlx_generate(
token=out.token,
finish_reason=cast(FinishReason | None, out.finish_reason),
)
if out.finish_reason is not None:
break

View File

@@ -202,8 +202,10 @@ class Worker:
await self.event_sender.send(
NodeDownloadProgress(download_progress=progress)
)
initial_progress = await self.shard_downloader.get_shard_download_status_for_shard(
shard
initial_progress = (
await self.shard_downloader.get_shard_download_status_for_shard(
shard
)
)
if initial_progress.status == "complete":
progress = DownloadCompleted(
@@ -232,13 +234,12 @@ class Worker:
await self.runners.pop(runner_id).start_task(task)
except TimeoutError:
await self.event_sender.send(
TaskStatusUpdated(task_id=task.task_id, task_status=TaskStatus.TimedOut)
TaskStatusUpdated(
task_id=task.task_id, task_status=TaskStatus.TimedOut
)
)
case task:
await self.runners[self._task_to_runner_id(task)].start_task(
task
)
await self.runners[self._task_to_runner_id(task)].start_task(task)
def shutdown(self):
if self._tg:

View File

@@ -135,7 +135,8 @@ def _load_model(
shard_assignments = instance.shard_assignments
all_downloads_complete_local = all(
any(
nid in global_download_status
and any(
isinstance(dp, DownloadCompleted)
and dp.shard_metadata == shard_assignments.runner_to_shard[rid]
for dp in global_download_status[nid]

View File

@@ -20,7 +20,10 @@ def entrypoint(
task_receiver: MpReceiver[Task],
_logger: "loguru.Logger",
) -> None:
if isinstance(bound_instance.instance, MlxIbvInstance) and len(bound_instance.instance.ibv_devices) >= 2:
if (
isinstance(bound_instance.instance, MlxIbvInstance)
and len(bound_instance.instance.ibv_devices) >= 2
):
os.environ["MLX_METAL_FAST_SYNCH"] = "1"
global logger

View File

@@ -118,6 +118,7 @@ class RunnerSupervisor:
self._tg.cancel_scope.cancel()
async def start_task(self, task: Task):
logger.info(f"Starting task {task}")
event = anyio.Event()
self.pending[task.task_id] = event
try:
@@ -126,6 +127,7 @@ class RunnerSupervisor:
logger.warning(f"Task {task} dropped, runner closed communication.")
return
await event.wait()
logger.info(f"Finished task {task}")
async def _forward_events(self):
with self._ev_recv as events:
@@ -149,11 +151,13 @@ class RunnerSupervisor:
self.runner_process.kill()
async def _check_runner(self, e: Exception) -> None:
logger.info("Checking runner's status")
if self.runner_process.is_alive():
logger.info("Runner was found to be alive, attempting to join process")
await to_thread.run_sync(self.runner_process.join, 1)
rc = self.runner_process.exitcode
logger.info(f"RunnerSupervisor exited with exit code {rc}")
if rc == 0:
#
return
if isinstance(rc, int) and rc < 0:

tmp/prompt.txt (new file, 47 lines)
View File

@@ -0,0 +1,47 @@
Summarise this Wikipedia article for me:
Transition from Republic to Empire
Augustus of Prima Porta
Rome had begun expanding shortly after the founding of the Roman Republic in the 6th century BC, though not outside the Italian Peninsula until the 3rd century BC. The Republic was not a nation-state in the modern sense, but a network of self-ruled towns (with varying degrees of independence from the Senate) and provinces administered by military commanders. It was governed by annually elected magistrates (Roman consuls above all) in conjunction with the Senate.[22] The 1st century BC was a time of political and military upheaval, which ultimately led to rule by emperors.[23][24][25] The consuls' military power rested in the Roman legal concept of imperium, meaning "command" (typically in a military sense).[26] Occasionally, successful consuls or generals were given the honorary title imperator (commander); this is the origin of the word emperor, since this title was always bestowed to the early emperors.[27][g]
Rome suffered a long series of internal conflicts, conspiracies, and civil wars from the late second century BC (see Crisis of the Roman Republic) while greatly extending its power beyond Italy. In 44 BC Julius Caesar was briefly perpetual dictator before being assassinated by a faction that opposed his concentration of power. This faction was driven from Rome and defeated at the Battle of Philippi in 42 BC by Mark Antony and Caesar's adopted son Octavian. Antony and Octavian divided the Roman world between them, but this did not last long. Octavian's forces defeated those of Mark Antony and Cleopatra at the Battle of Actium in 31 BC. In 27 BC the Senate gave him the title Augustus ("venerated") and made him princeps ("foremost") with proconsular imperium, thus beginning the Principate, the first epoch of Roman imperial history. Although the republic stood in name, Augustus had all meaningful authority.[29] During his 40-year rule, a new constitutional order emerged so that, upon his death, Tiberius would succeed him as the new de facto monarch.[30]
Pax Romana
Main article: Pax Romana
The so-called "Five Good Emperors" of 96–180 AD
Nerva (r. 96–98)
Trajan (r. 98–117)
Hadrian (r. 117–138)
Antoninus Pius (r. 138–161)
Marcus Aurelius (r. 161–180)
The 200 years that began with Augustus's rule are traditionally regarded as the Pax Romana ("Roman Peace"). The cohesion of the empire was furthered by a degree of social stability and economic prosperity that Rome had never before experienced. Uprisings in the provinces were infrequent and put down "mercilessly and swiftly".[31] The success of Augustus in establishing principles of dynastic succession was limited by his outliving a number of talented potential heirs. The Julio-Claudian dynasty lasted for four more emperors—Tiberius, Caligula, Claudius, and Nero—before it yielded in 69 AD to the strife-torn Year of the Four Emperors, from which Vespasian emerged as the victor. Vespasian became the founder of the brief Flavian dynasty, followed by the Nerva–Antonine dynasty which produced the "Five Good Emperors": Nerva, Trajan, Hadrian, Antoninus Pius, and Marcus Aurelius.[32]
Among the so-called “Five Good Emperors,” Hadrian (r. 117–138) is particularly noted for consolidating the empire's frontiers and embarking on ambitious building projects throughout the provinces.[33] In Judaea, which had long been the center of Jewish national and religious life, his reign marked a decisive turning point. After earlier Jewish resistance to Roman rule, Hadrian visited the region in 129/130 CE and refounded Jerusalem as the Roman colony Aelia Capitolina, naming it after his family (Aelius) and the Capitoline Triad.[34] The refoundation overlaid the destroyed Jewish city with a new Roman urban plan, and included the construction of a Temple to Jupiter on the site of the former Jewish Temple.[35] Later tradition and archaeological evidence also indicate a Temple of Venus near the site of the Holy Sepulchre.[36]
Hadrian's measures, combined with restrictions on Jewish practices, helped spark the Bar Kokhba Revolt (132–135 CE). After crushing the uprising, Roman forces expelled most Jews from Jerusalem, barring their entry except on certain days, and rebuilt the city as a statement of imperial power and domination.[33] Most scholars consider Hadrianic Aelia to have been unwalled, with free-standing gate complexes (such as the northern gate beneath today's Damascus Gate) rather than a continuous defensive circuit.[37]
Transition from classical to late antiquity
Main articles: Later Roman Empire and Fall of the Western Roman Empire
See also: Barbarian kingdoms and Byzantine Empire
The Barbarian invasions consisted of the movement of (mainly) ancient Germanic peoples into Roman territory. Historically, this event marked the transition between classical antiquity and the Middle Ages.
In the view of contemporary Greek historian Cassius Dio, the accession of Commodus in 180 marked the descent "from a kingdom of gold to one of rust and iron",[38] a comment which has led some historians, notably Edward Gibbon, to take Commodus' reign as the beginning of the Empire's decline.[39][40]
In 212, during the reign of Caracalla, Roman citizenship was granted to all freeborn inhabitants of the empire. The Severan dynasty was tumultuous; an emperor's reign was ended routinely by his murder or execution and, following its collapse, the Empire was engulfed by the Crisis of the Third Century, a period of invasions, civil strife, economic disorder, and plague.[41] In defining historical epochs, this crisis sometimes marks the transition from Classical to Late Antiquity. Aurelian (r. 270–275) stabilised the empire militarily and Diocletian reorganised and restored much of it in 285.[42] Diocletian's reign brought the empire's most concerted effort against the perceived threat of Christianity, the "Great Persecution".[43]
Diocletian divided the empire into four regions, each ruled by a separate tetrarch.[44] Confident that he fixed the disorder plaguing Rome, he abdicated along with his co-emperor, but the Tetrarchy collapsed shortly after. Order was eventually restored by Constantine the Great, who became the first emperor to convert to Christianity, and who established Constantinople as the new capital of the Eastern Empire. During the decades of the Constantinian and Valentinian dynasties, the empire was divided along an east–west axis, with dual power centres in Constantinople and Rome. Julian, who under the influence of his adviser Mardonius attempted to restore Classical Roman and Hellenistic religion, only briefly interrupted the succession of Christian emperors. Theodosius I, the last emperor to rule over both East and West, died in 395 after making Christianity the state religion.[45]
The Roman Empire by 476, noting western and eastern divisions
The administrative divisions of the Roman Empire in 395 AD
Fall in the West and survival in the East
The Western Roman Empire began to disintegrate in the early 5th century. The Romans fought off all invaders, most famously Attila,[46] but the empire had assimilated so many Germanic peoples of dubious loyalty to Rome that the empire started to dismember itself.[47] Most chronologies place the end of the Western Roman Empire in 476, when Romulus Augustulus was forced to abdicate to the Germanic warlord Odoacer.[48][49][50]
Odoacer ended the Western Empire by declaring Zeno sole emperor and placing himself as Zeno's nominal subordinate. In reality, Italy was ruled by Odoacer alone.[48][49][51] The Eastern Roman Empire, called the Byzantine Empire by later historians, continued until the reign of Constantine XI Palaiologos, the last Roman emperor. He died in battle in 1453 against Mehmed II and his Ottoman forces during the siege of Constantinople. Mehmed II adopted the title of caesar in an attempt to claim a connection to the former Empire.[52][53] His claim was soon recognized by the Patriarchate of Constantinople, but not by European monarchs.

tmp/run_llm.py (new file, 85 lines)
View File

@@ -0,0 +1,85 @@
#!/usr/bin/env python3
import argparse
import json
import sys
import requests
def stream_chat(host: str, query: str) -> None:
url = f"http://{host}:8000/v1/chat/completions"
headers = {"Content-Type": "application/json"}
payload = {
"model": "mlx-community/Llama-3.2-1B-Instruct-4bit",
# "model": "mlx-community/Llama-3_3-Nemotron-Super-49B-v1_5-mlx-4Bit",
"stream": True,
"messages": [{"role": "user", "content": query}],
}
try:
with requests.post(url, headers=headers, json=payload, stream=True) as resp:
resp.raise_for_status()
for line in resp.iter_lines(decode_unicode=True):
if not line:
continue
# SSE lines look like: "data: {...}" or "data: [DONE]"
if not line.startswith("data:"):
continue
data = line[len("data:"):].strip()
if data == "[DONE]":
break
try:
obj = json.loads(data)
except json.JSONDecodeError:
continue
for choice in obj.get("choices", []):
delta = choice.get("delta") or {}
content = delta.get("content")
if content:
print(content, end="", flush=True)
except requests.RequestException as e:
print(f"Request failed: {e}", file=sys.stderr)
sys.exit(1)
print()
def main() -> None:
parser = argparse.ArgumentParser(
description="Stream chat completions from a local server."
)
parser.add_argument("host", help="Hostname (without protocol), e.g. localhost")
parser.add_argument(
"-f", "--file",
help="Path to a text file whose contents will be used as the query",
)
parser.add_argument(
"query",
nargs="*",
help="Query text (if not using -f/--file). All remaining arguments are joined with spaces.",
)
args = parser.parse_args()
if args.file:
try:
with open(args.file, "r", encoding="utf-8") as f:
query = f.read().strip()
except OSError as e:
print(f"Error reading file {args.file}: {e}", file=sys.stderr)
sys.exit(1)
elif args.query:
query = " ".join(args.query)
else:
parser.error("You must provide either a query or a file (-f/--file).")
stream_chat(args.host, query)
if __name__ == "__main__":
main()
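
Assuming an API server from this repo is listening on port 8000 (the port is hard-coded in `stream_chat`), typical invocations would be `python tmp/run_llm.py localhost "Summarise the attached text"` or `python tmp/run_llm.py localhost -f tmp/prompt.txt`.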

uv.lock (generated, 4 changed lines)
View File

@@ -352,7 +352,6 @@ dependencies = [
{ name = "sqlmodel", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
{ name = "textual", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
{ name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
{ name = "transformers", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
{ name = "typeguard", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
{ name = "types-aiofiles", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
{ name = "uvicorn", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
@@ -371,7 +370,7 @@ requires-dist = [
{ name = "aiofiles", specifier = ">=24.1.0" },
{ name = "aiohttp", specifier = ">=3.12.14" },
{ name = "aiosqlite", specifier = ">=0.21.0" },
{ name = "anyio", specifier = ">=4.11.0" },
{ name = "anyio", specifier = "==4.11.0" },
{ name = "base58", specifier = ">=2.1.1" },
{ name = "bidict", specifier = ">=0.23.1" },
{ name = "cobs", specifier = ">=1.2.2" },
@@ -395,7 +394,6 @@ requires-dist = [
{ name = "sqlmodel", specifier = ">=0.0.24" },
{ name = "textual", specifier = ">=5.3.0" },
{ name = "tiktoken", specifier = ">=0.12.0" },
{ name = "transformers", specifier = ">=4.55.2" },
{ name = "typeguard", specifier = ">=4.4.4" },
{ name = "types-aiofiles", specifier = ">=24.1.0.20250708" },
{ name = "uvicorn", specifier = ">=0.35.0" },