[BUG] Allows for RunAI Streamer and Torch.compile cache to be used together (#24922)

Signed-off-by: ahao-anyscale <ahao@anyscale.com>
This commit is contained in:
ahao-anyscale
2025-09-23 17:13:32 -07:00
committed by GitHub
parent 88d7bdbd23
commit c8bde93367
3 changed files with 119 additions and 4 deletions

View File

@@ -699,11 +699,12 @@ class ModelConfig:
model: Model name or path
tokenizer: Tokenizer name or path
"""
if not (is_runai_obj_uri(model) or is_runai_obj_uri(tokenizer)):
return
if is_runai_obj_uri(model):
-        object_storage_model = ObjectStorageModel()
+        object_storage_model = ObjectStorageModel(url=model)
object_storage_model.pull_files(
model, allow_pattern=["*.model", "*.py", "*.json"])
self.model_weights = model
@@ -722,7 +723,7 @@ class ModelConfig:
# Only download tokenizer if needed and not already handled
if is_runai_obj_uri(tokenizer):
-        object_storage_tokenizer = ObjectStorageModel()
+        object_storage_tokenizer = ObjectStorageModel(url=tokenizer)
object_storage_tokenizer.pull_files(model,
ignore_pattern=[
"*.pt", "*.safetensors",