[Misc] Terratorch related fixes (#24337)

Signed-off-by: Christian Pinto <christian.pinto@ibm.com>
Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk>
This commit is contained in:
Christian Pinto
2025-09-08 15:40:26 +02:00
committed by GitHub
parent e041314184
commit 9cd76b71ab
11 changed files with 18 additions and 37 deletions

View File

@@ -11,7 +11,7 @@ from vllm.entrypoints.openai.protocol import IOProcessorResponse
from vllm.plugins.io_processors import get_io_processor
from vllm.pooling_params import PoolingParams
-MODEL_NAME = "mgazz/Prithvi-EO-2.0-300M-TL-Sen1Floods11"
+MODEL_NAME = "ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11"
image_url = "https://huggingface.co/christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM/resolve/main/valencia_example_2024-10-26.tiff" # noqa: E501
@@ -35,7 +35,7 @@ def server():
"--max-num-seqs",
"32",
"--io-processor-plugin",
-"prithvi_to_tiff_valencia",
+"prithvi_to_tiff",
"--model-impl",
"terratorch",
]
@@ -107,7 +107,7 @@ def test_prithvi_mae_plugin_offline(vllm_runner, model_name: str):
# to avoid the model going OOM in CI.
max_num_seqs=1,
model_impl="terratorch",
-io_processor_plugin="prithvi_to_tiff_valencia",
+io_processor_plugin="prithvi_to_tiff",
) as llm_runner:
pooler_output = llm_runner.get_llm().encode(
img_prompt,