[Bugfix] Fix the issue where llm.generate cannot be called repeatedly after setting GuidedDecodingParams (#16767)

Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
Signed-off-by: Russell Bryant <rbryant@redhat.com>
Co-authored-by: Russell Bryant <rbryant@redhat.com>
This commit is contained in:
Chauncey
2025-04-22 14:02:20 +08:00
committed by GitHub
parent a114bf20a3
commit acba33a0f1
3 changed files with 32 additions and 4 deletions

View File

@@ -79,6 +79,17 @@ class GuidedDecodingParams:
return []
return self.backend.split(":")[1].split(",")
def add_option(self, opt_name: str) -> None:
    """Record *opt_name* in the backend-options part of ``self.backend``.

    ``self.backend`` follows the ``"<name>:<opt1>,<opt2>"`` convention;
    this mutates it in place so the option is present afterwards.
    """
    current = self.backend
    if not current:
        # No backend set at all: empty name, single option.
        self.backend = f":{opt_name}"
        return
    if ":" not in current:
        # Bare backend name with no options yet: start the option list.
        self.backend = f"{current}:{opt_name}"
        return
    # Options already present: merge, dedupe via a set, and emit them
    # in sorted order for a deterministic string.
    merged = set(self.backend_options())
    merged.add(opt_name)
    self.backend = f"{self.backend_name}:{','.join(sorted(merged))}"
def no_fallback(self) -> bool:
"""Returns True if the "no-fallback" option is supplied for the guided
decoding backend"""