[Bugfix] Fix Incremental Detokenization with tokenizers == 0.22.0 (#24159)
Signed-off-by: Fanli Lin <fanli.lin@intel.com> Signed-off-by: Fanli Lin <fanli0116@gmail.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
This commit is contained in:
@@ -234,7 +234,7 @@ class FastIncrementalDetokenizer(BaseIncrementalDetokenizer):
         try:
             token = self.stream.step(self.tokenizer, next_token_id)
         except Exception as e:
-            if str(e) != INVALID_PREFIX_ERR_MSG:
+            if not str(e).startswith(INVALID_PREFIX_ERR_MSG):
                 raise e
             # Recover from edge case where tokenizer can produce non-monotonic,
             # invalid UTF-8 output, which breaks the internal state of
@@ -243,7 +243,8 @@ class FastIncrementalDetokenizer(BaseIncrementalDetokenizer):
             logger.warning(
                 "Encountered invalid prefix detokenization error"
                 " for request %s, resetting decode stream.", self.request_id)
-            self.stream = DecodeStream(self.skip_special_tokens)
+            self.stream = DecodeStream(
+                skip_special_tokens=self.skip_special_tokens)
             token = self.stream.step(self.tokenizer, next_token_id)
         return token
Reference in New Issue
Block a user