[Docs] Improve malformed exception caused by backslash line continuations (#31694)

Signed-off-by: maang <maang_h@163.com>
Signed-off-by: maang <55082429+maang-h@users.noreply.github.com>
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
Co-authored-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com>

This commit is contained in:
@@ -839,9 +839,9 @@ class CompilationConfig:
         """
         if self.mode is None:
             raise ValueError(
-                "No compilation mode is set. This method should only be \
-                called via vllm config where the level is set if none is \
-                provided."
+                "No compilation mode is set. This method should only be "
+                "called via vllm config where the level is set if none is "
+                "provided."
             )
         if self.mode == CompilationMode.NONE:
             raise ValueError("No compilation mode is set.")
|
||||
@@ -231,8 +231,7 @@ def rocm_aiter_fused_experts(
     # w8a8 block-scaled
     if quant_config.block_shape is not None and quant_config.use_fp8_w8a8:
         assert not apply_router_weight_on_input, (
-            "apply_router_weight_on_input is\
-            not supported for block scaled moe"
+            "apply_router_weight_on_input is not supported for block scaled moe"
         )
         assert quant_config.w1_scale is not None
         assert quant_config.w2_scale is not None
@@ -681,8 +681,8 @@ def safetensors_weights_iterator(
         # instead we reconstruct the subclasses here before returning
         if not torchao_version_at_least("0.15.0"):
             raise ValueError(
-                "Please use torchao version >= 0.15.0 \
-                to load torchao safetensors checkpoint"
+                "Please use torchao version >= 0.15.0 "
+                "to load torchao safetensors checkpoint"
             )
         from torchao.prototype.safetensors.safetensors_support import (
             unflatten_tensor_state_dict,
@@ -201,8 +201,8 @@ class LlamaAttention(nn.Module):
             # This is a target model, use layer_idx directly
             effective_layer_idx = layer_idx
         assert effective_layer_idx < len(layer_types), (
-            f"effective_layer_idx: {effective_layer_idx} \
-            is out of bounds for layer_types: {layer_types}"
+            f"effective_layer_idx: {effective_layer_idx} "
+            f"is out of bounds for layer_types: {layer_types}"
         )

         is_sliding = layer_types[effective_layer_idx] == "sliding_attention"
@@ -428,14 +428,13 @@ class Phi4MMImageEncoder(nn.Module):
                 output_imgs.append(torch.cat([sub_img, self.glb_GN, glb_img], dim=1))
             else:
                 raise NotImplementedError(
-                    f'hd_transform_order = {self.hd_transform_order}, "\
-                    "not implemented'
+                    f"hd_transform_order = {self.hd_transform_order}, not implemented"
                 )

             # temp_len = int((h*w+1)*144 + 1 + (h+1)*12)
             assert temp_len == output_imgs[-1].shape[1], (
-                f'temp_len: {temp_len}, output_imgs[-1].shape[1]: "\
-                "{output_imgs[-1].shape[1]}'
+                f"temp_len: {temp_len}, output_imgs[-1].shape[1]: "
+                f"{output_imgs[-1].shape[1]}"
             )

             output_len.append(temp_len)
@@ -367,8 +367,8 @@ class MultiModalRegistry:
             # than whisper.
             return 0
         assert len(max_tokens) == 1, (
-            "Encoder-decoder models are expected \
-            to implement the multimodal interface with at most one modality."
+            "Encoder-decoder models are expected "
+            "to implement the multimodal interface with at most one modality."
         )

         first_modality = next(iter(max_tokens))
@@ -399,8 +399,7 @@ class FlexibleArgumentParser(ArgumentParser):
                 index = args.index("--config")
                 if index == len(args) - 1:
                     raise ValueError(
-                        "No config file specified! \
-                        Please check your command-line arguments."
+                        "No config file specified! Please check your command-line arguments."
                     )

                 file_path = args[index + 1]
Reference in New Issue
Block a user