[Bug] Fix outdated links in source code (#35314)
Signed-off-by: yewentao256 <zhyanwentao@126.com>
This commit is contained in:
3
.github/mergify.yml
vendored
3
.github/mergify.yml
vendored
@@ -259,8 +259,7 @@ pull_request_rules:
|
|||||||
- files=benchmarks/run_structured_output_benchmark.sh
|
- files=benchmarks/run_structured_output_benchmark.sh
|
||||||
- files=docs/features/structured_outputs.md
|
- files=docs/features/structured_outputs.md
|
||||||
- files=examples/offline_inference/structured_outputs.py
|
- files=examples/offline_inference/structured_outputs.py
|
||||||
- files=examples/online_serving/openai_chat_completion_structured_outputs.py
|
- files=examples/online_serving/structured_outputs/structured_outputs.py
|
||||||
- files=examples/online_serving/openai_chat_completion_structured_outputs_with_reasoning.py
|
|
||||||
- files~=^tests/v1/structured_output/
|
- files~=^tests/v1/structured_output/
|
||||||
- files=tests/v1/entrypoints/llm/test_struct_output_generate.py
|
- files=tests/v1/entrypoints/llm/test_struct_output_generate.py
|
||||||
- files~=^vllm/v1/structured_output/
|
- files~=^vllm/v1/structured_output/
|
||||||
|
|||||||
@@ -656,7 +656,7 @@ vLLM has support for OpenTelemetry tracing:
|
|||||||
- Added by <https://github.com/vllm-project/vllm/pull/4687> and reinstated by <https://github.com/vllm-project/vllm/pull/20372>
|
- Added by <https://github.com/vllm-project/vllm/pull/4687> and reinstated by <https://github.com/vllm-project/vllm/pull/20372>
|
||||||
- Configured with `--otlp-traces-endpoint` and `--collect-detailed-traces`
|
- Configured with `--otlp-traces-endpoint` and `--collect-detailed-traces`
|
||||||
- [OpenTelemetry blog post](https://opentelemetry.io/blog/2024/llm-observability/)
|
- [OpenTelemetry blog post](https://opentelemetry.io/blog/2024/llm-observability/)
|
||||||
- [User-facing docs](../examples/online_serving/opentelemetry.md)
|
- [User-facing docs](../../examples/online_serving/opentelemetry/README.md)
|
||||||
- [Blog post](https://medium.com/@ronen.schaffer/follow-the-trail-supercharging-vllm-with-opentelemetry-distributed-tracing-aa655229b46f)
|
- [Blog post](https://medium.com/@ronen.schaffer/follow-the-trail-supercharging-vllm-with-opentelemetry-distributed-tracing-aa655229b46f)
|
||||||
- [IBM product docs](https://www.ibm.com/docs/en/instana-observability/current?topic=mgaa-monitoring-large-language-models-llms-vllm-public-preview)
|
- [IBM product docs](https://www.ibm.com/docs/en/instana-observability/current?topic=mgaa-monitoring-large-language-models-llms-vllm-public-preview)
|
||||||
|
|
||||||
|
|||||||
@@ -33,7 +33,10 @@ if __name__ == "__main__":
|
|||||||
"--json-trace",
|
"--json-trace",
|
||||||
type=str,
|
type=str,
|
||||||
required=True,
|
required=True,
|
||||||
help="json trace file output by examples/offline_inference/profiling.py",
|
help=(
|
||||||
|
"JSON trace file generated by scripts that use "
|
||||||
|
"vllm.profiler.layerwise_profile"
|
||||||
|
),
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--phase",
|
"--phase",
|
||||||
|
|||||||
@@ -564,8 +564,10 @@ if __name__ == "__main__":
|
|||||||
"--json-trace",
|
"--json-trace",
|
||||||
type=str,
|
type=str,
|
||||||
required=True,
|
required=True,
|
||||||
help="json trace file output by \
|
help=(
|
||||||
examples/offline_inference/profiling.py",
|
"JSON trace file generated by scripts that use "
|
||||||
|
"vllm.profiler.layerwise_profile"
|
||||||
|
),
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--output-directory", type=str, required=False, help="Directory to output plots"
|
"--output-directory", type=str, required=False, help="Directory to output plots"
|
||||||
|
|||||||
@@ -213,7 +213,7 @@ class NomicBertModelConfig(VerifyAndUpdateConfig):
|
|||||||
"Nomic context extension is disabled. "
|
"Nomic context extension is disabled. "
|
||||||
"Changing max_model_len from %s to %s. "
|
"Changing max_model_len from %s to %s. "
|
||||||
"To enable context extension, see: "
|
"To enable context extension, see: "
|
||||||
"https://github.com/vllm-project/vllm/tree/main/examples/offline_inference/context_extension.html",
|
"https://github.com/vllm-project/vllm/tree/main/examples/offline_inference/context_extension.py",
|
||||||
max_model_len_before,
|
max_model_len_before,
|
||||||
model_config.max_model_len,
|
model_config.max_model_len,
|
||||||
)
|
)
|
||||||
|
|||||||
Reference in New Issue
Block a user