This repository was archived by the owner on Mar 11, 2026. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 15
Expand file tree
/
Copy pathlatency_throughput_curve.sh
More file actions
executable file
·79 lines (66 loc) · 2.71 KB
/
latency_throughput_curve.sh
File metadata and controls
executable file
·79 lines (66 loc) · 2.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
#!/bin/bash
# Copyright 2024 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Trace every command so benchmark invocations are visible in the pod logs.
set -o xtrace

# Re-export IP so child processes (the benchmark script) see it.
export IP=$IP

# Authenticate with Hugging Face so gated tokenizers/models can be fetched.
huggingface-cli login --token "$HF_TOKEN" --add-to-git-credential

# Map the dataset name to a local file.
# NOTE(review): PROMPT_DATASET_FILE stays unset for any other dataset value,
# which leaves --dataset= empty below — confirm that is intended.
if [[ "$PROMPT_DATASET" = "sharegpt" ]]; then
  PROMPT_DATASET_FILE="ShareGPT_V3_unfiltered_cleaned_split.json"
fi

PYTHON="python3"

# Options shared by every request-rate sweep; the per-rate options
# (--request-rate, --num-prompts) are appended inside the loop below.
BASE_PYTHON_OPTS=(
  "benchmark_serving.py"
  "--save-json-results"
  "--host=$IP"
  "--port=$PORT"
  "--dataset=$PROMPT_DATASET_FILE"
  "--tokenizer=$TOKENIZER"
  "--backend=$BACKEND"
  "--max-input-length=$INPUT_LENGTH"
  "--max-output-length=$OUTPUT_LENGTH"
  "--file-prefix=$FILE_PREFIX"
  "--models=$MODELS"
  "--pm-namespace=$PM_NAMESPACE"
  "--pm-job=$PM_JOB"
)

# Optional flags: appended only when the corresponding env var is non-empty
# (or, for booleans, exactly "true").
if [[ "$TRAFFIC_SPLIT" ]]; then
  BASE_PYTHON_OPTS+=("--traffic-split=$TRAFFIC_SPLIT")
fi
if [[ "$OUTPUT_BUCKET" ]]; then
  BASE_PYTHON_OPTS+=("--output-bucket=$OUTPUT_BUCKET")
fi
if [[ "$SCRAPE_SERVER_METRICS" = "true" ]]; then
  BASE_PYTHON_OPTS+=("--scrape-server-metrics")
fi
if [[ "$SAVE_AGGREGATED_RESULT" = "true" ]]; then
  BASE_PYTHON_OPTS+=("--save-aggregated-result")
fi
if [[ "$STREAM_REQUEST" = "true" ]]; then
  BASE_PYTHON_OPTS+=("--stream-request")
fi
if [[ "$IGNORE_EOS" = "true" ]]; then
  BASE_PYTHON_OPTS+=("--ignore-eos")
fi
if [[ "$OUTPUT_BUCKET_FILEPATH" ]]; then
  BASE_PYTHON_OPTS+=("--output-bucket-filepath" "$OUTPUT_BUCKET_FILEPATH")
fi
if [[ "$TCP_CONN_LIMIT" ]]; then
  BASE_PYTHON_OPTS+=("--tcp-conn-limit" "$TCP_CONN_LIMIT")
fi

# Pause between sweeps; keep the container alive after the run by default.
SLEEP_TIME=${SLEEP_TIME:-0}
POST_BENCHMARK_SLEEP_TIME=${POST_BENCHMARK_SLEEP_TIME:-infinity}
# Number of prompts needed to sustain $1 requests/sec for $2 seconds:
# floor(rate * seconds).
compute_num_prompts() {
  # Pass values through awk -v rather than interpolating them into the
  # program text, so unexpected characters in env vars cannot be parsed
  # as awk code.
  awk -v rate="$1" -v secs="$2" 'BEGIN {print int(rate * secs)}'
}

# Sweep over each requested rate. REQUEST_RATES is a comma-separated list;
# split it with read -a instead of the unquoted $(echo | tr) word-split,
# which is fragile under globbing and embedded whitespace.
IFS=',' read -r -a request_rate_list <<< "$REQUEST_RATES"
for request_rate in "${request_rate_list[@]}"; do
  [[ -n "$request_rate" ]] || continue  # tolerate stray/trailing commas
  echo "Benchmarking request rate: ${request_rate}"
  # TODO: Check if profile already exists, if so then skip
  timestamp=$(date +"%Y-%m-%d_%H-%M-%S")
  output_file="latency-profile-${timestamp}.txt"
  if [[ "$request_rate" == "0" ]]; then
    # Rate 0 means "no throttling": send MAX_NUM_PROMPTS as fast as possible.
    request_rate="inf"
    num_prompts=$MAX_NUM_PROMPTS
  else
    num_prompts=$(compute_num_prompts "$request_rate" "$BENCHMARK_TIME_SECONDS")
  fi
  echo "TOTAL prompts: $num_prompts"
  PYTHON_OPTS=("${BASE_PYTHON_OPTS[@]}" "--request-rate=$request_rate" "--num-prompts=$num_prompts")
  # Benchmark output is captured to a per-run file and echoed to the log.
  "$PYTHON" "${PYTHON_OPTS[@]}" > "$output_file"
  cat "$output_file"
  echo "Sleeping for $SLEEP_TIME seconds..."
  sleep "$SLEEP_TIME"
done
# Signal sidecars/orchestrators that the benchmark sweep is complete.
export LPG_FINISHED="true"
# Keep the container alive so results can be collected before teardown;
# POST_BENCHMARK_SLEEP_TIME defaults to "infinity" earlier in this script.
sleep "$POST_BENCHMARK_SLEEP_TIME"