xref: /aosp_15_r20/external/ublksrv/tests/common/fio_common (revision 94c4a1e103eb1715230460aab379dff275992c20)
1#!/bin/bash
2# SPDX-License-Identifier: MIT or GPL-2.0-only
3
# Results of the most recent fio run, keyed by "<prefix><field name>".
# Populated by _fio_perf_report, read by __get_cpu_utils and the perf tests.
declare -A TEST_RUN

# Map from a human-readable statistic name to its 1-based semicolon-separated
# field index in fio's terse output (terse-version=4, as requested by
# __run_fio_libaio).  The commented gaps are percentile blocks we never read.
declare -A FIO_TERSE_FIELDS
FIO_TERSE_FIELDS=(
	# Read status
	["read io"]=6
	["read bandwidth"]=7
	["read iops"]=8
	["read runtime"]=9
	["read slat min"]=10
	["read slat max"]=11
	["read slat mean"]=12
	["read slat stdev"]=13
	["read clat min"]=14
	["read clat max"]=15
	["read clat mean"]=16
	["read clat stdev"]=17
	# read clat percentiles are 18-37
	["read lat min"]=38
	["read lat max"]=39
	["read lat mean"]=40
	["read lat stdev"]=41
	["read bandwidth min"]=42
	["read bandwidth max"]=43
	["read bandwidth %"]=44
	["read bandwidth mean"]=45
	["read bandwidth stdev"]=46

	# Write status
	["write io"]=47
	["write bandwidth"]=48
	["write iops"]=49
	["write runtime"]=50
	["write slat min"]=51
	["write slat max"]=52
	["write slat mean"]=53
	["write slat stdev"]=54
	["write clat min"]=55
	["write clat max"]=56
	["write clat mean"]=57
	["write clat stdev"]=58
	# write clat percentiles are 59-78
	["write lat min"]=79
	["write lat max"]=80
	["write lat mean"]=81
	["write lat stdev"]=82
	["write bandwidth min"]=83
	["write bandwidth max"]=84
	["write bandwidth %"]=85
	["write bandwidth mean"]=86
	["write bandwidth stdev"]=87

	# Trim status
	["trim io"]=88
	["trim bandwidth"]=89
	["trim iops"]=90
	["trim runtime"]=91
	["trim slat min"]=92
	["trim slat max"]=93
	["trim slat mean"]=94
	["trim slat stdev"]=95
	["trim clat min"]=96
	["trim clat max"]=97
	["trim clat mean"]=98
	["trim clat stdev"]=99
	# trim clat percentiles are 100-119
	["trim lat min"]=120
	["trim lat max"]=121
	["trim lat mean"]=122
	["trim lat stdev"]=123
	["trim bandwidth min"]=124
	["trim bandwidth max"]=125
	["trim bandwidth %"]=126
	["trim bandwidth mean"]=127
	["trim bandwidth stdev"]=128

	# CPU usage
	["user cpu"]=129
	["system cpu"]=130
	["context switches"]=131
	["major page faults"]=132
	["minor page faults"]=133

	# IO depth distribution
	["io depth <=1"]=134
	["io depth 2"]=135
	["io depth 4"]=136
	["io depth 8"]=137
	["io depth 16"]=138
	["io depth 32"]=139
	["io depth >=64"]=140

	# IO latency distribution
	["io latency <=2 us"]=141
	["io latency 4 us"]=142
	["io latency 10 us"]=143
	["io latency 20 us"]=144
	["io latency 50 us"]=145
	["io latency 100 us"]=146
	["io latency 250 us"]=147
	["io latency 500 us"]=148
	["io latency 750 us"]=149
	["io latency 1000 us"]=150
	["io latency <=2 ms"]=151
	["io latency 4 ms"]=152
	["io latency 10 ms"]=153
	["io latency 20 ms"]=154
	["io latency 50 ms"]=155
	["io latency 100 ms"]=156
	["io latency 250 ms"]=157
	["io latency 500 ms"]=158
	["io latency 750 ms"]=159
	["io latency 1000 ms"]=160
	["io latency 2000 ms"]=161
	["io latency >=2000 ms"]=162

	# Disk utilization (11 fields per disk)
)
122
123FIO_OUTPUT="$TEST_DIR/.fio_perf"
124
_fio_perf_report() {
	# Parse the single terse line in $FIO_OUTPUT and store every statistic
	# requested in FIO_PERF_FIELDS into TEST_RUN, keyed by
	# "$FIO_PERF_PREFIX<name>".  Unknown names are reported and skipped.
	local line_count
	line_count=$(wc -l < "$FIO_OUTPUT")
	# More than one terse line means multiple reporting groups — ambiguous.
	if (( line_count > 1 )); then
		echo "_fio_perf: too many terse lines" >&2
		return
	fi

	local perf_name col
	for perf_name in "${FIO_PERF_FIELDS[@]}"; do
		col=${FIO_TERSE_FIELDS["$perf_name"]}
		if [[ -z "$col" ]]; then
			echo "_fio_perf: unknown fio terse field '$perf_name'" >&2
			continue
		fi
		# Semicolon-separated field $col holds the value for this stat.
		TEST_RUN["$FIO_PERF_PREFIX$perf_name"]=$(cut -d ';' -f "$col" "$FIO_OUTPUT")
	done
}
143
__run_fio_libaio() {
	# Run fio with the libaio engine in O_DIRECT mode and capture terse
	# (version 4) output in $FIO_OUTPUT for _fio_perf_report.
	# Arguments: $1 device path(s) for --filename, $2 block size,
	#            $3 rw mode, $4 numjobs, $5 runtime (seconds).
	# These assignments intentionally stay global (no "local"): callers
	# and sibling helpers may rely on them after the call.
	DEVS=$1
	BS=$2
	RW=$3
	JOBS=$4
	RTIME=$5

	QD=128
	BATCH=16
	FIO=fio

	# Fix: quote every expansion so a device path (or $FIO_OUTPUT) that
	# contains whitespace or glob characters cannot be word-split.
	$FIO --output="$FIO_OUTPUT" --output-format=terse --terse-version=4 \
		--group_reporting=1 \
		--bs="$BS" --ioengine=libaio \
		--iodepth="$QD" \
		--iodepth_batch_submit="$BATCH" \
		--iodepth_batch_complete_min="$BATCH" \
		--filename="$DEVS" --gtod_reduce=1 \
		--direct=1 --runtime="$RTIME" --numjobs="$JOBS" --rw="$RW" \
		--name=test > /dev/null 2>&1
}
164
__ublk_loop_backing_file() {
	# Print the backing-file path(s) of loop-target ublk device(s), parsed
	# from "$UBLK list": for each line mentioning "loop", take field 2,
	# drop everything after the first ',', keep what follows the ':'.
	eval $UBLK list > "${UBLK_TMP}"
	file=$(awk '/loop/ {split($2, parts, ","); split(parts[1], kv, ":"); print kv[2]}' "${UBLK_TMP}")
	# Unquoted echo + xargs collapses whitespace/newlines to single spaces.
	echo $file | xargs
}
170
__ublk_dev_id() {
	# Map a device node "/dev/ublkbN" to its numeric id N by stripping the
	# 10-character "/dev/ublkb" prefix (substr is 1-based, so start at 11).
	local node=$1
	dev_id=$(echo "$node" | awk '{print substr($1, 11)}')
	echo "$dev_id"
}
176
__ublk_get_pid() {
	# Print the daemon pid for device $1 (/dev/ublkbN), taken from field 7
	# of the "$UBLK list -n <id>" line that mentions "pid".
	local node=$1
	local id
	id=$(__ublk_dev_id "$node")

	eval $UBLK list -n "$id" > "${UBLK_TMP}"
	pid=$(awk '/pid/ {print $7}' "${UBLK_TMP}")
	echo $pid
}
185
__ublk_get_queue_tid() {
	# Print the io-handler thread id of queue $2 on device $1, taken from
	# field 4 of the "$UBLK list -n <id>" line matching "queue <qid>".
	local node=$1
	local queue=$2
	local id
	id=$(__ublk_dev_id "$node")

	eval $UBLK list -n "${id}" > "${UBLK_TMP}"
	q_tid=$(grep "queue ${queue}" "${UBLK_TMP}" | awk '{print $4}')
	echo $q_tid
}
195
__ublk_get_dev_state() {
	# Print the state token of device $1 (field 11 of the "$UBLK list"
	# line mentioning "state").
	local node=$1
	local id
	id=$(__ublk_dev_id "$node")

	eval $UBLK list -n "${id}" > "${UBLK_TMP}"
	state=$(awk '/state/ {print $11}' "${UBLK_TMP}")
	echo $state
}
204
__run_fio_perf() {
	# Run a fio workload (args forwarded to __run_fio_libaio: devices,
	# block size, rw mode, numjobs, runtime) and record the requested
	# terse fields into TEST_RUN via _fio_perf_report.
	# Fix: forward "$@" quoted so an argument containing whitespace
	# (e.g. a multi-device --filename list) is not re-split.
	__run_fio_libaio "$@"
	_fio_perf_report
}
209
__remove_ublk_dev_return() {
	# Delete ublk device $1; the literal "*" removes every device (-a),
	# anything else is treated as a /dev/ublkbN node.  After waiting for
	# udev to settle, print the delete command's exit status.
	local node="$1"
	if [ "$node" != "*" ]; then
		dev_id=$(__ublk_dev_id "$node")
		eval $UBLK del -n "$dev_id"
	else
		eval $UBLK del -a
	fi
	RES=$?
	udevadm settle
	echo $RES
}
222
__remove_ublk_dev() {
	# Silent variant of __remove_ublk_dev_return: same deletion, but all
	# output (including the echoed status) is discarded.
	# Fix: forward "$@" quoted so argument boundaries are preserved.
	__remove_ublk_dev_return "$@" > /dev/null 2>&1
}
226
__find_free_ublk_id()
{
	# Print the first id in [0, 64] whose /dev/ublkc<id> char node does
	# not exist; print "-" when every id is taken.
	# Fix: the old post-loop check "[ $id == 64 ] && echo -" conflated
	# "loop stopped at 64" with "no free id", so when id 64 was the first
	# free one the function printed BOTH "64" and "-", breaking callers.
	local id found=""
	for id in `seq 0 64`; do
		if [ ! -c "/dev/ublkc${id}" ]; then
			found=$id
			break
		fi
	done
	if [ -n "$found" ]; then
		echo "$found"
	else
		echo "-"
	fi
}
236
__create_ublk_dev()
{
	# Allocate a free ublk id, create the device with
	# "$UBLK add $T_TYPE_PARAMS -n <id>", wait for udev, and print the
	# block node path (/dev/ublkbN).  Exits the script when no id is free.
	# Fixes: "exit -1" is not a valid POSIX exit status (use 1), and the
	# id comparison is now quoted so an unexpected multi-word result
	# produces a clean mismatch instead of a test-expression error.
	id=`__find_free_ublk_id`
	if [ "${id}" == "-" ]; then
		echo "no free ublk device nodes"
		exit 1
	fi
	eval $UBLK add ${T_TYPE_PARAMS} -n "$id" > /dev/null 2>&1
	udevadm settle
	echo "/dev/ublkb${id}"
}
245
__recover_ublk_dev()
{
	# Ask the ublk tool to recover device $1 (/dev/ublkbN) and print the
	# recover command's exit status.
	local node=$1
	local id
	id=$(__ublk_dev_id "$node")

	eval $UBLK recover -n "$id"
	RES=$?
	echo $RES
}
255
__get_cpu_utils()
{
	# Format the integer parts of fio's user/system CPU stats (already in
	# TEST_RUN) as "cpu_util(U% S%)"; awk -F "." keeps what precedes the dot.
	local u s
	u=$(echo ${TEST_RUN["user cpu"]} | awk -F "." '{print $1}')
	s=$(echo ${TEST_RUN["system cpu"]} | awk -F "." '{print $1}')
	echo "cpu_util(${u}% ${s}%)"
}
262
# Run the standard fio performance matrix against an existing device and
# print one iops/cpu summary line per workload:
#   randwrite 4k, randread 4k, randrw 4k, rw 64k, rw 512k.
# Arguments: $1 device type label (currently unused), $2 fio numjobs,
#            $3 device path.  Globals read: TRUNTIME (per-run seconds).
__run_dev_perf_no_create()
{
	local TYPE=$1
	local JOBS=$2
	local DEV=$3
	local RT=$TRUNTIME
	local BS=4k
	# Consumed by _fio_perf_report through bash's dynamic scoping.
	local FIO_PERF_FIELDS=("read iops" "write iops" "user cpu" "system cpu")

	RW="randwrite"
	# NOTE(review): runtime hard-coded to 20 instead of $RT — presumably a
	# deliberate longer write pass; confirm before "fixing".
	__run_fio_perf $DEV $BS $RW $JOBS 20
	cpu_util=`__get_cpu_utils`
	echo -e "\t$RW($BS): jobs $JOBS, iops ${TEST_RUN["write iops"]}, $cpu_util"

	RW="randread"
	__run_fio_perf $DEV $BS $RW $JOBS $RT
	cpu_util=`__get_cpu_utils`
	echo -e "\t$RW($BS): jobs $JOBS, iops ${TEST_RUN["read iops"]}, $cpu_util"

	RW="randrw"
	__run_fio_perf $DEV $BS $RW $JOBS $RT
	cpu_util=`__get_cpu_utils`
	echo -e "\t$RW($BS): jobs $JOBS, iops read ${TEST_RUN["read iops"]} write ${TEST_RUN["write iops"]}, $cpu_util"

	# Sequential mixed read/write at larger block sizes.
	RW="rw"
	BS=64k
	__run_fio_perf $DEV $BS $RW $JOBS $RT
	cpu_util=`__get_cpu_utils`
	echo -e "\t$RW($BS): jobs $JOBS, iops read ${TEST_RUN["read iops"]} write ${TEST_RUN["write iops"]}, $cpu_util"

	RW="rw"
	BS=512k
	__run_fio_perf $DEV $BS $RW $JOBS $RT
	cpu_util=`__get_cpu_utils`
	echo -e "\t$RW($BS): jobs $JOBS, iops read ${TEST_RUN["read iops"]} write ${TEST_RUN["write iops"]}, $cpu_util"

	echo ""
}
301
__run_dev_perf()
{
	# Create a ublk device with the current $T_TYPE_PARAMS, run the fio
	# perf matrix on it, then tear the device down.  $1: fio numjobs.
	JOBS=$1

	DEV=$(__create_ublk_dev)

	echo -e "\tublk add ${T_TYPE_PARAMS}, fio: ($DEV libaio dio io jobs($JOBS))..."
	__run_dev_perf_no_create "ublk" "$JOBS" "$DEV"

	__remove_ublk_dev "$DEV"
}
313
_create_null_image()
{
	# The null target has no backing image; emit an empty path.
	printf '\n'
}
318
_create_image()
{
	# Dispatch to the per-target image creator: _create_<type>_image.
	# $1: target type (e.g. "null", "loop"); remaining args are forwarded.
	# Fix: invoke the handler directly with "$@" instead of eval'ing an
	# unquoted $@, which re-split any argument containing whitespace.
	local type=$1

	shift 1

	"_create_${type}_image" "$@"
}
327
_remove_null_image()
{
	# The null target has no image to remove; this is a deliberate no-op
	# kept as a command so the _remove_<type>_image dispatch always works.
	echo "nothing" > /dev/null
}
332
_remove_image()
{
	# Dispatch to the per-target image remover: _remove_<type>_image.
	# $1: target type; remaining args are forwarded to the remover.
	# Fix: direct call with "$@" (see _create_image) instead of eval'ing
	# an unquoted $@, which re-split whitespace-containing arguments.
	local type=$1
	shift 1
	"_remove_${type}_image" "$@"
}
339