run benchmark more times and select by median

This commit is contained in:
lyon 2022-02-11 09:31:43 +08:00
parent 81ad99bbed
commit d5e3dd8a6d
5 changed files with 69 additions and 9 deletions

View File

@ -1,4 +1,7 @@
cd Release && make -j
cd ..
bash ci_benchmark_run.sh
python3 ci_benchmark_calibration.py
sh ci_benchmark_times.sh 2
python3 ci_benchmark_select.py
#clear
rm benchmark_result_*.json
rm performance_data.json
echo

View File

@ -1,6 +1,12 @@
#!/usr/bin/python3
import json
with open('benchmark_result.json', 'r', encoding='utf8') as json_in:
import sys
import os
import fcntl
benchmark_result_file_path = sys.argv[1]
with open(benchmark_result_file_path, 'r', encoding='utf8') as json_in:
json_data = dict(json.load(json_in))
benchmarks_data = list(json_data['benchmarks'])
@ -31,11 +37,22 @@ benchmarks_data[0]['cpu_time'] = performance_point_res
benchmarks_data[0]['time_unit'] = 'Point'
print('---------------------------------------------')
print('Perfomance point:', int(performance_point_res))
print('Perfomance point:', int(performance_point_res), '\n')
# update json_data
json_data['benchmarks'] = benchmarks_data
# save json
with open('benchmark_result.json', 'w') as json_out:
json.dump(json_data, json_out)
with open(benchmark_result_file_path, 'w') as benchmark_reqult_file:
json.dump(json_data, benchmark_reqult_file)
lock_file_path = 'performance_data.lock'
# save performance_data
with open('performance_data.json', 'r') as perf_json_file:
# lock
fcntl.flock(perf_json_file.fileno(), fcntl.LOCK_EX)
perf_json_data: list = json.load(perf_json_file)
with open('performance_data.json', 'w') as perf_json_file:
perf_json_data.append(performance_point_res)
json.dump(perf_json_data, perf_json_file)

View File

@ -1 +1,2 @@
Release/benchmark/pikascript_benchmark --benchmark_format=json | tee benchmark_result.json
echo write benchmark result to $1
Release/benchmark/pikascript_benchmark --benchmark_format=json | tee $1

View File

@ -0,0 +1,22 @@
#!/usr/bin/python3
"""Select the median benchmark run and promote it to benchmark_result.json.

Reads performance_data.json — a JSON list with one performance point per
benchmark run, in run order, as appended by ci_benchmark_calibration.py —
then copies the result file of the median run (benchmark_result_<i>.json)
over benchmark_result.json.
"""
import json
from select import select  # NOTE(review): unused — looks like an accidental auto-import; confirm and drop
import sys
import os
import shutil

with open('performance_data.json', 'r') as perf_json_file:
    perf_json_data: list = json.load(perf_json_file)

# Pair each performance point with its 1-based run index so the median value
# can be mapped back to its benchmark_result_<i>.json file after sorting.
perf_data_with_index = [
    {'index': run_index, 'value': value}
    for run_index, value in enumerate(perf_json_data, start=1)
]
perf_data_with_index_sorted = sorted(
    perf_data_with_index, key=lambda e: e['value'])
# Upper median for even-length lists (same element int(n/2) picked before).
selected = perf_data_with_index_sorted[len(perf_json_data) // 2]
print('=============================================')
print('selected: ', selected)
shutil.copy('benchmark_result_' +
            str(selected['index']) + '.json', 'benchmark_result.json')

View File

@ -0,0 +1,17 @@
cd Release && make -j
cd ..
RUN_TIMES=$1
# init
rm performance_data*
rm benchmark*.json
touch performance_data.json
echo [] > performance_data.json
for ((i=1; i<=RUN_TIMES;i++))
do
OUT_FILE=benchmark_result_$i.json
# main
bash ci_benchmark_run.sh $OUT_FILE > /dev/null && \
python3 ci_benchmark_calibration.py $OUT_FILE
done