Fix benchmark by making sure we use Python 3 (#9170)
The benchmark runs have been failing since we started requiring Python
3, so this change fixes the benchmarks by ensuring we always use Python
3.
diff --git a/benchmarks/Makefile.am b/benchmarks/Makefile.am
index 2b2204d..3ab35e3 100644
--- a/benchmarks/Makefile.am
+++ b/benchmarks/Makefile.am
@@ -165,7 +165,7 @@
done \
done
-python_cpp_pkg_flags = `pkg-config --cflags --libs python`
+python_cpp_pkg_flags = `pkg-config --cflags --libs python3`
lib_LTLIBRARIES = libbenchmark_messages.la
libbenchmark_messages_la_SOURCES = python/python_benchmark_messages.cc
@@ -186,7 +186,7 @@
@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-pure-python-benchmark
@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'python\' >> python-pure-python-benchmark
@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-pure-python-benchmark
- @echo python tmp/py_benchmark.py '$$@' >> python-pure-python-benchmark
+ @echo python3 tmp/py_benchmark.py '$$@' >> python-pure-python-benchmark
@chmod +x python-pure-python-benchmark
python-cpp-reflection-benchmark: python_add_init
@@ -196,7 +196,7 @@
@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-cpp-reflection-benchmark
@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'cpp\' >> python-cpp-reflection-benchmark
@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-cpp-reflection-benchmark
- @echo python tmp/py_benchmark.py '$$@' >> python-cpp-reflection-benchmark
+ @echo python3 tmp/py_benchmark.py '$$@' >> python-cpp-reflection-benchmark
@chmod +x python-cpp-reflection-benchmark
python-cpp-generated-code-benchmark: python_add_init libbenchmark_messages.la
@@ -206,7 +206,7 @@
@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-cpp-generated-code-benchmark
@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'cpp\' >> python-cpp-generated-code-benchmark
@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-cpp-generated-code-benchmark
- @echo python tmp/py_benchmark.py --cpp_generated '$$@' >> python-cpp-generated-code-benchmark
+ @echo python3 tmp/py_benchmark.py --cpp_generated '$$@' >> python-cpp-generated-code-benchmark
@chmod +x python-cpp-generated-code-benchmark
python-pure-python: python-pure-python-benchmark
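The Makefile.am changes above swap the pkg-config package from `python` to `python3` and make the generated wrapper scripts invoke `python3` explicitly, so the benchmark entry points can no longer silently pick up a Python 2 interpreter. As a complementary belt-and-braces measure (not part of this patch), `py_benchmark.py` could also fail fast if it is ever launched under Python 2 again; a minimal sketch:

```python
# Hypothetical guard (not in this patch): abort early with a clear message
# if the script is invoked with a Python 2 interpreter.
import sys

if sys.version_info[0] < 3:
    sys.exit("py_benchmark.py requires Python 3; invoke it via python3.")
```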
diff --git a/benchmarks/python/python_benchmark_messages.cc b/benchmarks/python/python_benchmark_messages.cc
index ded16fe..ef7e8a2 100644
--- a/benchmarks/python/python_benchmark_messages.cc
+++ b/benchmarks/python/python_benchmark_messages.cc
@@ -7,13 +7,19 @@
#include "datasets/google_message3/benchmark_message3.pb.h"
#include "datasets/google_message4/benchmark_message4.pb.h"
-static PyMethodDef python_benchmark_methods[] = {
- {NULL, NULL, 0, NULL} /* Sentinel */
-};
+static struct PyModuleDef _module = {PyModuleDef_HEAD_INIT,
+ "libbenchmark_messages",
+ "Benchmark messages Python module",
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL};
-
+extern "C" {
PyMODINIT_FUNC
-initlibbenchmark_messages() {
+PyInit_libbenchmark_messages() {
benchmarks::BenchmarkDataset().descriptor();
benchmarks::proto3::GoogleMessage1().descriptor();
benchmarks::proto2::GoogleMessage1().descriptor();
@@ -21,9 +27,6 @@
benchmarks::google_message3::GoogleMessage3().descriptor();
benchmarks::google_message4::GoogleMessage4().descriptor();
- PyObject *m;
-
- m = Py_InitModule("libbenchmark_messages", python_benchmark_methods);
- if (m == NULL)
- return;
+ return PyModule_Create(&_module);
+}
}
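This hunk ports the extension module to the Python 3 initialization protocol: `Py_InitModule` plus an `init<name>` entry point is replaced by a `PyModuleDef` plus a `PyInit_<name>` function that returns the module object, and the `extern "C"` wrapper keeps that symbol unmangled so the Python 3 import machinery can resolve it. A minimal smoke test, assuming the shared object built from `libbenchmark_messages.la` is on `sys.path` and the protobuf C++ runtime is on `LD_LIBRARY_PATH` (both arranged by the generated wrapper scripts):

```python
import os

# Must be set before any protobuf Python code is imported.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"

import libbenchmark_messages  # Python 3 resolves PyInit_libbenchmark_messages

print(libbenchmark_messages.__doc__)  # "Benchmark messages Python module"
```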
diff --git a/benchmarks/util/result_parser.py b/benchmarks/util/result_parser.py
index bdf3a99..d3251a8 100644
--- a/benchmarks/util/result_parser.py
+++ b/benchmarks/util/result_parser.py
@@ -61,7 +61,7 @@
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
- with open(filename, "rb") as f:
+ with open(filename, encoding="utf-8") as f:
results = json.loads(f.read())
for benchmark in results["benchmarks"]:
data_filename = "".join(
@@ -96,7 +96,7 @@
return
if filename[0] != "/":
filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename
- with open(filename, "rb") as f:
+ with open(filename, encoding="utf-8") as f:
results = json.loads(f.read())
for benchmark in results["benchmarks"]:
__results.append({
@@ -126,7 +126,7 @@
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
- with open(filename, "rb") as f:
+ with open(filename, encoding="utf-8") as f:
results_list = json.loads(f.read())
for results in results_list:
for result in results:
@@ -176,7 +176,7 @@
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
- with open(filename, "rb") as f:
+ with open(filename, encoding="utf-8") as f:
results = json.loads(f.read())
for result in results:
total_weight = 0
@@ -212,7 +212,7 @@
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
- with open(filename, "rb") as f:
+ with open(filename, encoding="utf-8") as f:
for line in f:
result_list = re.split(r"[\ \t]+", line)
if result_list[0][:9] != "Benchmark":
@@ -252,7 +252,7 @@
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
- with open(filename, "rb") as f:
+ with open(filename, encoding="utf-8") as f:
results = json.loads(f.read())
for result in results:
_, avg_size = __get_data_size(result["filename"])
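The `result_parser.py` hunks all make the same fix: under Python 3, opening the result files in `"rb"` mode makes `f.read()` return `bytes` rather than `str`, while text mode with an explicit UTF-8 encoding is unambiguous on every Python 3 version. A short sketch of the two equivalent text-mode forms, using a hypothetical `results.json`:

```python
import json

# Text mode with an explicit encoding: f.read() yields str, which
# json.loads accepts everywhere.
with open("results.json", encoding="utf-8") as f:
    results = json.loads(f.read())

# json.load can also consume the file object directly, skipping the
# intermediate string; the patch keeps json.loads for minimal churn.
with open("results.json", encoding="utf-8") as f:
    results = json.load(f)
```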
diff --git a/kokoro/linux/benchmark/run.sh b/kokoro/linux/benchmark/run.sh
index 502f436..acd8737 100755
--- a/kokoro/linux/benchmark/run.sh
+++ b/kokoro/linux/benchmark/run.sh
@@ -23,8 +23,10 @@
./configure CXXFLAGS="-fPIC -O2"
make -j8
pushd python
-python setup.py build --cpp_implementation
-pip install . --user
+virtualenv -p python3 env
+source env/bin/activate
+python3 setup.py build --cpp_implementation
+pip3 install --install-option="--cpp_implementation" .
popd
# build and run Python benchmark
@@ -91,7 +93,7 @@
# print the postprocessed results to the build job log
# TODO(jtattermusch): re-enable uploading results to bigquery (it is currently broken)
make python_add_init
-env LD_LIBRARY_PATH="${repo_root}/src/.libs" python -m util.result_parser \
+env LD_LIBRARY_PATH="${repo_root}/src/.libs" python3 -m util.result_parser \
-cpp="../tmp/cpp_result.json" -java="../tmp/java_result.json" -python="../tmp/python_result.json"
popd
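The `run.sh` changes build and install the protobuf Python package inside a `python3` virtualenv instead of the user site-packages, which guarantees that `python3` and `pip3` in the rest of the script refer to the same interpreter and environment. A small diagnostic one could run after `source env/bin/activate` to confirm this (not part of the patch):

```python
# Confirm the benchmark runs under the virtualenv's Python 3.
import sys

assert sys.version_info[0] == 3, "expected the virtualenv's python3"
print("interpreter:", sys.executable)  # should point into env/bin/
print("prefix:", sys.prefix)           # venv prefix, not the system prefix
```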