Spaces: optimum/optimum-benchmark-ui
Commit · 7682345
Parent(s): 4d423a9
clean up

Files changed:
- app.py +20 -43
- config_store.py +0 -30
app.py
CHANGED
@@ -1,29 +1,27 @@
 import os
 import time
 import traceback
+
 import gradio as gr
 from huggingface_hub import create_repo, whoami
-
-
-from config_store import (
-    get_process_config,
-    get_inference_config,
-    get_openvino_config,
-    get_pytorch_config,
-)
-from optimum_benchmark.launchers.base import Launcher  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
 from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
 from optimum_benchmark import (
+    Benchmark,
     BenchmarkConfig,
-    PyTorchConfig,
-    OVConfig,
     ProcessConfig,
     InferenceConfig,
-
+    PyTorchConfig,
+    OVConfig,
 )
 from optimum_benchmark.logging_utils import setup_logging
 
+from .config_store import (
+    get_process_config,
+    get_inference_config,
+    get_openvino_config,
+    get_pytorch_config,
+)
 
 DEVICE = "cpu"
 LAUNCHER = "process"
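
For context, a minimal sketch of how the classes imported above fit together in optimum-benchmark. This is not part of the commit; the exact keyword arguments (e.g. scenario=, which older releases spelled benchmark=) vary across library versions, and the model name is a placeholder. ProcessConfig and device="cpu" mirror the LAUNCHER and DEVICE constants above.

from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    ProcessConfig,
    InferenceConfig,
    PyTorchConfig,
)
from optimum_benchmark.logging_utils import setup_logging

setup_logging(level="INFO")

if __name__ == "__main__":
    benchmark_config = BenchmarkConfig(
        name="pytorch_bert",
        launcher=ProcessConfig(),  # LAUNCHER = "process"
        scenario=InferenceConfig(latency=True, memory=True),
        backend=PyTorchConfig(model="bert-base-uncased", device="cpu"),  # DEVICE = "cpu"
    )
    # the Space then pushes the resulting config and report to the Hub
    benchmark_report = Benchmark.launch(benchmark_config)
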
@@ -137,28 +135,13 @@ def build_demo():
         gr.LoginButton(min_width=250)
 
         # add image
-        gr.
+        gr.HTML(
             """<img src="https://huggingface.co/spaces/optimum/optimum-benchmark-ui/resolve/main/huggy_bench.png" style="display: block; margin-left: auto; margin-right: auto; width: 30%;">"""
-        )
-
-        # title text
-        gr.Markdown(
             "<h1 style='text-align: center'>🤗 Optimum-Benchmark Interface 🏋️</h1>"
-        )
-
-        # explanation text
-        gr.HTML(
-            "<h3 style='text-align: center'>"
-            "Zero code Gradio interface of "
-            "<a href='https://github.com/huggingface/optimum-benchmark.git'>"
-            "Optimum-Benchmark"
-            "</a>"
-            "<br>"
-            "</h3>"
             "<p style='text-align: center'>"
-            "This Space uses Optimum
-            "<br>"
-            "
+            "This Space uses <a href='https://github.com/huggingface/optimum-benchmark.git'>Optimum-Benchmark</a> to automatically benchmark a model from the Hub on different backends."
+            "<br>The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
+            "</p>"
         )
 
         model = gr.Dropdown(
@@ -190,12 +173,10 @@ def build_demo():
             inference_config = get_inference_config()
 
         with gr.Row() as backend_configs:
-            with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
-                openvino_config = get_openvino_config()
             with gr.Accordion(label="PyTorch Config", open=False, visible=True):
                 pytorch_config = get_pytorch_config()
-
-
+            with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
+                openvino_config = get_openvino_config()
 
         backends.change(
             inputs=backends,
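
The backends.change(inputs=backends, ...) call is only partially visible in this hunk. Below is a plausible sketch of the visibility toggle it suggests; the handler name and accordion contents are assumptions, not code from this commit.

import gradio as gr

with gr.Blocks() as demo:
    backends = gr.CheckboxGroup(
        label="backends",
        choices=["pytorch", "openvino"],
        value=["pytorch", "openvino"],
    )

    with gr.Row() as backend_configs:
        with gr.Accordion(label="PyTorch Config", open=False, visible=True) as pytorch_accordion:
            gr.Markdown("PyTorch options go here")
        with gr.Accordion(label="OpenVINO Config", open=False, visible=True) as openvino_accordion:
            gr.Markdown("OpenVINO options go here")

    def toggle_backend_configs(selected):
        # show each backend's accordion only when that backend is selected
        return (
            gr.update(visible="pytorch" in selected),
            gr.update(visible="openvino" in selected),
        )

    backends.change(
        fn=toggle_backend_configs,
        inputs=backends,
        outputs=[pytorch_accordion, openvino_accordion],
    )
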
@@ -209,12 +190,10 @@ def build_demo():
         button = gr.Button(value="Run Benchmark", variant="primary")
 
        with gr.Row() as md_output:
-            with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
-                openvino_output = gr.Markdown()
             with gr.Accordion(label="PyTorch Output", open=True, visible=True):
                 pytorch_output = gr.Markdown()
-
-
+            with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
+                openvino_output = gr.Markdown()
 
         backends.change(
             inputs=backends,
@@ -232,14 +211,12 @@ def build_demo():
                 backends,
                 *process_config.values(),
                 *inference_config.values(),
-                *openvino_config.values(),
                 *pytorch_config.values(),
-
+                *openvino_config.values(),
             },
             outputs={
-                openvino_output,
                 pytorch_output,
-
+                openvino_output,
             },
             concurrency_limit=1,
         )
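
The set-valued inputs={...} and outputs={...} above rely on Gradio's dict-based event signature: when inputs is a set of components, the handler receives one dict keyed by component, and a set of outputs expects a dict back. A self-contained sketch; the handler body and component names are assumptions:

import gradio as gr

with gr.Blocks() as demo:
    model = gr.Textbox(label="model")
    pytorch_output = gr.Markdown()
    openvino_output = gr.Markdown()
    button = gr.Button(value="Run Benchmark", variant="primary")

    def run_benchmark(data: dict):
        # inputs arrive as {component: value}; outputs are returned the same way
        return {
            pytorch_output: f"PyTorch report for {data[model]}",
            openvino_output: f"OpenVINO report for {data[model]}",
        }

    button.click(
        fn=run_benchmark,
        inputs={model},
        outputs={pytorch_output, openvino_output},
        concurrency_limit=1,
    )
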
config_store.py
CHANGED
@@ -71,32 +71,6 @@ def get_pytorch_config():
     }
 
 
-def get_onnxruntime_config():
-    return {
-        "onnxruntime.export": gr.Checkbox(
-            value=True,
-            label="onnxruntime.export",
-            info="Exports the model to ONNX",
-        ),
-        "onnxruntime.use_cache": gr.Checkbox(
-            value=True,
-            label="onnxruntime.use_cache",
-            info="Uses cached ONNX model if available",
-        ),
-        "onnxruntime.use_merged": gr.Checkbox(
-            value=True,
-            label="onnxruntime.use_merged",
-            info="Uses merged ONNX model if available",
-        ),
-        "onnxruntime.torch_dtype": gr.Dropdown(
-            value="float32",
-            label="onnxruntime.torch_dtype",
-            choices=["bfloat16", "float16", "float32", "auto"],
-            info="The dtype to use for the model",
-        ),
-    }
-
-
 def get_openvino_config():
     return {
         "openvino.export": gr.Checkbox(
@@ -125,7 +99,3 @@ def get_openvino_config():
             info="Converts model to half precision",
         ),
     }
-
-
-def get_ipex_config():
-    return {}
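
The get_*_config() helpers return dicts of Gradio components keyed by prefixed names such as "openvino.export". A hypothetical helper (not in this commit) showing how such keys could be split back into per-backend keyword arguments:

def split_backend_kwargs(values: dict) -> dict:
    # "openvino.export" -> per_backend["openvino"]["export"]
    per_backend: dict = {}
    for key, value in values.items():
        backend, _, option = key.partition(".")
        per_backend.setdefault(backend, {})[option] = value
    return per_backend

kwargs = split_backend_kwargs({"openvino.export": True, "pytorch.torch_dtype": "float32"})
assert kwargs == {"openvino": {"export": True}, "pytorch": {"torch_dtype": "float32"}}
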