Added description below the plot
- app.py +3 -0
- content.py +6 -0
app.py
CHANGED

@@ -9,6 +9,7 @@ from gradio_modal import Modal
 from content import (
     HEADER_MARKDOWN,
     LEADERBOARD_TAB_TITLE_MARKDOWN,
+    LEADERBOARD_TAB_BELLOW_PLOT_MARKDOWN,
     SUBMISSION_TAB_TITLE_MARKDOWN,
     MODAL_SUBMIT_MARKDOWN,
     SUBMISSION_DETAILS_MARKDOWN,

@@ -691,6 +692,8 @@ def gradio_app():
             label='Scatter plot',
         )

+        gr.Markdown(LEADERBOARD_TAB_BELLOW_PLOT_MARKDOWN)
+
         with gr.Row():
             tournament_results_title = gr.Markdown(
                 value="## Tournament results for selected model",
content.py
CHANGED

@@ -28,6 +28,12 @@ Here, you can compare models on tasks in the Czech language or submit your own m
 LEADERBOARD_TAB_TITLE_MARKDOWN = """
 """

+LEADERBOARD_TAB_BELLOW_PLOT_MARKDOWN = """
+Explanation:
+- the point symbol is determined by the type of model ('chat': 'circle', 'pretrained': 'triangle', 'ensemble': 'star')
+- the size of the symbol is larger according to the variance across categories
+"""
+
 SUBMISSION_TAB_TITLE_MARKDOWN = """
 ## How to submit
 1. Head down to our modified fork of [lm-evaluation-harness](https://github.com/DCGM/lm-evaluation-harness).