zetavg committed: update

- README.md +3 -3
- app.py +1 -1
- app_hf_ui_demo.py +0 -35
- config-ui-demo.yaml +32 -0
README.md CHANGED

```diff
@@ -1,13 +1,13 @@
 ---
-title:
+title: LLM Tuner - UI Demo
 emoji: 🦙🎛️
 colorFrom: pink
 colorTo: gray
 sdk: gradio
 sdk_version: 3.24.1
 python_version: 3.8.9
-app_file:
-pinned:
+app_file: app.py
+pinned: true
 ---
 
 # HF UI DEMO
```
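The frontmatter above is Hugging Face Spaces metadata: `app_file: app.py` tells the Space which script to run, and `pinned: true` pins the Space on the owner's profile.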
app.py CHANGED

```diff
@@ -132,7 +132,7 @@ def main(
 def read_yaml_config(config_path: Union[str, None] = None):
     if not config_path:
         app_dir = os.path.dirname(os.path.abspath(__file__))
-        config_path = os.path.join(app_dir, 'config.yaml')
+        config_path = os.path.join(app_dir, 'config-ui-demo.yaml')
 
     if not os.path.exists(config_path):
         return None
```
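The hunk above only shows the path-resolution half of `read_yaml_config`. For context, here is a minimal sketch of what the full function might look like, assuming PyYAML (`yaml.safe_load`) does the parsing; the loading body is not visible in this diff, so treat it as an assumption:

```python
import os
from typing import Union

import yaml  # assumption: PyYAML parses the config file


def read_yaml_config(config_path: Union[str, None] = None):
    if not config_path:
        # As of this commit, default to the demo config next to app.py.
        app_dir = os.path.dirname(os.path.abspath(__file__))
        config_path = os.path.join(app_dir, 'config-ui-demo.yaml')

    if not os.path.exists(config_path):
        return None

    # Sketch: the diff shows only the path logic, not the load itself.
    with open(config_path, 'r') as f:
        return yaml.safe_load(f)
```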
app_hf_ui_demo.py DELETED

```diff
@@ -1,35 +0,0 @@
-import os
-import sys
-
-import fire
-import gradio as gr
-
-from llama_lora.globals import Global
-from llama_lora.ui.main_page import main_page, get_page_title, main_page_custom_css
-from llama_lora.utils.data import init_data_dir
-
-
-def main():
-    data_dir = os.path.abspath("./data")
-    Global.default_base_model_name = Global.base_model_name = "decapoda-research/llama-7b-hf"
-    Global.base_model_choices = ["decapoda-research/llama-7b-hf", "nomic-ai/gpt4all-j"]
-    Global.data_dir = data_dir
-    Global.load_8bit = False
-
-    Global.ui_dev_mode = True
-    Global.ui_dev_mode_title_prefix = ""
-    Global.ui_show_sys_info = False
-
-    Global.ui_subtitle = "This is a UI demo of <a href=\"https://github.com/zetavg/LLaMA-LoRA\" target=\"_blank\">LLaMA-LoRA</a>, toolkit for evaluating and fine-tuning LLaMA models. Run the actual one: <a href=\"https://colab.research.google.com/github/zetavg/LLaMA-LoRA/blob/main/LLaMA_LoRA.ipynb\" target=\"_parent\"><img style=\"display: inline;\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
-
-    os.makedirs(data_dir, exist_ok=True)
-    init_data_dir()
-
-    with gr.Blocks(title=get_page_title(), css=main_page_custom_css()) as demo:
-        main_page()
-
-    demo.queue().launch()
-
-
-if __name__ == "__main__":
-    fire.Fire(main)
```
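With this script deleted, the demo setup it hard-coded now lives in config-ui-demo.yaml (added below). The diff does not show how app.py applies those keys to `Global`; the following is a hedged sketch of what that mapping could look like, using only attribute names from the deleted script and key names from the new YAML (`apply_ui_demo_config` is a hypothetical helper):

```python
import os

from llama_lora.globals import Global  # the same Global the deleted script configured


def apply_ui_demo_config(config: dict) -> None:
    # Hypothetical: mirror the assignments app_hf_ui_demo.py made by hand.
    Global.data_dir = os.path.abspath(config.get("data_dir", "./data"))
    Global.default_base_model_name = config["default_base_model_name"]
    Global.base_model_name = config["default_base_model_name"]
    Global.base_model_choices = config.get("base_model_choices", [])
    Global.load_8bit = config.get("load_8bit", False)

    # UI demo settings, matching the values the deleted script set by hand.
    Global.ui_dev_mode = config.get("ui_dev_mode", False)
    Global.ui_dev_mode_title_prefix = config.get("ui_dev_mode_title_prefix", "")
    Global.ui_show_sys_info = config.get("ui_show_sys_info", True)
    Global.ui_subtitle = config.get("ui_subtitle", "")
```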
config-ui-demo.yaml ADDED

```diff
@@ -0,0 +1,32 @@
+server_name: 0.0.0.0
+
+# Basic Configurations
+data_dir: ./data
+default_base_model_name: decapoda-research/llama-7b-hf
+base_model_choices:
+  - decapoda-research/llama-7b-hf
+  - nomic-ai/gpt4all-j
+  - databricks/dolly-v2-7b
+  - databricks/dolly-v2-12b
+load_8bit: false
+trust_remote_code: false
+
+# timezone: Atlantic/Reykjavik
+
+# auth_username: username
+# auth_password: password
+
+# UI Customization
+ui_title: LLM Tuner (UI Demo Mode)
+# ui_emoji: 🦙🎛️
+ui_subtitle: "This is a UI demo of <a href=\"https://github.com/zetavg/LLaMA-LoRA\" target=\"_blank\">LLaMA-LoRA</a>, toolkit for evaluating and fine-tuning LLaMA models. Run the actual one: <a href=\"https://colab.research.google.com/github/zetavg/LLaMA-LoRA/blob/main/LLaMA_LoRA.ipynb\" target=\"_parent\"><img style=\"display: inline;\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+ui_dev_mode_title_prefix: ""
+ui_show_sys_info: false
+
+# WandB
+# enable_wandb: false
+# wandb_api_key: ""
+# default_wandb_project: LLM-Tuner
+
+# Special Modes
+ui_dev_mode: true
```
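To sanity-check the new file outside the app, the config can be loaded directly with PyYAML; a small usage sketch (not part of the commit):

```python
import yaml

# Load the demo config and spot-check keys added in this commit.
with open("config-ui-demo.yaml") as f:
    config = yaml.safe_load(f)

assert config["ui_dev_mode"] is True
assert "decapoda-research/llama-7b-hf" in config["base_model_choices"]
print(config["ui_title"])  # LLM Tuner (UI Demo Mode)
```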