Spaces: Running on Zero
Upload 2 files

- app.py +2 -1
- joycaption.py +2 -2
app.py CHANGED

@@ -1,4 +1,5 @@
-import spaces
+import os
+if os.environ.get("SPACES_ZERO_GPU") is not None: import spaces
 import gradio as gr
 from joycaption import stream_chat_mod, get_text_model, change_text_model, get_repo_gguf
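The net effect in app.py: spaces is now imported only when the SPACES_ZERO_GPU environment variable is set, i.e. when the Space actually runs on ZeroGPU hardware, so the same code can also start on CPU-only or local setups. Since joycaption.py still applies @spaces.GPU, a guard like this is typically paired with a no-op stand-in; the sketch below is an assumption for illustration, not part of this commit:

import os

if os.environ.get("SPACES_ZERO_GPU") is not None:
    import spaces  # real ZeroGPU helper, only available on Hugging Face Spaces
else:
    class spaces:  # hypothetical local stand-in with the same decorator surface
        @staticmethod
        def GPU(func=None, **kwargs):
            # Handles bare use (@spaces.GPU) and parameterized use
            # (@spaces.GPU(duration=60)); either way the wrapped function is
            # returned unchanged, so no GPU scheduling happens locally.
            if callable(func):
                return func
            return lambda f: f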
joycaption.py CHANGED

@@ -264,7 +264,7 @@ load_text_model(MODEL_PATH, None, LOAD_IN_NF4, True)
 #print(f"pixtral_model: {type(pixtral_model)}") #
 #print(f"pixtral_processor: {type(pixtral_processor)}") #

-@spaces.GPU
+@spaces.GPU
 @torch.inference_mode()
 def stream_chat_mod(input_image: Image.Image, caption_type: str, caption_length: Union[str, int], extra_options: list[str], name_input: str, custom_prompt: str,
                     max_new_tokens: int=300, top_p: float=0.9, temperature: float=0.6, model_name: str=MODEL_PATH, progress=gr.Progress(track_tqdm=True)) -> tuple[str, str]:
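For reference: on ZeroGPU Spaces, spaces.GPU is stacked as the outermost decorator so the GPU is held for the whole call, with torch.inference_mode() inside it to skip autograd bookkeeping during generation. A minimal, self-contained sketch (the caption function and its body are placeholders, not the real stream_chat_mod):

import os
import torch

if os.environ.get("SPACES_ZERO_GPU") is not None:
    import spaces
else:
    from types import SimpleNamespace
    spaces = SimpleNamespace(GPU=lambda f: f)  # no-op stand-in off-Spaces

@spaces.GPU              # outermost: holds the ZeroGPU allocation for the call
@torch.inference_mode()  # inner: no gradient tracking during inference
def caption(image) -> str:
    # the model forward pass would run here on the allocated GPU
    return "a caption"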
@@ -464,7 +464,7 @@ def get_repo_gguf(repo_id: str):
     else: return gr.update(value=files[0], choices=files)


-@spaces.GPU
+@spaces.GPU
 def change_text_model(model_name: str=MODEL_PATH, use_client: bool=False, gguf_file: Union[str, None]=None,
                       is_nf4: bool=True, is_lora: bool=True, progress=gr.Progress(track_tqdm=True)):
     global use_inference_client, llm_models
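The context line in the second hunk shows the return convention used by get_repo_gguf: gr.update(value=..., choices=...) repopulates a dropdown and selects an entry from inside an event handler. A minimal, self-contained sketch with hypothetical names (list_gguf_files and the fabricated file list are illustrative, not the app's code):

import gradio as gr

def list_gguf_files(repo_id: str):
    # Stand-in for get_repo_gguf: the real app lists .gguf files from the
    # hub repo; two fabricated names are enough to show the update pattern.
    name = repo_id.split("/")[-1]
    files = [f"{name}.Q4_K_M.gguf", f"{name}.Q8_0.gguf"]
    if not files: return gr.update(choices=[])  # nothing found: clear the dropdown
    else: return gr.update(value=files[0], choices=files)  # select first, offer the rest

with gr.Blocks() as demo:
    repo = gr.Textbox(label="Repo ID", value="user/model-gguf")
    gguf = gr.Dropdown(label="GGUF file", choices=[])
    repo.change(list_gguf_files, inputs=repo, outputs=gguf)

demo.launch()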
|