Spaces: Running on Zero
Support T4 GPU and CPU - make ZeroGPU decorator conditional
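The decorator change named in the commit title is not among the hunks shown below. As a rough sketch only: one common way to make the ZeroGPU decorator conditional is to wrap spaces.GPU behind a hardware check, so the same handler runs unchanged on T4 GPU or CPU. The SPACES_ZERO_GPU environment check and the gpu_decorator helper are assumptions for illustration, not lines from this commit.

import os

# Assumption: ZeroGPU hardware is detected via the spaces package plus an env flag;
# on T4 GPU or CPU the decorator degrades to a no-op.
try:
    import spaces
    IS_ZERO_GPU = os.environ.get("SPACES_ZERO_GPU", "").lower() in ("1", "true")
except ImportError:
    spaces = None
    IS_ZERO_GPU = False

def gpu_decorator(duration=120):
    """Return spaces.GPU(duration=...) on ZeroGPU, otherwise an identity decorator."""
    if IS_ZERO_GPU and spaces is not None:
        return spaces.GPU(duration=duration)
    return lambda fn: fn

@gpu_decorator(duration=120)
def generate_music(prompt, lyrics, *args, **kwargs):
    ...  # heavy model inference runs here on whichever hardware is available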
app.py CHANGED
@@ -160,6 +160,10 @@ def generate_lyrics(prompt: str, progress=gr.Progress()):
 def generate_music(prompt: str, lyrics: str, lyrics_mode: str, position: str, context_length: int, use_lora: bool, selected_lora: str, timeline_state: dict, progress=gr.Progress()):
     """Generate music clip and add to timeline"""
     try:
+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -418,6 +422,10 @@ def get_timeline_display():
 def remove_clip(clip_number: int, timeline_state: dict):
     """Remove a clip from timeline"""
     try:
+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -468,6 +476,10 @@ def clear_timeline(timeline_state: dict):
 def export_timeline(filename: str, export_format: str, timeline_state: dict, progress=gr.Progress()):
     """Export timeline to audio file"""
     try:
+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -511,6 +523,10 @@ def get_timeline_playback(timeline_state: dict):
     try:
         logger.info(f"[PLAYBACK] get_timeline_playback called with state: {timeline_state is not None}")

+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -606,6 +622,10 @@ def update_preset_description(preset_select_value: str):
 def preview_mastering_preset(preset_name: str, timeline_state: dict):
     """Preview mastering preset on the most recent clip"""
     try:
+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -656,6 +676,10 @@ def apply_mastering_preset(preset_name: str, timeline_state: dict):
         logger.info(f"[STATE DEBUG] timeline_state type: {type(timeline_state)}")
         logger.info(f"[STATE DEBUG] timeline_state value: {timeline_state}")

+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -712,6 +736,10 @@ def apply_mastering_preset(preset_name: str, timeline_state: dict):
 def preview_custom_eq(low_shelf, low_mid, mid, high_mid, high_shelf, timeline_state: dict):
     """Preview custom EQ on the most recent clip"""
     try:
+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -768,6 +796,10 @@ def apply_custom_eq(low_shelf, low_mid, mid, high_mid, high_shelf, timeline_stat
         logger.info(f"[STATE DEBUG] timeline_state type: {type(timeline_state)}")
         logger.info(f"[STATE DEBUG] timeline_state value: {timeline_state}")

+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -831,6 +863,10 @@ def enhance_timeline_clips(enhancement_level: str, timeline_state: dict):
     try:
         logger.info(f"[ENHANCEMENT] Starting enhancement: level={enhancement_level}")

+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -889,6 +925,10 @@ def upscale_timeline_clips(upscale_mode: str, timeline_state: dict):
     try:
         logger.info(f"[UPSCALE] Starting upscale: mode={upscale_mode}")

+        # Initialize timeline state if None
+        if timeline_state is None:
+            timeline_state = {'clips': []}
+
         # Restore timeline from state
         if timeline_state and 'clips' in timeline_state:
             timeline_service.clips = []
@@ -1851,7 +1891,8 @@ with gr.Blocks(
     )

     # Timeline state - persists across GPU context switches
-
+    # Use None to avoid Gradio schema validation errors
+    timeline_state = gr.State(value=None)

     # Generation Section
     gr.Markdown("### 🎼 Music Generation")
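Every hunk above applies the same defensive pattern: the shared state starts out as None, each handler fills in the {'clips': []} default on first use, rebuilds the module-level timeline service from that state, and hands the updated dict back to Gradio. A minimal, self-contained sketch of the pattern follows; TimelineService, add_clip, and the component names are illustrative stand-ins rather than code from app.py.

import gradio as gr

# Stand-in for the app's module-level timeline_service; only the clips list matters here.
class TimelineService:
    def __init__(self):
        self.clips = []

timeline_service = TimelineService()

def add_clip(name, timeline_state):
    # Initialize timeline state if None (the State component starts as None,
    # which avoids the Gradio schema validation errors noted in the diff).
    if timeline_state is None:
        timeline_state = {'clips': []}

    # Restore timeline from state: module-level objects may not survive a
    # ZeroGPU context switch, so the service is rebuilt from per-session state.
    if timeline_state and 'clips' in timeline_state:
        timeline_service.clips = list(timeline_state['clips'])

    timeline_service.clips.append(name or f"clip {len(timeline_service.clips) + 1}")

    # Write the updated clips back into the dict that Gradio persists for the session.
    timeline_state = {'clips': timeline_service.clips}
    return ", ".join(timeline_service.clips), timeline_state

with gr.Blocks() as demo:
    # Timeline state - persists across GPU context switches; None mirrors the change above.
    timeline_state = gr.State(value=None)

    clip_name = gr.Textbox(label="Clip name")
    timeline_view = gr.Textbox(label="Timeline")
    add_btn = gr.Button("Add clip")
    add_btn.click(add_clip, inputs=[clip_name, timeline_state],
                  outputs=[timeline_view, timeline_state])

if __name__ == "__main__":
    demo.launch()

Returning the dict through the gr.State output is what keeps the timeline alive between stateless worker invocations, while defaulting the state to None at construction time sidesteps the schema validation issue called out in the new comment.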