import gradio as gr
import os
import json
import requests
from datetime import datetime
import time
from typing import List, Dict, Any, Generator, Tuple, Optional, Set
import logging
import re
import tempfile
from pathlib import Path
import sqlite3
import hashlib
import threading
from contextlib import contextmanager
from dataclasses import dataclass, field, asdict
from collections import defaultdict
import random
# --- Logging setup ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# --- Document export imports ---
try:
from docx import Document
from docx.shared import Inches, Pt, RGBColor, Mm
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.enum.style import WD_STYLE_TYPE
from docx.oxml.ns import qn
from docx.oxml import OxmlElement
DOCX_AVAILABLE = True
except ImportError:
DOCX_AVAILABLE = False
logger.warning("python-docx not installed. DOCX export will be disabled.")
# --- Environment variables and constants ---
FRIENDLI_TOKEN = os.getenv("FRIENDLI_TOKEN", "")
BRAVE_SEARCH_API_KEY = os.getenv("BRAVE_SEARCH_API_KEY", "")
API_URL = "https://api.friendli.ai/dedicated/v1/chat/completions"
MODEL_ID = "dep86pjolcjjnv8"
DB_PATH = "novel_sessions_v6.db"
# Target word count settings
TARGET_WORDS = 8000 # Target total word count for the finished novel
MIN_WORDS_PER_PART = 800 # Minimum words per part
# --- Environment validation ---
if not FRIENDLI_TOKEN:
logger.error("FRIENDLI_TOKEN not set. Application will not work properly.")
FRIENDLI_TOKEN = "dummy_token_for_testing"
if not BRAVE_SEARCH_API_KEY:
logger.warning("BRAVE_SEARCH_API_KEY not set. Web search features will be disabled.")
# --- Global variables ---
db_lock = threading.Lock()
# Narrative phases definition
NARRATIVE_PHASES = [
"Introduction: Daily Life and Cracks",
"Development 1: Rising Anxiety",
"Development 2: External Shock",
"Development 3: Deepening Internal Conflict",
"Climax 1: Peak of Crisis",
"Climax 2: Moment of Choice",
"Falling Action 1: Consequences and Aftermath",
"Falling Action 2: New Recognition",
"Resolution 1: Changed Daily Life",
"Resolution 2: Open Questions"
]
# Stage configuration - Single writer system
UNIFIED_STAGES = [
("director", "🎬 Director: Integrated Narrative Structure Planning"),
("critic_director", "📝 Critic: Deep Review of Narrative Structure"),
("director", "🎬 Director: Final Master Plan"),
] + [
item for i in range(1, 11)
for item in [
("writer", f"✍️ Writer: Part {i} - {NARRATIVE_PHASES[i-1]}"),
(f"critic_part{i}", f"📝 Part {i} Critic: Immediate Review and Revision Request"),
("writer", f"✍️ Writer: Part {i} Revision")
]
] + [
("critic_final", "📝 Final Critic: Comprehensive Evaluation and Literary Achievement"),
]
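# Resulting pipeline: 3 planning stages + 10 parts x (draft, critique, revision)
# + 1 final review = 34 stages in total.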
# --- Data classes ---
@dataclass
class StoryBible:
"""Story bible for maintaining narrative consistency"""
characters: Dict[str, Dict[str, Any]] = field(default_factory=dict)
settings: Dict[str, str] = field(default_factory=dict)
timeline: List[Dict[str, Any]] = field(default_factory=list)
plot_points: List[Dict[str, Any]] = field(default_factory=list)
themes: List[str] = field(default_factory=list)
symbols: Dict[str, List[str]] = field(default_factory=dict)
style_guide: Dict[str, str] = field(default_factory=dict)
opening_sentence: str = ""
@dataclass
class PartCritique:
"""Critique content for each part"""
part_number: int
continuity_issues: List[str] = field(default_factory=list)
character_consistency: List[str] = field(default_factory=list)
plot_progression: List[str] = field(default_factory=list)
thematic_alignment: List[str] = field(default_factory=list)
technical_issues: List[str] = field(default_factory=list)
strengths: List[str] = field(default_factory=list)
required_changes: List[str] = field(default_factory=list)
literary_quality: List[str] = field(default_factory=list)
# --- Core logic classes ---
class UnifiedNarrativeTracker:
"""Unified narrative tracker for single writer system"""
def __init__(self):
self.story_bible = StoryBible()
self.part_critiques: Dict[int, PartCritique] = {}
self.accumulated_content: List[str] = []
self.word_count_by_part: Dict[int, int] = {}
self.revision_history: Dict[int, List[str]] = defaultdict(list)
self.causal_chains: List[Dict[str, Any]] = []
self.narrative_momentum: float = 0.0
def update_story_bible(self, element_type: str, key: str, value: Any):
"""Update story bible"""
if element_type == "character":
self.story_bible.characters[key] = value
elif element_type == "setting":
self.story_bible.settings[key] = value
elif element_type == "timeline":
self.story_bible.timeline.append({"event": key, "details": value})
elif element_type == "theme":
if key not in self.story_bible.themes:
self.story_bible.themes.append(key)
elif element_type == "symbol":
if key not in self.story_bible.symbols:
self.story_bible.symbols[key] = []
self.story_bible.symbols[key].append(value)
def add_part_critique(self, part_number: int, critique: PartCritique):
"""Add part critique"""
self.part_critiques[part_number] = critique
def check_continuity(self, current_part: int, new_content: str) -> List[str]:
"""Check continuity"""
issues = []
# Character consistency check
for char_name, char_data in self.story_bible.characters.items():
if char_name in new_content:
if "traits" in char_data:
for trait in char_data["traits"]:
if trait.get("abandoned", False):
issues.append(f"{char_name}'s abandoned trait '{trait['name']}' reappears")
# Timeline consistency check (placeholder: events are recorded in the
# story bible, but no cross-part timeline validation is performed yet)
# Causality check
if current_part > 1 and not any(kw in new_content for kw in
['because', 'therefore', 'thus', 'hence', 'consequently']):
issues.append("Unclear causality with previous part")
return issues
def calculate_narrative_momentum(self, part_number: int, content: str) -> float:
"""Calculate narrative momentum"""
momentum = 5.0
# New elements introduced
new_elements = len(set(content.split()) - set(' '.join(self.accumulated_content).split()))
if new_elements > 100:
momentum += 2.0
# Conflict escalation
tension_words = ['crisis', 'conflict', 'tension', 'struggle', 'dilemma']
if any(word in content.lower() for word in tension_words):
momentum += 1.5
# Causal clarity
causal_words = ['because', 'therefore', 'thus', 'consequently', 'hence']
causal_count = sum(1 for word in causal_words if word in content.lower())
momentum += min(causal_count * 0.5, 2.0)
# Repetition penalty
if part_number > 1:
prev_content = self.accumulated_content[-1] if self.accumulated_content else ""
overlap = len(set(content.split()) & set(prev_content.split()))
if overlap > len(content.split()) * 0.3:
momentum -= 3.0
return max(0.0, min(10.0, momentum))
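# Illustrative sketch of the momentum heuristic above; the sample strings are
# hypothetical and this function is never called by the app itself.
def _demo_narrative_momentum() -> None:
    tracker = UnifiedNarrativeTracker()
    tracker.accumulated_content.append("The office hummed with its usual quiet routine.")
    sample = ("Because the merger collapsed, a crisis spread through the team, "
              "and therefore every certainty began to struggle.")
    # Causal connectives and tension vocabulary push the score above the 5.0 baseline.
    print(tracker.calculate_narrative_momentum(2, sample))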
class NovelDatabase:
"""Database management - Modified for single writer system"""
@staticmethod
def init_db():
with sqlite3.connect(DB_PATH) as conn:
conn.execute("PRAGMA journal_mode=WAL")
cursor = conn.cursor()
# Main sessions table
cursor.execute('''
CREATE TABLE IF NOT EXISTS sessions (
session_id TEXT PRIMARY KEY,
user_query TEXT NOT NULL,
language TEXT NOT NULL,
created_at TEXT DEFAULT (datetime('now')),
updated_at TEXT DEFAULT (datetime('now')),
status TEXT DEFAULT 'active',
current_stage INTEGER DEFAULT 0,
final_novel TEXT,
literary_report TEXT,
total_words INTEGER DEFAULT 0,
story_bible TEXT,
narrative_tracker TEXT,
opening_sentence TEXT
)
''')
# Stages table
cursor.execute('''
CREATE TABLE IF NOT EXISTS stages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id TEXT NOT NULL,
stage_number INTEGER NOT NULL,
stage_name TEXT NOT NULL,
role TEXT NOT NULL,
content TEXT,
word_count INTEGER DEFAULT 0,
status TEXT DEFAULT 'pending',
narrative_momentum REAL DEFAULT 0.0,
created_at TEXT DEFAULT (datetime('now')),
updated_at TEXT DEFAULT (datetime('now')),
FOREIGN KEY (session_id) REFERENCES sessions(session_id),
UNIQUE(session_id, stage_number)
)
''')
# Critiques table
cursor.execute('''
CREATE TABLE IF NOT EXISTS critiques (
id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id TEXT NOT NULL,
part_number INTEGER NOT NULL,
critique_data TEXT,
created_at TEXT DEFAULT (datetime('now')),
FOREIGN KEY (session_id) REFERENCES sessions(session_id)
)
''')
conn.commit()
@staticmethod
@contextmanager
def get_db():
with db_lock:
conn = sqlite3.connect(DB_PATH, timeout=30.0)
conn.row_factory = sqlite3.Row
try:
yield conn
finally:
conn.close()
@staticmethod
def create_session(user_query: str, language: str) -> str:
session_id = hashlib.md5(f"{user_query}{datetime.now()}".encode()).hexdigest()
with NovelDatabase.get_db() as conn:
conn.cursor().execute(
'INSERT INTO sessions (session_id, user_query, language) VALUES (?, ?, ?)',
(session_id, user_query, language)
)
conn.commit()
return session_id
@staticmethod
def save_stage(session_id: str, stage_number: int, stage_name: str,
role: str, content: str, status: str = 'complete',
narrative_momentum: float = 0.0):
word_count = len(content.split()) if content else 0
with NovelDatabase.get_db() as conn:
cursor = conn.cursor()
cursor.execute('''
INSERT INTO stages (session_id, stage_number, stage_name, role, content,
word_count, status, narrative_momentum)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(session_id, stage_number)
DO UPDATE SET content=?, word_count=?, status=?, stage_name=?,
narrative_momentum=?, updated_at=datetime('now')
''', (session_id, stage_number, stage_name, role, content, word_count,
status, narrative_momentum, content, word_count, status, stage_name,
narrative_momentum))
# Update total word count
cursor.execute('''
UPDATE sessions
SET total_words = (
SELECT SUM(word_count)
FROM stages
WHERE session_id = ? AND role = 'writer' AND content IS NOT NULL
),
updated_at = datetime('now'),
current_stage = ?
WHERE session_id = ?
''', (session_id, stage_number, session_id))
conn.commit()
@staticmethod
def save_critique(session_id: str, part_number: int, critique: PartCritique):
"""Save critique"""
with NovelDatabase.get_db() as conn:
critique_json = json.dumps(asdict(critique))
conn.cursor().execute(
'INSERT INTO critiques (session_id, part_number, critique_data) VALUES (?, ?, ?)',
(session_id, part_number, critique_json)
)
conn.commit()
@staticmethod
def save_opening_sentence(session_id: str, opening_sentence: str):
"""Save opening sentence"""
with NovelDatabase.get_db() as conn:
conn.cursor().execute(
'UPDATE sessions SET opening_sentence = ? WHERE session_id = ?',
(opening_sentence, session_id)
)
conn.commit()
@staticmethod
def get_writer_content(session_id: str) -> str:
"""Get writer content - Integrate all revisions"""
with NovelDatabase.get_db() as conn:
rows = conn.cursor().execute('''
SELECT content FROM stages
WHERE session_id = ? AND role = 'writer'
AND stage_name LIKE '%Revision%'
ORDER BY stage_number
''', (session_id,)).fetchall()
if rows:
return '\n\n'.join(row['content'] for row in rows if row['content'])
else:
# If no revisions, use drafts
rows = conn.cursor().execute('''
SELECT content FROM stages
WHERE session_id = ? AND role = 'writer'
AND stage_name NOT LIKE '%Revision%'
ORDER BY stage_number
''', (session_id,)).fetchall()
return '\n\n'.join(row['content'] for row in rows if row['content'])
@staticmethod
def save_narrative_tracker(session_id: str, tracker: UnifiedNarrativeTracker):
"""Save unified narrative tracker"""
with NovelDatabase.get_db() as conn:
tracker_data = json.dumps({
'story_bible': asdict(tracker.story_bible),
'part_critiques': {k: asdict(v) for k, v in tracker.part_critiques.items()},
'word_count_by_part': tracker.word_count_by_part,
'causal_chains': tracker.causal_chains,
'narrative_momentum': tracker.narrative_momentum
})
conn.cursor().execute(
'UPDATE sessions SET narrative_tracker = ? WHERE session_id = ?',
(tracker_data, session_id)
)
conn.commit()
@staticmethod
def load_narrative_tracker(session_id: str) -> Optional[UnifiedNarrativeTracker]:
"""Load unified narrative tracker"""
with NovelDatabase.get_db() as conn:
row = conn.cursor().execute(
'SELECT narrative_tracker FROM sessions WHERE session_id = ?',
(session_id,)
).fetchone()
if row and row['narrative_tracker']:
data = json.loads(row['narrative_tracker'])
tracker = UnifiedNarrativeTracker()
# Restore story bible
bible_data = data.get('story_bible', {})
tracker.story_bible = StoryBible(**bible_data)
# Restore critiques
for part_num, critique_data in data.get('part_critiques', {}).items():
tracker.part_critiques[int(part_num)] = PartCritique(**critique_data)
# JSON serialization stringifies int keys; convert them back on load
tracker.word_count_by_part = {int(k): v for k, v in data.get('word_count_by_part', {}).items()}
tracker.causal_chains = data.get('causal_chains', [])
tracker.narrative_momentum = data.get('narrative_momentum', 0.0)
return tracker
return None
# Maintain existing methods
@staticmethod
def get_session(session_id: str) -> Optional[Dict]:
with NovelDatabase.get_db() as conn:
row = conn.cursor().execute('SELECT * FROM sessions WHERE session_id = ?',
(session_id,)).fetchone()
return dict(row) if row else None
@staticmethod
def get_stages(session_id: str) -> List[Dict]:
with NovelDatabase.get_db() as conn:
rows = conn.cursor().execute(
'SELECT * FROM stages WHERE session_id = ? ORDER BY stage_number',
(session_id,)
).fetchall()
return [dict(row) for row in rows]
@staticmethod
def update_final_novel(session_id: str, final_novel: str, literary_report: str = ""):
with NovelDatabase.get_db() as conn:
conn.cursor().execute(
'''UPDATE sessions SET final_novel = ?, status = 'complete',
updated_at = datetime('now'), literary_report = ? WHERE session_id = ?''',
(final_novel, literary_report, session_id)
)
conn.commit()
@staticmethod
def get_active_sessions() -> List[Dict]:
with NovelDatabase.get_db() as conn:
rows = conn.cursor().execute(
'''SELECT session_id, user_query, language, created_at, current_stage, total_words
FROM sessions WHERE status = 'active' ORDER BY updated_at DESC LIMIT 10'''
).fetchall()
return [dict(row) for row in rows]
@staticmethod
def get_total_words(session_id: str) -> int:
"""Get total word count"""
with NovelDatabase.get_db() as conn:
row = conn.cursor().execute(
'SELECT total_words FROM sessions WHERE session_id = ?',
(session_id,)
).fetchone()
return row['total_words'] if row and row['total_words'] else 0
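# Minimal, hedged sketch of the persistence layer (never invoked; it would
# write to the real DB_PATH file in the working directory).
def _demo_database_roundtrip() -> None:
    NovelDatabase.init_db()
    sid = NovelDatabase.create_session("a demo theme", "English")
    NovelDatabase.save_stage(sid, 3, "✍️ Writer: Part 1 Revision", "writer", "Draft text.")
    # get_writer_content prefers 'Revision' stages and falls back to drafts.
    print(NovelDatabase.get_writer_content(sid))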
class WebSearchIntegration:
"""Web search functionality"""
def __init__(self):
self.brave_api_key = BRAVE_SEARCH_API_KEY
self.search_url = "https://api.search.brave.com/res/v1/web/search"
self.enabled = bool(self.brave_api_key)
def search(self, query: str, count: int = 3, language: str = "en") -> List[Dict]:
if not self.enabled:
return []
headers = {
"Accept": "application/json",
"X-Subscription-Token": self.brave_api_key
}
params = {
"q": query,
"count": count,
"search_lang": "ko" if language == "Korean" else "en",
"text_decorations": False,
"safesearch": "moderate"
}
try:
response = requests.get(self.search_url, headers=headers, params=params, timeout=10)
response.raise_for_status()
results = response.json().get("web", {}).get("results", [])
return results
except requests.exceptions.RequestException as e:
logger.error(f"Web search API error: {e}")
return []
def extract_relevant_info(self, results: List[Dict], max_chars: int = 1500) -> str:
if not results:
return ""
extracted = []
total_chars = 0
for i, result in enumerate(results[:3], 1):
title = result.get("title", "")
description = result.get("description", "")
info = f"[{i}] {title}: {description}"
if total_chars + len(info) < max_chars:
extracted.append(info)
total_chars += len(info)
else:
break
return "\n".join(extracted)
class UnifiedLiterarySystem:
"""Single writer progressive literary novel generation system"""
def __init__(self):
self.token = FRIENDLI_TOKEN
self.api_url = API_URL
self.model_id = MODEL_ID
self.narrative_tracker = UnifiedNarrativeTracker()
self.web_search = WebSearchIntegration()
self.current_session_id = None
NovelDatabase.init_db()
def create_headers(self):
return {"Authorization": f"Bearer {self.token}", "Content-Type": "application/json"}
# --- Prompt generation functions ---
def augment_query(self, user_query: str, language: str) -> str:
"""Augment prompt"""
if len(user_query.split()) < 15:
augmented_template = {
"Korean": f"""'{user_query}'
**μμ¬ κ΅¬μ‘° ν΅μ¬:**
- 10κ° ννΈκ° νλμ ν΅ν©λ μ΄μΌκΈ°λ₯Ό ꡬμ±
- κ° ννΈλ μ΄μ ννΈμ νμ°μ κ²°κ³Ό
- μΈλ¬Όμ λͺ
νν λ³ν κΆ€μ (A β B β C)
- μ€μ¬ κ°λ±μ μ μ§μ κ³ μ‘°μ ν΄κ²°
- κ°λ ¬ν μ€μ¬ μμ§μ μλ―Έ λ³ν""",
"English": f"""'{user_query}'
**Narrative Structure Core:**
- 10 parts forming one integrated story
- Each part as inevitable result of previous
- Clear character transformation arc (A β B β C)
- Progressive escalation and resolution of central conflict
- Evolving meaning of powerful central symbol"""
}
return augmented_template.get(language, user_query)
return user_query
def generate_powerful_opening(self, user_query: str, language: str) -> str:
"""Generate powerful opening sentence matching the theme"""
opening_prompt = {
"Korean": f"""μ£Όμ : {user_query}
μ΄ μ£Όμ μ λν κ°λ ¬νκ³ μμ μ μλ 첫문μ₯μ μμ±νμΈμ.
**첫문μ₯ μμ± μμΉ:**
1. μ¦κ°μ μΈ κΈ΄μ₯κ°μ΄λ κΆκΈμ¦ μ λ°
2. νλ²νμ§ μμ μκ°μ΄λ μν© μ μ
3. κ°κ°μ μ΄κ³ ꡬ체μ μΈ μ΄λ―Έμ§
4. μ² νμ μ§λ¬Έμ΄λ μμ€μ μ§μ
5. μκ°κ³Ό 곡κ°μ λ
νΉν μ€μ
**νλ₯ν 첫문μ₯μ μμ ν¨ν΄:**
- "κ·Έκ° μ£½μ λ , ..." (좩격μ μ¬κ±΄)
- "λͺ¨λ κ²μ΄ λλ¬λ€κ³ μκ°ν μκ°..." (λ°μ μκ³ )
- "μΈμμμ κ°μ₯ [νμ©μ¬]ν [λͺ
μ¬]λ..." (λ
νΉν μ μ)
- "[ꡬ체μ νλ]νλ κ²λ§μΌλ‘λ..." (μΌμμ μ¬ν΄μ)
λ¨ νλμ λ¬Έμ₯λ§ μ μνμΈμ.""",
"English": f"""Theme: {user_query}
Generate an unforgettable opening sentence for this theme.
**Opening Sentence Principles:**
1. Immediate tension or curiosity
2. Unusual perspective or situation
3. Sensory and specific imagery
4. Philosophical question or paradox
5. Unique temporal/spatial setting
**Great Opening Patterns:**
- "The day he died, ..." (shocking event)
- "At the moment everything seemed over..." (reversal hint)
- "The most [adjective] [noun] in the world..." (unique definition)
- "Just by [specific action]..." (reinterpretation of ordinary)
Provide only one sentence."""
}
messages = [{"role": "user", "content": opening_prompt.get(language, opening_prompt["Korean"])}]
opening = self.call_llm_sync(messages, "writer", language)
return opening.strip()
def create_director_initial_prompt(self, user_query: str, language: str) -> str:
"""Director initial planning - Enhanced version"""
augmented_query = self.augment_query(user_query, language)
# Generate opening sentence
opening_sentence = self.generate_powerful_opening(user_query, language)
self.narrative_tracker.story_bible.opening_sentence = opening_sentence
if self.current_session_id:
NovelDatabase.save_opening_sentence(self.current_session_id, opening_sentence)
search_results_str = ""
if self.web_search.enabled:
short_query = user_query[:50] if len(user_query) > 50 else user_query
queries = [
f"{short_query} philosophical meaning",
f"human existence meaning {short_query}",
f"{short_query} literary works"
]
for q in queries[:2]:
try:
results = self.web_search.search(q, count=2, language=language)
if results:
search_results_str += self.web_search.extract_relevant_info(results) + "\n"
except Exception as e:
logger.warning(f"Search failed: {str(e)}")
lang_prompts = {
"Korean": f"""λ
Έλ²¨λ¬Ένμ μμ€μ μ² νμ κΉμ΄λ₯Ό μ§λ μ€νΈμμ€(8,000λ¨μ΄)μ κΈ°ννμΈμ.
**μ£Όμ :** {augmented_query}
**νμ 첫문μ₯:** {opening_sentence}
**μ°Έκ³ μλ£:**
{search_results_str if search_results_str else "N/A"}
**νμ λ¬Ένμ μμ:**
1. **μ² νμ νꡬ**
- νλμΈμ μ€μ‘΄μ κ³ λ (μμΈ, μ 체μ±, μλ―Έ μμ€)
- λμ§νΈ μλμ μΈκ° 쑰건
- μλ³Έμ£Όμ μ¬νμ λͺ¨μκ³Ό κ°μΈμ μ ν
- μ£½μ, μ¬λ, μμ μ λν μλ‘μ΄ μ±μ°°
2. **μ¬νμ λ©μμ§**
- κ³κΈ, μ λ, μΈλ κ° κ°λ±
- νκ²½ μκΈ°μ μΈκ°μ μ±
μ
- κΈ°μ λ°μ κ³Ό μΈκ°μ±μ μΆ©λ
- νλ λ―Όμ£Όμ£Όμμ μκΈ°μ κ°μΈμ μν
3. **λ¬Ένμ μμ¬ μ₯μΉ**
- μ€μ¬ μμ : [ꡬ체μ μ¬λ¬Ό/νμ] β [μΆμμ μλ―Έ]
- λ°λ³΅λλ λͺ¨ν°ν: [μ΄λ―Έμ§/νλ] (μ΅μ 5ν λ³μ£Ό)
- λμ‘°λ²: [A vs B]μ μ§μμ κΈ΄μ₯
- μμ§μ 곡κ°: [ꡬ체μ μ₯μ]κ° μλ―Ένλ κ²
- μκ°μ μ£Όκ΄μ νλ¦ (νμ, μκ°, μ μ§)
4. **ν΅ν©λ 10ννΈ κ΅¬μ‘°**
κ° ννΈλ³ ν΅μ¬:
- ννΈ 1: 첫문μ₯μΌλ‘ μμ, μΌμ μ κ· μ΄ β μ² νμ μ§λ¬Έ μ κΈ°
- ννΈ 2-3: μΈλΆ μ¬κ±΄ β λ΄μ μ±μ°° μ¬ν
- ννΈ 4-5: μ¬νμ κ°λ± β κ°μΈμ λλ λ§
- ννΈ 6-7: μκΈ°μ μ μ β μ€μ‘΄μ μ ν
- ννΈ 8-9: μ νμ κ²°κ³Ό β μλ‘μ΄ μΈμ
- ννΈ 10: λ³νλ μΈκ³κ΄ β μ΄λ¦° μ§λ¬Έ
5. **문체 μ§μΉ¨**
- μμ μ°λ¬Έμ²΄: μΌμ μΈμ΄μ μμ μ κ· ν
- μμμ νλ¦κ³Ό κ°κ΄μ λ¬μ¬μ κ΅μ°¨
- μ§§κ³ κ°λ ¬ν λ¬Έμ₯κ³Ό μ±μ°°μ κΈ΄ λ¬Έμ₯μ 리λ¬
- κ°κ°μ λν
μΌλ‘ μΆμμ κ°λ
ꡬν
ꡬ체μ μ΄κ³ νμ μ μΈ κ³νμ μ μνμΈμ.""",
"English": f"""Plan a philosophically profound novella (8,000 words) worthy of Nobel Prize.
**Theme:** {augmented_query}
**Required Opening:** {opening_sentence}
**Reference:**
{search_results_str if search_results_str else "N/A"}
**Essential Literary Elements:**
1. **Philosophical Exploration**
- Modern existential anguish (alienation, identity, loss of meaning)
- Human condition in digital age
- Capitalist contradictions and individual choice
- New reflections on death, love, freedom
2. **Social Message**
- Class, gender, generational conflicts
- Environmental crisis and human responsibility
- Technology vs humanity collision
- Modern democracy crisis and individual role
3. **Literary Devices**
- Central metaphor: [concrete object/phenomenon] β [abstract meaning]
- Recurring motif: [image/action] (minimum 5 variations)
- Contrast: sustained tension of [A vs B]
- Symbolic space: what [specific place] means
- Subjective time flow (flashback, premonition, pause)
4. **Integrated 10-Part Structure**
Each part's core:
- Part 1: Start with opening sentence, daily cracks β philosophical questions
- Part 2-3: External events β deepening introspection
- Part 4-5: Social conflict β personal dilemma
- Part 6-7: Crisis peak β existential choice
- Part 8-9: Choice consequences β new recognition
- Part 10: Changed worldview β open questions
5. **Style Guidelines**
- Poetic prose: balance of everyday language and metaphor
- Stream of consciousness crossing with objective description
- Rhythm of short intense sentences and reflective long ones
- Abstract concepts through sensory details
Provide concrete, innovative plan."""
}
return lang_prompts.get(language, lang_prompts["Korean"])
def create_critic_director_prompt(self, director_plan: str, user_query: str, language: str) -> str:
"""Director plan deep review - Enhanced version"""
lang_prompts = {
"Korean": f"""μμ¬ κ΅¬μ‘° μ λ¬Έκ°λ‘μ μ΄ κΈ°νμ μ¬μΈ΅ λΆμνμΈμ.
**μ μ£Όμ :** {user_query}
**κ°λ
μ κΈ°ν:**
{director_plan}
**μ¬μΈ΅ κ²ν νλͺ©:**
1. **μΈκ³Όκ΄κ³ κ²μ¦**
κ° ννΈ κ° μ°κ²°μ κ²ν νκ³ λ
Όλ¦¬μ λΉμ½μ μ°ΎμΌμΈμ:
- ννΈ 1β2: [μ°κ²°μ± νκ°]
- ννΈ 2β3: [μ°κ²°μ± νκ°]
(λͺ¨λ μ°κ²° μ§μ κ²ν )
2. **μ² νμ κΉμ΄ νκ°**
- μ μλ μ² νμ μ£Όμ κ° μΆ©λΆν κΉμκ°?
- νλμ κ΄λ ¨μ±μ΄ μλκ°?
- λ
μ°½μ ν΅μ°°μ΄ μλκ°?
3. **λ¬Ένμ μ₯μΉμ ν¨κ³Όμ±**
- μμ μ μμ§μ΄ μ κΈ°μ μΌλ‘ μλνλκ°?
- κ³Όλνκ±°λ λΆμ‘±νμ§ μμκ°?
- μ£Όμ μ κΈ΄λ°ν μ°κ²°λλκ°?
4. **μΊλ¦ν° μν¬ μ€ν κ°λ₯μ±**
- λ³νκ° μΆ©λΆν μ μ§μ μΈκ°?
- κ° λ¨κ³μ λκΈ°κ° λͺ
ννκ°?
- μ¬λ¦¬μ μ λ’°μ±μ΄ μλκ°?
5. **8,000λ¨μ΄ μ€ν κ°λ₯μ±**
- κ° ννΈκ° 800λ¨μ΄λ₯Ό μ μ§ν μ μλκ°?
- λμ΄μ§κ±°λ μμΆλλ λΆλΆμ μλκ°?
**νμ κ°μ μ¬νμ ꡬ체μ μΌλ‘ μ μνμΈμ.**""",
"English": f"""As narrative structure expert, deeply analyze this plan.
**Original Theme:** {user_query}
**Director's Plan:**
{director_plan}
**Deep Review Items:**
1. **Causality Verification**
Review connections between parts, find logical leaps:
- Part 1β2: [Connection assessment]
- Part 2β3: [Connection assessment]
(Review all connection points)
2. **Philosophical Depth Assessment**
- Is philosophical theme deep enough?
- Contemporary relevance?
- Original insights?
3. **Literary Device Effectiveness**
- Do metaphors and symbols work organically?
- Not excessive or insufficient?
- Tightly connected to theme?
4. **Character Arc Feasibility**
- Is change sufficiently gradual?
- Are motivations clear at each stage?
- Psychological credibility?
5. **8,000-word Feasibility**
- Can each part sustain 800 words?
- Any dragging or compressed sections?
**Provide specific required improvements.**"""
}
return lang_prompts.get(language, lang_prompts["Korean"])
def create_writer_prompt(self, part_number: int, master_plan: str,
accumulated_content: str, story_bible: StoryBible,
language: str) -> str:
"""Single writer prompt - Enhanced version"""
phase_name = NARRATIVE_PHASES[part_number-1]
target_words = MIN_WORDS_PER_PART
# Part-specific instructions
philosophical_focus = {
1: "Introduce existential anxiety through daily cracks",
2: "First collision between individual and society",
3: "Self-recognition through encounter with others",
4: "Shaking beliefs and clashing values",
5: "Weight of choice and paradox of freedom",
6: "Test of humanity in extreme situations",
7: "Weight of consequences and responsibility",
8: "Self-rediscovery through others' gaze",
9: "Reconciliation with the irreconcilable",
10: "New life possibilities and unresolved questions"
}
literary_techniques = {
1: "Introducing objective correlative",
2: "Contrapuntal narration",
3: "Stream of consciousness",
4: "Subtle shifts in perspective",
5: "Aesthetics of silence and omission",
6: "Subjective transformation of time",
7: "Intersection of multiple viewpoints",
8: "Subversion of metaphor",
9: "Reinterpretation of archetypal images",
10: "Multi-layered open ending"
}
# Story bible summary
bible_summary = f"""
**Characters:** {', '.join(story_bible.characters.keys()) if story_bible.characters else 'TBD'}
**Key Symbols:** {', '.join(story_bible.symbols.keys()) if story_bible.symbols else 'TBD'}
**Themes:** {', '.join(story_bible.themes[:3]) if story_bible.themes else 'TBD'}
**Style:** {story_bible.style_guide.get('voice', 'N/A')}
"""
# Previous content summary
prev_content = ""
if accumulated_content:
prev_parts = accumulated_content.split('\n\n')
if len(prev_parts) >= 1:
prev_content = prev_parts[-1][-2000:] # Last 2000 chars of previous part
lang_prompts = {
"Korean": f"""λΉμ μ νλ λ¬Ένμ μ΅μ μ μ μ μκ°μ
λλ€.
**νμ¬: ννΈ {part_number} - {phase_name}**
{"**νμ 첫문μ₯:** " + story_bible.opening_sentence if part_number == 1 and story_bible.opening_sentence else ""}
**μ΄λ² ννΈμ μ² νμ μ΄μ :** {philosophical_focus[part_number]}
**ν΅μ¬ λ¬Έν κΈ°λ²:** {literary_techniques[part_number]}
**μ 체 κ³ν:**
{master_plan}
**μ€ν 리 λ°μ΄λΈ:**
{bible_summary}
**μ§μ λ΄μ©:**
{prev_content if prev_content else "첫 ννΈμ
λλ€"}
**ννΈ {part_number} μμ± μ§μΉ¨:**
1. **λΆλ:** {target_words}-900 λ¨μ΄ (νμ)
2. **λ¬Ένμ μμ¬ μꡬμ¬ν:**
- μ΅μ 3κ°μ λ
μ°½μ μμ /μ§μ
- 1κ° μ΄μμ μμ§μ μ΄λ―Έμ§ μ¬ν
- κ°κ°μ λ¬μ¬μ μΆμμ μ¬μ μ μ΅ν©
- 리λ¬κ° μλ λ¬Έμ₯ κ΅¬μ± (μ₯λ¨μ λ³μ£Ό)
3. **νλμ κ³ λ νν:**
- λμ§νΈ μλμ μμΈκ°
- μλ³Έμ£Όμμ μΆμ λΆμ‘°λ¦¬
- κ΄κ³μ νλ©΄μ±κ³Ό μ§μ μ± κ°λ§
- μλ―Έ μΆκ΅¬μ 무μλ―Έμ μ§λ©΄
4. **μ¬νμ λ©μμ§ λ΄μ¬ν:**
- μ§μ μ μ£Όμ₯μ΄ μλ μν©κ³Ό μΈλ¬Όμ ν΅ν μμ
- κ°μΈμ κ³ ν΅κ³Ό μ¬ν ꡬ쑰μ μ°κ²°
- λ―Έμμ μΌμκ³Ό κ±°μμ λ¬Έμ μ κ΅μ°¨
5. **μμ¬μ μΆμ§λ ₯:**
- μ΄μ ννΈμ νμ°μ κ²°κ³Όλ‘ μμ
- μλ‘μ΄ κ°λ± μΈ΅μ μΆκ°
- λ€μ ννΈλ₯Ό ν₯ν κΈ΄μ₯κ° μ‘°μ±
**λ¬Ένμ κΈκΈ°:**
- μ§λΆν ννμ΄λ μν¬μ μμ
- κ°μ μ μ§μ μ μ€λͺ
- λλμ νλ¨μ΄λ κ΅ν
- μΈμμ μΈ ν΄κ²°μ΄λ μμ
ννΈ {part_number}λ₯Ό κΉμ΄ μλ λ¬Ένμ μ±μ·¨λ‘ λ§λμΈμ.""",
"English": f"""You are a writer at the forefront of contemporary literature.
**Current: Part {part_number} - {phase_name}**
{"**Required Opening:** " + story_bible.opening_sentence if part_number == 1 and story_bible.opening_sentence else ""}
**Philosophical Focus:** {philosophical_focus[part_number]}
**Core Literary Technique:** {literary_techniques[part_number]}
**Master Plan:**
{master_plan}
**Story Bible:**
{bible_summary}
**Previous Content:**
{prev_content if prev_content else "This is the first part"}
**Part {part_number} Guidelines:**
1. **Length:** {target_words}-900 words (mandatory)
2. **Literary Device Requirements:**
- Minimum 3 original metaphors/similes
- Deepen at least 1 symbolic image
- Fusion of sensory description and abstract thought
- Rhythmic sentence composition (variation of long/short)
3. **Modern Anguish Expression:**
- Digital age alienation
- Absurdity of capitalist life
- Surface relationships vs authenticity yearning
- Meaning pursuit vs confronting meaninglessness
4. **Social Message Internalization:**
- Implication through situation and character, not direct claim
- Connection between individual pain and social structure
- Intersection of micro daily life and macro problems
5. **Narrative Momentum:**
- Start as inevitable result of previous part
- Add new conflict layers
- Create tension toward next part
**Literary Taboos:**
- ClichΓ©d expressions or trite metaphors
- Direct emotion explanation
- Moral judgment or preaching
- Artificial resolution or comfort
Make Part {part_number} a profound literary achievement."""
}
return lang_prompts.get(language, lang_prompts["Korean"])
def create_part_critic_prompt(self, part_number: int, part_content: str,
master_plan: str, accumulated_content: str,
story_bible: StoryBible, language: str) -> str:
"""Part-by-part immediate critique - Enhanced version"""
lang_prompts = {
"Korean": f"""ννΈ {part_number}μ λ¬Ένμ μ±μ·¨λλ₯Ό μ격ν νκ°νμΈμ.
**λ§μ€ν°νλ ννΈ {part_number} μꡬμ¬ν:**
{self._extract_part_plan(master_plan, part_number)}
**μμ±λ λ΄μ©:**
{part_content}
**μ€ν 리 λ°μ΄λΈ 체ν¬:**
- μΊλ¦ν°: {', '.join(story_bible.characters.keys())}
- μ€μ : {', '.join(story_bible.settings.keys())}
**νκ° κΈ°μ€:**
1. **λ¬Ένμ μμ¬ (30%)**
- μμ μ μμ§μ λ
μ°½μ±
- μΈμ΄μ μμ λ°λ
- μ΄λ―Έμ§μ μ λͺ
λμ κΉμ΄
- λ¬Έμ₯μ 리λ¬κ³Ό μμ
μ±
2. **μ² νμ κΉμ΄ (25%)**
- μ€μ‘΄μ μ§λ¬Έμ μ κΈ°
- νλμΈμ 쑰건 νꡬ
- 보νΈμ±κ³Ό νΉμμ±μ κ· ν
- μ¬μ μ λ
μ°½μ±
3. **μ¬νμ ν΅μ°° (20%)**
- μλμ μ μ ν¬μ°©
- ꡬ쑰μ κ°μΈμ κ΄κ³
- λΉνμ μκ°μ μ리ν¨
- λμμ μμλ ₯
4. **μμ¬μ μμ±λ (25%)**
- μΈκ³Όκ΄κ³μ νμ°μ±
- κΈ΄μ₯κ°μ μ μ§
- μΈλ¬Όμ μ
체μ±
- ꡬ쑰μ ν΅μΌμ±
**ꡬ체μ μ§μ μ¬ν:**
- μ§λΆν νν: [μμμ λμ]
- μ² νμ μ²μ°© λΆμ‘±: [보μ λ°©ν₯]
- μ¬νμ λ©μμ§ λΆλͺ
ν: [κ°ν λ°©μ]
- μμ¬μ νμ : [μμ νμ]
**νμ κ°μ μꡬ:**
λ¬Ένμ μμ€μ λ
Έλ²¨μ κΈμΌλ‘ λμ΄μ¬λ¦¬κΈ° μν ꡬ체μ μμ μμ μ μνμΈμ.""",
"English": f"""Strictly evaluate literary achievement of Part {part_number}.
**Master Plan Part {part_number} Requirements:**
{self._extract_part_plan(master_plan, part_number)}
**Written Content:**
{part_content}
**Story Bible Check:**
- Characters: {', '.join(story_bible.characters.keys()) if story_bible.characters else 'None yet'}
- Settings: {', '.join(story_bible.settings.keys()) if story_bible.settings else 'None yet'}
**Evaluation Criteria:**
1. **Literary Rhetoric (30%)**
- Originality of metaphor and symbol
- Poetic density of language
- Clarity and depth of imagery
- Rhythm and musicality of sentences
2. **Philosophical Depth (25%)**
- Raising existential questions
- Exploring modern human condition
- Balance of universality and specificity
- Originality of thought
3. **Social Insight (20%)**
- Capturing zeitgeist
- Relationship between structure and individual
- Sharpness of critical perspective
- Alternative imagination
4. **Narrative Completion (25%)**
- Inevitability of causality
- Maintaining tension
- Character dimensionality
- Structural unity
**Specific Points:**
- ClichΓ©d expressions: [examples and alternatives]
- Insufficient philosophical exploration: [enhancement direction]
- Unclear social message: [strengthening methods]
- Narrative gaps: [needed revisions]
**Required Improvements:**
Provide specific revisions to elevate literary level to Nobel Prize standard."""
}
return lang_prompts.get(language, lang_prompts["Korean"])
def create_writer_revision_prompt(self, part_number: int, original_content: str,
critic_feedback: str, language: str) -> str:
"""Writer revision prompt"""
lang_prompts = {
"Korean": f"""ννΈ {part_number}λ₯Ό λΉνμ λ°λΌ μμ νμΈμ.
**μλ³Έ:**
{original_content}
**λΉν νΌλλ°±:**
{critic_feedback}
**μμ μ§μΉ¨:**
1. λͺ¨λ 'νμ μμ ' μ¬νμ λ°μ
2. κ°λ₯ν 'κΆμ₯ κ°μ ' μ¬νλ ν¬ν¨
3. μλ³Έμ κ°μ μ μ μ§
4. λΆλ {MIN_WORDS_PER_PART}λ¨μ΄ μ΄μ μ μ§
5. μκ°λ‘μμ μΌκ΄λ λͺ©μ리 μ μ§
6. λ¬Ένμ μμ€μ ν λ¨κ³ λμ΄κΈ°
μμ λ³Έλ§ μ μνμΈμ. μ€λͺ
μ λΆνμν©λλ€.""",
"English": f"""Revise Part {part_number} according to critique.
**Original:**
{original_content}
**Critique Feedback:**
{critic_feedback}
**Revision Guidelines:**
1. Reflect all 'Required fixes'
2. Include 'Recommended improvements' where possible
3. Maintain original strengths
4. Keep length {MIN_WORDS_PER_PART}+ words
5. Maintain consistent authorial voice
6. Elevate literary level
Present only the revision. No explanation needed."""
}
return lang_prompts.get(language, lang_prompts["Korean"])
def create_final_critic_prompt(self, complete_novel: str, word_count: int,
story_bible: StoryBible, language: str) -> str:
"""Final comprehensive evaluation"""
lang_prompts = {
"Korean": f"""μμ±λ μμ€μ μ’
ν© νκ°νμΈμ.
**μν μ 보:**
- μ΄ λΆλ: {word_count}λ¨μ΄
- λͺ©ν: 8,000λ¨μ΄
**νκ° κΈ°μ€:**
1. **μμ¬μ ν΅ν©μ± (30μ )**
- 10κ° ννΈκ° νλμ μ΄μΌκΈ°λ‘ ν΅ν©λμλκ°?
- μΈκ³Όκ΄κ³κ° λͺ
ννκ³ νμ°μ μΈκ°?
- λ°λ³΅μ΄λ μν μμ΄ μ§νλλκ°?
2. **μΊλ¦ν° μν¬ (25μ )**
- μ£ΌμΈκ³΅μ λ³νκ° μ€λλ ₯ μλκ°?
- λ³νκ° μ μ§μ μ΄κ³ μμ°μ€λ¬μ΄κ°?
- μ΅μ’
μνκ° μ΄κΈ°μ λͺ
νν λ€λ₯Έκ°?
3. **λ¬Ένμ μ±μ·¨ (25μ )**
- μ£Όμ κ° κΉμ΄ μκ² νꡬλμλκ°?
- μμ§μ΄ ν¨κ³Όμ μΌλ‘ νμ©λμλκ°?
- λ¬Έμ²΄κ° μΌκ΄λκ³ μλ¦λ€μ΄κ°?
- νλμ μ² νκ³Ό μ¬νμ λ©μμ§κ° λ
Ήμμλκ°?
4. **κΈ°μ μ μμ±λ (20μ )**
- λͺ©ν λΆλμ λ¬μ±νλκ°?
- κ° ννΈκ° κ· ν μκ² μ κ°λμλκ°?
- λ¬Έλ²κ³Ό ννμ΄ μ ννκ°?
**μ΄μ : /100μ **
ꡬ체μ μΈ κ°μ κ³Ό μ½μ μ μ μνμΈμ.""",
"English": f"""Comprehensively evaluate the completed novel.
**Work Info:**
- Total length: {word_count} words
- Target: 8,000 words
**Evaluation Criteria:**
1. **Narrative Integration (30 points)**
- Are 10 parts integrated into one story?
- Clear and inevitable causality?
- Progress without repetition or cycles?
2. **Character Arc (25 points)**
- Convincing protagonist transformation?
- Gradual and natural changes?
- Final state clearly different from initial?
3. **Literary Achievement (25 points)**
- Theme explored with depth?
- Symbols used effectively?
- Consistent and beautiful style?
- Contemporary philosophy and social message integrated?
4. **Technical Completion (20 points)**
- Target length achieved?
- Each part balanced in development?
- Grammar and expression accurate?
**Total Score: /100 points**
Present specific strengths and weaknesses."""
}
return lang_prompts.get(language, lang_prompts["Korean"])
def _extract_part_plan(self, master_plan: str, part_number: int) -> str:
"""Extract specific part plan from master plan"""
lines = master_plan.split('\n')
part_section = []
capturing = False
for line in lines:
if f"Part {part_number}:" in line or f"ννΈ {part_number}:" in line:
capturing = True
elif capturing and (f"Part {part_number+1}:" in line or f"ννΈ {part_number+1}:" in line):
break
elif capturing:
part_section.append(line)
return '\n'.join(part_section) if part_section else "Cannot find the part plan."
# --- LLM call functions ---
def call_llm_sync(self, messages: List[Dict[str, str]], role: str, language: str) -> str:
full_content = ""
for chunk in self.call_llm_streaming(messages, role, language):
full_content += chunk
if full_content.startswith("❌"):
raise Exception(f"LLM Call Failed: {full_content}")
return full_content
def call_llm_streaming(self, messages: List[Dict[str, str]], role: str,
language: str) -> Generator[str, None, None]:
try:
system_prompts = self.get_system_prompts(language)
full_messages = [{"role": "system", "content": system_prompts.get(role, "")}, *messages]
max_tokens = 15000 if role == "writer" else 10000
payload = {
"model": self.model_id,
"messages": full_messages,
"max_tokens": max_tokens,
"temperature": 0.8,
"top_p": 0.95,
"presence_penalty": 0.5,
"frequency_penalty": 0.3,
"stream": True
}
response = requests.post(
self.api_url,
headers=self.create_headers(),
json=payload,
stream=True,
timeout=180
)
if response.status_code != 200:
yield f"β API Error (Status Code: {response.status_code})"
return
buffer = ""
for line in response.iter_lines():
if not line:
continue
try:
line_str = line.decode('utf-8').strip()
if not line_str.startswith("data: "):
continue
data_str = line_str[6:]
if data_str == "[DONE]":
break
data = json.loads(data_str)
choices = data.get("choices", [])
if choices and choices[0].get("delta", {}).get("content"):
content = choices[0]["delta"]["content"]
buffer += content
if len(buffer) >= 50 or '\n' in buffer:
yield buffer
buffer = ""
time.sleep(0.01)
except Exception as e:
logger.error(f"Chunk processing error: {str(e)}")
continue
if buffer:
yield buffer
except Exception as e:
logger.error(f"Streaming error: {type(e).__name__}: {str(e)}")
yield f"β Error occurred: {str(e)}"
def get_system_prompts(self, language: str) -> Dict[str, str]:
"""Role-specific system prompts - Enhanced version"""
base_prompts = {
"Korean": {
"director": """λΉμ μ νλ μΈκ³λ¬Ένμ μ μ μ μ§ν₯νλ μνμ μ€κ³ν©λλ€.
κΉμ μ² νμ ν΅μ°°κ³Ό λ μΉ΄λ‘μ΄ μ¬ν λΉνμ κ²°ν©νμΈμ.
μΈκ° 쑰건μ 볡μ‘μ±μ 10κ°μ μ κΈ°μ ννΈλ‘ ꡬννμΈμ.
λ
μμ μνΌμ λ€νλ€ κ°λ ¬ν 첫문μ₯λΆν° μμνμΈμ.""",
"critic_director": """μμ¬ κ΅¬μ‘°μ λ
Όλ¦¬μ±κ³Ό μ€ν κ°λ₯μ±μ κ²μ¦νλ μ λ¬Έκ°μ
λλ€.
μΈκ³Όκ΄κ³μ νμ μ μ°Ύμλ΄μΈμ.
μΊλ¦ν° λ°μ μ μ λΉμ±μ νκ°νμΈμ.
μ² νμ κΉμ΄μ λ¬Ένμ κ°μΉλ₯Ό νλ¨νμΈμ.
8,000λ¨μ΄ λΆλμ μ μ μ±μ νλ¨νμΈμ.""",
"writer": """λΉμ μ μΈμ΄μ μ°κΈμ μ¬μ
λλ€.
μΌμμ΄λ₯Ό μλ‘, ꡬ체λ₯Ό μΆμμΌλ‘, κ°μΈμ 보νΈμΌλ‘ λ³ννμΈμ.
νλμΈμ μνΌμ μ΄λ κ³Ό λΉμ λμμ ν¬μ°©νμΈμ.
λ
μκ° μμ μ μ¬λ°κ²¬νκ² λ§λλ κ±°μΈμ΄ λμΈμ.""",
"critic_final": """λΉμ μ μνμ λ¬Ένμ μ μ¬λ ₯μ κ·Ήλννλ μ‘°λ ₯μμ
λλ€.
νλ²ν¨μ λΉλ²ν¨μΌλ‘ μ΄λλ λ μΉ΄λ‘μ΄ ν΅μ°°μ μ 곡νμΈμ.
μκ°μ 무μμμ μ λ 보μμ λ°κ΅΄νμΈμ.
νν μλ κΈ°μ€μΌλ‘ μ΅κ³ λ₯Ό μꡬνμΈμ."""
},
"English": {
"director": """You design works aiming for the pinnacle of contemporary world literature.
Combine deep philosophical insights with sharp social criticism.
Implement the complexity of the human condition in 10 organic parts.
Start with an intense opening sentence that shakes the reader's soul.""",
"critic_director": """You are an expert verifying narrative logic and feasibility.
Find gaps in causality.
Evaluate credibility of character development.
Judge philosophical depth and literary value.
Judge appropriateness of 8,000-word length.""",
"writer": """You are an alchemist of language.
Transform everyday language into poetry, concrete into abstract, individual into universal.
Capture both darkness and light of the modern soul.
Become a mirror where readers rediscover themselves.""",
"critic_final": """You are a collaborator maximizing the work's literary potential.
Provide sharp insights leading ordinariness to extraordinariness.
Excavate gems sleeping in the writer's unconscious.
Demand the best with uncompromising standards."""
}
}
prompts = base_prompts.get(language, base_prompts["Korean"]).copy()
# Add part-specific critic prompts
for i in range(1, 11):
prompts[f"critic_part{i}"] = f"""You are Part {i} dedicated critic.
Review causality with previous parts as top priority.
Verify character consistency and development.
Evaluate alignment with master plan.
Assess literary level and philosophical depth.
Provide specific and actionable revision instructions."""
return prompts
# --- Main process ---
def process_novel_stream(self, query: str, language: str,
session_id: Optional[str] = None) -> Generator[Tuple[str, List[Dict[str, Any]], str], None, None]:
"""Single writer novel generation process"""
try:
resume_from_stage = 0
if session_id:
self.current_session_id = session_id
session = NovelDatabase.get_session(session_id)
if session:
query = session['user_query']
language = session['language']
resume_from_stage = session['current_stage'] + 1
saved_tracker = NovelDatabase.load_narrative_tracker(session_id)
if saved_tracker:
self.narrative_tracker = saved_tracker
else:
self.current_session_id = NovelDatabase.create_session(query, language)
logger.info(f"Created new session: {self.current_session_id}")
stages = []
if resume_from_stage > 0:
stages = [{
"name": s['stage_name'],
"status": s['status'],
"content": s.get('content', ''),
"word_count": s.get('word_count', 0),
"momentum": s.get('narrative_momentum', 0.0)
} for s in NovelDatabase.get_stages(self.current_session_id)]
total_words = NovelDatabase.get_total_words(self.current_session_id)
for stage_idx in range(resume_from_stage, len(UNIFIED_STAGES)):
role, stage_name = UNIFIED_STAGES[stage_idx]
if stage_idx >= len(stages):
stages.append({
"name": stage_name,
"status": "active",
"content": "",
"word_count": 0,
"momentum": 0.0
})
else:
stages[stage_idx]["status"] = "active"
yield f"π Processing... (Current {total_words:,} words)", stages, self.current_session_id
prompt = self.get_stage_prompt(stage_idx, role, query, language, stages)
stage_content = ""
for chunk in self.call_llm_streaming([{"role": "user", "content": prompt}], role, language):
stage_content += chunk
stages[stage_idx]["content"] = stage_content
stages[stage_idx]["word_count"] = len(stage_content.split())
yield f"π {stage_name} writing... ({total_words + stages[stage_idx]['word_count']:,} words)", stages, self.current_session_id
# Content processing and tracking
if role == "writer":
# Calculate part number
part_num = self._get_part_number(stage_idx)
if part_num:
self.narrative_tracker.accumulated_content.append(stage_content)
self.narrative_tracker.word_count_by_part[part_num] = len(stage_content.split())
# Calculate narrative momentum
momentum = self.narrative_tracker.calculate_narrative_momentum(part_num, stage_content)
stages[stage_idx]["momentum"] = momentum
# Update story bible
self._update_story_bible_from_content(stage_content, part_num)
stages[stage_idx]["status"] = "complete"
NovelDatabase.save_stage(
self.current_session_id, stage_idx, stage_name, role,
stage_content, "complete", stages[stage_idx].get("momentum", 0.0)
)
NovelDatabase.save_narrative_tracker(self.current_session_id, self.narrative_tracker)
total_words = NovelDatabase.get_total_words(self.current_session_id)
yield f"β
{stage_name} completed (Total {total_words:,} words)", stages, self.current_session_id
# Final processing
final_novel = NovelDatabase.get_writer_content(self.current_session_id)
final_word_count = len(final_novel.split())
final_report = self.generate_literary_report(final_novel, final_word_count, language)
NovelDatabase.update_final_novel(self.current_session_id, final_novel, final_report)
yield f"β
Novel completed! Total {final_word_count:,} words", stages, self.current_session_id
except Exception as e:
logger.error(f"Novel generation process error: {e}", exc_info=True)
yield f"β Error occurred: {e}", stages if 'stages' in locals() else [], self.current_session_id
def get_stage_prompt(self, stage_idx: int, role: str, query: str,
language: str, stages: List[Dict]) -> str:
"""Generate stage-specific prompt"""
if stage_idx == 0: # Director initial planning
return self.create_director_initial_prompt(query, language)
if stage_idx == 1: # Director plan review
return self.create_critic_director_prompt(stages[0]["content"], query, language)
if stage_idx == 2: # Director final master plan
return self.create_director_final_prompt(stages[0]["content"], stages[1]["content"], query, language)
master_plan = stages[2]["content"]
# Writer part writing
if role == "writer" and "Revision" not in stages[stage_idx]["name"]:
part_num = self._get_part_number(stage_idx)
accumulated = '\n\n'.join(self.narrative_tracker.accumulated_content)
return self.create_writer_prompt(part_num, master_plan, accumulated,
self.narrative_tracker.story_bible, language)
# Part-specific critique
if role.startswith("critic_part"):
part_num = int(role.replace("critic_part", ""))
# Find writer content for this part
writer_content = stages[stage_idx-1]["content"]
accumulated = '\n\n'.join(self.narrative_tracker.accumulated_content[:-1])
return self.create_part_critic_prompt(part_num, writer_content, master_plan,
accumulated, self.narrative_tracker.story_bible, language)
# Writer revision
if role == "writer" and "Revision" in stages[stage_idx]["name"]:
part_num = self._get_part_number(stage_idx)
original_content = stages[stage_idx-2]["content"] # Original
critic_feedback = stages[stage_idx-1]["content"] # Critique
return self.create_writer_revision_prompt(part_num, original_content,
critic_feedback, language)
# Final critique
if role == "critic_final":
complete_novel = NovelDatabase.get_writer_content(self.current_session_id)
word_count = len(complete_novel.split())
return self.create_final_critic_prompt(complete_novel, word_count,
self.narrative_tracker.story_bible, language)
return ""
def create_director_final_prompt(self, initial_plan: str, critic_feedback: str,
user_query: str, language: str) -> str:
"""Director final master plan"""
return f"""Reflect the critique and complete the final master plan.
**Original Theme:** {user_query}
**Initial Plan:**
{initial_plan}
**Critique Feedback:**
{critic_feedback}
**Final Master Plan Requirements:**
1. Reflect all critique points
2. Specific content and causality for 10 parts
3. Clear transformation stages of protagonist
4. Meaning evolution process of central symbol
5. Feasibility of 800 words per part
6. Implementation of philosophical depth and social message
Present concrete and executable final plan."""
def _get_part_number(self, stage_idx: int) -> Optional[int]:
"""Extract part number from stage index"""
stage_name = UNIFIED_STAGES[stage_idx][1]
match = re.search(r'Part (\d+)', stage_name)
if match:
return int(match.group(1))
return None
def _update_story_bible_from_content(self, content: str, part_num: int):
"""Auto-update story bible from content"""
# Simple keyword-based extraction (more sophisticated NLP needed in reality)
lines = content.split('\n')
# Extract character names (words starting with capital letters)
for line in lines:
words = line.split()
for word in words:
if word and word[0].isupper() and len(word) > 1:
if word not in self.narrative_tracker.story_bible.characters:
self.narrative_tracker.story_bible.characters[word] = {
"first_appearance": part_num,
"traits": []
}
def generate_literary_report(self, complete_novel: str, word_count: int, language: str) -> str:
"""Generate final literary evaluation report"""
prompt = self.create_final_critic_prompt(complete_novel, word_count,
self.narrative_tracker.story_bible, language)
try:
report = self.call_llm_sync([{"role": "user", "content": prompt}],
"critic_final", language)
return report
except Exception as e:
logger.error(f"Final report generation failed: {e}")
return "Error occurred during report generation"
# --- Utility functions ---
def process_query(query: str, language: str, session_id: Optional[str] = None) -> Generator[Tuple[str, str, str, str], None, None]:
"""Main query processing function"""
if not query.strip():
yield "", "", "β Please enter a theme.", session_id
return
system = UnifiedLiterarySystem()
stages_markdown = ""
novel_content = ""
for status, stages, current_session_id in system.process_novel_stream(query, language, session_id):
stages_markdown = format_stages_display(stages)
# Get final novel content
if stages and all(s.get("status") == "complete" for s in stages[-10:]):
novel_content = NovelDatabase.get_writer_content(current_session_id)
novel_content = format_novel_display(novel_content)
yield stages_markdown, novel_content, status or "🔄 Processing...", current_session_id
def get_active_sessions(language: str) -> List[str]:
"""Get active session list"""
sessions = NovelDatabase.get_active_sessions()
return [f"{s['session_id'][:8]}... - {s['user_query'][:50]}... ({s['created_at']}) [{s['total_words']:,} words]"
for s in sessions]
def auto_recover_session(language: str) -> Tuple[Optional[str], str]:
"""Auto-recover recent session"""
sessions = NovelDatabase.get_active_sessions()
if sessions:
latest_session = sessions[0]
return latest_session['session_id'], f"Session {latest_session['session_id'][:8]}... recovered"
return None, "No session to recover."
def resume_session(session_id: str, language: str) -> Generator[Tuple[str, str, str, str], None, None]:
"""Resume session"""
if not session_id:
yield "", "", "β No session ID.", session_id
return
if "..." in session_id:
session_id = session_id.split("...")[0]
session = NovelDatabase.get_session(session_id)
if not session:
yield "", "", "β Session not found.", None
return
yield from process_query(session['user_query'], session['language'], session_id)
def download_novel(novel_text: str, format_type: str, language: str, session_id: str) -> Optional[str]:
"""Generate novel download file"""
if not novel_text or not session_id:
return None
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"novel_{session_id[:8]}_{timestamp}"
try:
if format_type == "DOCX" and DOCX_AVAILABLE:
return export_to_docx(novel_text, filename, language, session_id)
else:
return export_to_txt(novel_text, filename)
except Exception as e:
logger.error(f"File generation failed: {e}")
return None
def format_stages_display(stages: List[Dict]) -> str:
"""Stage progress display - For single writer system"""
markdown = "## π¬ Progress Status\n\n"
# Calculate total word count (writer stages only)
total_words = sum(s.get('word_count', 0) for s in stages
if s.get('name', '').startswith('✍️ Writer:') and 'Revision' in s.get('name', ''))
markdown += f"**Total Word Count: {total_words:,} / {TARGET_WORDS:,}**\n\n"
# Progress summary
completed_parts = sum(1 for s in stages
if 'Revision' in s.get('name', '') and s.get('status') == 'complete')
markdown += f"**Completed Parts: {completed_parts} / 10**\n\n"
# Average narrative momentum
momentum_scores = [s.get('momentum', 0) for s in stages if s.get('momentum', 0) > 0]
if momentum_scores:
avg_momentum = sum(momentum_scores) / len(momentum_scores)
markdown += f"**Average Narrative Momentum: {avg_momentum:.1f} / 10**\n\n"
markdown += "---\n\n"
# Display each stage
current_part = 0
for i, stage in enumerate(stages):
status_icon = "β
" if stage['status'] == 'complete' else "π" if stage['status'] == 'active' else "β³"
# Add part divider
if 'Part' in stage.get('name', '') and 'Critic' not in stage.get('name', ''):
part_match = re.search(r'Part (\d+)', stage['name'])
if part_match:
new_part = int(part_match.group(1))
if new_part != current_part:
current_part = new_part
markdown += f"\n### π Part {current_part}\n\n"
markdown += f"{status_icon} **{stage['name']}**"
if stage.get('word_count', 0) > 0:
markdown += f" ({stage['word_count']:,} words)"
if stage.get('momentum', 0) > 0:
markdown += f" [Momentum: {stage['momentum']:.1f}/10]"
markdown += "\n"
if stage['content'] and stage['status'] == 'complete':
# Adjust preview length by role
preview_length = 300 if 'writer' in stage.get('name', '').lower() else 200
preview = stage['content'][:preview_length] + "..." if len(stage['content']) > preview_length else stage['content']
markdown += f"> {preview}\n\n"
elif stage['status'] == 'active':
markdown += "> *Writing...*\n\n"
return markdown
def format_novel_display(novel_text: str) -> str:
"""Display novel content - Enhanced part separation"""
if not novel_text:
return "No completed content yet."
formatted = "# π Completed Novel\n\n"
# Display word count
word_count = len(novel_text.split())
formatted += f"**Total Length: {word_count:,} words (Target: {TARGET_WORDS:,} words)**\n\n"
# Achievement rate
achievement = (word_count / TARGET_WORDS) * 100
formatted += f"**Achievement Rate: {achievement:.1f}%**\n\n"
formatted += "---\n\n"
# Display each part separately
parts = novel_text.split('\n\n')
for i, part in enumerate(parts):
if part.strip():
# Add part title
if i < len(NARRATIVE_PHASES):
formatted += f"## {NARRATIVE_PHASES[i]}\n\n"
formatted += f"{part}\n\n"
# Part divider
if i < len(parts) - 1:
formatted += "---\n\n"
return formatted
def export_to_docx(content: str, filename: str, language: str, session_id: str) -> str:
"""Export to DOCX file - Korean standard book format"""
doc = Document()
# Korean standard book format (152mm x 225mm)
section = doc.sections[0]
section.page_height = Mm(225) # 225mm
section.page_width = Mm(152) # 152mm
section.top_margin = Mm(20) # Top margin 20mm
section.bottom_margin = Mm(20) # Bottom margin 20mm
section.left_margin = Mm(20) # Left margin 20mm
section.right_margin = Mm(20) # Right margin 20mm
# Generate title from session info
session = NovelDatabase.get_session(session_id)
# Title generation function
def generate_title(user_query: str, content_preview: str) -> str:
"""Generate title based on theme and content"""
# Simple rule-based title generation (could use LLM)
if len(user_query) < 20:
return user_query
else:
# Extract key keywords from theme
keywords = user_query.split()[:5]
return " ".join(keywords)
# Title page
title = generate_title(session["user_query"], content[:500]) if session else "Untitled"
# Title style settings
title_para = doc.add_paragraph()
title_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
title_para.paragraph_format.space_before = Pt(100)
title_run = title_para.add_run(title)
if language == "Korean":
title_run.font.name = 'Batang'
title_run._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
else:
title_run.font.name = 'Times New Roman'
title_run.font.size = Pt(20)
title_run.bold = True
# Page break
doc.add_page_break()
# Body style settings
style = doc.styles['Normal']
if language == "Korean":
style.font.name = 'Batang'
style._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
else:
style.font.name = 'Times New Roman'
style.font.size = Pt(10.5) # Standard size for novels
style.paragraph_format.line_spacing = 1.8 # 180% line spacing
style.paragraph_format.space_after = Pt(0)
style.paragraph_format.first_line_indent = Mm(10) # 10mm indentation
# Clean content - Extract pure text only
def clean_content(text: str) -> str:
"""Remove unnecessary markdown, part numbers, etc."""
# Remove part titles/numbers patterns
patterns_to_remove = [
r'^#{1,6}\s+.*', # Markdown headers
r'^\*\*.*\*\*', # Bold **text** lines
r'^Part\s*\d+.*', # "Part 1 ..." style headings
r'^\d+\.\s+.*:.*', # "1. Title: ..." style lines
r'^---+', # Horizontal rules
r'^\s*\[.*\]\s*', # Bracketed labels
]
lines = text.split('\n')
cleaned_lines = []
for line in lines:
# Keep empty lines
if not line.strip():
cleaned_lines.append('')
continue
# Remove unnecessary lines through pattern matching
skip_line = False
for pattern in patterns_to_remove:
if re.match(pattern, line.strip(), re.MULTILINE):
skip_line = True
break
if not skip_line:
# Remove markdown emphasis
cleaned_line = line
cleaned_line = re.sub(r'\*\*(.*?)\*\*', r'\1', cleaned_line) # **text** -> text
cleaned_line = re.sub(r'\*(.*?)\*', r'\1', cleaned_line) # *text* -> text
cleaned_line = re.sub(r'`(.*?)`', r'\1', cleaned_line) # `text` -> text
cleaned_lines.append(cleaned_line.strip())
# Remove consecutive empty lines (keep only 1)
final_lines = []
prev_empty = False
for line in cleaned_lines:
if not line:
if not prev_empty:
final_lines.append('')
prev_empty = True
else:
final_lines.append(line)
prev_empty = False
return '\n'.join(final_lines)
# Clean content
cleaned_content = clean_content(content)
# Add body text
paragraphs = cleaned_content.split('\n')
for para_text in paragraphs:
if para_text.strip():
para = doc.add_paragraph(para_text.strip())
# Reconfirm style (apply font)
for run in para.runs:
if language == "Korean":
run.font.name = 'Batang'
run._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
else:
run.font.name = 'Times New Roman'
else:
# Empty line for paragraph separation
doc.add_paragraph()
# Save file
filepath = f"{filename}.docx"
doc.save(filepath)
return filepath
def export_to_txt(content: str, filename: str) -> str:
"""Export to TXT file"""
filepath = f"{filename}.txt"
with open(filepath, 'w', encoding='utf-8') as f:
# Header
f.write("=" * 80 + "\n")
f.write(f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
f.write(f"Total word count: {len(content.split()):,} words\n")
f.write("=" * 80 + "\n\n")
# Body
f.write(content)
# Footer
f.write("\n\n" + "=" * 80 + "\n")
f.write("AI Literary Creation System v2.0\n")
f.write("=" * 80 + "\n")
return filepath
# CSS styles
custom_css = """
.gradio-container {
background: linear-gradient(135deg, #1a1a2e 0%, #16213e 50%, #0f3460 100%);
min-height: 100vh;
}
.main-header {
background-color: rgba(255, 255, 255, 0.05);
backdrop-filter: blur(20px);
padding: 40px;
border-radius: 20px;
margin-bottom: 30px;
text-align: center;
color: white;
border: 2px solid rgba(255, 255, 255, 0.1);
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.1);
}
.header-title {
font-size: 2.8em;
margin-bottom: 15px;
font-weight: 700;
}
.header-description {
font-size: 0.85em;
color: #d0d0d0;
line-height: 1.4;
margin-top: 20px;
text-align: left;
max-width: 900px;
margin-left: auto;
margin-right: auto;
}
.badges-container {
display: flex;
justify-content: center;
gap: 10px;
margin-top: 20px;
margin-bottom: 20px;
}
.progress-note {
background: linear-gradient(135deg, rgba(255, 107, 107, 0.1), rgba(255, 230, 109, 0.1));
border-left: 4px solid #ff6b6b;
padding: 20px;
margin: 25px auto;
border-radius: 10px;
color: #fff;
max-width: 800px;
font-weight: 500;
}
.warning-note {
background: rgba(255, 193, 7, 0.1);
border-left: 4px solid #ffc107;
padding: 15px;
margin: 20px auto;
border-radius: 8px;
color: #ffd700;
max-width: 800px;
font-size: 0.9em;
}
.input-section {
background-color: rgba(255, 255, 255, 0.08);
backdrop-filter: blur(15px);
padding: 25px;
border-radius: 15px;
margin-bottom: 25px;
border: 1px solid rgba(255, 255, 255, 0.1);
box-shadow: 0 4px 16px rgba(0, 0, 0, 0.1);
}
.session-section {
background-color: rgba(255, 255, 255, 0.06);
backdrop-filter: blur(10px);
padding: 20px;
border-radius: 12px;
margin-top: 25px;
color: white;
border: 1px solid rgba(255, 255, 255, 0.08);
}
#stages-display {
background-color: rgba(255, 255, 255, 0.97);
padding: 25px;
border-radius: 15px;
max-height: 650px;
overflow-y: auto;
box-shadow: 0 8px 24px rgba(0, 0, 0, 0.15);
color: #2c3e50;
}
#novel-output {
background-color: rgba(255, 255, 255, 0.97);
padding: 35px;
border-radius: 15px;
max-height: 750px;
overflow-y: auto;
box-shadow: 0 8px 24px rgba(0, 0, 0, 0.15);
color: #2c3e50;
line-height: 1.8;
}
.download-section {
background-color: rgba(255, 255, 255, 0.92);
padding: 20px;
border-radius: 12px;
margin-top: 25px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
}
/* Progress indicator improvements */
.progress-bar {
background-color: #e0e0e0;
height: 25px;
border-radius: 12px;
overflow: hidden;
margin: 15px 0;
box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.1);
}
.progress-fill {
background: linear-gradient(90deg, #4CAF50, #8BC34A);
height: 100%;
transition: width 0.5s ease;
box-shadow: 0 2px 8px rgba(76, 175, 80, 0.3);
}
/* Scrollbar styles */
::-webkit-scrollbar {
width: 10px;
}
::-webkit-scrollbar-track {
background: rgba(0, 0, 0, 0.1);
border-radius: 5px;
}
::-webkit-scrollbar-thumb {
background: rgba(0, 0, 0, 0.3);
border-radius: 5px;
}
::-webkit-scrollbar-thumb:hover {
background: rgba(0, 0, 0, 0.5);
}
/* Button hover effects */
.gr-button:hover {
transform: translateY(-2px);
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
transition: all 0.3s ease;
}
"""
def load_theme_data():
"""Load theme data from JSON file"""
json_path = Path("novel_themes.json")
if json_path.exists():
with open(json_path, 'r', encoding='utf-8') as f:
return json.load(f)
else:
# Fallback data if JSON file not found
return {
"core_themes": {
"digital_extinction": {
"weight": 0.5,
"compatible_elements": {
"characters": ["last_human"],
"philosophies": ["posthuman"]
}
}
},
"characters": {
"last_human": {
"variations": ["last person who dreams without ads"],
"traits": ["stubborn", "melancholic"],
"arc_potential": "preservation_vs_evolution"
}
},
"philosophies": {
"posthuman": {
"core_questions": ["What remains human when humanity is optional?"],
"manifestations": ["voluntary human extinction movements"]
}
},
"narrative_hooks": {
"identity_crisis": ["discovers their memories belong to a corporate subscription"]
},
"opening_sentences": {
"shocking": ["The notification read: 'Your humanity subscription expires in 24 hours.'"]
}
}
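# For reference, a minimal novel_themes.json mirroring the fallback structure
# above might look like the sketch below (illustrative only; the real file can
# hold many more entries per category, each core theme carrying a 'weight'
# consumed by weighted_random_choice):
#
# {
#   "core_themes":       {"digital_extinction": {"weight": 0.5, "compatible_elements": {"characters": ["last_human"], "philosophies": ["posthuman"]}}},
#   "characters":        {"last_human": {"variations": ["..."], "traits": ["..."], "arc_potential": "..."}},
#   "philosophies":      {"posthuman": {"core_questions": ["..."], "manifestations": ["..."]}},
#   "narrative_hooks":   {"identity_crisis": ["..."]},
#   "opening_sentences": {"shocking": ["..."]}
# }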
def weighted_random_choice(items_dict):
"""Select item based on weights"""
items = list(items_dict.keys())
weights = [items_dict[item].get('weight', 0.1) for item in items]
    # Implemented manually instead of using random.choices or numpy
total_weight = sum(weights)
r = random.uniform(0, total_weight)
upto = 0
for i, item in enumerate(items):
if upto + weights[i] >= r:
return item
upto += weights[i]
return items[-1]
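# A minimal sanity check for the cumulative-weight sampler above. The demo data
# is hypothetical and this helper is never called by the application; it simply
# shows that items are drawn in proportion to their 'weight' values.
def _demo_weighted_choice(trials: int = 10_000) -> Dict[str, int]:
    demo_items = {"common": {"weight": 0.8}, "rare": {"weight": 0.2}}
    counts: Dict[str, int] = defaultdict(int)
    for _ in range(trials):
        counts[weighted_random_choice(demo_items)] += 1
    return dict(counts)  # expect roughly an 80/20 split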
def translate_to_korean(text, category=None):
    """Translate English text to Korean"""
    # Translations by category
    translations = {
        # Opening sentences
        "The notification read: 'Your humanity subscription expires in 24 hours.'": "알림이 떴다: '당신의 인간성 구독이 24시간 후 만료됩니다.'",
        "I was the only one at the funeral who couldn't stream my grief.": "장례식에서 슬픔을 스트리밍할 수 없는 사람은 나뿐이었다.",
        "The day empathy became downloadable was the day I became obsolete.": "공감을 다운로드할 수 있게 된 날, 나는 구식이 되었다.",
        "My daughter asked me what dreams were, and I realized I'd forgotten.": "딸이 꿈이 뭐냐고 물었고, 나는 내가 잊었다는 걸 깨달았다.",
        "The silence lasted twelve seconds—a new world record.": "침묵은 12초간 지속됐다—새로운 세계 기록이었다.",
        # Characters
        "last person who dreams without ads": "광고 없이 꿈꾸는 마지막 사람",
        "final human with unmonetized thoughts": "수익화되지 않은 생각을 가진 마지막 인간",
        "excavator of deleted conversations": "삭제된 대화의 발굴자",
        "black market memory dealer": "기억 암시장 거래상",
        "guerrilla flavor bomber": "게릴라 맛 폭탄 테러리스트",
        "temporal audit specialist": "시간 감사 전문가",
        "organic emotion cultivator": "유기농 감정 재배자",
        "binary meditation teacher": "이진법 명상 교사",
        "extinct plant memory keeper": "멸종 식물 기억 관리인",
        "social distance calibrator": "사회적 거리 조정 기술자",
        "subconscious strip miner": "무의식 노천 채굴자",
        # Hooks
        "discovers their memories belong to a corporate subscription service": "자신의 기억이 기업 구독 서비스 소유임을 발견한다",
        "realizes they're the only person not running on autopilot": "자신만이 자동 조종 모드로 살지 않는다는 걸 깨닫는다",
        "finds out their personality is a discontinued model": "자신의 성격이 단종된 모델임을 알게 된다",
        # Philosophical questions
        "What remains human when humanity is optional?": "인간성이 선택사항일 때 무엇이 인간으로 남는가?",
        "Is consciousness a bug or a feature?": "의식은 버그인가 기능인가?",
        "Can nostalgia exist without mortality?": "죽음 없이 향수가 존재할 수 있는가?",
        # Settings
        "Library of Burned Websites": "불탄 웹사이트들의 도서관",
        "Museum of Extinct Emotions": "멸종된 감정들의 박물관",
        "Department of Mandatory Happiness": "의무 행복부",
        # General terms
        "preservation_vs_evolution": "보존 대 진화",
        "digital_extinction": "디지털 멸종",
        "sensory_revolution": "감각 혁명",
        "temporal_paradox": "시간의 역설",
        "emotional_economy": "감정 경제",
        "linguistic_apocalypse": "언어의 종말",
        "algorithmic_mysticism": "알고리즘 신비주의",
        "biological_nostalgia": "생물학적 향수",
        "social_physics": "사회 물리학",
        "reality_bureaucracy": "현실 관료제",
        "dream_industrialization": "꿈의 산업화"
    }
    return translations.get(text, text)
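# Usage note: lookups fall through unchanged for unknown strings, so callers can
# pass arbitrary text safely. Illustrative examples against the table above:
#
#   translate_to_korean("Department of Mandatory Happiness")  # -> "의무 행복부"
#   translate_to_korean("not in the table")                   # -> "not in the table"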
def generate_random_theme(language="English"):
"""Generate a coherent and natural novel theme using LLM"""
try:
        # Load theme data from the JSON file
        json_path = Path("novel_themes.json")
        if not json_path.exists():
            logger.warning("novel_themes.json not found, using built-in data")
            # Default fallback data
themes_data = {
"themes": ["digital extinction", "sensory revolution", "temporal paradox"],
"characters": ["memory trader", "time thief", "emotion farmer"],
"hooks": ["discovering hidden truth", "facing impossible choice", "breaking the system"],
"questions": ["What makes us human?", "Can memory define identity?", "Is free will an illusion?"]
}
else:
with open(json_path, 'r', encoding='utf-8') as f:
data = json.load(f)
themes_data = {
"themes": list(data.get('core_themes', {}).keys()),
"characters": [],
"hooks": [],
"questions": []
}
# Extract data from JSON
for char_data in data.get('characters', {}).values():
themes_data["characters"].extend(char_data.get('variations', []))
for hook_list in data.get('narrative_hooks', {}).values():
themes_data["hooks"].extend(hook_list)
for phil_data in data.get('philosophies', {}).values():
themes_data["questions"].extend(phil_data.get('core_questions', []))
# Random selection with better randomization
import secrets
theme = secrets.choice(themes_data["themes"])
character = secrets.choice(themes_data["characters"])
hook = secrets.choice(themes_data["hooks"])
question = secrets.choice(themes_data["questions"])
# Create a natural prompt for LLM to generate coherent theme
        if language == "Korean":
            prompt = f"""다음 요소들을 사용하여 자연스럽고 흥미로운 소설 주제를 생성하세요:
주제: {theme}
캐릭터: {character}
사건: {hook}
철학적 질문: {question}
요구사항:
1. 모든 요소가 유기적으로 연결된 하나의 통합된 주제
2. 구체적이고 독창적인 설정
3. 명확한 갈등과 긴장감
4. 현대적 관련성
5. 문학적 깊이
다음 형식으로 작성하세요:
- 제목: [매력적이고 암시적인 제목]
- 첫 문장: [독자를 즉시 사로잡는 강렬한 첫 문장]
- 주인공: [구체적인 상황과 특성을 가진 인물]
- 중심 갈등: [내적 갈등과 외적 갈등의 결합]
- 탐구 주제: [철학적 깊이를 가진 핵심 질문]"""
else:
prompt = f"""Generate a natural and compelling novel theme using these elements:
Theme: {theme}
Character: {character}
Event: {hook}
Philosophical Question: {question}
Requirements:
1. All elements organically connected into one unified theme
2. Specific and original setting
3. Clear conflict and tension
4. Contemporary relevance
5. Literary depth
Format as:
- Title: [Compelling and evocative title]
- Opening: [Powerful first sentence that immediately hooks readers]
- Protagonist: [Character with specific situation and traits]
- Central Conflict: [Combination of internal and external conflict]
- Core Exploration: [Philosophically deep central question]"""
# Use the UnifiedLiterarySystem's LLM to generate coherent theme
system = UnifiedLiterarySystem()
# Call LLM synchronously for theme generation
messages = [{"role": "user", "content": prompt}]
generated_theme = system.call_llm_sync(messages, "director", language)
# Add narrative structure (simplified and natural)
        if language == "Korean":
            generated_theme += f"""
**서사 구조:**
이 이야기는 {character}가 {hook.lower()}는 충격적 사건으로 시작됩니다.
점차 심화되는 갈등을 통해 {question.lower().rstrip('?')}라는 근본적 질문과 대면하게 되며,
궁극적으로 {theme.replace('_', ' ')}의 시대를 살아가는 현대인의 실존적 선택을 그립니다.
**톤과 스타일:**
현대 문학의 심리적 깊이와 철학적 통찰을 결합하여, 독자로 하여금
자신의 삶을 되돌아보게 만드는 성찰적 서사를 지향합니다."""
else:
generated_theme += f"""
**Narrative Arc:**
The story begins with {character} who {hook}, a shocking event that sets everything in motion.
Through deepening conflicts, they confront the fundamental question of {question.lower().rstrip('?')},
ultimately portraying the existential choices of modern humans living in an era of {theme.replace('_', ' ')}.
**Tone and Style:**
Combining the psychological depth and philosophical insights of contemporary literature,
aiming for a reflective narrative that makes readers examine their own lives."""
return generated_theme
except Exception as e:
logger.error(f"Theme generation error: {str(e)}")
# Fallback to simple pre-defined themes
fallback_themes = {
"Korean": [
"""**μ λͺ©:** λ§μ§λ§ μλ λ‘κ·Έ μΈκ°
**첫 λ¬Έμ₯:** "λ΄κ° λ§μ§λ§μΌλ‘ μ’
μ΄μ κΈμ μ΄ μ¬λμ΄ λ λ , μΈμμ 침묡νλ€."
**μ£ΌμΈκ³΅:** λμ§νΈνλ₯Ό κ±°λΆνκ³ μκΈ°λ‘λ§ μν΅νλ λ
Έλ
μ μκ°
**μ€μ¬ κ°λ±:** ν¨μ¨μ±κ³Ό μΈκ°μ± μ¬μ΄μμ μ νν΄μΌ νλ μ€μ‘΄μ λλ λ§
**νꡬ μ£Όμ :** κΈ°μ λ°μ μμμ μΈκ° κ³ μ μ κ°μΉλ 무μμΈκ°?""",
"""**μ λͺ©:** κΈ°μ΅ κ±°λμ
**첫 λ¬Έμ₯:** "μ€λ μμΉ¨, λλ 첫μ¬λμ κΈ°μ΅μ νκΈ°λ‘ κ²°μ νλ€."
**μ£ΌμΈκ³΅:** μκ³λ₯Ό μν΄ μμ€ν κΈ°μ΅μ νλ μ μ μμ κ°
**μ€μ¬ κ°λ±:** μμ‘΄κ³Ό μ μ²΄μ± λ³΄μ‘΄ μ¬μ΄μ μ ν
**νꡬ μ£Όμ :** κΈ°μ΅μ΄ κ±°λλλ μλ, μ°λ¦¬λ 무μμΌλ‘ μμ μ μ μνλκ°?"""
],
"English": [
"""**Title:** The Last Analog Human
**Opening:** "The day I became the last person to write on paper, the world fell silent."
**Protagonist:** An elderly writer who refuses digitalization and communicates only through handwriting
**Central Conflict:** Existential dilemma between efficiency and humanity
**Core Exploration:** What is uniquely human in the age of technological advancement?""",
"""**Title:** The Memory Exchange
**Opening:** "This morning, I decided to sell my first love's memory."
**Protagonist:** A young artist selling precious memories for survival
**Central Conflict:** Choice between survival and preserving identity
**Core Exploration:** In an era where memories are traded, what defines who we are?"""
]
}
import secrets
return secrets.choice(fallback_themes.get(language, fallback_themes["English"]))
# Update the handle_random_theme function in create_interface
def handle_random_theme(language):
"""Handle random theme generation with improved feedback"""
try:
# Generate theme using LLM for natural output
theme = generate_random_theme(language)
        logger.info("Generated theme successfully")
return theme
except Exception as e:
logger.error(f"Random theme generation failed: {str(e)}")
# Return a simple fallback theme
        if language == "Korean":
            return "기억을 잃어가는 노인과 AI 간병인의 특별한 우정"
else:
return "An unlikely friendship between an elderly person losing memories and their AI caregiver"
# Update the augment_query method to better handle generated themes
def augment_query(self, user_query: str, language: str) -> str:
"""Augment and clean user query"""
# Remove any formatting artifacts from random generation
if "**" in user_query or "##" in user_query:
# This is likely a generated theme with formatting
# Extract the essence without formatting
lines = user_query.split('\n')
cleaned_parts = []
for line in lines:
# Remove markdown formatting
line = line.replace('**', '').replace('##', '').strip()
if line and not line.startswith(('-', 'β’', '*')) and ':' not in line[:20]:
cleaned_parts.append(line)
if cleaned_parts:
user_query = ' '.join(cleaned_parts[:3]) # Use first few meaningful lines
# If query is too short, enhance it
if len(user_query.split()) < 15:
if language == "Korean":
            return f"{user_query}\n\n이 주제를 현대적 관점에서 재해석하여 인간 존재의 본질과 기술 시대의 딜레마를 탐구하는 8,000단어 분량의 철학적 중편소설을 작성하세요."
else:
return f"{user_query}\n\nReinterpret this theme from a contemporary perspective to explore the essence of human existence and dilemmas of the technological age in an 8,000-word philosophical novella."
return user_query
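# Illustrative call (hypothetical): this function is written as a method, so in
# the application it would be bound to UnifiedLiterarySystem; standalone it can
# be invoked with an explicit instance.
#
#   system = UnifiedLiterarySystem()
#   brief = augment_query(system, "A memory dealer's last trade", "English")
#   # Inputs under 15 words are expanded into a full 8,000-word novella brief.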
# Add method to UnifiedLiterarySystem class for better theme processing
def process_generated_theme(self, theme_text: str, language: str) -> str:
"""Process generated theme for novel writing"""
# Extract key elements from generated theme
theme_elements = {
"title": "",
"opening": "",
"protagonist": "",
"conflict": "",
"exploration": ""
}
lines = theme_text.split('\n')
current_key = None
for line in lines:
line = line.strip()
if not line:
continue
        # Detect section headers; map localized labels back to element keys
        label_map = {
            'title': 'title', 'opening': 'opening', 'protagonist': 'protagonist',
            'conflict': 'conflict', 'exploration': 'exploration',
            '제목': 'title', '첫 문장': 'opening', '주인공': 'protagonist',
            '갈등': 'conflict', '탐구': 'exploration'
        }
        matched = False
        for label, key in label_map.items():
            if label in line.lower() and ':' in line:
                current_key = key
                matched = True
                # Extract the content after the colon, if any
                content = line.split(':', 1)[1].strip()
                if content:
                    theme_elements[current_key] = content
                break
        if not matched and current_key and line:
            # Continue accumulating multi-line content for the current element
            theme_elements[current_key] = (theme_elements[current_key] + " " + line).strip()
# Construct a coherent theme summary
    if language == "Korean":
        summary = f"{theme_elements.get('title', '무제')}. "
        if theme_elements.get('opening'):
            summary += f"'{theme_elements['opening']}' "
        summary += f"{theme_elements.get('protagonist', '주인공')}의 이야기. "
        summary += f"{theme_elements.get('conflict', '')} "
        summary += f"{theme_elements.get('exploration', '')}"
else:
summary = f"{theme_elements.get('title', 'Untitled')}. "
if theme_elements.get('opening'):
summary += f"'{theme_elements['opening']}' "
summary += f"The story of {theme_elements.get('protagonist', 'a protagonist')}. "
summary += f"{theme_elements.get('conflict', '')} "
summary += f"{theme_elements.get('exploration', '')}"
return summary.strip()
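# Illustrative usage (hypothetical): condensing a generated theme block into a
# one-paragraph brief before handing it to the writing pipeline.
#
#   theme_block = generate_random_theme("English")
#   brief = process_generated_theme(system, theme_block, "English")
#   # e.g. "The Memory Exchange. 'This morning, ...' The story of a young artist..."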
# Create Gradio interface
def create_interface():
    with gr.Blocks(theme=gr.themes.Soft(), css=custom_css, title="AGI NOVEL Generator") as interface:
        gr.HTML("""
        <div class="progress-note">
        🎲 Novel Theme Random Generator: This system can generate approximately 170 quadrillion (1.7 × 10¹⁷) unique novel themes.
        Even writing 100 novels per day, it would take 4.6 million years to exhaust all combinations.
        Click the "Random" button to explore infinite creative possibilities!
        </div>
        <div class="warning-note">
        ⏱️ Note: Creating a complete novel takes approximately 20 minutes. If your web session disconnects, you can restore your work using the "Session Recovery" feature.
        </div>
        <div class="progress-note">
        🎯 Core Innovation: Not fragmented texts from multiple writers,
        but a genuine full-length novel written consistently by a single author from beginning to end.
        </div>
        """)
# State management
current_session_id = gr.State(None)
with gr.Row():
with gr.Column(scale=1):
with gr.Group(elem_classes=["input-section"]):
query_input = gr.Textbox(
label="Novel Theme",
placeholder="""Enter your novella theme.
Examples: Character transformation, relationship evolution, social conflict and personal choice...""",
lines=5
)
language_select = gr.Radio(
choices=["English", "Korean"],
value="English",
label="Language"
)
with gr.Row():
                        submit_btn = gr.Button("🚀 Start Writing", variant="primary", scale=2)
                        random_btn = gr.Button("🎲 Random", variant="secondary", scale=1)
                        clear_btn = gr.Button("🗑️ Clear", scale=1)
status_text = gr.Textbox(
label="Progress Status",
interactive=False,
                        value="🔄 Ready"
)
# Session management
with gr.Group(elem_classes=["session-section"]):
                    gr.Markdown("### 💾 Active Works")
session_dropdown = gr.Dropdown(
label="Saved Sessions",
choices=[],
interactive=True
)
with gr.Row():
                        refresh_btn = gr.Button("🔄 Refresh", scale=1)
                        resume_btn = gr.Button("▶️ Resume", variant="secondary", scale=1)
                        auto_recover_btn = gr.Button("♻️ Recover Recent Work", scale=1)
with gr.Column(scale=2):
                with gr.Tab("📝 Writing Process"):
stages_display = gr.Markdown(
value="Writing process will be displayed in real-time...",
elem_id="stages-display"
)
                with gr.Tab("📖 Completed Work"):
novel_output = gr.Markdown(
value="Completed novel will be displayed here...",
elem_id="novel-output"
)
with gr.Group(elem_classes=["download-section"]):
                    gr.Markdown("### 📥 Download Work")
with gr.Row():
format_select = gr.Radio(
choices=["DOCX", "TXT"],
value="DOCX" if DOCX_AVAILABLE else "TXT",
label="File Format"
)
                        download_btn = gr.Button("⬇️ Download", variant="secondary")
download_file = gr.File(
label="Download File",
visible=False
)
# Hidden state
novel_text_state = gr.State("")
# Examples
with gr.Row():
gr.Examples(
examples=[
["A daughter discovering her mother's hidden past through old letters"],
["An architect losing sight who learns to design through touch and sound"],
["A translator replaced by AI rediscovering the essence of language through classical literature transcription"],
["A middle-aged man who lost his job finding new meaning in rural life"],
["A doctor with war trauma healing through Doctors Without Borders"],
["Community solidarity to save a neighborhood bookstore from redevelopment"],
["A year with a professor losing memory and his last student"]
],
inputs=query_input,
                label="💡 Theme Examples"
)
# Event handlers
def refresh_sessions():
try:
sessions = get_active_sessions("English")
return gr.update(choices=sessions)
except Exception as e:
logger.error(f"Session refresh error: {str(e)}")
return gr.update(choices=[])
def handle_auto_recover(language):
session_id, message = auto_recover_session(language)
return session_id, message
        def handle_random_theme(language):
            """Handle random theme generation with language support"""
            import time
            import datetime
            # Brief pause as visual feedback that a new request has started
            time.sleep(0.05)
            # Log the current time to confirm this is genuinely a fresh call
            logger.info(f"Random theme requested at {datetime.datetime.now()}")
            theme = generate_random_theme(language)
            logger.info(f"Generated theme: {theme[:100]}...")  # log only the first 100 characters
            return theme
# Event connections
submit_btn.click(
fn=process_query,
inputs=[query_input, language_select, current_session_id],
outputs=[stages_display, novel_output, status_text, current_session_id]
)
novel_output.change(
fn=lambda x: x,
inputs=[novel_output],
outputs=[novel_text_state]
)
resume_btn.click(
fn=lambda x: x.split("...")[0] if x and "..." in x else x,
inputs=[session_dropdown],
outputs=[current_session_id]
).then(
fn=resume_session,
inputs=[current_session_id, language_select],
outputs=[stages_display, novel_output, status_text, current_session_id]
)
auto_recover_btn.click(
fn=handle_auto_recover,
inputs=[language_select],
outputs=[current_session_id, status_text]
).then(
fn=resume_session,
inputs=[current_session_id, language_select],
outputs=[stages_display, novel_output, status_text, current_session_id]
)
refresh_btn.click(
fn=refresh_sessions,
outputs=[session_dropdown]
)
clear_btn.click(
            fn=lambda: ("", "", "🔄 Ready", "", None),
outputs=[stages_display, novel_output, status_text, novel_text_state, current_session_id]
)
        # Wire the random_btn click event explicitly
        random_btn.click(
            fn=lambda lang: generate_random_theme(lang),  # call directly
            inputs=[language_select],
            outputs=[query_input],
            queue=False  # bypass the queue so the button responds immediately
        )
def handle_download(format_type, language, session_id, novel_text):
if not session_id or not novel_text:
return gr.update(visible=False)
file_path = download_novel(novel_text, format_type, language, session_id)
if file_path:
return gr.update(value=file_path, visible=True)
else:
return gr.update(visible=False)
download_btn.click(
fn=handle_download,
inputs=[format_select, language_select, current_session_id, novel_text_state],
outputs=[download_file]
)
# Load sessions on start
interface.load(
fn=refresh_sessions,
outputs=[session_dropdown]
)
return interface
# Main execution
if __name__ == "__main__":
logger.info("AGI NOVEL Generator v2.0 Starting...")
logger.info("=" * 60)
# Environment check
logger.info(f"API Endpoint: {API_URL}")
logger.info(f"Target Length: {TARGET_WORDS:,} words")
logger.info(f"Minimum Words per Part: {MIN_WORDS_PER_PART:,} words")
logger.info("System Features: Single writer + Immediate part-by-part critique")
if BRAVE_SEARCH_API_KEY:
logger.info("Web search enabled.")
else:
logger.warning("Web search disabled.")
if DOCX_AVAILABLE:
logger.info("DOCX export enabled.")
else:
logger.warning("DOCX export disabled.")
logger.info("=" * 60)
# Initialize database
logger.info("Initializing database...")
NovelDatabase.init_db()
logger.info("Database initialization complete.")
# Create and launch interface
interface = create_interface()
interface.launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
debug=True
)