vikramvasudevan committed on
Commit
a683f71
·
verified ·
1 Parent(s): 6ad8f62

Upload folder using huggingface_hub

Browse files
Files changed (5) hide show
  1. db.py +30 -15
  2. modules/audio/model.py +9 -0
  3. modules/audio/service.py +36 -3
  4. modules/dropbox/audio.py +109 -9
  5. server.py +72 -13
db.py CHANGED
@@ -180,7 +180,9 @@ class SanatanDatabase:
180
  """
181
  Fetch all matching verses from the collection with optional pagination,
182
  sorted by _global_index ascending.
 
183
  """
 
184
  def normalize_for_match(s: str) -> str:
185
  s = unicodedata.normalize("NFD", s)
186
  s = "".join(ch for ch in s if not unicodedata.combining(ch))
@@ -199,6 +201,10 @@ class SanatanDatabase:
199
  metadata_where_clause.to_chroma_where() if metadata_where_clause else None
200
  )
201
 
 
 
 
 
202
  # First, try strict filter
203
  data = collection.get(include=["metadatas", "documents"], where=where_clause)
204
 
@@ -212,7 +218,8 @@ class SanatanDatabase:
212
  regex_filters = [
213
  f
214
  for f in metadata_where_clause.filters
215
- if f.metadata_search_operator == "$eq" and isinstance(f.metadata_value, str)
 
216
  ]
217
 
218
  if regex_filters:
@@ -225,7 +232,9 @@ class SanatanDatabase:
225
  norm_val = normalize_for_match(field_val)
226
  norm_query = normalize_for_match(f.metadata_value)
227
 
228
- if not re.search(re.escape(norm_query), norm_val, flags=re.IGNORECASE):
 
 
229
  ok = False
230
  break
231
  if ok:
@@ -247,19 +256,25 @@ class SanatanDatabase:
247
 
248
  ids_sorted, documents_sorted, metadatas_sorted = zip(*combined)
249
 
250
- # Apply pagination
251
- start = (page - 1) * page_size
252
- end = start + page_size
253
-
254
- paged_data = {
255
- "ids": list(ids_sorted[start:end]),
256
- "documents": list(documents_sorted[start:end]),
257
- "metadatas": list(metadatas_sorted[start:end]),
258
- "total_matches": total_matches,
259
- }
260
-
261
- return paged_data
262
-
 
 
 
 
 
 
263
 
264
  def search(
265
  self,
 
180
  """
181
  Fetch all matching verses from the collection with optional pagination,
182
  sorted by _global_index ascending.
183
+ If page or page_size is None, return all results without pagination.
184
  """
185
+
186
  def normalize_for_match(s: str) -> str:
187
  s = unicodedata.normalize("NFD", s)
188
  s = "".join(ch for ch in s if not unicodedata.combining(ch))
 
201
  metadata_where_clause.to_chroma_where() if metadata_where_clause else None
202
  )
203
 
204
+ # If the conversion returns an empty dict, treat it as None
205
+ if isinstance(where_clause, dict) and not where_clause:
206
+ where_clause = None
207
+
208
  # First, try strict filter
209
  data = collection.get(include=["metadatas", "documents"], where=where_clause)
210
 
 
218
  regex_filters = [
219
  f
220
  for f in metadata_where_clause.filters
221
+ if f.metadata_search_operator == "$eq"
222
+ and isinstance(f.metadata_value, str)
223
  ]
224
 
225
  if regex_filters:
 
232
  norm_val = normalize_for_match(field_val)
233
  norm_query = normalize_for_match(f.metadata_value)
234
 
235
+ if not re.search(
236
+ re.escape(norm_query), norm_val, flags=re.IGNORECASE
237
+ ):
238
  ok = False
239
  break
240
  if ok:
 
256
 
257
  ids_sorted, documents_sorted, metadatas_sorted = zip(*combined)
258
 
259
+ # --- Apply pagination only if both page and page_size are not None ---
260
+ if page is not None and page_size is not None:
261
+ start = (page - 1) * page_size
262
+ end = start + page_size
263
+ paged_data = {
264
+ "ids": list(ids_sorted[start:end]),
265
+ "documents": list(documents_sorted[start:end]),
266
+ "metadatas": list(metadatas_sorted[start:end]),
267
+ "total_matches": total_matches,
268
+ }
269
+ return paged_data
270
+ else:
271
+ # Return all results
272
+ return {
273
+ "ids": list(ids_sorted),
274
+ "documents": list(documents_sorted),
275
+ "metadatas": list(metadatas_sorted),
276
+ "total_matches": total_matches,
277
+ }
278
 
279
  def search(
280
  self,
modules/audio/model.py CHANGED
@@ -1,5 +1,14 @@
 
1
  from pydantic import BaseModel
2
 
3
  class AudioRequest(BaseModel):
4
  scripture_name: str
5
  global_index: int
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
  from pydantic import BaseModel
3
 
4
  class AudioRequest(BaseModel):
5
  scripture_name: str
6
  global_index: int
7
+
8
+ class AudioType(str, Enum):
9
+ recitation = "recitation"
10
+ virutham = "virutham"
11
+ upanyasam = "upanyasam"
12
+ santhai = "santhai"
13
+ any = "any"
14
+ none = "none"
modules/audio/service.py CHANGED
@@ -1,8 +1,8 @@
1
- from modules.audio.model import AudioRequest
2
- from modules.dropbox.audio import get_audio_urls
3
  from config import SanatanConfig
4
  from db import SanatanDatabase
5
-
6
 
7
  async def svc_get_audio_urls(req: AudioRequest):
8
  config = SanatanConfig().get_scripture_by_name(req.scripture_name)
@@ -16,3 +16,36 @@ async def svc_get_audio_urls(req: AudioRequest):
16
  )
17
  urls = {"recitation": data.get("audio", "")}
18
  return urls
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from modules.audio.model import AudioRequest, AudioType
2
+ from modules.dropbox.audio import get_audio_urls, get_global_indices_with_audio
3
  from config import SanatanConfig
4
  from db import SanatanDatabase
5
+ from typing import List
6
 
7
  async def svc_get_audio_urls(req: AudioRequest):
8
  config = SanatanConfig().get_scripture_by_name(req.scripture_name)
 
16
  )
17
  urls = {"recitation": data.get("audio", "")}
18
  return urls
19
+
20
+
21
+ async def svc_get_indices_with_audio(scripture_name: str, audio_type: AudioType) -> List[int]:
22
+ """
23
+ Service function to get all global indices for a scripture
24
+ that have audio files of the specified type.
25
+
26
+ Args:
27
+ scripture_name: Name of the scripture.
28
+ audio_type: AudioType enum value.
29
+
30
+ Returns:
31
+ List[int]: Sorted list of global indices.
32
+ """
33
+ config = SanatanConfig().get_scripture_by_name(scripture_name)
34
+ audio_storage = config.get("audio_storage", "dropbox")
35
+
36
+ if audio_storage == "dropbox":
37
+ indices = await get_global_indices_with_audio(scripture_name, audio_type)
38
+ else:
39
+ # Fallback for database storage: iterate all documents and filter by audio_type
40
+ db = SanatanDatabase()
41
+ collection_name = config["collection_name"]
42
+ total = db.count(collection_name=collection_name)
43
+ all_docs = db.fetch_all_matches(collection_name, page_size=total)
44
+ indices = []
45
+ for doc in all_docs:
46
+ audio_field = doc.get("audio", "")
47
+ if audio_field.lower().startswith(audio_type.value):
48
+ indices.append(doc["_global_index"])
49
+ indices.sort()
50
+
51
+ return indices
modules/dropbox/audio.py CHANGED
@@ -4,14 +4,29 @@ from fastapi import HTTPException
4
  import dropbox
5
  from dropbox.files import FolderMetadata, FileMetadata
6
  from datetime import datetime, timedelta, timezone
7
- from modules.audio.model import AudioRequest
 
 
8
  import logging
9
  from modules.dropbox.client import dbx
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  logging.basicConfig()
12
  logger = logging.getLogger(__name__)
13
  logger.setLevel(logging.INFO)
14
 
 
15
  def list_dropbox_folder_hierarchy(dbx: dropbox.Dropbox, base_path: str = ""):
16
  """
17
  Recursively fetches the folder/file hierarchy from Dropbox starting at base_path.
@@ -62,15 +77,12 @@ def list_dropbox_folder_hierarchy(dbx: dropbox.Dropbox, base_path: str = ""):
62
 
63
  return hierarchy
64
 
 
65
  # cache = {(scripture_name, global_index, type): {"url": ..., "expiry": ...}}
66
  audio_cache: dict[tuple[str, int, str], dict] = {}
67
  CACHE_TTL = timedelta(hours=3, minutes=30) # refresh before 4h expiry
68
 
69
 
70
- from dropbox.files import FileMetadata
71
- from datetime import datetime, timezone
72
- from fastapi import HTTPException
73
-
74
  async def get_audio_urls(req: AudioRequest):
75
  base_path = f"/{req.scripture_name}/audio"
76
  prefix = f"{req.global_index}-"
@@ -93,7 +105,8 @@ async def get_audio_urls(req: AudioRequest):
93
 
94
  # Filter files matching the prefix
95
  matching_files = [
96
- entry for entry in entries
 
97
  if isinstance(entry, FileMetadata) and entry.name.startswith(prefix)
98
  ]
99
 
@@ -102,7 +115,7 @@ async def get_audio_urls(req: AudioRequest):
102
 
103
  for entry in matching_files:
104
  filename = entry.name
105
- file_type = filename[len(prefix):].rsplit(".", 1)[0]
106
 
107
  cache_key = (req.scripture_name, req.global_index, file_type)
108
 
@@ -134,10 +147,97 @@ async def cleanup_audio_url_cache(interval_seconds: int = 600):
134
  print(f"Cleaned up {len(expired_keys)} expired cache entries")
135
  await asyncio.sleep(interval_seconds)
136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  if __name__ == "__main__":
138
  # Create Dropbox client with your access token
139
  # data = list_dropbox_folder_hierarchy(dbx, "")
 
 
 
140
  data = asyncio.run(
141
- get_audio_urls(AudioRequest(scripture_name="divya_prabandham", global_index=0))
 
 
142
  )
143
- print(json.dumps(data, indent=2))
 
 
4
  import dropbox
5
  from dropbox.files import FolderMetadata, FileMetadata
6
  from datetime import datetime, timedelta, timezone
7
+ from config import SanatanConfig
8
+ from db import SanatanDatabase
9
+ from modules.audio.model import AudioRequest, AudioType
10
  import logging
11
  from modules.dropbox.client import dbx
12
+ from fastapi import HTTPException
13
+ from enum import Enum
14
+ import dropbox
15
+ from dropbox.files import FileMetadata
16
+ from dropbox.files import FileMetadata
17
+ from datetime import datetime, timezone
18
+ from fastapi import HTTPException
19
+ from typing import List, Set
20
+ from datetime import datetime, timezone, timedelta
21
+ from fastapi import HTTPException
22
+ import dropbox
23
+ from dropbox.files import FileMetadata
24
 
25
  logging.basicConfig()
26
  logger = logging.getLogger(__name__)
27
  logger.setLevel(logging.INFO)
28
 
29
+
30
  def list_dropbox_folder_hierarchy(dbx: dropbox.Dropbox, base_path: str = ""):
31
  """
32
  Recursively fetches the folder/file hierarchy from Dropbox starting at base_path.
 
77
 
78
  return hierarchy
79
 
80
+
81
  # cache = {(scripture_name, global_index, type): {"url": ..., "expiry": ...}}
82
  audio_cache: dict[tuple[str, int, str], dict] = {}
83
  CACHE_TTL = timedelta(hours=3, minutes=30) # refresh before 4h expiry
84
 
85
 
 
 
 
 
86
  async def get_audio_urls(req: AudioRequest):
87
  base_path = f"/{req.scripture_name}/audio"
88
  prefix = f"{req.global_index}-"
 
105
 
106
  # Filter files matching the prefix
107
  matching_files = [
108
+ entry
109
+ for entry in entries
110
  if isinstance(entry, FileMetadata) and entry.name.startswith(prefix)
111
  ]
112
 
 
115
 
116
  for entry in matching_files:
117
  filename = entry.name
118
+ file_type = filename[len(prefix) :].rsplit(".", 1)[0]
119
 
120
  cache_key = (req.scripture_name, req.global_index, file_type)
121
 
 
147
  print(f"Cleaned up {len(expired_keys)} expired cache entries")
148
  await asyncio.sleep(interval_seconds)
149
 
150
+
151
+ from datetime import datetime, timezone, timedelta
152
+
153
+ # Simple in-memory cache
154
+ _audio_indices_cache: dict[tuple[str, str], dict] = {}
155
+ CACHE_TTL_2 = timedelta(minutes=10)
156
+
157
+ async def get_global_indices_with_audio(scripture_name: str, audio_type: AudioType):
158
+ """
159
+ Returns a sorted list of global indices for a given scripture that have audio of the specified type.
160
+ Supports AudioType.any, AudioType.none, and specific types.
161
+ Uses in-memory caching for repeated calls.
162
+ """
163
+ now = datetime.now(timezone.utc)
164
+ cache_key = (scripture_name, audio_type.value)
165
+
166
+ # Check cache
167
+ cached = _audio_indices_cache.get(cache_key)
168
+ if cached and cached["expiry"] > now:
169
+ return cached["indices"]
170
+
171
+ # Step 1: list all files in Dropbox folder
172
+ base_path = f"/{scripture_name}/audio"
173
+ entries = []
174
+
175
+ try:
176
+ result = dbx.files_list_folder(base_path)
177
+ entries.extend(result.entries)
178
+ while result.has_more:
179
+ result = dbx.files_list_folder_continue(result.cursor)
180
+ entries.extend(result.entries)
181
+ except dropbox.exceptions.ApiError:
182
+ raise HTTPException(status_code=404, detail="Audio directory not found")
183
+
184
+ # Step 2: collect all global indices with any audio
185
+ all_indices_with_audio = set()
186
+ for entry in entries:
187
+ if not isinstance(entry, FileMetadata) or "-" not in entry.name:
188
+ continue
189
+ global_index_str, _ = entry.name.split("-", 1)
190
+ try:
191
+ global_index = int(global_index_str)
192
+ except ValueError:
193
+ continue
194
+ all_indices_with_audio.add(global_index)
195
+
196
+ # Step 3: filter based on audio_type
197
+ if audio_type == AudioType.none:
198
+ db = SanatanDatabase()
199
+ config = SanatanConfig()
200
+ total_verses = db.count(
201
+ collection_name=config.get_collection_name(scripture_name=scripture_name)
202
+ )
203
+ indices = set(range(1, total_verses + 1)) - all_indices_with_audio
204
+ elif audio_type == AudioType.any:
205
+ indices = all_indices_with_audio
206
+ else:
207
+ indices = set()
208
+ for entry in entries:
209
+ if not isinstance(entry, FileMetadata) or "-" not in entry.name:
210
+ continue
211
+ global_index_str, rest = entry.name.split("-", 1)
212
+ try:
213
+ global_index = int(global_index_str)
214
+ except ValueError:
215
+ continue
216
+ file_type = rest.rsplit(".", 1)[0].strip().lower()
217
+ if file_type.startswith(audio_type.value):
218
+ indices.add(global_index)
219
+
220
+ # Cache the result
221
+ _audio_indices_cache[cache_key] = {
222
+ "indices": sorted(indices),
223
+ "expiry": now + CACHE_TTL_2
224
+ }
225
+
226
+ return sorted(indices)
227
+
228
+
229
+
230
+
231
  if __name__ == "__main__":
232
  # Create Dropbox client with your access token
233
  # data = list_dropbox_folder_hierarchy(dbx, "")
234
+ # data = asyncio.run(
235
+ # get_audio_urls(AudioRequest(scripture_name="divya_prabandham", global_index=0))
236
+ # )
237
  data = asyncio.run(
238
+ get_global_indices_with_audio(
239
+ scripture_name="divya_prabandham", audio_type=AudioType.upanyasam
240
+ )
241
  )
242
+ # print(json.dumps(data, indent=2))
243
+ print(len(data))
server.py CHANGED
@@ -12,8 +12,8 @@ from chat_utils import chat
12
  from config import SanatanConfig
13
  from db import SanatanDatabase
14
  from metadata import MetadataWhereClause
15
- from modules.audio.model import AudioRequest
16
- from modules.audio.service import svc_get_audio_urls
17
  from modules.config.categories import get_scripture_categories
18
  from modules.quiz.answer_validator import validate_answer
19
  from modules.quiz.models import Question
@@ -336,15 +336,17 @@ async def search_scripture_find_first_match(
336
  logger.error("Error while searching %s", e, exc_info=True)
337
  return {"error": str(e)}
338
 
 
339
  class ScriptureMultiSearchRequest(BaseModel):
340
  filter_obj: Optional[MetadataWhereClause] = None
341
  page: int = 1
342
  page_size: int = 20
 
 
343
 
344
  @router.post("/scripture/{scripture_name}/search/all")
345
  async def search_scripture_find_all_matches(
346
- scripture_name: str,
347
- req: ScriptureMultiSearchRequest
348
  ):
349
  """
350
  Search scripture collection and return all matching results with pagination.
@@ -352,17 +354,20 @@ async def search_scripture_find_all_matches(
352
  - `filter_obj`: MetadataWhereClause (filters, groups, operator)
353
  - `page`: 1-based page number
354
  - `page_size`: Number of results per page
 
355
  """
356
  filter_obj = req.filter_obj
357
  page = req.page
358
  page_size = req.page_size
 
359
  try:
360
  logger.info(
361
- "search_scripture_find_all_matches: searching for %s with filters %s | page=%s, page_size=%s",
362
  scripture_name,
363
  filter_obj,
364
  page,
365
  page_size,
 
366
  )
367
 
368
  db = SanatanDatabase()
@@ -373,38 +378,92 @@ async def search_scripture_find_all_matches(
373
  if not config:
374
  return {"error": f"Scripture '{scripture_name}' not found"}
375
 
 
376
  results = db.fetch_all_matches(
377
  collection_name=config["collection_name"],
378
  metadata_where_clause=filter_obj,
379
- page=page,
380
- page_size=page_size,
381
  )
382
 
383
- # Flatten + canonicalize results
384
  formatted_results = []
 
385
  for i in range(len(results["metadatas"])):
386
  doc_id = results["ids"][i]
387
  metadata_doc = results["metadatas"][i]
388
  metadata_doc["id"] = doc_id
389
 
390
- document_text = results["documents"][i] if results.get("documents") else None
391
-
 
392
  canonical_doc = SanatanConfig().canonicalize_document(
393
  scripture_name, document_text, metadata_doc
394
  )
395
  formatted_results.append(canonical_doc)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
396
 
397
  return {
398
- "results": formatted_results,
399
- "total_matches": results.get("total_matches", 0),
400
  "page": page,
401
  "page_size": page_size,
402
  }
403
-
404
  except Exception as e:
405
  logger.error("Error while searching %s", e, exc_info=True)
406
  return {"error": str(e)}
407
 
 
408
  @router.post("/audio")
409
  async def generate_audio_urls(req: AudioRequest):
410
  logger.info("generate_audio_urls: %s", req)
 
12
  from config import SanatanConfig
13
  from db import SanatanDatabase
14
  from metadata import MetadataWhereClause
15
+ from modules.audio.model import AudioRequest, AudioType
16
+ from modules.audio.service import svc_get_audio_urls, svc_get_indices_with_audio
17
  from modules.config.categories import get_scripture_categories
18
  from modules.quiz.answer_validator import validate_answer
19
  from modules.quiz.models import Question
 
336
  logger.error("Error while searching %s", e, exc_info=True)
337
  return {"error": str(e)}
338
 
339
+
340
  class ScriptureMultiSearchRequest(BaseModel):
341
  filter_obj: Optional[MetadataWhereClause] = None
342
  page: int = 1
343
  page_size: int = 20
344
+ has_audio: Optional[AudioType] = None # new optional field
345
+
346
 
347
  @router.post("/scripture/{scripture_name}/search/all")
348
  async def search_scripture_find_all_matches(
349
+ scripture_name: str, req: ScriptureMultiSearchRequest
 
350
  ):
351
  """
352
  Search scripture collection and return all matching results with pagination.
 
354
  - `filter_obj`: MetadataWhereClause (filters, groups, operator)
355
  - `page`: 1-based page number
356
  - `page_size`: Number of results per page
357
+ - `has_audio` : optional. can take values any|none|recitation|virutham|upanyasam
358
  """
359
  filter_obj = req.filter_obj
360
  page = req.page
361
  page_size = req.page_size
362
+ has_audio = req.has_audio
363
  try:
364
  logger.info(
365
+ "search_scripture_find_all_matches: searching for %s with filters %s | page=%s, page_size=%s, has_audio=%s",
366
  scripture_name,
367
  filter_obj,
368
  page,
369
  page_size,
370
+ has_audio
371
  )
372
 
373
  db = SanatanDatabase()
 
378
  if not config:
379
  return {"error": f"Scripture '{scripture_name}' not found"}
380
 
381
+ # 1️⃣ Fetch all matching metadata WITHOUT pagination yet
382
  results = db.fetch_all_matches(
383
  collection_name=config["collection_name"],
384
  metadata_where_clause=filter_obj,
385
+ page=None, # Fetch all to apply audio filter
386
+ page_size=None,
387
  )
388
 
 
389
  formatted_results = []
390
+ all_indices = [] # Keep track of all _global_index
391
  for i in range(len(results["metadatas"])):
392
  doc_id = results["ids"][i]
393
  metadata_doc = results["metadatas"][i]
394
  metadata_doc["id"] = doc_id
395
 
396
+ document_text = (
397
+ results["documents"][i] if results.get("documents") else None
398
+ )
399
  canonical_doc = SanatanConfig().canonicalize_document(
400
  scripture_name, document_text, metadata_doc
401
  )
402
  formatted_results.append(canonical_doc)
403
+ all_indices.append(canonical_doc["_global_index"])
404
+
405
+ # 2️⃣ Apply has_audio filter
406
+ if has_audio:
407
+ if has_audio == AudioType.none:
408
+ # Fetch all indices that have any audio type
409
+ all_audio_indices = set()
410
+ for atype in [
411
+ AudioType.recitation,
412
+ AudioType.virutham,
413
+ AudioType.upanyasam,
414
+ AudioType.santhai,
415
+ ]:
416
+ indices = await svc_get_indices_with_audio(scripture_name, atype)
417
+ all_audio_indices.update(indices)
418
+
419
+ # Keep only indices that are NOT in all_audio_indices
420
+ formatted_results = [
421
+ r
422
+ for r in formatted_results
423
+ if r["_global_index"] not in all_audio_indices
424
+ ]
425
+ else:
426
+ if has_audio == AudioType.any:
427
+ # Combine indices for all audio types
428
+ audio_indices = set()
429
+ for atype in [
430
+ AudioType.recitation,
431
+ AudioType.virutham,
432
+ AudioType.upanyasam,
433
+ AudioType.santhai,
434
+ ]:
435
+ indices = await svc_get_indices_with_audio(
436
+ scripture_name, atype
437
+ )
438
+ audio_indices.update(indices)
439
+ else:
440
+ audio_indices = set(
441
+ await svc_get_indices_with_audio(scripture_name, has_audio)
442
+ )
443
+
444
+ # Keep only indices that match
445
+ formatted_results = [
446
+ r for r in formatted_results if r["_global_index"] in audio_indices
447
+ ]
448
+
449
+ # 3️⃣ Apply pagination on filtered results
450
+ total_matches = len(formatted_results)
451
+ start_idx = (page - 1) * page_size
452
+ end_idx = start_idx + page_size
453
+ paginated_results = formatted_results[start_idx:end_idx]
454
 
455
  return {
456
+ "results": paginated_results,
457
+ "total_matches": total_matches,
458
  "page": page,
459
  "page_size": page_size,
460
  }
461
+
462
  except Exception as e:
463
  logger.error("Error while searching %s", e, exc_info=True)
464
  return {"error": str(e)}
465
 
466
+
467
  @router.post("/audio")
468
  async def generate_audio_urls(req: AudioRequest):
469
  logger.info("generate_audio_urls: %s", req)