import random
from typing import Literal
from config import SanatanConfig
from modules.quiz.models import Question
from sanatan_assistant import query, allowedCollections
from openai import OpenAI
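
# Assumes OPENAI_API_KEY is set in the environment; OpenAI() reads it by default.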
client = OpenAI()


def generate_question(
    collection: allowedCollections,
    complexity: Literal["beginner", "intermediate", "advanced"],
    mode: Literal["mcq", "open"],
    preferred_language: str = "English",
) -> Question:
    """
    Fetch a random scripture record and have the LLM generate a structured Question.
    """
    print("Generating question ...", collection, complexity, mode, preferred_language)

    # 1. Fetch a random scripture record
    context = query(
        collection_name=collection,
        query=None,
        metadata_where_clause=None,
        n_results=1,
        search_type="random",
    )
    if not context:
        raise ValueError(f"No records found in collection {collection}")

    # 2. Prompt (grounded in the record only)
    prompt = f"""
You are a quiz generator. Use ONLY the following scripture record to create a question.
Context from {collection}:
{context}
Rules:
- Do not invent facts beyond the context.
- Difficulty level: {complexity}
- Mode: {mode}
- If mode is 'mcq', generate 3–4 plausible choices (with one correct).
- If mode is 'open', leave 'choices' empty and provide a reference answer.
- Provide all fields in JSON. The `native_lyrics` field MUST always be populated from the context, even if short. Do not omit it.
- Ensure all text fields, including 'native_lyrics', are valid JSON strings.
- Escape all newlines as \\n. Do not omit any part of the verse.
- The user's preferred language is {preferred_language}. Translate everything except the native verses to this language.
"""

    # 3. Structured response parsed against the Question Pydantic model.
    # Note: message.parsed can be None if the model refuses the request.
    response = client.chat.completions.parse(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
        response_format=Question,
    )
    # print(response)
    return response.choices[0].message.parsed
# Example usage
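# Running this module directly generates three sample questions from randomly chosen
# collections (excluding yt_metadata), with random difficulty/mode and Tamil as the
# preferred language.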
if __name__ == "__main__":
    for i in range(3):
        q = generate_question(
            collection=random.choice(
                [
                    s["collection_name"]
                    for s in SanatanConfig.scriptures
                    if s["collection_name"] != "yt_metadata"
                ]
            ),
            complexity=random.choice(["beginner", "intermediate", "advanced"]),
            mode=random.choice(["mcq", "open"]),
            preferred_language="Tamil",
        )
        print(q.model_dump_json(indent=1))
        print("_______________________")