moza2025 committed · verified
Commit 67f6f5c · 1 Parent(s): 3a11dd7

Create TabsTask.py

Files changed (1)
  1. TabsTask.py +109 -0
TabsTask.py ADDED
@@ -0,0 +1,109 @@
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import gtts

# === Load Pipeline ===
sentiment_pipeline = pipeline("sentiment-analysis", model="distilbert/distilbert-base-uncased-finetuned-sst-2-english")

# === Sentiment Analysis ===
def analyze_sentiment(text):
    result = sentiment_pipeline(text)[0]
    return f"{result['label']} (Score: {result['score']:.2f})"

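# Illustrative usage (hypothetical output, shown only to document the return format;
# the exact label and score depend on the model):
# >>> analyze_sentiment("I really enjoyed this movie!")
# 'POSITIVE (Score: 1.00)'
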
# Initialize the summarization pipeline, tokenizer, and model
model_name = "sshleifer/distilbart-cnn-12-6"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
summary_pipe = pipeline("summarization", model=model, tokenizer=tokenizer)

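# Note: summary_pipe offers a simple one-call interface, but summarize_text below
# drives the model directly via generate() so that inputs longer than the model's
# context window can first be split into overlapping chunks.
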
# Function to chunk text with an overlap
def chunk_text_with_overlap(tokens, max_length, overlap):
    chunks = []
    for i in range(0, len(tokens), max_length - overlap):
        chunk = tokens[i:i + max_length]
        chunks.append(chunk)
    return chunks

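# Worked example (assuming max_length=1024 and overlap=50, the values used below):
# a 2000-token input yields chunk start offsets 0, 974, and 1948, i.e. three chunks
# of 1024, 1024, and 52 tokens, with consecutive chunks sharing 50 tokens of context.
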
# === Summarization ===
def summarize_text(text):

    # Get the maximum length from the model configuration
    max_length = model.config.max_position_embeddings
    print('max_length:', max_length)

    # Define the overlap
    overlap = 50  # Adjust overlap as needed

    # Tokenize the text
    tokens = tokenizer(text, return_tensors='pt', truncation=False)['input_ids'][0]

    # Chunk the tokens with overlap
    chunks = chunk_text_with_overlap(tokens, max_length, overlap)

    # Summarize each chunk
    summaries = []
    for chunk in chunks:
        input_ids = chunk.unsqueeze(0)  # Add batch dimension
        summary_ids = model.generate(input_ids, max_length=max_length, num_beams=4, length_penalty=2.0, early_stopping=True)
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        summaries.append(summary)

    # Combine the summaries into a final summary
    final_summary = ' '.join(summaries)

    return final_summary

# === Text-to-Speech ===
def text_to_speech(text):
    tts = gtts.gTTS(text)
    tts.save("output.mp3")
    return "output.mp3"

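# text_to_speech returns the path of the saved MP3 ("output.mp3"); the Audio component
# in the interface below is therefore configured with type="filepath" so Gradio can
# play the file back directly.
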
chat_pipeline = pipeline("text-generation", model="yasserrmd/Human-Like-Qwen2.5-1.5B-Instruct")

# === Chatbot ===
def chatbot(message, chat_history):
    # Generate response; return_full_text=False keeps the prompt out of the output
    result = chat_pipeline(message, max_new_tokens=256, return_full_text=False)

    # Extract only the reply
    bot_reply = result[0]["generated_text"]

    return bot_reply

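# Illustrative exchange (output is hypothetical; the actual text depends on the model
# and its sampling settings):
# >>> chatbot("What is the capital of France?", [])
# 'The capital of France is Paris.'
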
# === Build Gradio Interface ===
with gr.Blocks() as demo:
    with gr.Tab("Sentiment Analysis"):
        gr.Markdown("### 🔍 Sentiment Analysis")
        sentiment_input = gr.Textbox(label="Enter text", lines=3, placeholder="Type a sentence to analyze...")
        sentiment_button = gr.Button("Analyze")
        sentiment_output = gr.Textbox(label="Sentiment")
        sentiment_button.click(analyze_sentiment, inputs=sentiment_input, outputs=sentiment_output)

    with gr.Tab("Summarization"):
        gr.Markdown("### ✂️ Summarization")
        summary_input = gr.Textbox(label="Enter text", lines=8, placeholder="Paste long text here...")
        summary_button = gr.Button("Summarize")
        summary_output = gr.Textbox(label="Summary")
        summary_button.click(summarize_text, inputs=summary_input, outputs=summary_output)

    with gr.Tab("Text-to-Speech"):
        gr.Markdown("### 🗣️ Text-to-Speech (using gTTS)")
        tts_input = gr.Textbox(label="Enter text to speak", lines=3, placeholder="Try something like: 'Hello, how are you?'")
        tts_button = gr.Button("Generate Speech")
        tts_output = gr.Audio(label="Generated Speech", type="filepath")
        tts_button.click(text_to_speech, inputs=tts_input, outputs=tts_output)

    with gr.Tab("Chatbot"):
        gr.ChatInterface(
            chatbot,
            type="messages",
            title="Chatbot",
            description="Start the conversation by sending text messages or ask questions!",
        )

demo.launch()