from urlextract import URLExtract
from wordcloud import WordCloud
import pandas as pd
from collections import Counter
import emoji
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk

# one-time download of the VADER lexicon used by SentimentIntensityAnalyzer
nltk.download('vader_lexicon')

extract = URLExtract()
sentiment_analyzer = SentimentIntensityAnalyzer()


def fetch_stats(selected_user, df):
    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    # number of messages
    num_messages = df.shape[0]

    # total number of words
    words = []
    for message in df['message']:
        words.extend(message.split())

    # number of media messages (WhatsApp exports write these as '<Media omitted>')
    num_media_messages = df[df['message'] == '<Media omitted>\n'].shape[0]

    # number of links shared
    links = []
    for message in df['message']:
        links.extend(extract.find_urls(message))

    return num_messages, len(words), num_media_messages, len(links)


def most_busy_users(df):
    x = df['user'].value_counts().head()
    # the 'index'/'user' column names below assume pandas < 2.0;
    # pandas 2.x names the reset columns 'user'/'count' instead
    df = round((df['user'].value_counts() / df.shape[0]) * 100, 2).reset_index().rename(
        columns={'index': 'name', 'user': 'percent'})
    return x, df


def create_wordcloud(selected_user, df):
    with open('stop_hinglish.txt', 'r') as f:
        stop_words = f.read().splitlines()

    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    temp = df[df['user'] != 'group_notification']
    temp = temp[temp['message'] != '<Media omitted>\n'].copy()

    def remove_stop_words(message):
        y = [word for word in message.lower().split() if word not in stop_words]
        return " ".join(y)

    wc = WordCloud(width=500, height=500, min_font_size=10, background_color='white')
    temp['message'] = temp['message'].apply(remove_stop_words)
    df_wc = wc.generate(temp['message'].str.cat(sep=" "))
    return df_wc


def most_common_words(selected_user, df):
    with open('stop_hinglish.txt', 'r') as f:
        stop_words = f.read().splitlines()

    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    temp = df[df['user'] != 'group_notification']
    temp = temp[temp['message'] != '<Media omitted>\n']

    words = [word for message in temp['message']
             for word in message.lower().split()
             if word not in stop_words]

    most_common_df = pd.DataFrame(Counter(words).most_common(20))
    return most_common_df


def emoji_helper(selected_user, df):
    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    # emoji.is_emoji requires emoji >= 2.0; older releases exposed
    # emoji.UNICODE_EMOJI['en'], which was removed in 2.0
    emojis = [c for message in df['message'] for c in message if emoji.is_emoji(c)]

    emoji_df = pd.DataFrame(Counter(emojis).most_common(len(Counter(emojis))))
    return emoji_df


def monthly_timeline(selected_user, df):
    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    timeline = df.groupby(['year', 'month_num', 'month']).count()['message'].reset_index()
    timeline['time'] = timeline['month'] + "-" + timeline['year'].astype(str)
    return timeline


def daily_timeline(selected_user, df):
    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    daily_timeline = df.groupby('only_date').count()['message'].reset_index()
    return daily_timeline


def week_activity_map(selected_user, df):
    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    return df['day_name'].value_counts()


def month_activity_map(selected_user, df):
    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    return df['month'].value_counts()


def activity_heatmap(selected_user, df):
    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    user_heatmap = df.pivot_table(index='day_name', columns='period',
                                  values='message', aggfunc='count').fillna(0)
    return user_heatmap


def sentiment_analysis(selected_user, df):
    if selected_user != 'Overall':
        df = df[df['user'] == selected_user]

    # work on a copy so the caller's frame is not mutated by the new columns
    df = df.copy()
    df['sentiment'] = df['message'].apply(lambda x: sentiment_analyzer.polarity_scores(x))
    df['sentiment_score'] = df['sentiment'].apply(lambda x: x['compound'])
    df['sentiment_label'] = df['sentiment_score'].apply(
        lambda x: 'Positive' if x > 0 else ('Negative' if x < 0 else 'Neutral'))

    sentiment_df = df.groupby('sentiment_label').count()['message'].reset_index()
    return sentiment_df
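

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal, hypothetical driver for the helpers above. The column names
# (user, message, year, month_num, month, only_date, day_name, period) are the
# ones these functions expect; the sample rows themselves are made up, and the
# wordcloud/common-words helpers additionally need a stop_hinglish.txt on disk.
if __name__ == '__main__':
    sample = pd.DataFrame({
        'user': ['alice', 'bob', 'alice'],
        'message': ['hello world https://example.com', 'great 😀', '<Media omitted>\n'],
        'year': [2023, 2023, 2023],
        'month_num': [1, 1, 2],
        'month': ['January', 'January', 'February'],
        'only_date': pd.to_datetime(['2023-01-05', '2023-01-06', '2023-02-01']).date,
        'day_name': ['Thursday', 'Friday', 'Wednesday'],
        'period': ['10-11', '11-12', '9-10'],
    })

    n_messages, n_words, n_media, n_links = fetch_stats('Overall', sample)
    print(n_messages, n_words, n_media, n_links)  # -> 3 7 1 1
    print(emoji_helper('Overall', sample))        # one row: the 😀 emoji
    print(sentiment_analysis('Overall', sample))  # message counts per sentiment label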