LiuPengNGP committed
Commit c3f2fb1 · 1 Parent(s): b8f0f7f
1.py ADDED
@@ -0,0 +1,2 @@
+ import face_emo_analysize
+ face_emo_analysize.face_emo_analysize()
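The new 1.py is a minimal entry script: it imports the module and runs the webcam analysis once. Since face_emo_analysize() returns a sentiment summary string (see the face_emo_analysize.py diff below), a slightly extended entry point might capture that value; the print call here is an illustrative assumption, not part of the commit:

import face_emo_analysize

# Sketch: capture and show the returned summary string, e.g. "正向,概率:0.87"
outcome = face_emo_analysize.face_emo_analysize()
print(outcome)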
__pycache__/face_emo_analysize.cpython-310.pyc CHANGED
Binary files a/__pycache__/face_emo_analysize.cpython-310.pyc and b/__pycache__/face_emo_analysize.cpython-310.pyc differ
 
__pycache__/face_emo_analysize.cpython-311.pyc ADDED
Binary file (18.3 kB).
 
__pycache__/my_uie.cpython-310.pyc CHANGED
Binary files a/__pycache__/my_uie.cpython-310.pyc and b/__pycache__/my_uie.cpython-310.pyc differ
 
face_emo_analysize.py CHANGED
@@ -6,6 +6,21 @@ import time
  import torch
  from PIL import Image
  from torchvision import transforms
+ # Example code
+ import torch
+ import torch.nn as nn
+
+ # Suppose you have a model class
+ class MyModel(nn.Module):
+     def __init__(self):
+         super(MyModel, self).__init__()
+         self.fc = nn.Linear(10, 10)
+
+     def forward(self, x):
+         return self.fc(x)
+
+
+


  # Define the preprocessing function
@@ -133,151 +148,168 @@ def display_FPS(img, text, margin=1.0, box_scale=1.0):
  def face_emo_analysize():
      # Initialize MediaPipe Face Mesh
      mp_face_mesh = mp.solutions.face_mesh
+     # Create a model instance and save it
+     model = MyModel()
+     torch.save(model.state_dict(), 'model.pth')
+
+     # Load the model
+     model = MyModel()
+     model.load_state_dict(torch.load('model.pth', weights_only=True))
+     model.eval()

      # Load the PyTorch model
      name = '0_66_49_wo_gl'
-     pth_model = torch.jit.load('torchscript_model_0_66_37_wo_gl.pth'.format(name)).to(
-         'cuda')
-     pth_model.eval()
-
-     # Define the emotion dictionary
-     DICT_EMO = {0: 'Neutral', 1: 'Happiness', 2: 'Sadness', 3: 'Surprise', 4: 'Fear', 5: 'Disgust', 6: 'Anger'}
-
-     # Open the webcam
-     cap = cv2.VideoCapture(0)
-     w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-     h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-     fps = np.round(cap.get(cv2.CAP_PROP_FPS))
-
-     # Set up the video writer
-     path_save_video = 'result2.mp4'
-     vid_writer = cv2.VideoWriter(path_save_video, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-
-     # Run face detection with MediaPipe Face Mesh
-     emotion_stats = {}
-     with mp_face_mesh.FaceMesh(
-             max_num_faces=1,
-             refine_landmarks=False,
-             min_detection_confidence=0.5,
-             min_tracking_confidence=0.5) as face_mesh:
-         while cap.isOpened():
-             t1 = time.time()
-             success, frame = cap.read()
-             if frame is None: break
-
-             frame_copy = frame.copy()
-             frame_copy.flags.writeable = False
-             frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
-             results = face_mesh.process(frame_copy)
-             frame_copy.flags.writeable = True
-
-             if results.multi_face_landmarks:
-                 for fl in results.multi_face_landmarks:
-                     startX, startY, endX, endY = get_box(fl, w, h)
-                     cur_face = frame_copy[startY:endY, startX: endX]
-
-                     # Predict the emotion with the PyTorch model
-                     cur_face = pth_processing(Image.fromarray(cur_face))
-                     output = torch.nn.functional.softmax(pth_model(cur_face), dim=1).cpu().detach().numpy()[0]
-
-                     # Get the emotion class and its probability
-                     cl = np.argmax(output)
-                     label = DICT_EMO[cl]
-                     prob = output[cl]
-
-                     # Record emotion statistics
-                     if label not in emotion_stats:
-                         emotion_stats[label] = {'start_time': t1, 'duration': 0, 'total_prob': prob, 'count': 1}
-                     else:
-                         emotion_stats[label]['duration'] += (t1 - emotion_stats[label]['start_time'])
-                         emotion_stats[label]['total_prob'] += prob
-                         emotion_stats[label]['count'] += 1
-                         emotion_stats[label]['start_time'] = t1
-
-                     # Display the emotion label and probability
-                     frame = display_EMO_PRED(frame, (startX, startY, endX, endY), label, prob, line_width=3)
-
-             t2 = time.time()
-
-             # Display the FPS
-             frame = display_FPS(frame, 'FPS: {0:.1f}'.format(1 / (t2 - t1)), box_scale=.5)
-
-             # Write the frame to the video
-             vid_writer.write(frame)
-
-             # Show the frame
-             cv2.imshow('Webcam', frame)
-             if cv2.waitKey(1) & 0xFF == ord('\x1b'):
-                 break
-
-     # Release resources
-     vid_writer.release()
-     cap.release()
-     cv2.destroyAllWindows()
-
-     # Print the emotion statistics
-     for emotion, stats in emotion_stats.items():
-         avg_prob = stats['total_prob'] / stats['count']
-         print(f'Emotion: {emotion}, Duration: {stats["duration"]:.2f} seconds, Average Probability: {avg_prob:.2f}')
-
-     # Convert the video to a GIF
-     from moviepy.editor import VideoFileClip
-
-
-     def convert_mp4_to_gif(input_path, output_path, fps=10):
-         clip = VideoFileClip(input_path)
-         clip.write_gif(output_path, fps=fps)
-     # At this point we have each emotion's duration and average probability; if negative emotions outweigh positive ones the overall sentiment is negative, then take the average.
-     positive_emotions = ['Happiness', 'Surprise']
-     negative_emotions = ['Anger', 'Fear', 'Sadness', 'Disgust']
-
-     # Initialize statistics for positive and negative emotions
-     positive_stats = {'duration': 0, 'total_prob': 0, 'count': 0}
-     negative_stats = {'duration': 0, 'total_prob': 0, 'count': 0}
-
-     # Accumulate duration and probability for positive and negative emotions
-     for emotion, stats in emotion_stats.items():
-         if emotion in positive_emotions:
-             positive_stats['duration'] += stats['duration']
-             positive_stats['total_prob'] += stats['total_prob']
-             positive_stats['count'] += stats['count']
-         elif emotion in negative_emotions:
-             negative_stats['duration'] += stats['duration']
-             negative_stats['total_prob'] += stats['total_prob']
-             negative_stats['count'] += stats['count']
-
-     # Compute the average probability for positive and negative emotions
-     if positive_stats['count'] > 0:
-         positive_avg_prob = positive_stats['total_prob'] / positive_stats['count']
-     else:
-         positive_avg_prob = 0
-
-     if negative_stats['count'] > 0:
-         negative_avg_prob = negative_stats['total_prob'] / negative_stats['count']
-     else:
-         negative_avg_prob = 0
-
-     # Compare the durations of positive and negative emotions
-     if negative_stats['duration'] > positive_stats['duration']:
-         print(f'负向情感持续时间更长: {negative_stats["duration"]:.2f} seconds')
-         print(f'负向情感的平均概率: {negative_avg_prob:.2f}')
-         outcome = "负向,概率:" + str(negative_avg_prob)
-         return outcome
-     else:
-         print(f'正向情感持续时间更长: {positive_stats["duration"]:.2f} seconds')
-         print(f'正向情感的平均概率: {positive_avg_prob:.2f}')
-         outcome = "正向,概率:" + str(positive_avg_prob)
-         return outcome
-     # Convert the video to a GIF
-     from moviepy.editor import VideoFileClip
-
-
-     def convert_mp4_to_gif(input_path, output_path, fps=10):
-         clip = VideoFileClip(input_path)
-         clip.write_gif(output_path, fps=fps)
-
-     # Example usage
-     input_video_path = "result.mp4"
-     output_gif_path = "result.gif"
-
-     convert_mp4_to_gif(input_video_path, output_gif_path)
+     # pth_model = torch.jit.load('torchscript_model_0_66_37_wo_gl.pth'.format(name)).to(
+     #     'cuda')
+     model_path = 'torchscript_model_0_66_37_wo_gl.pth'
+     try:
+         pth_model = torch.jit.load(model_path)
+         pth_model.to('cuda')
+         pth_model.eval()
+     except Exception as e:
+         print(f"Failed to load model: {e}")
+         return
+
+     # Define the emotion dictionary
+     DICT_EMO = {0: 'Neutral', 1: 'Happiness', 2: 'Sadness', 3: 'Surprise', 4: 'Fear', 5: 'Disgust', 6: 'Anger'}
+
+     # Open the webcam
+     cap = cv2.VideoCapture(0)
+     w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = np.round(cap.get(cv2.CAP_PROP_FPS))
+
+     # Set up the video writer
+     path_save_video = 'result2.mp4'
+     vid_writer = cv2.VideoWriter(path_save_video, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+
+     # Run face detection with MediaPipe Face Mesh
+     emotion_stats = {}
+     with mp_face_mesh.FaceMesh(
+             max_num_faces=1,
+             refine_landmarks=False,
+             min_detection_confidence=0.5,
+             min_tracking_confidence=0.5) as face_mesh:
+         while cap.isOpened():
+             t1 = time.time()
+             success, frame = cap.read()
+             if frame is None: break
+
+             frame_copy = frame.copy()
+             frame_copy.flags.writeable = False
+             frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+             results = face_mesh.process(frame_copy)
+             frame_copy.flags.writeable = True
+
+             if results.multi_face_landmarks:
+                 for fl in results.multi_face_landmarks:
+                     startX, startY, endX, endY = get_box(fl, w, h)
+                     cur_face = frame_copy[startY:endY, startX: endX]
+
+                     # Predict the emotion with the PyTorch model
+                     cur_face = pth_processing(Image.fromarray(cur_face))
+                     output = torch.nn.functional.softmax(pth_model(cur_face), dim=1).cpu().detach().numpy()[0]
+
+                     # Get the emotion class and its probability
+                     cl = np.argmax(output)
+                     label = DICT_EMO[cl]
+                     prob = output[cl]
+
+                     # Record emotion statistics
+                     if label not in emotion_stats:
+                         emotion_stats[label] = {'start_time': t1, 'duration': 0, 'total_prob': prob, 'count': 1}
+                     else:
+                         emotion_stats[label]['duration'] += (t1 - emotion_stats[label]['start_time'])
+                         emotion_stats[label]['total_prob'] += prob
+                         emotion_stats[label]['count'] += 1
+                         emotion_stats[label]['start_time'] = t1
+
+                     # Display the emotion label and probability
+                     frame = display_EMO_PRED(frame, (startX, startY, endX, endY), label, prob, line_width=3)
+
+             t2 = time.time()
+
+             # Display the FPS
+             frame = display_FPS(frame, 'FPS: {0:.1f}'.format(1 / (t2 - t1)), box_scale=.5)
+
+             # Write the frame to the video
+             vid_writer.write(frame)
+
+             # Show the frame
+             cv2.imshow('Webcam', frame)
+             if cv2.waitKey(1) & 0xFF == ord('\x1b'):
+                 break
+
+     # Release resources
+     vid_writer.release()
+     cap.release()
+     cv2.destroyAllWindows()
+
+     # Print the emotion statistics
+     for emotion, stats in emotion_stats.items():
+         avg_prob = stats['total_prob'] / stats['count']
+         print(f'Emotion: {emotion}, Duration: {stats["duration"]:.2f} seconds, Average Probability: {avg_prob:.2f}')
+
+     # Convert the video to a GIF
+     from moviepy.editor import VideoFileClip
+
+
+     def convert_mp4_to_gif(input_path, output_path, fps=10):
+         clip = VideoFileClip(input_path)
+         clip.write_gif(output_path, fps=fps)
+     # At this point we have each emotion's duration and average probability; if negative emotions outweigh positive ones the overall sentiment is negative, then take the average.
+     positive_emotions = ['Happiness', 'Surprise']
+     negative_emotions = ['Anger', 'Fear', 'Sadness', 'Disgust']
+
+     # Initialize statistics for positive and negative emotions
+     positive_stats = {'duration': 0, 'total_prob': 0, 'count': 0}
+     negative_stats = {'duration': 0, 'total_prob': 0, 'count': 0}
+
+     # Accumulate duration and probability for positive and negative emotions
+     for emotion, stats in emotion_stats.items():
+         if emotion in positive_emotions:
+             positive_stats['duration'] += stats['duration']
+             positive_stats['total_prob'] += stats['total_prob']
+             positive_stats['count'] += stats['count']
+         elif emotion in negative_emotions:
+             negative_stats['duration'] += stats['duration']
+             negative_stats['total_prob'] += stats['total_prob']
+             negative_stats['count'] += stats['count']
+
+     # Compute the average probability for positive and negative emotions
+     if positive_stats['count'] > 0:
+         positive_avg_prob = positive_stats['total_prob'] / positive_stats['count']
+     else:
+         positive_avg_prob = 0
+
+     if negative_stats['count'] > 0:
+         negative_avg_prob = negative_stats['total_prob'] / negative_stats['count']
+     else:
+         negative_avg_prob = 0
+
+     # Compare the durations of positive and negative emotions
+     if negative_stats['duration'] > positive_stats['duration']:
+         print(f'负向情感持续时间更长: {negative_stats["duration"]:.2f} seconds')
+         print(f'负向情感的平均概率: {negative_avg_prob:.2f}')
+         outcome = "负向,概率:" + str(negative_avg_prob)
+         return outcome
+     else:
+         print(f'正向情感持续时间更长: {positive_stats["duration"]:.2f} seconds')
+         print(f'正向情感的平均概率: {positive_avg_prob:.2f}')
+         outcome = "正向,概率:" + str(positive_avg_prob)
+         return outcome
+     # Convert the video to a GIF (note: unreachable, both branches above return)
+     from moviepy.editor import VideoFileClip
+
+
+     def convert_mp4_to_gif(input_path, output_path, fps=10):
+         clip = VideoFileClip(input_path)
+         clip.write_gif(output_path, fps=fps)
+
+     # Example usage
+     input_video_path = "result.mp4"
+     output_gif_path = "result.gif"
+
+     convert_mp4_to_gif(input_video_path, output_gif_path)
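The substantive change in face_emo_analysize.py is replacing the bare torch.jit.load(...).to('cuda') call with a guarded load inside try/except. A standalone sketch of that loading pattern; the function name and the CPU fallback are illustrative assumptions, not part of the commit:

import torch

def load_emotion_model(model_path='torchscript_model_0_66_37_wo_gl.pth'):
    # Prefer CUDA when available, otherwise fall back to CPU (assumption; the commit targets 'cuda' only)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    try:
        model = torch.jit.load(model_path, map_location=device)
        model.eval()
        return model
    except Exception as e:
        print(f'Failed to load model: {e}')
        return None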
model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:694736e9a1f66d1925fd74f90b165c8de09f690c4ccdf1ec73722b37db3f5373
+ size 1868
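model.pth is committed as a Git LFS pointer; the 1,868-byte object it references is the state_dict written by the MyModel save/load snippet added in face_emo_analysize.py. A sketch of loading it outside the module, assuming the LFS object has been pulled locally:

import torch
import torch.nn as nn

# Mirrors the MyModel class added in face_emo_analysize.py: a single 10x10 linear layer
class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(10, 10)

    def forward(self, x):
        return self.fc(x)

model = MyModel()
model.load_state_dict(torch.load('model.pth', weights_only=True))
model.eval()
print(model(torch.randn(1, 10)).shape)  # torch.Size([1, 10])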