.gitattributes CHANGED
@@ -36,3 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.catboost filter=lfs diff=lfs merge=lfs -text
  *.png filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
+ assets/example_video.mpg filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -1,5 +1,5 @@
  import gradio as gr
- from gradio_demo import main_page, nailfold_features_page, scaling_page, vdo_page
+ from gradio_demo import main_page, nailfold_features_page, scaling_page, vdo_page, velocity_page

  with gr.Blocks() as demo:
      with gr.Tab("📐 Pixel-to-mm Scaling"):
@@ -11,6 +11,9 @@ with gr.Blocks() as demo:
      with gr.Tab("🎞️ Video to Nailfold Features"):
          vdo_page.vdo_demo.render()

+     with gr.Tab("🚀 Velocity Estimation [Experimental]"):
+         velocity_page.velocity_demo.render()
+
      # with gr.Tab("🚀 Full Pipeline"):
      #     main_page.demo.render()

assets/example_video.mpg ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8450b6843120ece55161f0cc488f3683e4994cc5afce0b225f409c2210db72bf
+ size 13416448
assets/test_velo_0_w33_h91.mp4 ADDED
Binary file (19.2 kB).
gradio_demo/scaling_page.py CHANGED
@@ -26,7 +26,12 @@ with gr.Blocks(title="Pixel-to-mm Scaling") as scaling_demo:

      image_input = gr.ImageEditor(label="Upload and Crop Image", type="pil")
      measure_btn = gr.Button(" Measure Width")
-
+     # gr.Examples(
+     #     examples=[["assets/example_reticule.png"]],
+     #     inputs=[image_input],
+     #     label="Try with Example",
+     # )
+
      output_img = gr.Image(label="Cropped Result")
      result_text = gr.Textbox(label="Width in Pixels")
gradio_demo/utils.py CHANGED
@@ -1,19 +1,24 @@
  import os
- import cv2
  import math
  import random
+ import re
  import shutil
  import tempfile
  import textwrap
+ from collections import Counter
+ from pathlib import Path
  import numpy as np
  import pandas as pd
  from PIL import Image
  import torch
- from collections import Counter
+ import cv2
+ from ultralytics import YOLO
+ from supervised.automl import AutoML
+ from skimage.morphology import skeletonize
  from matplotlib import pyplot as plt
  from matplotlib.backends.backend_pdf import PdfPages
- from ultralytics import YOLO
- from supervised.automl import AutoML
+ import gradio as gr
+

  # Declare global variables
  midframe_path = ""
@@ -461,3 +466,412 @@ def full_pipeline(video_file, name, sex, age, weight, heightt, SBP, DBP, PR, FBS
  )


+ def extract_middle_frame(video_path, output_name):
+     """
+     Extracts the middle frame from a video file and saves it as an image.
+     """
+     cap = cv2.VideoCapture(video_path)
+     total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+     if total_frames <= 0:
+         cap.release()
+         raise ValueError(f"No frames found in video: {video_path}")
+     middle_frame_number = total_frames // 2
+     cap.set(cv2.CAP_PROP_POS_FRAMES, middle_frame_number)
+     ret, frame = cap.read()
+     if not ret or frame is None:
+         cap.release()
+         raise ValueError(f"Failed to read middle frame from: {video_path}")
+     # Write losslessly so downstream detection sees the original pixels
+     cv2.imwrite(output_name, frame, [cv2.IMWRITE_PNG_COMPRESSION, 0])
+     cap.release()
+
+
+ def boxCenter(coords):
+     """
+     Calculate the center of a bounding box given its coordinates.
+     """
+     [left, top, right, bottom] = coords
+     return [(left + right) / 2, (top + bottom) / 2]
+
+
+ def closestBox(boxes, coords):
+     """
+     Find the bounding box whose center is closest to the center of the
+     given coordinates. Returns the box and its center-to-center distance
+     (in pixels) so callers can threshold on it.
+     """
+     distance = []
+     center = boxCenter(coords)
+     for box in boxes:
+         coord_pred = [int(box['x1']), int(box['y1']), int(box['x2']), int(box['y2'])]
+         boxCent = boxCenter(coord_pred)
+         distance.append(math.dist(boxCent, center))
+     closest = distance.index(min(distance))
+     return boxes[closest], min(distance)
+
+
+ def adjustBoxSize(coords, box_width, box_height):
+     """
+     Adjust the bounding box size based on the given width and height.
+     """
+     [centerX, centerY] = boxCenter(coords)
+     return [centerX - box_width / 2, centerY - box_height / 2,
+             centerX + box_width / 2, centerY + box_height / 2]
+
+
+ def adjustBoundaries(coords, screen):
+     """
+     Adjust the bounding box coordinates to fit within the screen dimensions.
+     """
+     [left, top, right, bottom] = coords
+     [width, height] = screen
+     if left < 0:
+         right = right - left
+         left = 0
+     if top < 0:
+         bottom = bottom - top
+         top = 0
+     if right > width:
+         left = left - (right - width)
+         right = width
+     if bottom > height:
+         top = top - (bottom - height)
+         bottom = height
+     return [round(left), round(top), round(right), round(bottom)]
+
+
+ def get_coordinate(input_img, num_label, model, results):
+     """
+     Get the coordinates of the bounding box for a specific label in an image.
+     `input_img` and `model` are kept in the signature for compatibility;
+     the detections already computed in `results` are used directly instead
+     of re-running prediction.
+     """
+     df_result = results[0].to_df()
+
+     x1 = df_result['box'][num_label]['x1']
+     y1 = df_result['box'][num_label]['y1']
+     x2 = df_result['box'][num_label]['x2']
+     y2 = df_result['box'][num_label]['y2']
+
+     cropCoords = [x1, y1, x2, y2]
+     return cropCoords
+
+
+ def process_video_temp(input_path, output_folder=None, slow_factor=2.0):
+     """
+     Process a video file to create:
+     1. Slow-motion version
+     2. Grayscale version
+     3. Grayscale + slow-motion version
+
+     All outputs are saved as temporary files with readable names.
+
+     Args:
+         input_path (str): path to the input video
+         output_folder (str, optional): folder to save temp files. Defaults to system temp.
+         slow_factor (float or str, optional): factor to slow down the video. Defaults to 2.0.
+
+     Returns:
+         tuple: paths to (slow_video, gray_video, gray_slow_video)
+     """
+     if output_folder is None:
+         output_folder = tempfile.gettempdir()
+
+     slow_factor = float(slow_factor)  # ensure numeric
+     cap = cv2.VideoCapture(input_path)
+     if not cap.isOpened():
+         raise ValueError(f"Cannot open video: {input_path}")
+
+     fps = float(cap.get(cv2.CAP_PROP_FPS))
+     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     name_file = os.path.splitext(os.path.basename(input_path))[0]
+
+     new_fps = fps / slow_factor
+
+     # Generate readable temp file paths
+     output_path_slow = os.path.join(output_folder, f"{name_file}_slow.mp4")
+     output_path_gray = os.path.join(output_folder, f"{name_file}_gray.mp4")
+     output_path_both = os.path.join(output_folder, f"{name_file}_gray_slow.mp4")
+
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+
+     out_slow = cv2.VideoWriter(output_path_slow, fourcc, new_fps, (width, height))
+     out_gray = cv2.VideoWriter(output_path_gray, fourcc, fps, (width, height), isColor=False)
+     out_both = cv2.VideoWriter(output_path_both, fourcc, new_fps, (width, height), isColor=False)
+
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         # Slow motion
+         out_slow.write(frame)
+
+         # Grayscale (single channel, matching isColor=False)
+         gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+         out_gray.write(gray_frame)
+
+         # Grayscale + slow
+         out_both.write(gray_frame)
+
+     cap.release()
+     out_slow.release()
+     out_gray.release()
+     out_both.release()
+
+     return output_path_slow, output_path_gray, output_path_both
+
+
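+ # Worked example (hypothetical clip and numbers): a 30 fps video processed
+ # with slow_factor=2.0 is re-encoded at 15 fps, so the identical frames
+ # span twice the wall-clock time:
+ #
+ #     slow, gray, both = process_video_temp("capillary.mp4", slow_factor=2.0)
+ #     # slow and both play at 15 fps; gray keeps the original 30 fps
+
+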
+ def process_frame_split_horizontal_lines(
+         frame, line_y_ratio=0.8,
+         left_line_ratio=(0.1, 0.4),
+         right_line_ratio=(0.6, 0.9),
+         line_color=(0, 255, 0),
+         line_thickness=2):
+     """
+     Draw the left and right horizontal measurement line segments on a
+     frame for visualization.
+     """
+     height, width = frame.shape[:2]
+     line_y = int(height * line_y_ratio)
+     left_start_x = int(width * left_line_ratio[0])
+     left_end_x = int(width * left_line_ratio[1])
+     right_start_x = int(width * right_line_ratio[0])
+     right_end_x = int(width * right_line_ratio[1])
+     cv2.line(frame, (left_start_x, line_y), (left_end_x, line_y), line_color, line_thickness)
+     cv2.line(frame, (right_start_x, line_y), (right_end_x, line_y), line_color, line_thickness)
+     return frame
+
+
+ def get_average_intensity_on_line(frame, line_y, x_start, x_end):
+     """
+     Calculates the average intensity along a horizontal line segment in a grayscale frame.
+     """
+     line_pixels = frame[line_y, x_start:x_end + 1]
+     return np.mean(line_pixels)
+
+
+ def extract_video_data_temp(video_file):
+     """
+     Analyze a video file to extract the middle frame, detect objects, and crop object videos.
+     Temporary files have readable names based on the original video name.
+
+     Returns:
+         {
+             "middle_frame": temp_middle_frame_path,
+             "cropped_videos": [list of temp video paths],
+             "temp_files": all_temp_files
+         }
+     """
+     temp_files = []
+     base_name = os.path.splitext(os.path.basename(video_file))[0]
+     temp_dir = tempfile.gettempdir()
+
+     # Middle frame with readable name
+     temp_middle_frame_name = os.path.join(temp_dir, f"{base_name}_middle.png")
+     extract_middle_frame(video_file, temp_middle_frame_name)
+     temp_files.append(temp_middle_frame_name)
+
+     input_img = cv2.imread(temp_middle_frame_name)
+     results = obj_det_model.predict(input_img, conf=0.5, iou=0.3)
+
+     temp_videos = []
+
+     # `results` holds one entry per image, so iterate over the detected
+     # boxes of the first (only) result rather than over `results` itself
+     for i in range(len(results[0].boxes)):
+         cropCoords = get_coordinate(input_img, i, obj_det_model, results)
+
+         vidCapture = cv2.VideoCapture(video_file)
+         fps = vidCapture.get(cv2.CAP_PROP_FPS)
+         width = int(vidCapture.get(cv2.CAP_PROP_FRAME_WIDTH))
+         height = int(vidCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+         skip_frames = int(1.5 * fps)
+
+         # Skip the first 1.5 seconds of footage
+         for _ in range(skip_frames):
+             ret, frame = vidCapture.read()
+
+         box_left, box_top, box_right, box_bottom = cropCoords
+         box_left = max(box_left, 0)
+         box_top = max(box_top, 0)
+         box_right = min(box_right, width)
+         box_bottom = min(box_bottom, height)
+
+         box_width = box_right - box_left
+         box_height = box_bottom - box_top
+
+         # Temp video with readable name; the crop size is encoded in the
+         # filename so later steps can recover it
+         temp_video_name = os.path.join(temp_dir, f"{base_name}_{i}_w{int(box_width)}_h{int(box_height)}.mp4")
+         temp_videos.append(temp_video_name)
+
+         outputWriter = cv2.VideoWriter(
+             temp_video_name,
+             cv2.VideoWriter_fourcc(*'mp4v'),
+             fps,
+             (int(box_width), int(box_height))
+         )
+
+         lastBoxCoords = [box_left, box_top, box_right, box_bottom]
+
+         while True:
+             ret, im = vidCapture.read()
+             if not ret:
+                 break
+
+             results_frame = obj_det_model.predict(im)
+             df_result = results_frame[0].to_df()
+
+             # Re-center the crop only when a detection lies within a few
+             # pixels of the original box; frames with no detections keep
+             # the previous crop window
+             if len(df_result) > 0:
+                 boxes = df_result['box']
+                 box, distance = closestBox(boxes, lastBoxCoords)
+
+                 if distance < 4:
+                     newCoords = adjustBoxSize([box['x1'], box['y1'], box['x2'], box['y2']], box_width, box_height)
+                     newCoords = adjustBoundaries(newCoords, [width, height])
+                     box_left, box_top, box_right, box_bottom = newCoords
+
+             imCropped = im[int(box_top):int(box_bottom), int(box_left):int(box_right)]
+             outputWriter.write(imCropped)
+
+         vidCapture.release()
+         outputWriter.release()
+
+     all_temp_files = temp_files + temp_videos
+     return {"middle_frame": temp_middle_frame_name, "cropped_videos": temp_videos, "temp_files": all_temp_files}
+
+
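+ # The w/h tokens embedded in the cropped filenames above are what
+ # estimate_nailfold_distance_temp() parses back out later; e.g. the bundled
+ # example "test_velo_0_w33_h91.mp4" decodes to a 33x91 px crop:
+ #
+ #     re.search(r'w(\d+)_h(\d+)', "test_velo_0_w33_h91").groups()  # ('33', '91')
+
+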
+ def analyze_video_intensity(input_video_path):
+     """
+     Track the intensity difference between a left and a right horizontal
+     line segment across the frames of a video and estimate the average
+     interval between threshold crossings. Thresholds and ratios are
+     empirically tuned (see the cautions on the velocity page).
+     """
+     base_name = os.path.splitext(os.path.basename(input_video_path))[0]
+     working_path = f"velocity_data/{base_name}"
+     os.makedirs(working_path, exist_ok=True)
+     output_slow, output_gray, output_both = process_video_temp(input_video_path, working_path)
+
+     cap = cv2.VideoCapture(output_both)
+
+     left_intensities = []
+     right_intensities = []
+     frame_numbers = []
+     frame_count = 0
+
+     line_y_ratio = 0.9
+     left_line_ratio = (0.1, 0.4)
+     right_line_ratio = (0.6, 0.9)
+
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         current_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+         height, width = current_frame_gray.shape[:2]
+         line_y_pixel = int(height * line_y_ratio)
+         left_start_pixel = int(width * left_line_ratio[0])
+         left_end_pixel = int(width * left_line_ratio[1])
+         right_start_pixel = int(width * right_line_ratio[0])
+         right_end_pixel = int(width * right_line_ratio[1])
+
+         avg_left_intensity = get_average_intensity_on_line(current_frame_gray, line_y_pixel, left_start_pixel, left_end_pixel)
+         left_intensities.append(avg_left_intensity)
+
+         avg_right_intensity = get_average_intensity_on_line(current_frame_gray, line_y_pixel, right_start_pixel, right_end_pixel)
+         right_intensities.append(avg_right_intensity)
+
+         frame_numbers.append(frame_count)
+         frame_count += 1
+
+     cap.release()
+
+     time = np.array(frame_numbers)
+     intensity_difference = abs(np.array(right_intensities) - np.array(left_intensities))
+
+     df = pd.DataFrame(columns=["intensity_dif", "time"])
+     df["intensity_dif"] = intensity_difference
+     df["time"] = time
+
+     # Collect frame indices where the left/right difference exceeds an
+     # empirically chosen threshold. Indices are halved to undo the 2x
+     # slow-motion re-encode, and collection stops once the halved index
+     # passes 4.
+     dif_time = []
+     for inten, t in zip(df["intensity_dif"], df["time"]):
+         if inten > 10.5:
+             round_time = round(float(t), 4)
+             dif_time.append(round_time / 2)
+             if round_time / 2 > 4:
+                 break
+
+     # Average spacing between consecutive crossings
+     differences = []
+     for i in range(1, len(dif_time)):
+         differences.append(dif_time[i] - dif_time[i - 1])
+
+     # Guard against an empty list so callers get 0.0 instead of NaN
+     avg_time = float(np.mean(differences)) if differences else 0.0
+     return avg_time
+
+
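+ # Worked example (hypothetical crossings): if the threshold is exceeded at
+ # halved indices [1.0, 2.5, 4.0], the gaps are [1.5, 1.5] and the function
+ # returns their mean:
+ #
+ #     np.mean(np.diff([1.0, 2.5, 4.0]))  # 1.5
+
+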
+ def estimate_nailfold_distance_temp(input_video_path):
+     """
+     Estimate the capillary path length from a video file:
+     - Detects capillary masks using YOLO segmentation on the middle frame
+     - Calculates the skeleton length of the first detected mask
+     - Saves the mask as a temporary PNG for visualization
+     """
+     # Handle Gradio dict
+     if isinstance(input_video_path, dict):
+         input_video_path = input_video_path.get("name")
+
+     if not input_video_path or not os.path.exists(input_video_path):
+         raise FileNotFoundError(f"Video file not found: {input_video_path}")
+
+     base_name = os.path.splitext(os.path.basename(input_video_path))[0]
+     temp_dir = tempfile.mkdtemp()
+
+     # --- Extract middle frame robustly ---
+     middle_frame_path = os.path.join(temp_dir, f"{base_name}_middle.png")
+
+     cap = cv2.VideoCapture(input_video_path)
+     if not cap.isOpened():
+         raise FileNotFoundError(f"Cannot open video file: {input_video_path}")
+
+     frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+     if frame_count <= 0:
+         raise ValueError(f"No frames found in video: {input_video_path}")
+
+     middle_index = frame_count // 2
+     cap.set(cv2.CAP_PROP_POS_FRAMES, middle_index)
+     ret, frame = cap.read()
+     cap.release()
+
+     if not ret or frame is None:
+         raise FileNotFoundError(f"Failed to read middle frame from video: {input_video_path}")
+
+     cv2.imwrite(middle_frame_path, frame)
+
+     # --- Recover the crop size from the filename written by extract_video_data_temp ---
+     match = re.search(r'w(\d+)_h(\d+)', base_name)
+     if match:
+         w, h = int(match.group(1)), int(match.group(2))
+     else:
+         # Fallback: use the video resolution
+         h = int(frame.shape[0])
+         w = int(frame.shape[1])
+
+     # --- YOLO segmentation ---
+     model = YOLO("models/segment_yolo11.pt")
+     input_img = cv2.imread(middle_frame_path)
+
+     results = model.predict(
+         input_img, save=False, show_labels=False,
+         show_conf=False, show_boxes=False
+     )
+
+     for result in results:
+         if result.masks is not None:
+             for mask in result.masks.data:
+                 mask_np = (mask.cpu().numpy() * 255).astype(np.uint8)
+                 mask_resized = cv2.resize(mask_np, (w, h))
+
+                 mask_image_path = os.path.join(temp_dir, f"{base_name}_mask.png")
+                 cv2.imwrite(mask_image_path, mask_resized)
+
+                 # Skeletonize the mask and count skeleton pixels to get the
+                 # capillary centerline length in pixels
+                 skeleton = skeletonize(mask_resized > 0, method='lee')
+                 skeleton_pixels = int(np.count_nonzero(skeleton))
+
+                 # Convert pixels to mm using the 587 px/mm calibration used
+                 # elsewhere in the app
+                 skeleton_length_mm = skeleton_pixels / 587.0
+                 return skeleton_length_mm, mask_image_path
+
+     raise ValueError("No mask detected in the middle frame.")
+
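+ # Unit check (hypothetical count): at the app-wide calibration of 587 px
+ # per mm, a skeleton of 1174 pixels corresponds to 1174 / 587.0 = 2.0 mm
+ # of capillary centerline.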
gradio_demo/vdo_page.py CHANGED
@@ -62,6 +62,11 @@ with gr.Blocks(title="🎞️ Video to Nailfold Features") as vdo_demo:
          video_input = gr.Video(label="Upload Video File")
          mm_per_pixel = gr.Number(label="1 mm equals (pixels)", value=587, step=1)
          extract_btn = gr.Button("Extract & Analyze")
+         gr.Examples(
+             examples=[["assets/example_video.mpg", 587]],
+             inputs=[video_input, mm_per_pixel],
+             label="Try with Example",
+         )

      with gr.Column():
          midframe_output = gr.Image(label="Extracted Midframe Visualization")
gradio_demo/velocity_page.py ADDED
@@ -0,0 +1,137 @@
+ import gradio as gr
+ from gradio_demo.utils import (
+     extract_video_data_temp,
+     analyze_video_intensity,
+     estimate_nailfold_distance_temp,
+ )
+
+ # ------------------------------
+ # Function to extract nailfold frames
+ # ------------------------------
+ def extract_nailfold_frame(input_video):
+     """
+     Extract the middle frame and cropped videos from the uploaded nailfold video.
+     Returns a list of temporary file paths (middle frame + cropped videos).
+     """
+     if input_video is None:
+         return []
+
+     results = extract_video_data_temp(input_video)
+     return [results["middle_frame"]] + results["cropped_videos"]
+
+ # ------------------------------
+ # Function to predict velocity
+ # ------------------------------
+ def pred_velocity(input_video):
+     """
+     Predict the flow velocity in a cropped nailfold video.
+     Returns Gradio-friendly outputs:
+     - Estimated Time
+     - Estimated Distance
+     - Estimated Velocity
+     - Segmentation Mask Image
+     """
+     if input_video is None:
+         return "No video uploaded", "No video uploaded", "No video uploaded", None
+
+     estimate_time = analyze_video_intensity(input_video)
+     estimate_distance, segmentation_path = estimate_nailfold_distance_temp(input_video)
+     # Guard against a zero time estimate to avoid division by zero
+     predicted_velocity = estimate_distance / estimate_time if estimate_time > 0 else 0.0
+
+     # Return strings and the mask path for Gradio
+     return (
+         f"{estimate_time:.2f} s",
+         f"{estimate_distance:.2f} mm",
+         f"{predicted_velocity:.2f} mm/s",
+         segmentation_path
+     )
+
+ # ------------------------------
+ # Build Gradio Interface
+ # ------------------------------
+ with gr.Blocks(
+     title="Nailfold Velocity Estimator",
+     theme=gr.themes.Default(primary_hue=gr.themes.colors.red,
+                             secondary_hue=gr.themes.colors.pink)
+ ) as velocity_demo:
+
+     # Header and instructions
+     gr.Markdown("# 🎥 Nailfold Velocity Estimator")
+     gr.Markdown("""
+     This page processes a nailfold capillaroscopy video to estimate flow velocity.
+
+     ⚠️ **CAUTION**: This tool is experimental and may not work with all videos.
+     ⚠️ The values are derived from video analysis and vary with video quality and content.
+     ⚠️ Each parameter is tuned on experimental data and may not be accurate for all cases.
+     ⚠️ The model is trained on specific datasets and may not generalize well to other types of data.
+
+     ## Left Side: Extract Nailfold Frames
+     Upload a nailfold capillaroscopy video. The object detection model extracts the middle frame, detects each nailfold, and returns cropped videos of the detected areas.
+
+     **Steps**:
+     1. Upload a nailfold video (.mp4 or .avi).
+     2. Click **Extract Nailfold Frame**.
+     3. Download the extracted frame and cropped videos.
+
+     ## Right Side: Predict Velocity
+     After frame extraction, upload one of the cropped videos to predict velocity.
+
+     **Steps**:
+     1. Upload a cropped video from the extraction step.
+     2. Click **Predict Velocity**.
+     3. View the estimated time, distance, velocity, and segmentation mask.
+     """)
+
+     with gr.Row():
+         # ------------------------------
+         # Left Column: Frame Extraction
+         # ------------------------------
+         with gr.Column():
+             video_input1 = gr.Video(label="Upload Video for Frame Extraction")
+             path_output = gr.File(
+                 label="Download Extracted Frame & Cropped Videos",
+                 file_types=[".png", ".mp4"],
+                 type="filepath"
+             )
+             extract_button = gr.Button("Extract Nailfold Frame")
+
+             extract_button.click(
+                 fn=extract_nailfold_frame,
+                 inputs=video_input1,
+                 outputs=path_output
+             )
+
+             gr.Markdown("### Example Video for Frame Extraction")
+             gr.Examples(
+                 examples=[["assets/test_velo.mpg"]],
+                 inputs=[video_input1],
+                 label="Try with Example",
+             )
+
+         # ------------------------------
+         # Right Column: Velocity Prediction
+         # ------------------------------
+         with gr.Column():
+             video_input2 = gr.Video(label="Upload Video for Velocity Prediction")
+             time_output = gr.Textbox(label="Estimated Time", interactive=False)
+             distance_output = gr.Textbox(label="Estimated Distance", interactive=False)
+             velocity_output = gr.Textbox(label="Estimated Velocity", interactive=False)
+             segment_output = gr.Image(label="Segmentation Mask")
+
+             velocity_button = gr.Button("Predict Velocity")
+             velocity_button.click(
+                 fn=pred_velocity,
+                 inputs=video_input2,
+                 outputs=[time_output, distance_output, velocity_output, segment_output]
+             )
+
+             gr.Markdown("### Example Video for Velocity Prediction")
+             gr.Examples(
+                 examples=[["assets/test_velo_0_w33_h91.mp4"]],
+                 inputs=[video_input2],
+                 label="Try with Example",
+             )
+
+ if __name__ == "__main__":
+     velocity_demo.launch()
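+
+ # Headless sketch of the flow the two buttons wire up (assumes a cropped
+ # clip produced by extract_nailfold_frame exists at the path below):
+ #
+ #     clips = extract_nailfold_frame("assets/test_velo.mpg")
+ #     print(pred_velocity("assets/test_velo_0_w33_h91.mp4"))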
models/segment_yolo11.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8357f4201e9244ec3a8a8d88fda5f2981d5851e548a044aa32497d1824df64a
+ size 6059890
requirements.txt CHANGED
@@ -13,4 +13,5 @@ ultralytics
  torch
  lightgbm
  catboost
- mljar-supervised
+ mljar-supervised
+ scikit-image