import hashlib
import json
import math
import os

import numpy as np
from PIL import Image, ImageChops
from torchvision.io import read_video
import torchvision.transforms.functional as TF


def md5(path):
    """Return the MD5 hex digest of the file at `path`, read in 8 KB chunks."""
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()


metadata_path = "metadata"
# json_list = os.listdir(metadata_path)
json_list = ["426eb9d9-d4cf-4acc-9efb-979f61a5be91.json"]

for json_file in json_list:
    with open(os.path.join(metadata_path, json_file), "r") as f:
        metadata = json.load(f)

    # Load metadata
    video_id = metadata["video_id"]
    video_name = metadata["video_name"]
    start_pt = metadata["start_sec"]
    end_pt = metadata["end_sec"]
    num_output_frames = metadata["num_output_frames"]
    offset = metadata["offset"]
    stride = metadata["stride"]

    # Source video and output directory
    video_path = f"ego4d_full_videos/v2/full_scale/{video_id}.mp4"
    save_dir = f"frames/{video_name}"

    print("\nPROCESSING VIDEO", video_name)

    # Decode only the [start_sec, end_sec] window of the video
    video, _, info = read_video(
        video_path, start_pts=start_pt, end_pts=end_pt, pts_unit="sec"
    )
    video = video.permute(0, 3, 1, 2).float() / 255.0  # [T, C, H, W] in [0, 1]

    os.makedirs(save_dir, exist_ok=True)

    # Start at `offset`, take every `stride`-th frame, keep the first
    # `num_output_frames` frames
    cropped_vid = video[offset::stride][:num_output_frames]

    # Save each frame with a zero-padded filename
    for idx, frame_tensor in enumerate(cropped_vid):
        frame = TF.to_pil_image(frame_tensor)  # Convert to PIL image
        # rgb_path = os.path.join(save_dir, "rgb_frames")
        frame.save(os.path.join(save_dir, f"{idx:06d}.jpg"))

    print(f"Saved {len(cropped_vid)} frames to {save_dir}")

    # # TODO(ilona) - remove this later
    # # checks that the current dataset aligns with the files that we have !!
    # for frame_number in [f"{i:06d}" for i in range(0, num_output_frames)]:
    #     paths = [
    #         f"/data/ilona/datasets/ego4d/ego4d_chosen_videos/{video_name}/rgb_frames/{frame_number}.jpg",
    #         # f"/data/ilona/datasets/go4d/frames/{video_name}/rgb_frames/{frame_number}.jpg",
    #         f"/data/ilona/datasets/itto_release/itto/ego4d/frames/{video_name}/{frame_number}.jpg",
    #     ]
    #     # Load images
    #     imgs = [Image.open(p).convert("RGB") for p in paths]
    #     # Basic metadata & hashes
    #     meta = [(os.path.basename(p), im.size, md5(p)) for p, im in zip(paths, imgs)]
    #     # Ensure same size
    #     same_size = imgs[0].size == imgs[1].size
    #     # Pixel-wise difference
    #     diff_img = ImageChops.difference(imgs[0], imgs[1])
    #     diff_array = np.asarray(diff_img)
    #     nonzero_pixels = int(np.count_nonzero(diff_array))
    #     # Mean absolute error per channel
    #     arr0 = np.asarray(imgs[0], dtype=np.int16)
    #     arr1 = np.asarray(imgs[1], dtype=np.int16)
    #     mae = float(np.mean(np.abs(arr0 - arr1)))
    #     # Peak Signal-to-Noise Ratio (PSNR)
    #     mse = float(np.mean((arr0 - arr1) ** 2))
    #     if mse == 0:
    #         psnr = float("inf")
    #     else:
    #         PIXEL_MAX = 255.0
    #         psnr = 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
    #     if mae > 0.01:
    #         print("file 1:", meta[0][0])
    #         print("file 2:", meta[1][0])
    #         print("stride:", stride)
    #         print("mean abs error:", mae)
    #         print("psnr:", psnr)
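
# Example of a metadata JSON this script can read (a minimal sketch: the key
# names match the fields accessed above, but the values and the convention of
# naming the file after the video_id are assumptions, not confirmed by the data):
#
# {
#     "video_id": "426eb9d9-d4cf-4acc-9efb-979f61a5be91",
#     "video_name": "example_clip",
#     "start_sec": 10.0,
#     "end_sec": 20.0,
#     "num_output_frames": 48,
#     "offset": 0,
#     "stride": 5
# }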