Upload fascv.ipynb
Browse files- fascv.ipynb +1 -0
fascv.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.11.13","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":13008060,"sourceType":"datasetVersion","datasetId":8235288},{"sourceId":13013760,"sourceType":"datasetVersion","datasetId":8239053},{"sourceId":13014480,"sourceType":"datasetVersion","datasetId":8239528},{"sourceId":13158209,"sourceType":"datasetVersion","datasetId":8337256},{"sourceId":13160632,"sourceType":"datasetVersion","datasetId":8339025},{"sourceId":13178251,"sourceType":"datasetVersion","datasetId":8350960}],"dockerImageVersionId":31090,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# Lưu ý: Trước khi bắt đầu Session cần \"Add input\" đầy đủ. 
Những dataset cần thiết đã được liệt kê đầy đủ trong requiments.csv","metadata":{}},{"cell_type":"code","source":"!pip install mediapipe --quiet","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-09-29T14:57:34.482907Z","iopub.execute_input":"2025-09-29T14:57:34.483132Z","iopub.status.idle":"2025-09-29T14:58:12.146865Z","shell.execute_reply.started":"2025-09-29T14:57:34.483112Z","shell.execute_reply":"2025-09-29T14:58:12.145999Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import mediapipe as mp","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-09-29T14:58:12.147863Z","iopub.execute_input":"2025-09-29T14:58:12.148132Z","iopub.status.idle":"2025-09-29T14:58:29.548324Z","shell.execute_reply.started":"2025-09-29T14:58:12.148108Z","shell.execute_reply":"2025-09-29T14:58:29.547741Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\nimport cv2\nfrom tqdm import tqdm","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# I. Lấy các dataset đã được tạo và tải lên Kaggle để train và dự đoán","metadata":{}},{"cell_type":"markdown","source":"## Vì /kaggle/working chỉ giới hạn 19.5 GiB nên chỉ có thể add thủ link dataset lên kaggle/input","metadata":{}},{"cell_type":"markdown","source":"## 1. Train","metadata":{}},{"cell_type":"code","source":"# ======================================\n# 1. 
# ======================================
# Install training dependencies
# ======================================
# !pip install torch torchvision tensorboardX tqdm opencv-python

# ===============================
# TRAIN FACE ANTI-SPOOFING (LIVE vs SPOOF)
# ===============================
import os
import cv2
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models

# ===============================
# 1. Config
# ===============================
DATA_DIR = "/kaggle/input/20kcvfas/fas_frames"  # directory with two folders: live/ and spoof/
BATCH_SIZE = 32
IMG_SIZE = 128
EPOCHS = 7
LR = 1e-4
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# ===============================
# 2. Dataset
# ===============================
class FASDataset(Dataset):
    """Binary face anti-spoofing dataset: live -> 0, spoof -> 1.

    Indexes every .jpg/.png/.jpeg file under ``root_dir``/{live,spoof}.

    Args:
        root_dir: directory containing the ``live`` and ``spoof`` folders.
        transform: optional callable applied to the RGB image (H, W, 3 uint8).
    """

    def __init__(self, root_dir: str, transform=None):
        self.samples = []
        self.transform = transform
        classes = {"live": 0, "spoof": 1}
        for cls, label in classes.items():
            folder = os.path.join(root_dir, cls)
            # sorted() makes the sample order deterministic across runs;
            # os.listdir order is filesystem-dependent.
            for f in sorted(os.listdir(folder)):
                if f.lower().endswith((".jpg", ".png", ".jpeg")):
                    self.samples.append((os.path.join(folder, f), label))

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx):
        path, label = self.samples[idx]
        img = cv2.imread(path)
        # cv2.imread silently returns None for unreadable/corrupt files,
        # which would crash cvtColor with a cryptic error mid-epoch;
        # fail early with the offending path instead.
        if img is None:
            raise FileNotFoundError(f"Could not read image: {path}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if self.transform:
            img = self.transform(img)
        return img, torch.tensor(label, dtype=torch.long)
# ===============================
# 3. Transform & Split
# ===============================
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((IMG_SIZE, IMG_SIZE)),
    transforms.ToTensor(),
    # A single-element mean/std broadcasts over all 3 RGB channels,
    # mapping pixel values from [0, 1] to [-1, 1].
    transforms.Normalize([0.5], [0.5])
])

dataset = FASDataset(DATA_DIR, transform=transform)
n_total = len(dataset)
n_train = int(0.7 * n_total)
n_val = n_total - n_train

# Seeded generator => the train/val split is reproducible across sessions.
# Without it every re-run trains on a different split, so validation
# accuracies are not comparable between runs.
split_gen = torch.Generator().manual_seed(42)
train_set, val_set = torch.utils.data.random_split(
    dataset, [n_train, n_val], generator=split_gen
)

train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)

print(f"Train samples: {len(train_set)}, Val samples: {len(val_set)}")

# ===============================
# 4. Model
# ===============================
model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
model.fc = nn.Linear(model.fc.in_features, 2)  # 2 logits: live / spoof
model = model.to(DEVICE)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LR)
# ===============================
# 5. Train loop + Save best model
# ===============================
best_val_acc = 0.0
best_epoch = -1
save_dir = "/kaggle/working/fas_model"
os.makedirs(save_dir, exist_ok=True)
best_path = os.path.join(save_dir, "resnet18_best.pth")  # was f"..." with no placeholder

for epoch in range(EPOCHS):
    # --- Training ---
    model.train()
    total_loss, correct, total = 0, 0, 0
    for imgs, labels in tqdm(train_loader, desc=f"Epoch {epoch+1}/{EPOCHS}"):
        imgs, labels = imgs.to(DEVICE), labels.to(DEVICE)

        optimizer.zero_grad()
        outputs = model(imgs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        preds = torch.argmax(outputs, dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)

    train_acc = correct / total

    # --- Validation ---
    val_correct, val_total = 0, 0
    model.eval()
    with torch.no_grad():
        for imgs, labels in val_loader:
            imgs, labels = imgs.to(DEVICE), labels.to(DEVICE)
            outputs = model(imgs)
            preds = torch.argmax(outputs, dim=1)
            val_correct += (preds == labels).sum().item()
            val_total += labels.size(0)

    val_acc = val_correct / val_total
    print(f"Epoch {epoch+1}/{EPOCHS} | Loss: {total_loss/len(train_loader):.4f} | "
          f"Train Acc: {train_acc:.4f} | Val Acc: {val_acc:.4f}")

    # --- Save best model ---
    if val_acc > best_val_acc:
        best_val_acc = val_acc
        best_epoch = epoch + 1
        torch.save(model.state_dict(), best_path)
        print(f"✅ Saved best model at epoch {best_epoch} with Val Acc: {best_val_acc:.4f}")

# Reload the best checkpoint into `model` so any later cell that saves or
# uses `model` gets the best-validation weights, not the last epoch's.
# (Previously the follow-up "Save model" cell silently persisted the
# last-epoch weights while the best checkpoint went unused.)
if best_epoch > 0:
    model.load_state_dict(torch.load(best_path, map_location=DEVICE))

print(f"🎯 Training done. Best epoch: {best_epoch}, Val Acc: {best_val_acc:.4f}")
# ===============================
# 6. Save model
# ===============================
os.makedirs("/kaggle/working/fas_model", exist_ok=True)
torch.save(model.state_dict(), "/kaggle/working/fas_model/resnet18_fas.pth")
print("✅ Training finished. Model saved!")

# ===============================================================
# 2. Predict on the test set
# ===============================================================
import os
import cv2
import torch
import torch.nn as nn
import pandas as pd
from torchvision import transforms, models
from tqdm import tqdm
from PIL import Image

# ========================
# Config
# ========================
FRAME_DIR = "/kaggle/input/testcv1/fas_test_frames (1)"
CSV_PATH = "/kaggle/input/test-fascsv/publics_test_metadata.csv"
# Prefer the best-validation checkpoint when it exists; fall back to the
# last-epoch snapshot for backward compatibility. (Previously inference
# always loaded the last-epoch file, ignoring the best checkpoint the
# training loop went to the trouble of saving.)
_BEST_CKPT = "/kaggle/working/fas_model/resnet18_best.pth"
_LAST_CKPT = "/kaggle/working/fas_model/resnet18_fas.pth"
MODEL_PATH = _BEST_CKPT if os.path.exists(_BEST_CKPT) else _LAST_CKPT
OUTPUT_CSV = "/kaggle/working/predictionss.csv"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ========================
# Model definition (ResNet18) — no pretrained weights, we load our own
# ========================
model = models.resnet18(weights=None)
model.fc = nn.Linear(model.fc.in_features, 2)
model = model.to(device)

model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
model.eval()

# ========================
# Transform — must match training exactly
# ========================
transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])  # same normalization as training
])

# ========================
# Load CSV and predict one frame per video
# ========================
df = pd.read_csv(CSV_PATH)

predictions = []

for _, row in tqdm(df.iterrows(), total=len(df)):
    uuid = row["uuid"]
    video_name = row["path"]  # video file name, e.g. abc.mp4
    frame_path = os.path.join(FRAME_DIR, f"{video_name}.jpg")

    if not os.path.exists(frame_path):
        print(f"⚠️ Không tìm thấy frame: {frame_path}")
        continue

    # Load image; cv2.imread returns None for unreadable files, which the
    # existence check above does not catch — skip those too instead of
    # crashing in cvtColor.
    img = cv2.imread(frame_path)
    if img is None:
        print(f"⚠️ Không tìm thấy frame: {frame_path}")
        continue
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img)
    img = transform(img).unsqueeze(0).to(device)

    # Predict
    with torch.no_grad():
        logits = model(img)
        pred = torch.argmax(logits, dim=1).item()

    # Class index -> submission label string
    label = "0" if pred == 0 else "1"
    predictions.append({"uuid": uuid, "label_pred": label})

# ========================
# Write submission CSV
# ========================
out_df = pd.DataFrame(predictions)
out_df.to_csv(OUTPUT_CSV, index=False)
print("✅ Done. Saved:", OUTPUT_CSV)
# ===============================================================
# 1. Cut one face frame per TEST video
# ===============================================================
import os
import cv2
import mediapipe as mp
from tqdm import tqdm

TEST_INPUT_DIR = "/kaggle/input/public-test/data_test"
TEST_OUTPUT_DIR = "/kaggle/working/fas_test_frames"
os.makedirs(TEST_OUTPUT_DIR, exist_ok=True)

# Face detector (model_selection=0: short-range model for close-up faces)
mp_face_detection = mp.solutions.face_detection
detector = mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.5)

def extract_multiple_frame_faces(video_path, save_dir, n_parts=5):
    """Split the video into n_parts segments, grab the middle frame of each,
    and save the first face crop found. If no face is detected in any
    segment, fall back to saving the raw middle frame of the whole video.

    The output file keeps the full video name (including extension) plus
    a .jpg suffix, so it can be matched back to the metadata CSV.
    """
    os.makedirs(save_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    saved = False
    for i in range(n_parts):
        # Middle frame of segment i
        start_frame = int(i * total_frames / n_parts)
        end_frame = int((i + 1) * total_frames / n_parts)
        mid_frame = (start_frame + end_frame) // 2
        cap.set(cv2.CAP_PROP_POS_FRAMES, mid_frame)
        ret, frame = cap.read()
        if not ret:
            continue

        # Detect a face in this frame
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = detector.process(rgb)
        if results.detections:
            h, w, _ = frame.shape
            for det in results.detections:
                bbox = det.location_data.relative_bounding_box
                # Compute BOTH corners from the raw relative coords, then
                # clamp. The original added bbox.width to the already-clamped
                # x1, which shifts the crop to the right whenever MediaPipe
                # reports a slightly negative xmin/ymin (face at the edge).
                x1 = max(0, int(bbox.xmin * w))
                y1 = max(0, int(bbox.ymin * h))
                x2 = min(w, int((bbox.xmin + bbox.width) * w))
                y2 = min(h, int((bbox.ymin + bbox.height) * h))
                face = frame[y1:y2, x1:x2]
                if face.size != 0:
                    out_name = f"{os.path.basename(video_path)}.jpg"
                    cv2.imwrite(os.path.join(save_dir, out_name), face)
                    saved = True
                    break
        if saved:
            break

    # Fallback: no face found anywhere — save the raw middle frame
    if not saved:
        cap.set(cv2.CAP_PROP_POS_FRAMES, total_frames // 2)
        ret, frame = cap.read()
        if ret:
            out_name = f"{os.path.basename(video_path)}.jpg"
            cv2.imwrite(os.path.join(save_dir, out_name), frame)
    cap.release()
# -----------------------------
# Process all test videos
# -----------------------------
# Case-insensitive extension check: the original compared against the
# literal tuple (".mp4", ".avi", ".MOV"), silently skipping .mov, .MP4
# and .AVI files in every loop below.
VIDEO_EXTS = (".mp4", ".avi", ".mov")

for f in tqdm(os.listdir(TEST_INPUT_DIR), desc="Processing test videos"):
    if f.lower().endswith(VIDEO_EXTS):
        video_path = os.path.join(TEST_INPUT_DIR, f)
        extract_multiple_frame_faces(video_path, TEST_OUTPUT_DIR, n_parts=5)

print("Done! Mỗi video test đã được trích 1 frame khuôn mặt từ 5 phần.")

# Zip the extracted test frames for download
import shutil

shutil.make_archive("/kaggle/working/fas_test_frames", 'zip', "/kaggle/working/fas_test_frames")

# ===============================================================
# 2. Build the TRAIN dataset: face crops from every 10th frame
# ===============================================================
import os
import cv2
from tqdm import tqdm
import mediapipe as mp

# Input/Output path
INPUT_DIR = "/kaggle/input/fas-cv/publics_data_train/publics_data_train"
OUTPUT_DIR = "/kaggle/working/fas_frames"

# Face detector (re-created here so the cell runs standalone)
mp_face_detection = mp.solutions.face_detection
detector = mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.5)

def extract_faces(video_path, save_dir):
    """Crop every detected face from every 10th frame of `video_path`
    into `save_dir` as videoName_f<frame>_<faceIdx>.jpg."""
    os.makedirs(save_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    frame_count = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_count += 1

        # Subsample: keep only every 10th frame to limit dataset size
        if frame_count % 10 != 0:
            continue

        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = detector.process(rgb)

        if results.detections:
            h, w, _ = frame.shape
            for idx, det in enumerate(results.detections):
                bbox = det.location_data.relative_bounding_box
                x1 = max(0, int(bbox.xmin * w))
                y1 = max(0, int(bbox.ymin * h))
                x2 = min(w, x1 + int(bbox.width * w))
                y2 = min(h, y1 + int(bbox.height * h))
                face = frame[y1:y2, x1:x2]

                if face.size != 0:
                    out_name = f"{os.path.splitext(os.path.basename(video_path))[0]}_f{frame_count}_{idx}.jpg"
                    cv2.imwrite(os.path.join(save_dir, out_name), face)

    cap.release()

def process_dataset():
    """Walk the live/ and spoof/ subtrees and extract faces from every video,
    writing crops into OUTPUT_DIR/<label>/."""
    for label in ["live", "spoof"]:
        input_path = os.path.join(INPUT_DIR, label)
        for root, dirs, files in os.walk(input_path):
            for f in tqdm(files, desc=f"Processing {label}"):
                if f.lower().endswith(VIDEO_EXTS):
                    video_path = os.path.join(root, f)
                    save_dir = os.path.join(OUTPUT_DIR, label)
                    extract_faces(video_path, save_dir)

process_dataset()
print("Done! Đã trích nhiều ảnh khuôn mặt từ mỗi video.")

# Zip the training frames for download
import shutil

shutil.make_archive("/kaggle/working/fas_frames", 'zip', "/kaggle/working/fas_frames")

# ===============================================================
# Live-only pass: crop faces from EVERY frame (no subsampling)
# ===============================================================
LIVE_INPUT_DIR = "/kaggle/input/fas-cv/publics_data_train/publics_data_train/live"
LIVE_OUTPUT_DIR = "/kaggle/working/live"
os.makedirs(LIVE_OUTPUT_DIR, exist_ok=True)

# NOTE(review): the original re-defined `extract_faces` here with different
# behavior (every frame instead of every 10th), silently shadowing the
# version above. Renamed so both remain callable.
def extract_faces_every_frame(video_path, save_dir):
    """Crop every detected face from EVERY frame of `video_path`
    into `save_dir` as videoName_f<frame>.jpg."""
    os.makedirs(save_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    frame_count = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_count += 1

        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = detector.process(rgb)

        if results.detections:
            h, w, _ = frame.shape
            for det in results.detections:
                bbox = det.location_data.relative_bounding_box
                x1 = max(0, int(bbox.xmin * w))
                y1 = max(0, int(bbox.ymin * h))
                x2 = min(w, x1 + int(bbox.width * w))
                y2 = min(h, y1 + int(bbox.height * h))
                face = frame[y1:y2, x1:x2]

                if face.size != 0:
                    out_name = f"{os.path.splitext(os.path.basename(video_path))[0]}_f{frame_count}.jpg"
                    cv2.imwrite(os.path.join(save_dir, out_name), face)
    cap.release()

# Process the live subset only
for root, dirs, files in os.walk(LIVE_INPUT_DIR):
    for f in tqdm(files, desc="Processing live"):
        if f.lower().endswith(VIDEO_EXTS):
            video_path = os.path.join(root, f)
            extract_faces_every_frame(video_path, LIVE_OUTPUT_DIR)

print("Done! Đã trích tất cả frame có mặt cho tập live.")

shutil.make_archive("/kaggle/working/live", 'zip', "/kaggle/working/live")
|