splits

- .verify_dataset_tags.py.un~ +0 -0
- chigh/data.jsonl +3 -0
- clow/data.jsonl +3 -0
- dhigh/data.jsonl +3 -0
- dlow/data.jsonl +3 -0
- lhigh/data.jsonl +3 -0
- llow/data.jsonl +3 -0
- verify_dataset_tags.py +91 -0
.verify_dataset_tags.py.un~
ADDED
Binary file (946 Bytes).

chigh/data.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67d59605e377159e4e4f4f361f35d4115693b9aa757f60532fb3dd772d7acc4a
size 278266230

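Each data.jsonl added in this commit is a Git LFS pointer file (version, oid, size) rather than the JSONL payload itself; the actual data is fetched separately, for example with git lfs pull. A minimal integrity-check sketch, assuming a local checkout in which the chigh LFS object has been materialized:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file in chunks and return its hex-encoded SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Should equal the oid recorded in the pointer above once the real file is present locally.
assert sha256_of("chigh/data.jsonl") == "67d59605e377159e4e4f4f361f35d4115693b9aa757f60532fb3dd772d7acc4a"
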
clow/data.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4585299221e8425058d2ecdd8d776e2878ee5bbf61e854b93cb2a858a95e6b0d
size 255857908

dhigh/data.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4834f60936df97aeba025513b5e612ef2711018c8536401f0e435161dbfc0a1c
size 268788868

dlow/data.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e14769dfb60e9cdcc6759c531eb694e85e285fa96d326675313772e211b0d18f
size 265335270

lhigh/data.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:420dae72eb4f2469c560dd199d4b3659c47bfb5fe3f60dbec2f5d23d05bd33ee
size 268852050

llow/data.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96877ff4d61118c7fe0d7534c64f4bd54d803d4a29fd185d59cd491de47f4283
size 265272088

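All six splits (chigh, clow, dhigh, dlow, lhigh, llow) share the same layout: one JSON object per line, which the verify_dataset_tags.py script below expects to carry "nb" and "nn" text fields. A minimal loading sketch, assuming the LFS data has been pulled and the field names match what the script reads:

import json

def iter_split(path: str):
    """Yield one record (a dict) per non-empty line of a JSONL split."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

# Hypothetical quick look at the first record of one split:
first = next(iter_split("chigh/data.jsonl"))
print(first.get("nb", "")[:80])
print(first.get("nn", "")[:80])
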
verify_dataset_tags.py
ADDED
@@ -0,0 +1,91 @@
import json
import re
from typing import List
from jiwer import wer  # Make sure jiwer is installed (pip install jiwer)
from tqdm import tqdm
import argparse

def extract_html_tags(text: str) -> List[str]:
    """Extracts HTML-like tags from a given text string."""
    return re.findall(r"</?[^>]+?>", text)

def tags_to_str(tags: List[str]) -> str:
    """Converts a list of tag strings into a single space-separated string."""
    return " ".join(tags)

def calculate_tag_reward(source_text: str, target_text: str) -> float:
    """
    Calculates the tag-preservation reward between a source and a target text.
    The reward is 1.0 when the tag WER is 0, otherwise 1.0 - tag WER (clipped to the [0, 1] range).
    """
    source_tags_list = extract_html_tags(source_text)
    target_tags_list = extract_html_tags(target_text)

    source_tags_str = tags_to_str(source_tags_list)
    target_tags_str = tags_to_str(target_tags_list)

    if source_tags_list:
        tag_error_rate = wer(source_tags_str, target_tags_str)
    else:
        # jiwer cannot score an empty reference string: no tags in the source counts as a
        # perfect match only when the target also has none, otherwise as a full mismatch.
        tag_error_rate = 0.0 if not target_tags_list else 1.0

    # Ensure reward is between 0 and 1
    reward = 1.0 - min(1.0, max(0.0, tag_error_rate))

    if reward < 1.0:  # If there's any error, print details
        print(f"\n--- Mismatch Found (Reward: {reward:.4f}, Tag WER: {tag_error_rate:.4f}) ---")
        print(f"Source Text (snippet): {source_text[:100]}...")
        print(f"Target Text (snippet): {target_text[:100]}...")
        print(f"Source Tags (str): '{source_tags_str}'")
        print(f"Target Tags (str): '{target_tags_str}'")
        # For more detailed debugging of WER, you can print the lists too:
        # print(f"Source Tags (list): {source_tags_list}")
        # print(f"Target Tags (list): {target_tags_list}")
        print("----------------------------------------------------")

    return reward

def main(jsonl_file_path: str):
    non_one_reward_count = 0
    total_lines_processed = 0

    print(f"Processing file: {jsonl_file_path}")
    with open(jsonl_file_path, 'r', encoding='utf-8') as f:
        # First count lines for tqdm progress bar
        num_lines = sum(1 for line in f)
        f.seek(0)  # Reset file pointer to the beginning

        for line in tqdm(f, total=num_lines, desc="Verifying dataset tags"):
            try:
                data = json.loads(line)
                nb_text = data.get("nb")
                nn_text = data.get("nn")

                if nb_text is None or nn_text is None:
                    print(f"Warning: Missing 'nb' or 'nn' field in line: {line.strip()}")
                    continue

                total_lines_processed += 1
                reward = calculate_tag_reward(nb_text, nn_text)

                if reward < 1.0:
                    non_one_reward_count += 1

            except json.JSONDecodeError:
                print(f"Warning: Could not decode JSON from line: {line.strip()}")
            except Exception as e:
                print(f"An error occurred processing line: {line.strip()} - Error: {e}")

    print("\n--- Verification Complete ---")
    print(f"Total lines processed: {total_lines_processed}")
    print(f"Number of lines where tag reward (nb vs nn) was NOT 1.0: {non_one_reward_count}")
    if total_lines_processed > 0:
        percentage_mismatch = (non_one_reward_count / total_lines_processed) * 100
        print(f"Percentage of lines with tag mismatches: {percentage_mismatch:.2f}%")
    else:
        print("No lines were processed.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Verify HTML tag consistency between 'nb' and 'nn' fields in a JSONL dataset.")
    parser.add_argument("jsonl_file", type=str, help="Path to the .jsonl dataset file.")
    args = parser.parse_args()

    main(args.jsonl_file)
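
The script is run against one split at a time, for example python verify_dataset_tags.py chigh/data.jsonl (with jiwer and tqdm installed and the LFS data pulled). For intuition about the reward it reports, here is a small self-contained check of the same tag-WER logic on hypothetical strings, not taken from the dataset:

import re
from jiwer import wer

def extract_html_tags(text: str):
    # Same pattern as in verify_dataset_tags.py above.
    return re.findall(r"</?[^>]+?>", text)

source = "<p>Hei <b>verden</b></p>"  # hypothetical nb-side text
target = "<p>Hei verden</p>"         # hypothetical nn-side text that dropped the <b> tags

src_tags = " ".join(extract_html_tags(source))  # "<p> <b> </b> </p>"
tgt_tags = " ".join(extract_html_tags(target))  # "<p> </p>"

tag_wer = wer(src_tags, tgt_tags)           # 2 deletions out of 4 reference tags -> 0.5
reward = 1.0 - min(1.0, max(0.0, tag_wer))  # 0.5, so this pair would be reported as a mismatch
print(f"tag WER = {tag_wer}, reward = {reward}")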