# hk_content_corpus/dataset.py
import datasets
_DESCRIPTION = "Cleaned Hong Kong Cantonese / Traditional Chinese corpora from multiple sources."
_HOMEPAGE = "https://huggingface.co/datasets/SolarisCipher/hk_content_corpus"
# Mapping of split names to the corresponding corpus files in this repo.
_CORPUS_FILES = {
    "appledaily_article": "appledaily_article_dedup.txt",
    "hkcitizenmedia_article": "hkcitizenmedia_article_dedup.txt",
    "hkcnews_article": "hkcnews_article_dedup.txt",
    "inmedia_article": "inmedia_article_dedup.txt",
    "lihkg_posts": "lihkg_posts_dedup_demoji_128.txt",
    "openrice_review": "openrice_review_dedup_demoji.txt",
    "thestandnews_article": "thestandnews_article_dedup.txt",
    "wiki_hk": "wiki_hk.txt",
}


class HKContentCorpus(datasets.GeneratorBasedBuilder):
    """Loading script that exposes each cleaned corpus as its own split."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation="",
        )
    def _split_generators(self, dl_manager):
        # Resolve each corpus file to a local path via the download manager
        # so the script also works when loaded from the Hub, then expose
        # every corpus as a named split.
        data_files = dl_manager.download(_CORPUS_FILES)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": data_files[split_name]},
            )
            for split_name in _CORPUS_FILES
        ]
    def _generate_examples(self, filepath):
        # Documents are stored as blank-line-separated blocks of non-empty
        # lines; each block is joined and yielded as one example.
        with open(filepath, encoding="utf-8") as f:
            buffer = []
            example_id = 0
            for line in f:
                line = line.strip()
                if line:
                    buffer.append(line)
                else:
                    if buffer:
                        yield example_id, {"text": "\n".join(buffer)}
                        example_id += 1
                        buffer = []
            # Flush the last document if the file does not end in a blank line.
            if buffer:
                yield example_id, {"text": "\n".join(buffer)}
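

# Usage sketch (not part of the loading script itself): assuming this file is
# hosted as the loading script of the SolarisCipher/hk_content_corpus repo
# named in _HOMEPAGE, each corpus loads as a named split. Recent versions of
# the `datasets` library additionally require trust_remote_code=True for
# script-based datasets:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("SolarisCipher/hk_content_corpus", split="wiki_hk",
#                       trust_remote_code=True)
#     print(ds[0]["text"])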