Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 10M - 100M
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets>=3.2.0",
#     "pandas",  # imported directly below; also pulled in by datasets
# ]
# ///
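# Usage note (illustrative, assuming the script is saved as e.g. ncc_parliament.py):
# the inline metadata above lets `uv run ncc_parliament.py` resolve Python >=3.12 and
# the declared dependencies automatically before executing main() at the bottom.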
import inspect
import logging
import re
from collections import defaultdict
from collections.abc import Callable
from datetime import datetime
from pathlib import Path
import pandas as pd
from datasets import Dataset, load_dataset
logger = logging.getLogger(__name__)
########## edit manually for each source
hf_path = "NbAiLab/NCC"
source = "ncc_parliament"
doc_type_searchword = "parliament"
license = "other"
domain = "Legal"
num_proc = 10
##########
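# Note (added for clarity): `doc_type_searchword` is matched against the NCC `doc_type`
# column by `source_filter` below, so only parliament documents from NbAiLab/NCC are
# kept; `license` and `domain` are written into every output record by dynaword_format().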
today = datetime.now().strftime("%Y-%m-%d")
# stop words taken from spaCy
# https://github.com/explosion/spaCy/blob/master/spacy/lang/da/stop_words.py
# Source: Handpicked by Jens Dahl Møllerhøj.
spacy_sw = set(
"""
af aldrig alene alle allerede alligevel alt altid anden andet andre at
bag begge blandt blev blive bliver burde bør
da de dem den denne dens der derefter deres derfor derfra deri dermed derpå derved det dette dig din dine disse dog du
efter egen eller ellers en end endnu ene eneste enhver ens enten er et
flere flest fleste for foran fordi forrige fra få før først
gennem gjorde gjort god gør gøre gørende
ham han hans har havde have hel heller hen hende hendes henover her herefter heri hermed herpå hun hvad hvem hver hvilke hvilken hvilkes hvis hvor hvordan hvorefter hvorfor hvorfra hvorhen hvori hvorimod hvornår hvorved
i igen igennem ikke imellem imens imod ind indtil ingen intet
jeg jer jeres jo
kan kom kommer kun kunne
lad langs lav lave lavet lidt lige ligesom lille længere
man mange med meget mellem men mens mere mest mig min mindre mindst mine mit må måske
ned nemlig nogen nogensinde noget nogle nok nu ny nyt nær næste næsten
og også om omkring op os over overalt
på
samme sammen selv selvom senere ses siden sig sige skal skulle som stadig synes syntes så sådan således
temmelig tidligere til tilbage tit
ud uden udover under undtagen
var ved vi via vil ville vore vores vær være været
øvrigt
""".split()
)
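# Note (added for clarity): this stop word set is only used by `stop_word_filter` below,
# which keeps a document only if at least two of its tokens are Danish stop words.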
def word_tokenize(text: str) -> list[str]:
    """
    Tokenizes a string into words, splitting on whitespace and punctuation.

    Example:
        >>> word_tokenize("Hello, world!")
        ['Hello', ',', 'world', '!']
        >>> word_tokenize("This is a test.")
        ['This', 'is', 'a', 'test', '.']
        >>> word_tokenize("Many spaces between words.")
        ['Many', 'spaces', 'between', 'words', '.']
    """
    punkt = [",", ".", "!", "?", ":", ";", "(", ")", "[", "]", "{", "}", '"', "'"]
    for p in punkt:
        text = text.replace(p, f" {p} ")
    return text.split()
def alpha_ratio(text: str | list[str]) -> float:
    """
    If the text is not already split into words, splits it with word_tokenize().
    Calculates the ratio of words consisting only of alphabetical characters.
    """
    if isinstance(text, str):
        text = word_tokenize(text)
    ratio = 1 - sum(not word.isalpha() for word in text) / len(text)
    return ratio
def count_min_target(given_list: list, target_list: list, min_count: int) -> bool:
    """
    Iterates through the given list until at least `min_count` items match any item
    from the target list.
    """
    c_item = 0
    given_list_iter = iter(given_list)
    while c_item < min_count:
        try:
            current_item = next(given_list_iter)
            if current_item in target_list:
                c_item += 1
        except StopIteration:
            break
    return c_item == min_count
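# Illustrative example (not part of the original script):
# count_min_target(word_tokenize("og det var en lang dag"), spacy_sw, 2) -> True,
# since "og", "det", "var" and "en" are all in the stop word set above.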
def dynaword_format(
    meta_document: dict[str, str | int],
) -> dict[str, str | dict[str, str]]:
    """Reformats data to fit dynaword standards"""
    text = meta_document.get("text")
    id = meta_document.get("id")
    date = meta_document.get("publish_year")
    doc_type = meta_document.get("doc_type")
    newdata = {
        "text": text,
        "source": source,
        "id": id,
        "added": today,
        "created": f"{date}-01-01, {date}-12-31",
        "license": license,
        "domain": domain,
        "metadata": {
            "source-pretty": f"Norwegian Colossal Corpus ({re.sub('ncc_', '', source)})",
            "source-type": doc_type,
        },
    }
    return newdata
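# Illustrative shape of the record produced (placeholder values, not real data):
# {"text": "...", "source": "ncc_parliament", "id": "...", "added": "<today>",
#  "created": "<year>-01-01, <year>-12-31", "license": "other", "domain": "Legal",
#  "metadata": {"source-pretty": "Norwegian Colossal Corpus (parliament)",
#               "source-type": "parliament"}}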
def log_pre_filter_lang_data(
    lang_metadata: dict[str, dict[str, int]], filtered_ds: Dataset
):
    """
    Function for logging changes in a large dataset,
    based on the metadata collected before filtering and the filtered dataset;
    used for language filtering.
    """
    all_docs = sum(lang_metadata[source].values())
    no_docs = lang_metadata[source].get("no")
    da_docs = lang_metadata[source].get("da")
    no_perc = round(no_docs / all_docs * 100, 4)
    da_perc = round(da_docs / all_docs * 100, 4)
    f_length = len(filtered_ds)
    f_perc = round(f_length / da_docs * 100, 4)
    f_total_perc = round(f_length / all_docs * 100, 4)
    logger.info(f"Documents of {source}:")
    logger.info(f"NO: {no_docs}, {no_perc}% ; DA: {da_docs}, {da_perc}%")
    logger.info("After language confidence filtering:")
    logger.info(f"DA: {f_length}, lost: {100 - f_perc}%")
    logger.info("Total document change:")
    logger.info(f"{all_docs} -> {f_length}, loss: {100 - f_total_perc}%")
def get_var_name(var):
    """outputs the variable name"""
    callers_local_vars = inspect.currentframe().f_back.f_back.f_back.f_locals.items()
    return [var_name for var_name, var_val in callers_local_vars if var_val is var]
def filter_with_changelog(
    filter_func: Callable[[Dataset], Dataset], dataset: Dataset
) -> Dataset:
    """
    Takes a filter function and a dataset,
    counts text documents and tokens before and after filtering,
    and saves the filtering changes to the log.
    """
    filter_name = get_var_name(filter_func)
    pre_filter_docs = len(dataset)
    pre_filter_tokens = sum(len(word_tokenize(i["text"])) for i in dataset)
    dataset = dataset.filter(filter_func, num_proc=num_proc)
    post_filter_docs = len(dataset)
    post_filter_tokens = sum(len(word_tokenize(i["text"])) for i in dataset)
    tokens_removed = round((1 - (post_filter_tokens / pre_filter_tokens)) * 100, 2)
    docs_removed = round((1 - (post_filter_docs / pre_filter_docs)) * 100, 2)
    logger.info(f"FILTER: {filter_name}")
    logger.info(
        f"TOKENS: pre: {pre_filter_tokens}, post: {post_filter_tokens}, loss: {tokens_removed}%"
    )
    logger.info(
        f"DOCUMENTS: pre: {pre_filter_docs}, post: {post_filter_docs}, loss: {docs_removed}%"
    )
    return dataset
source_filter = lambda ds: doc_type_searchword in ds["doc_type"] # noqa
length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10 # noqa
too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5 # noqa
alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7 # noqa
stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2) # noqa
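# Note (added for clarity): the thresholds above are heuristics used by this script.
# A document is kept if it has at least 10 tokens, at least 70% purely alphabetic
# tokens, and at least 2 Danish stop words. `too_long_filter` only flags documents
# above ~1e5 tokens for logging in quality_checks(); it does not remove them.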
samples_pr_source: dict = defaultdict(lambda: defaultdict(int))
def language_filter_with_desc_stats(ds: Dataset) -> bool:
    """
    Language filtering in a streamed dataset while logging all languages
    """
    s = source
    language = ds["lang_fasttext"]
    samples_pr_source[s][language] += 1
    language_filter = (
        ds["lang_fasttext"] == "da" and float(ds["lang_fasttext_conf"]) >= 0.75
    )
    return language_filter
def quality_checks(ds: Dataset) -> Dataset:
    """
    Quality checks for:
        - no duplicate ids
        - no duplicate texts
        - logs texts > 1e5 tokens
    """
    # convert to pandas for the drop_duplicates()
    df = pd.DataFrame(ds)
    # remove duplicate ids
    len_df = len(df)
    df = df.drop_duplicates(subset=["id"])
    logger.info(f"Removed {len_df - len(df)} duplicate ids")
    # remove rows with duplicate text
    len_df = len(df)
    df = df.drop_duplicates(subset=["text"])
    logger.info(f"Removed {len_df - len(df)} rows with duplicate text")
    # reconvert and remove index
    ds_f = Dataset.from_pandas(df, preserve_index=False)
    try:
        ds_f["__index_level_0__"]
        ds_f = ds_f.remove_columns("__index_level_0__")
    except KeyError:
        pass
    assert len(set(ds_f["id"])) == len(ds_f), "IDs are not unique"
    assert len(set(ds_f["text"])) == len(ds_f), "Texts are not unique"
    long_texts = ds_f.filter(too_long_filter, num_proc=None)
    if len(long_texts["id"]) > 0:
        logger.info(f"{len(long_texts['id'])} Long texts (>~1e5 tokens) found")
        for id in long_texts["id"]:
            logger.info(f"id: {id}")
    else:
        logger.info("No long texts (>~1e5 tokens) found")
    return ds_f
def main():
    # load all splits
    logger.info(f"Loading data from: {hf_path}")
    danish_data = load_dataset(
        hf_path, streaming=False, split="train+validation", num_proc=num_proc
    )
    danish_data.cleanup_cache_files()
    # filter by metadata
    logger.info(f"Processing source: {source}")
    danish_data = danish_data.filter(source_filter, num_proc=num_proc)
    logger.info("Processing language")
    danish_data = danish_data.filter(language_filter_with_desc_stats, num_proc=None)
    # log language changes
    log_pre_filter_lang_data(samples_pr_source, danish_data)
    # convert to dynaword format
    danish_data = danish_data.map(dynaword_format)
    danish_data = danish_data.select_columns(
        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
    )
    # filter and log changes
    danish_data = filter_with_changelog(length_filter, danish_data)
    danish_data = filter_with_changelog(alpha_filter, danish_data)
    danish_data = filter_with_changelog(stop_word_filter, danish_data)
    # Quality checks
    danish_data = quality_checks(danish_data)
    ### saving
    save_path = Path(__file__).parent / f"{source}.parquet"
    danish_data.to_parquet(save_path)
if __name__ == "__main__":
    log_path = Path(__file__).parent / f"{source}.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()