Update UFSAC.py

UFSAC.py CHANGED
@@ -2,6 +2,9 @@ import datasets
 import xml.etree.cElementTree as ET
 from glob import glob
 import os
+import gc
+# from memory_profiler import profile
+import objgraph

 _UFSAC_FILE = 'ufsac-public-2.1.tar.xz'

@@ -25,24 +28,30 @@ class UFSAC(datasets.GeneratorBasedBuilder):

     def _split_generators(self, dl_manager):
         data_dir = dl_manager.download_and_extract(_UFSAC_FILE)
-        return datasets.SplitGenerator(name = datasets.Split.TRAIN, gen_kwargs={'data_dir': data_dir})
+        return datasets.SplitGenerator(name = datasets.Split.TRAIN, gen_kwargs={'data_dir': data_dir}),

     def _generate_examples(self, data_dir):
         used_sents = set()
         count = 0
-        for file in glob(os.path.join(data_dir, '
-            tree = ET.parse(file)
-            root = tree.getroot()
-            for sent in root.iter('sentence'):
+        for file in glob(os.path.join(data_dir, 'ufsac-public-2.1/*.xml')):
+            context = ET.iterparse(file, events=('start', 'end'))
+            event, root = next(context)
+            for event, element in context:
+                if element.tag == 'paragraph':
+                    para = element
+                if element.tag != 'sentence':
+                    continue
+                if event == 'end' and element.tag == 'sentence':
+                    para.remove(element)
+                sent = element
                 words = sent.findall('word')
-                tokens = [token.attrib['surface_form'] for token in words]
-                sent_key = ''.join(tokens)
-                if sent_key not in used_sents:
-                    used_sents.add(sent_key)
-                else:
+                tokens = [token.attrib['surface_form'] if 'surface_form' in token.attrib else '_' for token in words]
+                sent_key = ''.join([token.lower() for token in tokens])
+                if sent_key in used_sents:
                     continue
-                lemmas = [token.attrib['lemma'] for token in words]
-                pos_tags = [token.attrib['pos'] for token in words]
+                used_sents.add(sent_key)
+                lemmas = [token.attrib['lemma'] if 'lemma' in token.attrib else '_' for token in words]
+                pos_tags = [token.attrib['pos'] if 'pos' in token.attrib else '_' for token in words]
                 for index, word in enumerate(words):
                     if 'wn30_key' in word.attrib:
                         senses = word.attrib['wn30_key'].split(';')
@@ -50,6 +59,7 @@ class UFSAC(datasets.GeneratorBasedBuilder):
                             'tokens': tokens,
                             'lemmas': lemmas,
                             'pos_tags': pos_tags,
-                            '
+                            'target_idx': index,
                             'sense_keys': senses
                         }
+                        count+=1
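The heart of this change is the move from building a full ElementTree per file to streaming it with ET.iterparse, detaching each sentence from its enclosing paragraph once it has been consumed so the in-memory tree stays small. A minimal, self-contained sketch of that pattern, assuming the paragraph > sentence > word nesting the diff relies on (the helper name is illustrative, not part of UFSAC.py):

    import xml.etree.cElementTree as ET

    def iter_sentences(path):
        """Stream fully parsed <sentence> elements with bounded memory."""
        parent = None
        for event, element in ET.iterparse(path, events=('start', 'end')):
            if event == 'start' and element.tag == 'paragraph':
                parent = element                # remember the enclosing node
            elif event == 'end' and element.tag == 'sentence':
                yield element                   # children exist only at 'end'
                if parent is not None:
                    parent.remove(element)      # detach so it can be GC'd

Acting on 'end' events is the important detail: at a sentence's 'start' event its <word> children have not been parsed yet, so findall('word') would return an empty list.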
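The new gc and objgraph imports (and the commented-out memory_profiler line) read as instrumentation left over from hunting the leak. As a hedged illustration of how that tooling is typically pointed at a generator; none of this appears in the commit itself:

    import gc
    import objgraph

    def drain_with_growth_report(gen, every=10000):
        """Consume a generator, reporting object-count growth periodically.

        `gen` and `every` are hypothetical names used only for this sketch.
        """
        for i, _ in enumerate(gen):
            if i % every == 0:
                gc.collect()                   # collect cycles before measuring
                objgraph.show_growth(limit=5)  # types whose live count grew most

If sentence elements were never detached from the tree, Element would show up here with a steadily climbing count.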
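For context, datasets.GeneratorBasedBuilder expects _split_generators to return a sequence of SplitGenerator objects (the trailing comma added to the return statement turns the single SplitGenerator into a one-element tuple, which satisfies that contract) and _generate_examples to yield unique (key, example) pairs, which is what the running count supplies. A skeleton of that contract follows; the feature types are an assumption, since the diff does not show _info:

    import datasets

    class UFSACSkeleton(datasets.GeneratorBasedBuilder):
        """Minimal shape of the builder; only the parts the diff relies on."""

        def _info(self):
            return datasets.DatasetInfo(
                features=datasets.Features({
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'lemmas': datasets.Sequence(datasets.Value('string')),
                    'pos_tags': datasets.Sequence(datasets.Value('string')),
                    'target_idx': datasets.Value('int32'),
                    'sense_keys': datasets.Sequence(datasets.Value('string')),
                }),
            )

        def _split_generators(self, dl_manager):
            data_dir = dl_manager.download_and_extract('ufsac-public-2.1.tar.xz')
            # One-element tuple (note the comma): the builder accepts any
            # sequence of SplitGenerator objects.
            return datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={'data_dir': data_dir}),

        def _generate_examples(self, data_dir):
            count = 0
            for example in ():            # stand-in for the real parsing loop
                yield count, example      # keys must be unique within a split
                count += 1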