"""
Convert the rich, unambiguous, standard, generic, extendable TEI XML format of GROBID and Pub2TEI into
something similar to CORD-19 degraded JSON format (let's call it a working format)
Original version: https://github.com/howisonlab/softcite-dataset/blob/master/code/corpus/TEI2LossyJSON.py
"""
import logging
import os
import uuid
from collections import OrderedDict
from concurrent.futures import ProcessPoolExecutor, as_completed
import html
import re
from pathlib import Path
from typing import Dict, Union, BinaryIO, Iterator
import dateparser
from bs4 import BeautifulSoup, Tag
# Configure module-level logger
logger = logging.getLogger(__name__)
logger.propagate = False  # Prevent propagation to avoid duplicate logs
# With propagation disabled, this logger needs its own handler, otherwise its
# messages would be silently dropped; only attach one if nothing is set up yet
if not logger.handlers:
    _handler = logging.StreamHandler()
    _handler.setFormatter(logging.Formatter("%(levelname)s:%(name)s:%(message)s"))
    logger.addHandler(_handler)
    logger.setLevel(logging.INFO)
class TEI2LossyJSONConverter:
"""Converter that can operate in two modes:
- non-streaming (backwards-compatible): returns a full document dict for a single file
- streaming: yields passages one by one to keep memory usage low when processing many files
The class also provides utilities to process a directory of TEI files in parallel and in batches.
"""
def __init__(self, validate_refs: bool = True):
self.validate_refs = validate_refs
def convert_tei_file(self, tei_file: Union[Path, BinaryIO], stream: bool = False):
"""Backward-compatible function. If stream=True returns a generator that yields passages (dicts).
If stream=False returns the full document dict (same shape as original function).
"""
# Load with BeautifulSoup but avoid building huge structures when streaming
if hasattr(tei_file, 'read'):
# File-like object (BinaryIO/StringIO)
content = tei_file.read()
if isinstance(content, bytes):
content = content.decode('utf-8')
else:
# Path-like object
with open(tei_file, 'r', encoding='utf-8') as f:
content = f.read()
soup = BeautifulSoup(content, 'xml')
if soup.TEI is None:
logger.warning("%s: The TEI file is not well-formed or empty. Skipping the file.", tei_file)
return None if not stream else iter(())
# Determine passage level early
passage_level = "sentence" if len(soup.find_all("s")) > len(soup.find_all("p")) else "paragraph"
if stream:
# Use generator that yields passages as they are formatted
return self._iter_passages_from_soup(soup, passage_level)
else:
# Build the full document (backward compatible)
document = OrderedDict()
document['level'] = passage_level
biblio_structure = OrderedDict()
document['biblio'] = biblio_structure
text_structure = []
document['body_text'] = text_structure
figures_and_tables = []
document['figures_and_tables'] = figures_and_tables
references_structure = []
document['references'] = references_structure
# Populate header and body using the same traversal used by the generator
for child in soup.TEI.children:
if child.name == 'teiHeader':
# Header parsing mirrors original behavior
title_node = child.find("title", attrs={"type": "main", "level": "a"})
biblio_structure["title"] = title_node.text if title_node else ""
biblio_structure["authors"] = list(
filter(
lambda x: x.strip() != "",
[
" ".join(
[
author.find('forename').text if author.find('forename') is not None else "",
author.find('surname').text if author.find('surname') is not None else ""
]
) for author in child.find_all("author")
]
)
)
doi_node = child.find("idno", type="DOI")
if doi_node:
biblio_structure['doi'] = doi_node.text
md5_node = child.find("idno", type="MD5")
if md5_node:
biblio_structure['hash'] = md5_node.text
pmc_idno = child.find("idno", type="PMC")
if pmc_idno:
biblio_structure['pmc'] = pmc_idno.text
pub_date = child.find("date", attrs={"type": "published"})
if pub_date:
iso_date = pub_date.attrs.get("when")
if iso_date:
biblio_structure["publication_date"] = iso_date
try:
year = dateparser.parse(iso_date).year
biblio_structure["publication_year"] = year
except Exception:
pass
publisherStmt = child.find("publicationStmt")
publisher_node = publisherStmt.find("publisher") if publisherStmt else None
if publisher_node:
biblio_structure["publisher"] = publisher_node.text
journal_node = child.find("title", attrs={"type": "main", "level": "j"})
if journal_node:
biblio_structure["journal"] = journal_node.text
journal_abbr_node = child.find("title", attrs={"type": "abbr", "level": "j"})
if journal_abbr_node:
biblio_structure["journal_abbr"] = journal_abbr_node.text
abstract_node = child.find("abstract")
if abstract_node:
abstract_paragraph_nodes = abstract_node.find_all("p")
if passage_level == "sentence":
biblio_structure["abstract"] = [
[
{
"id": sentence.get("xml:id") if sentence.has_attr("xml:id") else id,
"text": sentence.text,
"coords": [
box_to_dict(coord.split(","))
for coord in sentence['coords'].split(";")
] if sentence.has_attr("coords") else [],
"refs": get_refs_with_offsets(sentence)
}
for id, sentence in enumerate(paragraph.find_all("s"))
]
for paragraph in abstract_paragraph_nodes
]
else:
biblio_structure["abstract"] = [
{
"id": id,
"text": paragraph.text,
"coords": [
box_to_dict(coord.split(","))
for coord in paragraph['coords'].split(";")
] if paragraph.has_attr("coords") else [],
"refs": get_refs_with_offsets(paragraph)
}
for id, paragraph in enumerate(abstract_paragraph_nodes)
]
elif child.name == 'text':
# Collect body_text using the generator to avoid duplicating logic
for passage in self._iter_passages_from_soup_for_text(child, passage_level):
text_structure.append(passage)
# Collect figures and tables (kept in memory as they should be relatively small)
figures_and_tables_xml = child.find_all("figure")
for item in figures_and_tables_xml:
item_id = item.attrs.get("xml:id") if item.has_attr("xml:id") else get_random_id()
desc = item.figDesc
head = item.head
label = item.label
if item.has_attr("type") and item.attrs["type"] == "table":
json_content = xml_table_to_json(item.table) if item.table else None
note = item.note
figures_and_tables.append(
{
"id": item_id,
"label": label.text if label else "",
"head": head.text if head else "",
"type": "table",
"desc": desc.text if desc else "",
"content": json_content,
"note": note.text if note else "",
"coords": [
box_to_dict(coord.split(","))
for coord in item['coords'].split(";")
] if item.has_attr("coords") else []
}
)
else:
graphic_coords = item.graphic.attrs['coords'] if item.graphic and item.graphic.has_attr(
"coords") else None
figures_and_tables.append(
{
"id": item_id,
"label": label.text if label else "",
"head": head.text if head else "",
"type": "figure",
"desc": desc.text if desc else "",
"note": item.note.text if item.note else "",
"coords": [
box_to_dict(coord.split(","))
for coord in graphic_coords.split(";")
] if graphic_coords else []
}
)
# Extract references from listBibl with comprehensive processing
list_bibl = soup.find("listBibl")
if list_bibl:
for i, bibl_struct in enumerate(list_bibl.find_all("biblStruct"), 1):
ref_data = self._extract_comprehensive_reference_data(bibl_struct, i)
if ref_data:
references_structure.append(ref_data)
return document
def _extract_comprehensive_reference_data(self, bibl_struct: Tag, index: int) -> Dict:
"""
Extract detailed bibliographic information from TEI biblStruct elements.
Implements comprehensive parsing for all standard TEI bibliographic components.
"""
citation_data = OrderedDict()
citation_data['id'] = f"b{index}"
# Extract reference identifier if present
xml_id = bibl_struct.get('{http://www.w3.org/XML/1998/namespace}id') or bibl_struct.get('xml:id')
if xml_id:
citation_data['target'] = xml_id
# Initialize containers for different types of content
contributor_list = []
publication_metadata = {}
identifier_collection = {}
supplementary_info = []
link_references = []
# 1. Process analytic level information (article/conference paper content)
analytic_section = bibl_struct.find("analytic")
if analytic_section:
# Extract title information from analytic level
analytic_titles = analytic_section.find_all("title")
for title_element in analytic_titles:
title_level = title_element.get("level", "")
title_content = self._clean_text(title_element.get_text())
if title_content:
if title_level == "a":
citation_data['title'] = title_content
elif title_level == "j":
publication_metadata['journal'] = title_content
# Extract author information from analytic level
analytic_authors = analytic_section.find_all("author")
for author_element in analytic_authors:
author_info = self._extract_contributor_details(author_element)
if author_info:
contributor_list.append(author_info)
# Handle reference elements within analytic section
analytic_ref = analytic_section.find("ref")
if analytic_ref:
ref_content = self._clean_text(analytic_ref.get_text())
if ref_content:
citation_data['reference_text'] = ref_content
if analytic_ref.get('target'):
citation_data['reference_uri'] = analytic_ref.get('target')
# Process identifier elements in analytic section
analytic_identifiers = analytic_section.find_all("idno")
for identifier_element in analytic_identifiers:
self._process_identifier_element(identifier_element, identifier_collection, 'analytic')
# Process pointer elements in analytic section
analytic_pointers = analytic_section.find_all("ptr")
for pointer_element in analytic_pointers:
self._process_pointer_element(pointer_element, link_references)
# 2. Process monograph level information (book/journal publication details)
monograph_section = bibl_struct.find("monogr")
if monograph_section:
# Extract title information from monograph level
monograph_titles = monograph_section.find_all("title")
for title_element in monograph_titles:
title_level = title_element.get("level", "")
title_content = self._clean_text(title_element.get_text())
if title_content:
if title_level == "m" and not citation_data.get('title'):
citation_data['title'] = title_content # Book title
elif title_level == "j" and not publication_metadata.get('journal'):
publication_metadata['journal'] = title_content
elif title_level == "s":
publication_metadata['series'] = title_content
# Extract contributors from monograph level (authors/editors)
monograph_contributors = monograph_section.find_all(["author", "editor"])
for contributor_element in monograph_contributors:
contributor_info = self._extract_contributor_details(contributor_element)
if contributor_info:
if contributor_element.name == "editor":
contributor_info['role'] = 'editor'
contributor_list.append(contributor_info)
# Extract imprint information (publication details)
imprint_section = monograph_section.find("imprint")
if imprint_section:
self._process_imprint_details(imprint_section, publication_metadata)
# Process identifier elements in monograph section
monograph_identifiers = monograph_section.find_all("idno")
for identifier_element in monograph_identifiers:
self._process_identifier_element(identifier_element, identifier_collection, 'monograph')
# Process pointer elements in monograph section
monograph_pointers = monograph_section.find_all("ptr")
for pointer_element in monograph_pointers:
self._process_pointer_element(pointer_element, link_references)
# 3. Process series level information
series_section = bibl_struct.find("series")
if series_section:
series_titles = series_section.find_all("title")
for title_element in series_titles:
title_content = self._clean_text(title_element.get_text())
if title_content and not publication_metadata.get('series'):
publication_metadata['series'] = title_content
series_contributors = series_section.find_all(["author", "editor"])
for contributor_element in series_contributors:
contributor_info = self._extract_contributor_details(contributor_element)
if contributor_info:
contributor_info['role'] = contributor_element.name
contributor_list.append(contributor_info)
# 4. Process top-level identifiers within biblStruct
top_level_identifiers = bibl_struct.find_all("idno")
for identifier_element in top_level_identifiers:
self._process_identifier_element(identifier_element, identifier_collection, 'biblstruct')
# 5. Process notes and supplementary information
note_elements = bibl_struct.find_all("note")
for note_element in note_elements:
note_content = self._clean_text(note_element.get_text())
note_type = note_element.get("type", "")
if note_content:
if note_type == "raw_reference":
citation_data['raw_reference'] = note_content
elif note_type:
citation_data[f'note_{note_type}'] = note_content
else:
supplementary_info.append(note_content)
# 6. Process pointer elements at biblStruct level
biblstruct_pointers = bibl_struct.find_all("ptr")
for pointer_element in biblstruct_pointers:
self._process_pointer_element(pointer_element, link_references)
# 7. Compile extracted information into final citation structure
self._compile_citation_data(citation_data, contributor_list, publication_metadata,
identifier_collection, supplementary_info, link_references)
# Ensure we have meaningful content before returning
if self._validate_citation_content(citation_data):
return citation_data
return None
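    # For reference, a typical citation dict returned by the method above might look
    # like this (values are illustrative, keys follow the compilation logic above):
    #
    #   {"id": "b3", "target": "b2", "title": "...", "authors": ["A. Smith", "B. Jones"],
    #    "journal": "...", "volume": "12", "pages": "100-110", "year": 2019,
    #    "doi": "10.1234/example", "raw_reference": "Smith A, Jones B. ..."}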
def _extract_contributor_details(self, contributor_element: Tag) -> Dict:
"""Extract detailed information about authors, editors, and other contributors."""
contributor_info = {}
# Extract name components
surname_element = contributor_element.find("surname")
forename_element = contributor_element.find("forename")
if surname_element and forename_element:
surname_text = self._clean_text(surname_element.get_text())
forename_text = self._clean_text(forename_element.get_text())
contributor_info['name'] = f"{forename_text} {surname_text}"
contributor_info['surname'] = surname_text
contributor_info['forename'] = forename_text
elif surname_element:
surname_text = self._clean_text(surname_element.get_text())
contributor_info['name'] = surname_text
contributor_info['surname'] = surname_text
elif forename_element:
forename_text = self._clean_text(forename_element.get_text())
contributor_info['name'] = forename_text
contributor_info['forename'] = forename_text
else:
# Fallback to full text content
full_name = self._clean_text(contributor_element.get_text())
if full_name:
contributor_info['name'] = full_name
# Extract affiliation information
affiliation_element = contributor_element.find("affiliation")
if affiliation_element:
affiliation_text = self._clean_text(affiliation_element.get_text())
if affiliation_text:
contributor_info['affiliation'] = affiliation_text
return contributor_info if contributor_info.get('name') else None
def _process_identifier_element(self, identifier_element: Tag, identifier_collection: Dict, level: str):
"""Process identifier elements (DOI, ISBN, ISSN, etc.) and organize by type and level."""
identifier_text = self._clean_text(identifier_element.get_text())
identifier_type = identifier_element.get("type", "").lower()
if identifier_text:
# Create level-specific container if it doesn't exist
level_key = f"{level}_identifiers"
if level_key not in identifier_collection:
identifier_collection[level_key] = {}
# Store identifier by type
if identifier_type:
identifier_collection[level_key][identifier_type] = identifier_text
else:
identifier_collection[level_key]['unknown'] = identifier_text
def _process_pointer_element(self, pointer_element: Tag, link_references: list):
"""Process pointer elements that contain external links."""
pointer_target = pointer_element.get("target", "").strip()
if pointer_target:
link_references.append(pointer_target)
def _process_imprint_details(self, imprint_element: Tag, publication_metadata: Dict):
"""Extract and process imprint information including publisher, dates, and page ranges."""
        # Extract publisher information
        publisher_elements = imprint_element.find_all("publisher")
        for publisher_element in publisher_elements:
            publisher_name = self._clean_text(publisher_element.get_text())
            if publisher_name:
                publication_metadata['publisher'] = publisher_name
        # The place of publication is encoded as <pubPlace> under <imprint>,
        # not as an attribute of <publisher>
        pub_place_element = imprint_element.find("pubPlace")
        if pub_place_element:
            publisher_location = self._clean_text(pub_place_element.get_text())
            if publisher_location:
                publication_metadata['publisher_location'] = publisher_location
# Extract date information
date_elements = imprint_element.find_all("date")
for date_element in date_elements:
date_type = date_element.get("type", "")
date_content = self._clean_text(date_element.get_text())
date_when = date_element.get("when")
if date_when:
publication_metadata['publication_date'] = date_when
# Extract year from ISO date
year_match = re.search(r'\b(19|20)\d{2}\b', date_when)
if year_match:
publication_metadata['year'] = int(year_match.group())
elif date_content:
if date_type:
publication_metadata[f'date_{date_type}'] = date_content
else:
publication_metadata['publication_date_text'] = date_content
# Try to extract year from text
year_match = re.search(r'\b(19|20)\d{2}\b', date_content)
if year_match:
publication_metadata['year'] = int(year_match.group())
# Extract bibliographic scope information (pages, volume, issue)
scope_elements = imprint_element.find_all("biblScope")
for scope_element in scope_elements:
scope_unit = scope_element.get("unit", "")
scope_text = self._clean_text(scope_element.get_text())
scope_from = scope_element.get("from")
scope_to = scope_element.get("to")
if scope_unit == "page":
if scope_from:
publication_metadata['page_start'] = scope_from
if scope_to:
publication_metadata['page_end'] = scope_to
if scope_text and not scope_from and not scope_to:
publication_metadata['pages'] = scope_text
elif scope_unit in ["volume", "vol"]:
publication_metadata['volume'] = scope_text
elif scope_unit in ["issue", "num"]:
publication_metadata['issue'] = scope_text
elif scope_unit == "chapter":
publication_metadata['chapter'] = scope_text
def _compile_citation_data(self, citation_data: Dict, contributors: list,
publication_metadata: Dict, identifiers: Dict,
supplementary_info: list, links: list):
"""Compile all extracted information into the final citation structure."""
# Process contributors
if contributors:
authors = [c for c in contributors if c.get('role') != 'editor']
editors = [c for c in contributors if c.get('role') == 'editor']
if authors:
if len(authors) == 1:
citation_data['authors'] = authors[0]['name']
else:
citation_data['authors'] = [author['name'] for author in authors]
if editors:
if len(editors) == 1:
citation_data['editors'] = editors[0]['name']
else:
citation_data['editors'] = [editor['name'] for editor in editors]
# Merge publication metadata
for key, value in publication_metadata.items():
if value:
citation_data[key] = value
# Merge identifier information
for level, level_identifiers in identifiers.items():
for id_type, id_value in level_identifiers.items():
# Prioritize common identifier types at top level
if id_type in ['doi', 'isbn', 'issn', 'pmc', 'pmid', 'arxiv']:
citation_data[id_type] = id_value
else:
# Store other identifiers in nested structure
if 'identifiers' not in citation_data:
citation_data['identifiers'] = {}
citation_data['identifiers'][f"{level}_{id_type}"] = id_value
# Add supplementary information
if supplementary_info:
if len(supplementary_info) == 1:
citation_data['notes'] = supplementary_info[0]
else:
citation_data['notes'] = supplementary_info
# Add link references
if links:
if len(links) == 1:
citation_data['url'] = links[0]
else:
citation_data['urls'] = links
def _validate_citation_content(self, citation_data: Dict) -> bool:
"""Validate that the citation contains meaningful information."""
# Check for essential bibliographic elements
essential_elements = ['title', 'authors', 'journal', 'doi', 'isbn', 'issn', 'pmc', 'pmid']
# Check if any essential element is present
has_essential = any(citation_data.get(element) for element in essential_elements)
# Check for fallback elements
has_fallback = any(citation_data.get(element) for element in ['raw_reference', 'reference_text'])
return has_essential or has_fallback
def _extract_person_data(self, person_element: Tag) -> Dict:
"""
Extract person data (author/editor) from TEI persName or author elements.
Handles various name formats and affiliations.
"""
person_data = {}
# Try different name extraction methods
forename = person_element.find("forename")
surname = person_element.find("surname")
if forename and surname:
# Standard format: forename + surname
forename_text = self._clean_text(forename.get_text())
surname_text = self._clean_text(surname.get_text())
person_data['name'] = f"{forename_text} {surname_text}"
person_data['forename'] = forename_text
person_data['surname'] = surname_text
elif surname:
# Surname only
surname_text = self._clean_text(surname.get_text())
person_data['name'] = surname_text
person_data['surname'] = surname_text
elif forename:
# Forename only
forename_text = self._clean_text(forename.get_text())
person_data['name'] = forename_text
person_data['forename'] = forename_text
else:
# Try to get name from full text content
full_name = self._clean_text(person_element.get_text())
if full_name:
person_data['name'] = full_name
# Try to parse into components
name_parts = full_name.split()
if len(name_parts) >= 2:
person_data['surname'] = name_parts[-1]
person_data['forename'] = " ".join(name_parts[:-1])
# Extract affiliation if present
affiliation = person_element.find("affiliation")
if affiliation:
aff_text = self._clean_text(affiliation.get_text())
if aff_text:
person_data['affiliation'] = aff_text
# Try to extract institution and location
# Look for common patterns like "Institution, City, Country"
parts = [part.strip() for part in aff_text.split(',') if part.strip()]
if len(parts) >= 1:
person_data['institution'] = parts[0]
if len(parts) >= 2:
person_data['location'] = ", ".join(parts[1:])
return person_data if person_data.get('name') else None
def _clean_text(self, text: str) -> str:
"""
Clean and normalize text content to handle encoding issues and extra whitespace.
"""
if not text:
return ""
# Handle common encoding issues
if isinstance(text, bytes):
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
try:
text = text.decode('latin-1')
except UnicodeDecodeError:
text = text.decode('utf-8', errors='ignore')
# Normalize whitespace and strip
text = re.sub(r'\s+', ' ', text.strip())
# Remove any potential XML/HTML entities
text = html.unescape(text)
return text
def _iter_passages_from_soup(self, soup: BeautifulSoup, passage_level: str) -> Iterator[Dict[str, Union[str, Dict[str, str]]]]:
"""Yield formatted passages discovered in the TEI soup. This yields the same structures
as get_formatted_passage but one at a time to keep memory usage low."""
for child in soup.TEI.children:
if child.name == 'text':
for passage in self._iter_passages_from_soup_for_text(child, passage_level):
yield passage
def _iter_passages_from_soup_for_text(self, text_node: Tag, passage_level: str) -> Iterator[Dict[str, Union[str, Dict[str, str]]]]:
head_paragraph = None
# Process body and back sections
for section in text_node.find_all(['body', 'back']):
# Only get direct child divs of this section (handle namespace variants)
div_nodes = []
for child in section.children:
if hasattr(child, 'name') and child.name:
# Handle both namespaced and non-namespaced divs
if child.name == "div" or child.name.endswith(":div"):
div_nodes.append(child)
for id_div, div in enumerate(div_nodes):
# Skip references div as it's handled separately
if div.get("type") == "references":
continue
div_type = div.get("type")
# Process this div and potentially nested divs
for passage in self._process_div_with_nested_content(div, passage_level, head_paragraph):
yield passage
def _process_div_with_nested_content(self, div: Tag, passage_level: str, head_paragraph: str = None) -> Iterator[Dict[str, Union[str, Dict[str, str]]]]:
"""
Process a div and its nested content, handling various back section types.
Supports nested divs for complex back sections like annex with multiple subsections.
"""
        head = div.find("head")
        head_section = None
        current_head_paragraph = None
# Check if this div has nested divs first (handle namespace variants)
nested_divs = []
for child in div.children:
if hasattr(child, 'name') and child.name:
# Handle both namespaced and non-namespaced divs
if child.name == "div" or child.name.endswith(":div"):
nested_divs.append(child)
# Count only direct child paragraphs, not those in nested divs
direct_p_nodes = [child for child in div.children if hasattr(child, 'name') and child.name == "p"]
if len(nested_divs) > 0 and len(direct_p_nodes) == 0:
# This is a container div - process each nested div independently
for nested_div in nested_divs:
# Skip references divs
if nested_div.get("type") == "references":
continue
# Pass None as head_paragraph to ensure nested divs use their own headers
for passage in self._process_div_with_nested_content(nested_div, passage_level, None):
yield passage
return # Don't process this div further
# Determine the section header and content type for divs with content
if head:
if len(direct_p_nodes) == 0:
# This div has only a head, no paragraphs (standalone head)
current_head_paragraph = self._clean_text(head.get_text())
else:
# This div has both head and paragraphs - head is the section header
head_section = self._clean_text(head.get_text())
else:
# If no head element, try to use the type attribute as head_section
div_type = div.get("type")
if div_type:
# Handle specific div types with appropriate section names
if div_type == "acknowledgement":
head_section = "Acknowledgements"
elif div_type == "conflict":
head_section = "Conflicts of Interest"
elif div_type == "contribution":
head_section = "Author Contributions"
elif div_type == "availability":
# Only set as default if this div has its own content
if len(direct_p_nodes) > 0:
head_section = "Data Availability"
elif div_type == "annex":
head_section = "Annex"
else:
# Generic handling - capitalize and format
head_section = div_type.replace("_", " ").title()
# Process paragraphs in this div
if len(direct_p_nodes) > 0:
for id_p, p in enumerate(direct_p_nodes):
paragraph_id = get_random_id(prefix="p_")
if passage_level == "sentence":
for id_s, sentence in enumerate(p.find_all("s")):
struct = get_formatted_passage(current_head_paragraph or head_paragraph, head_section, paragraph_id, sentence)
if self.validate_refs:
for ref in struct['refs']:
assert ref['offset_start'] < ref['offset_end'], "Wrong offsets"
assert struct['text'][ref['offset_start']:ref['offset_end']] == ref['text'], "Cannot apply offsets"
yield struct
else:
struct = get_formatted_passage(current_head_paragraph or head_paragraph, head_section, paragraph_id, p)
if self.validate_refs:
for ref in struct['refs']:
assert ref['offset_start'] < ref['offset_end'], "Wrong offsets"
assert struct['text'][ref['offset_start']:ref['offset_end']] == ref['text'], "Cannot apply offsets"
yield struct
# Update head_paragraph for potential next div
if current_head_paragraph is not None:
head_paragraph = current_head_paragraph
def process_directory(self, directory: Union[str, Path], pattern: str = "*.tei.xml", parallel: bool = True, workers: int = None) -> Iterator[Dict]:
"""Process a directory of TEI files and yield converted documents.
When parallel=True this uses ProcessPoolExecutor to parallelize file-level conversion.
Each yielded item is a dict with keys: 'path' and 'document' (document may be None on parse error).
"""
directory = Path(directory)
files = list(directory.rglob(pattern))
if not parallel or len(files) <= 1:
for f in files:
yield {"path": f, "document": self.convert_tei_file(f, stream=False)}
return
# Use processes for CPU-bound parsing when many files are available
workers = workers or min(32, (os.cpu_count() or 1))
with ProcessPoolExecutor(max_workers=workers) as ex:
futures = {ex.submit(_convert_file_worker, str(f)): f for f in files}
for fut in as_completed(futures):
f = futures[fut]
try:
doc = fut.result()
except Exception:
logger.exception("Error processing %s", f)
doc = None
yield {"path": f, "document": doc}
def _convert_file_worker(path: str):
    """Worker used by ProcessPoolExecutor. It only receives a picklable path string
    and builds its own converter inside the worker process."""
    converter = TEI2LossyJSONConverter()
    return converter.convert_tei_file(path, stream=False)
def box_to_dict(coord_list):
"""Convert coordinate list to dictionary format."""
if len(coord_list) >= 4:
return {
"x": float(coord_list[0]),
"y": float(coord_list[1]),
"width": float(coord_list[2]),
"height": float(coord_list[3])
}
return {}
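# Example: a coords box such as "58.4,337.9,181.4,9.7" split on "," becomes
# {"x": 58.4, "y": 337.9, "width": 181.4, "height": 9.7} (values are illustrative);
# anything shorter than four components yields an empty dict.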
def get_random_id(prefix=""):
"""Generate a random ID with optional prefix."""
return f"{prefix}{uuid.uuid4().hex[:8]}"
def get_refs_with_offsets(element):
"""Extract references with their text offsets from an element."""
refs = []
    # Apply the same text cleaning as get_formatted_passage
    def _clean_text(text: str) -> str:
        if not text:
            return ""
        text = re.sub(r'\s+', ' ', text.strip())
        text = html.unescape(text)
        return text
# Now extract references with offsets based on the cleaned text
def traverse_and_collect(node, current_pos=0):
"""
Recursively traverse the DOM tree, building cleaned text content and tracking exact positions.
Returns tuple: (text_content, next_position)
"""
if hasattr(node, 'name') and node.name:
# This is an element node
if node.name == "ref" and node.get("type") == "bibr":
# Found a reference - get its cleaned text and record its exact position
ref_text = _clean_text(node.get_text())
if ref_text: # Only record non-empty references
refs.append({
"type": node.get("type", ""),
"target": node.get("target", ""),
"text": ref_text,
"offset_start": current_pos,
"offset_end": current_pos + len(ref_text)
})
# Return the cleaned reference text and advance position
return ref_text, current_pos + len(ref_text)
else:
# Process children in document order and accumulate their cleaned text
text_parts = []
pos = current_pos
for child in node.children:
child_text, new_pos = traverse_and_collect(child, pos)
if child_text is not None:
text_parts.append(child_text)
pos = new_pos
return "".join(text_parts), pos
else:
# This is a text node (NavigableString) - be more careful with cleaning
text_content = str(node)
# For text nodes, we need to be more careful about whitespace
# Only apply the full cleaning at the end for the complete text
return text_content, current_pos + len(text_content)
# Build raw text with accurate positions first
raw_text, _ = traverse_and_collect(element, 0)
# Now apply the same cleaning as get_formatted_passage to the complete text
final_text = _clean_text(raw_text)
# Adjust all reference offsets to match the cleaned text
final_refs = []
for ref in refs:
# Find the reference text in the cleaned text to get correct offsets
ref_text = ref['text']
# The reference text was also cleaned, so we need to find it in the final cleaned text
# We can search around the original position to find the correct occurrence
search_start = max(0, ref['offset_start'] - 10) # Look a bit before the original position
search_end = min(len(final_text), ref['offset_end'] + 10) # Look a bit after
search_area = final_text[search_start:search_end]
# Find the reference in the search area
relative_pos = search_area.find(ref_text)
if relative_pos != -1:
final_start = search_start + relative_pos
final_end = final_start + len(ref_text)
final_refs.append({
"type": ref["type"],
"target": ref["target"],
"text": ref_text,
"offset_start": final_start,
"offset_end": final_end
})
return final_refs
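# For instance, a sentence such as
#   <s>Results agree with <ref type="bibr" target="#b5">Smith et al. (2010)</ref>.</s>
# (an illustrative TEI fragment) yields one entry whose offset_start/offset_end
# delimit "Smith et al. (2010)" inside the cleaned sentence text, so that
# text[offset_start:offset_end] == ref["text"].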
def get_formatted_passage(head_paragraph, head_section, paragraph_id, element):
"""Format a passage (paragraph or sentence) with metadata and references."""
    # Local copy of the text cleaning used elsewhere in this module
    def _clean_text_local(text: str) -> str:
        if not text:
            return ""
        text = re.sub(r'\s+', ' ', text.strip())
        text = html.unescape(text)
        return text
text = _clean_text_local(element.get_text())
refs = get_refs_with_offsets(element)
passage = {
"id": paragraph_id,
"text": text,
"coords": [
box_to_dict(coord.split(","))
for coord in element.get("coords", "").split(";")
] if element.has_attr("coords") else [],
"refs": refs
}
if head_paragraph:
passage["head_paragraph"] = head_paragraph
if head_section:
passage["head_section"] = head_section
return passage
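# A passage produced above therefore looks roughly like (illustrative values):
#   {"id": "p_1a2b3c4d", "text": "...", "coords": [{"x": ..., "y": ..., "width": ..., "height": ...}],
#    "refs": [{"type": "bibr", "target": "#b5", "text": "...", "offset_start": 10, "offset_end": 30}],
#    "head_section": "Results"}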
def xml_table_to_markdown(table_element):
"""Convert XML table to markdown format."""
if not table_element:
return None
markdown_lines = []
# Process table rows
for row in table_element.find_all("row"):
cells = []
for cell in row.find_all("cell"):
cell_text = cell.get_text().strip()
cells.append(cell_text)
if cells:
markdown_lines.append("| " + " | ".join(cells) + " |")
return "\n".join(markdown_lines) if markdown_lines else None
def xml_table_to_json(table_element):
"""Convert XML table to JSON format."""
if not table_element:
return None
table_data = {
"headers": [],
"rows": [],
"metadata": {}
}
# Check if table has a header row (thead)
thead = table_element.find("thead")
if thead:
header_row = thead.find("row")
if header_row:
for cell in header_row.find_all("cell"):
cell_text = cell.get_text().strip()
table_data["headers"].append(cell_text)
# Process table body rows
tbody = table_element.find("tbody")
    if tbody:
        rows = tbody.find_all("row")
    else:
        # If no tbody, get all rows; the header row found above (if any) is included
        # here, so skip it to avoid duplicating it as a data row
        rows = table_element.find_all("row")
        if thead and rows:
            rows = rows[1:]
for row in rows:
row_data = []
for cell in row.find_all("cell"):
cell_text = cell.get_text().strip()
row_data.append(cell_text)
if row_data:
table_data["rows"].append(row_data)
# Add metadata
table_data["metadata"] = {
"row_count": len(table_data["rows"]),
"column_count": len(table_data["headers"]) if table_data["headers"] else (len(table_data["rows"][0]) if table_data["rows"] else 0),