# pyright: reportPrivateUsage=false
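
"""Unit tests for plain-text partitioning via unstructured.partition.text.partition_text()."""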

from __future__ import annotations

import json
import os
import pathlib
from typing import Optional, Type

import pytest
from pytest_mock import MockerFixture

from test_unstructured.unit_utils import assert_round_trips_through_JSON, example_doc_path
from unstructured.chunking.title import chunk_by_title
from unstructured.cleaners.core import group_broken_paragraphs
from unstructured.documents.elements import Address, ListItem, NarrativeText, Title
from unstructured.partition.text import (
    _combine_paragraphs_less_than_min,
    _split_content_to_fit_max,
    partition_text,
)
from unstructured.partition.utils.constants import UNSTRUCTURED_INCLUDE_DEBUG_METADATA

DIRECTORY = pathlib.Path(__file__).parent.resolve()
EXAMPLE_DOCS_DIRECTORY = os.path.join(DIRECTORY, "..", "..", "example-docs")

EXPECTED_OUTPUT = [
    NarrativeText(text="This is a test document to use for unit tests."),
    Address(text="Doylestown, PA 18901"),
    Title(text="Important points:"),
    ListItem(text="Hamburgers are delicious"),
    ListItem(text="Dogs are the best"),
    ListItem(text="I love fuzzy blankets"),
]
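
# Long text fixtures shared by the min_partition / max_partition splitting tests below.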

MIN_MAX_TEXT = """This is a story. This is a story that doesn't matter
 because it is just being used as an example. Hi. Hello. Howdy. Hola.
 The example is simple and repetitive and long and somewhat boring,
 but it serves a purpose. End.""".replace(
    "\n",
    "",
)

SHORT_PARAGRAPHS = """This is a story.

This is a story that doesn't matter because it is just being used as an example.

Hi.

Hello.

Howdy.

Hola.

The example is simple and repetitive and long and somewhat boring, but it serves a purpose.

End.
"""


@pytest.mark.parametrize(
    ("filename", "encoding"),
    [
        ("fake-text.txt", "utf-8"),
        ("fake-text.txt", None),
        ("fake-text-utf-16-be.txt", "utf-16-be"),
    ],
)
def test_partition_text_from_filename(filename: str, encoding: Optional[str]):
    filename_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, filename)
    elements = partition_text(filename=filename_path, encoding=encoding)
    assert len(elements) > 0
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename == filename
    if UNSTRUCTURED_INCLUDE_DEBUG_METADATA:
        assert {element.metadata.detection_origin for element in elements} == {"text"}


def test_partition_text_from_filename_with_metadata_filename():
    filename_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    elements = partition_text(
        filename=filename_path,
        encoding="utf-8",
        metadata_filename="test",
    )
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename == "test"


@pytest.mark.parametrize(
    "filename",
    ["fake-text-utf-16.txt", "fake-text-utf-16-le.txt", "fake-text-utf-32.txt"],
)
def test_partition_text_from_filename_default_encoding(filename: str):
    filename_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, filename)
    elements = partition_text(filename=filename_path)
    assert len(elements) > 0
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename == filename


@pytest.mark.parametrize(
    ("filename", "encoding", "error"),
    [
        ("fake-text.txt", "utf-16", UnicodeDecodeError),
        ("fake-text-utf-16-be.txt", "utf-16", UnicodeError),
    ],
)
def test_partition_text_from_filename_raises_encoding_error(
    filename: str,
    encoding: Optional[str],
    error: Type[BaseException],
):
    with pytest.raises(error):
        filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, filename)
        partition_text(filename=filename, encoding=encoding)


def test_partition_text_from_file():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    with open(filename, "rb") as f:
        elements = partition_text(file=f)
    assert len(elements) > 0
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename is None


def test_partition_text_from_file_with_metadata_filename():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    with open(filename, "rb") as f:
        elements = partition_text(file=f, metadata_filename="test")
    assert len(elements) > 0
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename == "test"


@pytest.mark.parametrize(
    "filename",
    ["fake-text-utf-16.txt", "fake-text-utf-16-le.txt", "fake-text-utf-32.txt"],
)
def test_partition_text_from_file_default_encoding(filename: str):
    filename_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, filename)
    with open(filename_path, "rb") as f:
        elements = partition_text(file=f)
    assert len(elements) > 0
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename is None


def test_partition_text_from_bytes_file():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    with open(filename, "rb") as f:
        elements = partition_text(file=f)
    assert len(elements) > 0
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename is None


@pytest.mark.parametrize(
    "filename",
    ["fake-text-utf-16.txt", "fake-text-utf-16-le.txt", "fake-text-utf-32.txt"],
)
def test_partition_text_from_bytes_file_default_encoding(filename: str):
    filename_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, filename)
    with open(filename_path, "rb") as f:
        elements = partition_text(file=f)
    assert len(elements) > 0
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename is None


def test_text_partition_element_metadata_user_provided_languages():
    filename = "example-docs/book-war-and-peace-1p.txt"
    elements = partition_text(filename=filename, strategy="fast", languages=["en"])
    assert elements[0].metadata.languages == ["eng"]


def test_partition_text_from_text():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    with open(filename) as f:
        text = f.read()
    elements = partition_text(text=text)
    assert len(elements) > 0
    assert elements == EXPECTED_OUTPUT
    for element in elements:
        assert element.metadata.filename is None


def test_partition_text_from_text_works_with_empty_string():
    assert partition_text(text="") == []


def test_partition_text_raises_with_none_specified():
    with pytest.raises(ValueError):
        partition_text()


def test_partition_text_raises_with_too_many_specified():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    with open(filename) as f:
        text = f.read()

    with pytest.raises(ValueError):
        partition_text(filename=filename, text=text)


def test_partition_text_captures_everything_even_with_linebreaks():
    text = """
    VERY IMPORTANT MEMO
    DOYLESTOWN, PA 18901
    """
    elements = partition_text(text=text)
    assert elements == [
        Title(text="VERY IMPORTANT MEMO"),
        Address(text="DOYLESTOWN, PA 18901"),
    ]
    for element in elements:
        assert element.metadata.filename is None


def test_partition_text_groups_broken_paragraphs():
    text = """The big brown fox
was walking down the lane.

At the end of the lane,
the fox met a bear."""

    elements = partition_text(text=text, paragraph_grouper=group_broken_paragraphs)
    assert elements == [
        NarrativeText(text="The big brown fox was walking down the lane."),
        NarrativeText(text="At the end of the lane, the fox met a bear."),
    ]
    for element in elements:
        assert element.metadata.filename is None


def test_partition_text_extract_regex_metadata():
    text = "SPEAKER 1: It is my turn to speak now!"

    elements = partition_text(text=text, regex_metadata={"speaker": r"SPEAKER \d{1,3}"})
    assert elements[0].metadata.regex_metadata == {
        "speaker": [{"text": "SPEAKER 1", "start": 0, "end": 9}],
    }
    for element in elements:
        assert element.metadata.filename is None


def test_partition_text_splits_long_text():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "norwich-city.txt")
    elements = partition_text(filename=filename)
    assert len(elements) > 0
    assert elements[0].text.startswith("Iwan Roberts")
    assert elements[-1].text.endswith("External links")


def test_partition_text_splits_long_text_max_partition():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "norwich-city.txt")
    elements = partition_text(filename=filename)
    elements_max_part = partition_text(filename=filename, max_partition=500)
    # NOTE(klaijan) - I edited the operation here from < to <=
    # Please revert back if this does not make sense
    assert len(elements) <= len(elements_max_part)
    for element in elements_max_part:
        assert len(element.text) <= 500

    # Make sure combined text is all the same
    assert " ".join([el.text for el in elements]) == " ".join([el.text for el in elements_max_part])


def test_partition_text_splits_max_min_partition():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "norwich-city.txt")
    elements = partition_text(filename=filename)
    elements_max_part = partition_text(filename=filename, min_partition=1000, max_partition=1500)
    for i, element in enumerate(elements_max_part):
        # NOTE(robinson) - the last element does not have a next element to merge with,
        # so it can be short
        if i < len(elements_max_part) - 1:
            assert len(element.text) <= 1500
            assert len(element.text) >= 1000

    import re

    from unstructured.nlp.patterns import BULLETS_PATTERN

    # NOTE(klaijan) - strip the asterisks out of both texts before comparing.
    # `elements` was partitioned by newline, so line 56 of the source file (shown below)
    # "*Club domestic league appearances and goals"
    # is treated as a bullet point by is_bulleted_text and the asterisk is stripped from
    # the paragraph, whereas `elements_max_part` was partitioned differently and none of
    # its lines start with anything matching BULLETS_PATTERN.

    # TODO(klaijan) - once partition_text supports non-bullet paragraphs that start with a
    # bullet-like BULLETS_PATTERN match, remove the re.sub calls from the assert below.

    # Make sure combined text is all the same
    assert re.sub(BULLETS_PATTERN, "", " ".join([el.text for el in elements])) == re.sub(
        BULLETS_PATTERN,
        "",
        " ".join([el.text for el in elements_max_part]),
    )


def test_partition_text_min_max():
    segments = partition_text(text=SHORT_PARAGRAPHS, min_partition=6)
    for i, segment in enumerate(segments):
        # NOTE(robinson) - the last element does not have a next element to merge with,
        # so it can be short
        if i < len(segments) - 1:
            assert len(segment.text) >= 6

    segments = partition_text(text=SHORT_PARAGRAPHS, max_partition=20, min_partition=7)
    for i, segment in enumerate(segments):
        # NOTE(robinson) - the last element does not have a next element to merge with,
        # so it can be short
        if i < len(segments) - 1:
            assert len(segment.text) >= 7
            assert len(segment.text) <= 20


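# The next two tests exercise the private splitting helpers directly, which is why the
# module sets `pyright: reportPrivateUsage=false` at the top.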
def test_split_content_to_fit_max():
    segments = _split_content_to_fit_max(
        content=MIN_MAX_TEXT,
        max_partition=75,
    )
    assert segments == [
        "This is a story.",
        "This is a story that doesn't matter because",
        "it is just being used as an example. Hi. Hello. Howdy. Hola.",
        "The example is simple and repetitive and long",
        "and somewhat boring, but it serves a purpose. End.",
    ]


def test_combine_paragraphs_less_than_min():
    paragraphs = SHORT_PARAGRAPHS.split("\n\n")
    segments = _combine_paragraphs_less_than_min(
        paragraphs,
        max_partition=1500,
        min_partition=7,
    )
    # the short paragraphs get merged, so there should be fewer segments than input paragraphs
    assert len(segments) < len(paragraphs)


def test_partition_text_doesnt_get_page_breaks():
    text = "--------------------"
    elements = partition_text(text=text)
    assert len(elements) == 1
    assert elements[0].text == text
    assert not isinstance(elements[0], ListItem)


@pytest.mark.parametrize(
    ("filename", "encoding"),
    [
        ("fake-text.txt", "utf-8"),
        ("fake-text.txt", None),
        ("fake-text-utf-16-be.txt", "utf-16-be"),
    ],
)
def test_partition_text_from_filename_exclude_metadata(filename: str, encoding: Optional[str]):
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, filename)
    elements = partition_text(
        filename=filename,
        encoding=encoding,
        include_metadata=False,
    )
    for i in range(len(elements)):
        assert elements[i].metadata.to_dict() == {}


def test_partition_text_from_file_exclude_metadata():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    with open(filename, "rb") as f:
        elements = partition_text(file=f, include_metadata=False)
    for i in range(len(elements)):
        assert elements[i].metadata.to_dict() == {}


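# The next several tests patch get_last_modified_date / get_last_modified_date_from_file
# with pytest-mock to check how last_modified metadata is populated and overridden.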
def test_partition_text_metadata_date(mocker: MockerFixture):
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    mocked_last_modification_date = "2029-07-05T09:24:28"

    mocker.patch(
        "unstructured.partition.text.get_last_modified_date",
        return_value=mocked_last_modification_date,
    )

    elements = partition_text(
        filename=filename,
    )

    assert elements[0].metadata.last_modified == mocked_last_modification_date


def test_partition_text_with_custom_metadata_date(mocker: MockerFixture):
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    mocked_last_modification_date = "2029-07-05T09:24:28"
    expected_last_modification_date = "2020-07-05T09:24:28"

    mocker.patch(
        "unstructured.partition.text.get_last_modified_date",
        return_value=mocked_last_modification_date,
    )

    elements = partition_text(
        filename=filename,
        metadata_last_modified=expected_last_modification_date,
    )

    assert elements[0].metadata.last_modified == expected_last_modification_date


def test_partition_text_from_file_metadata_date(mocker: MockerFixture):
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    mocked_last_modification_date = "2029-07-05T09:24:28"

    mocker.patch(
        "unstructured.partition.text.get_last_modified_date_from_file",
        return_value=mocked_last_modification_date,
    )

    with open(filename, "rb") as f:
        elements = partition_text(
            file=f,
        )

    assert elements[0].metadata.last_modified == mocked_last_modification_date


def test_partition_text_from_file_with_custom_metadata_date(mocker: MockerFixture):
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    mocked_last_modification_date = "2029-07-05T09:24:28"
    expected_last_modification_date = "2020-07-05T09:24:28"

    mocker.patch(
        "unstructured.partition.text.get_last_modified_date_from_file",
        return_value=mocked_last_modification_date,
    )

    with open(filename, "rb") as f:
        elements = partition_text(file=f, metadata_last_modified=expected_last_modification_date)

    assert elements[0].metadata.last_modified == expected_last_modification_date


def test_partition_text_from_text_metadata_date():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    with open(filename) as f:
        text = f.read()

    elements = partition_text(
        text=text,
    )
    assert elements[0].metadata.last_modified is None


def test_partition_text_from_text_with_custom_metadata_date():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "fake-text.txt")
    expected_last_modification_date = "2020-07-05T09:24:28"

    with open(filename) as f:
        text = f.read()

    elements = partition_text(text=text, metadata_last_modified=expected_last_modification_date)

    assert elements[0].metadata.last_modified == expected_last_modification_date


def test_partition_text_with_unique_ids():
    elements = partition_text(text="hello there!")
    assert elements[0].id == "c69509590d81db2f37f9d75480c8efed"
    # Test that the element is JSON serializable. This should run without an error
    json.dumps(elements[0].to_dict())

    elements = partition_text(text="hello there!", unique_element_ids=True)
    id = elements[0].id
    assert isinstance(id, str)  # included for type-narrowing
    assert len(id) == 36
    assert id.count("-") == 4
    # Test that the element is JSON serializable. This should run without an error
    json.dumps(elements[0].to_dict())


@pytest.mark.parametrize(
    ("file_name", "encoding"),
    [
        ("fake-text.txt", "utf-8"),
        ("fake-text.txt", None),
        ("fake-text-utf-16-be.txt", "utf-16-be"),
    ],
)
def test_partition_text_with_json(file_name: str, encoding: str | None):
    elements = partition_text(example_doc_path(file_name), encoding=encoding)
    assert_round_trips_through_JSON(elements)


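# Passing chunking_strategy="by_title" at partition time should be equivalent to running
# chunk_by_title() on the partitioned elements afterwards.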
def test_add_chunking_strategy_on_partition_text():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "norwich-city.txt")
    elements = partition_text(filename=filename)
    chunk_elements = partition_text(filename, chunking_strategy="by_title")
    chunks = chunk_by_title(elements)
    assert chunk_elements != elements
    assert chunk_elements == chunks


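# Language-detection metadata: default detection, per-element detection, and the explicit
# languages= argument.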
def test_partition_text_element_metadata_has_languages():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "norwich-city.txt")
    elements = partition_text(filename=filename)
    assert elements[0].metadata.languages == ["eng"]


def test_partition_text_respects_detect_language_per_element():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "language-docs", "eng_spa_mult.txt")
    elements = partition_text(filename=filename, detect_language_per_element=True)
    langs = [element.metadata.languages for element in elements]
    assert langs == [["eng"], ["spa", "eng"], ["eng"], ["eng"], ["spa"]]


def test_partition_text_respects_languages_arg():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "norwich-city.txt")
    elements = partition_text(filename=filename, languages=["deu"])
    assert elements[0].metadata.languages == ["deu"]


def test_partition_text_element_metadata_raises_TypeError():
    with pytest.raises(TypeError):
        filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "norwich-city.txt")
        partition_text(filename=filename, languages="eng")  # type: ignore


def test_partition_text_detects_more_than_3_languages():
    filename = os.path.join(EXAMPLE_DOCS_DIRECTORY, "language-docs", "UDHR_first_article_all.txt")
    elements = partition_text(filename=filename, detect_language_per_element=True)
    langs = list(
        {element.metadata.languages[0] for element in elements if element.metadata.languages},
    )
    assert len(langs) > 10