# CSS-LM: squad_metrics.py
""" Very heavily inspired by the official evaluation script for SQuAD version 2.0, which was
modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0.

In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question IDs to the model's predicted probability
that a question is unanswerable.
"""


import collections
import json
import logging
import math
import re
import string

from transformers.tokenization_bert import BasicTokenizer


logger = logging.getLogger(__name__)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
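
# Example: normalize_answer("The Eiffel Tower!") -> "eiffel tower"
# (lower-cased, punctuation stripped, the article "the" removed, whitespace collapsed).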


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
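
# Example: compute_exact("the Steve Smith", "Steve Smith!") -> 1 after normalization,
# while compute_f1("Steve Smith", "Smith") -> 2 * 1.0 * 0.5 / 1.5 ~= 0.667
# (precision 1/1 over predicted tokens, recall 1/2 over gold tokens).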


def get_raw_scores(examples, preds):
    """
    Computes the exact and f1 scores from the examples and the model predictions
    """
    exact_scores = {}
    f1_scores = {}

    for example in examples:
        qas_id = example.qas_id
        gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]

        if not gold_answers:
            # For unanswerable questions, the only correct answer is the empty string
            gold_answers = [""]

        if qas_id not in preds:
            logger.warning("Missing prediction for %s", qas_id)
            continue

        prediction = preds[qas_id]
        exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
        f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)

    return exact_scores, f1_scores
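
# Expected inputs (sketch): `examples` is an iterable of SquadExample-like objects
# exposing `.qas_id` and `.answers` (a list of {"text": ...} dicts), and `preds` maps
# qas_id -> predicted answer string. Both returned dicts map qas_id -> score.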


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
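
# Example: apply_no_ans_threshold({"q1": 1.0}, {"q1": 0.9}, {"q1": True}, 0.5)
# returns {"q1": 0.0}: the null probability 0.9 exceeds the 0.5 threshold, so the
# prediction is treated as "no answer", which is wrong for an answerable question.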


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
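
# Example: make_eval_dict({"q1": 1, "q2": 0}, {"q1": 1.0, "q2": 0.5})
# -> OrderedDict([("exact", 50.0), ("f1", 75.0), ("total", 2)])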


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]


def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for qid in qid_list:
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]

    has_ans_score, has_ans_cnt = 0, 0
    for qid in qid_list:
        if not qid_to_has_ans[qid]:
            continue
        has_ans_cnt += 1

        if qid not in scores:
            continue
        has_ans_score += scores[qid]

    return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
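
# How the sweep works (also applies to find_best_thresh below): start from the score
# obtained by predicting "no answer" everywhere (num_no_ans correct), then walk the
# questions in order of increasing null probability, treating each as answered.
# Answerable questions add their exact/F1 score; unanswerable ones lose a point when
# the model produced a non-empty prediction. The returned threshold is the null
# probability at which the running score peaks.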


def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
    main_eval["has_ans_exact"] = has_ans_exact
    main_eval["has_ans_f1"] = has_ans_f1


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for qid in qid_list:
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)

    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
    qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
    has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
    no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]

    if no_answer_probs is None:
        no_answer_probs = {k: 0.0 for k in preds}

    exact, f1 = get_raw_scores(examples, preds)

    exact_threshold = apply_no_ans_threshold(
        exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
    )
    f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)

    evaluation = make_eval_dict(exact_threshold, f1_threshold)

    if has_answer_qids:
        has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
        merge_eval(evaluation, has_ans_eval, "HasAns")

    if no_answer_qids:
        no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
        merge_eval(evaluation, no_ans_eval, "NoAns")

    if no_answer_probs:
        find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)

    return evaluation
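
# Usage sketch (the ids and file name below are illustrative placeholders):
#
#   predictions = {"qid_1": "Denver Broncos", "qid_2": ""}
#   na_probs = json.load(open("na_prob.json"))  # optional: qas_id -> P(unanswerable)
#   results = squad_evaluate(examples, predictions, no_answer_probs=na_probs)
#   # `results` holds "exact", "f1", "total", the "HasAns_*"/"NoAns_*" splits, and
#   # "best_exact"/"best_f1" with their thresholds when na_probs is supplied.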


def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text."""

    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text

    output_text = orig_text[orig_start_position : (orig_end_position + 1)]
    return output_text
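
# Example: get_final_text("steve smith", "Steve Smith's", do_lower_case=True)
# returns "Steve Smith": BasicTokenizer renders the original span as "steve smith ' s",
# the predicted text is found inside it, and the character alignment maps the match
# back onto the original casing without the trailing "'s".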


def _get_best_indexes(logits, n_best_size):
    """Get the n-best logits from a list."""
    index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)

    best_indexes = []
    for i in range(len(index_and_score)):
        if i >= n_best_size:
            break
        best_indexes.append(index_and_score[i][0])
    return best_indexes
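
# Example: _get_best_indexes([0.1, 2.5, 1.3, 0.7], n_best_size=2) -> [1, 2]
# (positions of the two largest logits, highest first).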


def _compute_softmax(scores):
    """Compute softmax probability over raw logits."""
    if not scores:
        return []

    max_score = None
    for score in scores:
        if max_score is None or score > max_score:
            max_score = score

    exp_scores = []
    total_sum = 0.0
    for score in scores:
        x = math.exp(score - max_score)
        exp_scores.append(x)
        total_sum += x

    probs = []
    for score in exp_scores:
        probs.append(score / total_sum)
    return probs
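
# Example: _compute_softmax([1.0, 2.0, 3.0]) -> approximately [0.090, 0.245, 0.665];
# subtracting the maximum logit first keeps the exponentials numerically stable.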


def compute_predictions_logits(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    do_lower_case,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    verbose_logging,
    version_2_with_negative,
    null_score_diff_threshold,
    tokenizer,
):
    """Write final predictions to the json file and log-odds of null if needed."""
    if output_prediction_file:
        logger.info(f"Writing predictions to: {output_prediction_file}")
    if output_nbest_file:
        logger.info(f"Writing nbest to: {output_nbest_file}")
    if output_null_log_odds_file and version_2_with_negative:
        logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}")

    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
    )

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index],
                        )
                    )
        if version_2_with_negative:
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit,
                )
            )
        prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)

        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"]
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]

                tok_text = tokenizer.convert_tokens_to_string(tok_tokens)

                # tok_text = " ".join(tok_tokens)
                #
                # # De-tokenize WordPieces that have been split off.
                # tok_text = tok_text.replace(" ##", "")
                # tok_text = tok_text.replace("##", "")

                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)

                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                if final_text in seen_predictions:
                    continue

                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True

            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))

            # In very rare edge cases we could have only a single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest) == 1:
                nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        assert len(nbest) >= 1

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)

        assert len(nbest_json) >= 1

        if not version_2_with_negative:
            all_predictions[example.qas_id] = nbest_json[0]["text"]
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
            scores_diff_json[example.qas_id] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example.qas_id] = ""
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json

    if output_prediction_file:
        with open(output_prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")

    if output_nbest_file:
        with open(output_nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if output_null_log_odds_file and version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions
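
# Call sketch (variable and file names are illustrative placeholders; in practice
# they come from a run_squad-style feature-conversion and inference pipeline):
#
#   predictions = compute_predictions_logits(
#       examples, features, results,
#       n_best_size=20, max_answer_length=30, do_lower_case=True,
#       output_prediction_file="predictions.json",
#       output_nbest_file="nbest_predictions.json",
#       output_null_log_odds_file="null_odds.json",
#       verbose_logging=False, version_2_with_negative=True,
#       null_score_diff_threshold=0.0, tokenizer=tokenizer,
#   )
#   # `predictions` maps qas_id -> answer text and can be passed to squad_evaluate().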


def compute_predictions_log_probs(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    start_n_top,
    end_n_top,
    version_2_with_negative,
    tokenizer,
    verbose_logging,
):
    """ XLNet write prediction logic (more complex than Bert's).
        Write final predictions to the json file and log-odds of null if needed.

        Requires utils_squad_evaluate.py
    """
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]
    )

    _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
    )

    logger.info("Writing predictions to: %s", output_prediction_file)
    # logger.info("Writing nbest to: %s" % (output_nbest_file))

    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive

        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]

            cur_null_score = result.cls_logits

            # if we could have irrelevant answers, get the min score of irrelevant
            score_null = min(score_null, cur_null_score)

            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_log_prob = result.start_logits[i]
                    start_index = result.start_top_index[i]

                    j_index = i * end_n_top + j

                    end_log_prob = result.end_logits[j_index]
                    end_index = result.end_top_index[j_index]

                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= feature.paragraph_len - 1:
                        continue
                    if end_index >= feature.paragraph_len - 1:
                        continue

                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue

                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_log_prob=start_log_prob,
                            end_log_prob=end_log_prob,
                        )
                    )

        prelim_predictions = sorted(
            prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]

            # XLNet un-tokenizer
            # Let's keep it simple for now and see if we need all this later.
            #
            # tok_start_to_orig_index = feature.tok_start_to_orig_index
            # tok_end_to_orig_index = feature.tok_end_to_orig_index
            # start_orig_pos = tok_start_to_orig_index[pred.start_index]
            # end_orig_pos = tok_end_to_orig_index[pred.end_index]
            # paragraph_text = example.paragraph_text
            # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()

            # Previously used Bert untokenizer
            tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
            tok_text = tokenizer.convert_tokens_to_string(tok_tokens)

            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)

            if hasattr(tokenizer, "do_lower_case"):
                do_lower_case = tokenizer.do_lower_case
            else:
                do_lower_case = tokenizer.do_lowercase_and_remove_accent

            final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)

            if final_text in seen_predictions:
                continue

            seen_predictions[final_text] = True

            nbest.append(
                _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)
            )

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_log_prob + entry.end_log_prob)
            if not best_non_null_entry:
                best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_log_prob"] = entry.start_log_prob
            output["end_log_prob"] = entry.end_log_prob
            nbest_json.append(output)

        assert len(nbest_json) >= 1
        assert best_non_null_entry is not None

        score_diff = score_null
        scores_diff_json[example.qas_id] = score_diff
        # note(zhiliny): always predict best_non_null_entry
        # and the evaluation script will search for the best threshold
        all_predictions[example.qas_id] = best_non_null_entry.text

        all_nbest_json[example.qas_id] = nbest_json

    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")

    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions
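
# Note: unlike compute_predictions_logits, this XLNet variant always keeps the best
# non-null answer and records the per-question null score (cls_logits) in
# output_null_log_odds_file; the no-answer decision is deferred to the threshold
# search performed by the evaluation functions above.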