CSS-LM
1""" Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was
2modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
3
4In addition to basic functionality, we also compute additional statistics and
5plot precision-recall curves if an additional na_prob.json file is provided.
6This file is expected to map question ID's to the model's predicted probability
7that a question is unanswerable.
8"""


import collections
import json
import logging
import math
import re
import string

from transformers.tokenization_bert import BasicTokenizer


logger = logging.getLogger(__name__)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


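# --- Illustrative sketch (not part of the original script) --------------------
# A minimal, hedged example of how the answer-level metrics above behave on toy
# strings; the `_demo_*` name is hypothetical and exists only as documentation.
def _demo_answer_metrics():
    assert normalize_answer("The  Eiffel Tower!") == "eiffel tower"
    assert compute_exact("The Eiffel Tower", "eiffel tower") == 1
    # One of the two gold tokens is recovered: precision 1.0, recall 0.5, F1 = 2/3.
    assert abs(compute_f1("Steve Smith", "Smith") - 2.0 / 3.0) < 1e-6

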
def get_raw_scores(examples, preds):
    """
    Computes the exact and F1 scores from the examples and the model predictions.
    """
    exact_scores = {}
    f1_scores = {}

    for example in examples:
        qas_id = example.qas_id
        gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]

        if not gold_answers:
            # For unanswerable questions, the only correct answer is the empty string
            gold_answers = [""]

        if qas_id not in preds:
            print("Missing prediction for %s" % qas_id)
            continue

        prediction = preds[qas_id]
        exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
        f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)

    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


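# --- Illustrative sketch (not part of the original script) --------------------
# When the model's no-answer probability exceeds the threshold, the question is
# treated as predicted-unanswerable and scores 1 only if it truly has no answer.
# The `_demo_*` name and the toy dictionaries below are hypothetical.
def _demo_no_ans_threshold():
    scores = {"q1": 1.0, "q2": 0.0}
    na_probs = {"q1": 0.9, "q2": 0.1}
    qid_to_has_ans = {"q1": True, "q2": False}
    new_scores = apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh=0.5)
    # q1 is forced to 0.0 (predicted no-answer although it is answerable);
    # q2 keeps its raw score because its no-answer probability is below the threshold.
    assert new_scores == {"q1": 0.0, "q2": 0.0}

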
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]


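# --- Illustrative sketch (not part of the original script) --------------------
# A hedged example of how the aggregate dictionaries are built and merged; the
# `_demo_*` name and the toy score dictionaries are hypothetical.
def _demo_make_eval_dict():
    exact = {"q1": 1, "q2": 0}
    f1 = {"q1": 1.0, "q2": 0.5}
    overall = make_eval_dict(exact, f1)                  # exact 50.0, f1 75.0, total 2
    subset = make_eval_dict(exact, f1, qid_list=["q1"])  # exact 100.0, f1 100.0, total 1
    merged = collections.OrderedDict(overall)
    merge_eval(merged, subset, "HasAns")                 # adds HasAns_exact / HasAns_f1 / HasAns_total
    return merged

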
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]

    has_ans_score, has_ans_cnt = 0, 0
    for qid in qid_list:
        if not qid_to_has_ans[qid]:
            continue
        has_ans_cnt += 1

        if qid not in scores:
            continue
        has_ans_score += scores[qid]

    return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt


def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
    main_eval["has_ans_exact"] = has_ans_exact
    main_eval["has_ans_f1"] = has_ans_f1


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for _, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)

    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


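# --- Illustrative sketch (not part of the original script) --------------------
# The threshold search sorts questions by no-answer probability and sweeps the
# cutoff, keeping the value that maximizes the cumulative score. The `_demo_*`
# name and the toy data below are hypothetical.
def _demo_find_best_thresh():
    preds = {"q1": "some span", "q2": "spurious span"}
    exact_raw = {"q1": 1, "q2": 0}  # q2 is unanswerable, so its raw exact score is 0
    na_probs = {"q1": 0.2, "q2": 0.8}
    qid_to_has_ans = {"q1": True, "q2": False}
    best_exact, best_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    # Accepting spans only for questions with na_prob <= 0.2 gets both questions right.
    assert best_exact == 100.0 and best_thresh == 0.2

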
def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
    qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
    has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
    no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]

    if no_answer_probs is None:
        no_answer_probs = {k: 0.0 for k in preds}

    exact, f1 = get_raw_scores(examples, preds)

    exact_threshold = apply_no_ans_threshold(
        exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
    )
    f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)

    evaluation = make_eval_dict(exact_threshold, f1_threshold)

    if has_answer_qids:
        has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
        merge_eval(evaluation, has_ans_eval, "HasAns")

    if no_answer_qids:
        no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
        merge_eval(evaluation, no_ans_eval, "NoAns")

    if no_answer_probs:
        find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)

    return evaluation


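# --- Illustrative sketch (not part of the original script) --------------------
# A minimal usage sketch for `squad_evaluate`. The `_FakeExample` namedtuple only
# mimics the two attributes the function reads (`qas_id`, `answers`); in the usual
# transformers pipeline these would be SquadExample objects from the processors.
def _demo_squad_evaluate():
    _FakeExample = collections.namedtuple("FakeExample", ["qas_id", "answers"])
    examples = [
        _FakeExample("q1", [{"text": "Steve Smith"}]),  # answerable question
        _FakeExample("q2", []),                         # unanswerable question
    ]
    preds = {"q1": "Steve Smith", "q2": ""}
    results = squad_evaluate(examples, preds)
    # Both predictions are correct, so exact match and F1 are 100.
    assert results["exact"] == 100.0 and results["f1"] == 100.0
    return results

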
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text."""

    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text

    output_text = orig_text[orig_start_position : (orig_end_position + 1)]
    return output_text


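# --- Illustrative sketch (not part of the original script) --------------------
# The docstring's own example: projecting the normalized prediction back onto
# the raw document text recovers the original casing without the trailing "'s".
# The `_demo_*` name is hypothetical; do_lower_case=True assumes an uncased model.
def _demo_get_final_text():
    final = get_final_text("steve smith", "Steve Smith's", do_lower_case=True)
    assert final == "Steve Smith"
    return final

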
def _get_best_indexes(logits, n_best_size):
    """Get the n-best logits from a list."""
    index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)

    best_indexes = []
    for i in range(len(index_and_score)):
        if i >= n_best_size:
            break
        best_indexes.append(index_and_score[i][0])
    return best_indexes


def _compute_softmax(scores):
    """Compute softmax probability over raw logits."""
    if not scores:
        return []

    max_score = None
    for score in scores:
        if max_score is None or score > max_score:
            max_score = score

    exp_scores = []
    total_sum = 0.0
    for score in scores:
        x = math.exp(score - max_score)
        exp_scores.append(x)
        total_sum += x

    probs = []
    for score in exp_scores:
        probs.append(score / total_sum)
    return probs


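# --- Illustrative sketch (not part of the original script) --------------------
# `_get_best_indexes` keeps the positions of the largest logits and
# `_compute_softmax` turns raw logits into probabilities with a max-shifted
# (numerically stable) softmax. The `_demo_*` name is hypothetical.
def _demo_logit_helpers():
    logits = [0.1, 2.3, -1.0, 1.5]
    assert _get_best_indexes(logits, n_best_size=2) == [1, 3]
    probs = _compute_softmax(logits)
    assert abs(sum(probs) - 1.0) < 1e-9
    return probs

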
def compute_predictions_logits(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    do_lower_case,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    verbose_logging,
    version_2_with_negative,
    null_score_diff_threshold,
    tokenizer,
):
    """Write final predictions to the json file and log-odds of null if needed."""
    if output_prediction_file:
        logger.info(f"Writing predictions to: {output_prediction_file}")
    if output_nbest_file:
        logger.info(f"Writing nbest to: {output_nbest_file}")
    if output_null_log_odds_file and version_2_with_negative:
        logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}")

    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
    )

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index],
                        )
                    )
        if version_2_with_negative:
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit,
                )
            )
        prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)

        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"]
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]

                tok_text = tokenizer.convert_tokens_to_string(tok_tokens)

                # tok_text = " ".join(tok_tokens)
                #
                # # De-tokenize WordPieces that have been split off.
                # tok_text = tok_text.replace(" ##", "")
                # tok_text = tok_text.replace("##", "")

                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)

                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                if final_text in seen_predictions:
                    continue

                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True

            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))

            # In very rare edge cases we could only have single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest) == 1:
                nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        assert len(nbest) >= 1

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)

        assert len(nbest_json) >= 1

        if not version_2_with_negative:
            all_predictions[example.qas_id] = nbest_json[0]["text"]
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
            scores_diff_json[example.qas_id] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example.qas_id] = ""
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json

    if output_prediction_file:
        with open(output_prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")

    if output_nbest_file:
        with open(output_nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if output_null_log_odds_file and version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions


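# --- Illustrative sketch (not part of the original script) --------------------
# The SQuAD v2 decision rule used above, isolated for clarity: the empty string
# is predicted iff the null span outscores the best non-null span by more than
# `null_score_diff_threshold`. The `_demo_*` name and its arguments are hypothetical.
def _demo_null_decision(score_null, best_start_logit, best_end_logit, null_score_diff_threshold, best_text):
    score_diff = score_null - best_start_logit - best_end_logit
    return "" if score_diff > null_score_diff_threshold else best_text

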
def compute_predictions_log_probs(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    start_n_top,
    end_n_top,
    version_2_with_negative,
    tokenizer,
    verbose_logging,
):
    """ XLNet write prediction logic (more complex than Bert's).
    Write final predictions to the json file and log-odds of null if needed.

    Requires utils_squad_evaluate.py
    """
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]
    )

    _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
    )

    logger.info("Writing predictions to: %s", output_prediction_file)
    # logger.info("Writing nbest to: %s" % (output_nbest_file))

    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive

        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]

            cur_null_score = result.cls_logits

            # if we could have irrelevant answers, get the min score of irrelevant
            score_null = min(score_null, cur_null_score)

            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_log_prob = result.start_logits[i]
                    start_index = result.start_top_index[i]

                    j_index = i * end_n_top + j

                    end_log_prob = result.end_logits[j_index]
                    end_index = result.end_top_index[j_index]

                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= feature.paragraph_len - 1:
                        continue
                    if end_index >= feature.paragraph_len - 1:
                        continue

                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue

                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_log_prob=start_log_prob,
                            end_log_prob=end_log_prob,
                        )
                    )

        prelim_predictions = sorted(
            prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]

            # XLNet un-tokenizer
            # Let's keep it simple for now and see if we need all this later.
            #
            # tok_start_to_orig_index = feature.tok_start_to_orig_index
            # tok_end_to_orig_index = feature.tok_end_to_orig_index
            # start_orig_pos = tok_start_to_orig_index[pred.start_index]
            # end_orig_pos = tok_end_to_orig_index[pred.end_index]
            # paragraph_text = example.paragraph_text
            # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()

            # Previously used Bert untokenizer
            tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
            tok_text = tokenizer.convert_tokens_to_string(tok_tokens)

            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)

            if hasattr(tokenizer, "do_lower_case"):
                do_lower_case = tokenizer.do_lower_case
            else:
                do_lower_case = tokenizer.do_lowercase_and_remove_accent

            final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)

            if final_text in seen_predictions:
                continue

            seen_predictions[final_text] = True

            nbest.append(
                _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)
            )

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_log_prob + entry.end_log_prob)
            if not best_non_null_entry:
                best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_log_prob"] = entry.start_log_prob
            output["end_log_prob"] = entry.end_log_prob
            nbest_json.append(output)

        assert len(nbest_json) >= 1
        assert best_non_null_entry is not None

        score_diff = score_null
        scores_diff_json[example.qas_id] = score_diff
        # note(zhiliny): always predict best_non_null_entry
        # and the evaluation script will search for the best threshold
        all_predictions[example.qas_id] = best_non_null_entry.text

        all_nbest_json[example.qas_id] = nbest_json

    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")

    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions
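
# --- Illustrative sketch (not part of the original script) --------------------
# A hedged example of how the files written above are typically consumed: the
# null log-odds are reloaded as `no_answer_probs` so that `squad_evaluate` (and
# its threshold search) can pick the best no-answer cutoff. The function name
# and the default file paths are hypothetical placeholders.
def _demo_evaluate_with_null_odds(examples, prediction_file="predictions.json", null_odds_file="null_odds.json"):
    with open(prediction_file) as f:
        preds = json.load(f)
    with open(null_odds_file) as f:
        na_probs = json.load(f)
    return squad_evaluate(examples, preds, no_answer_probs=na_probs)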