import argparse
import json
import os

from moellava.eval.m4c_evaluator import EvalAIAnswerProcessor
def parse_args():
    """Parse command-line arguments for the result-conversion script.

    Returns:
        argparse.Namespace with:
            annotation_file: path to the JSON-lines annotation (test split) file.
            result_file: path to the JSON-lines model-result file.
            result_upload_file: path where the EvalAI upload JSON is written.

    All three arguments are required; argparse exits with an error otherwise.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--annotation-file', type=str, required=True)
    parser.add_argument('--result-file', type=str, required=True)
    parser.add_argument('--result-upload-file', type=str, required=True)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()

    # Make sure the output directory exists before writing the upload file.
    os.makedirs(os.path.dirname(args.result_upload_file), exist_ok=True)

    # Read model predictions: one JSON object per line. Malformed lines are
    # counted and skipped rather than aborting the whole conversion.
    results = []
    error_line = 0
    for line_idx, line in enumerate(open(args.result_file)):
        try:
            results.append(json.loads(line))
        except json.JSONDecodeError:
            error_line += 1

    # Map question_id -> predicted answer text for O(1) lookup below.
    results = {x['question_id']: x['text'] for x in results}

    # The annotation (test split) file is also JSON-lines; presumably each
    # record carries 'question_id' and 'image' keys — TODO confirm schema.
    test_split = [json.loads(line) for line in open(args.annotation_file)]
    split_ids = set([x['question_id'] for x in test_split])

    print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}')

    # Normalizes raw answer strings into the form expected by EvalAI.
    answer_processor = EvalAIAnswerProcessor()

    all_answers = []
    for x in test_split:
        # Every test question must have a prediction; fail loudly if not.
        assert x['question_id'] in results
        all_answers.append({
            'image': x['image'],
            'answer': answer_processor(results[x['question_id']])
        })

    # Write the submission file in the format the evaluation server expects.
    with open(args.result_upload_file, 'w') as f:
        json.dump(all_answers, f)