llama-index
113 lines · 3.5 KB
1"""Slides parser.
2
3Contains parsers for .pptx files.
4
5"""
6
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional

from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
from llama_index.legacy.utils import infer_torch_device
15
class PptxReader(BaseReader):
    """Powerpoint parser.

    Extract text, caption images, and specify slides.

    Slide text is concatenated per slide; each embedded image is run through
    a pretrained image-captioning model ("nlpconnect/vit-gpt2-image-captioning")
    and the generated caption is inlined into the output text.
    """

    def __init__(self) -> None:
        """Init parser.

        Loads the captioning model, feature extractor, and tokenizer once and
        stores them in ``self.parser_config`` for reuse across calls.

        Raises:
            ImportError: If any of the optional dependencies
                (torch, transformers, python-pptx, Pillow) is missing.
        """
        try:
            import torch  # noqa
            from PIL import Image  # noqa
            from pptx import Presentation  # noqa
            from transformers import (
                AutoTokenizer,
                VisionEncoderDecoderModel,
                ViTFeatureExtractor,
            )
        except ImportError as exc:
            # Chain the original error so the missing package is visible.
            raise ImportError(
                "Please install extra dependencies that are required for "
                "the PptxReader: "
                "`pip install torch transformers python-pptx Pillow`"
            ) from exc

        model = VisionEncoderDecoderModel.from_pretrained(
            "nlpconnect/vit-gpt2-image-captioning"
        )
        feature_extractor = ViTFeatureExtractor.from_pretrained(
            "nlpconnect/vit-gpt2-image-captioning"
        )
        tokenizer = AutoTokenizer.from_pretrained(
            "nlpconnect/vit-gpt2-image-captioning"
        )

        self.parser_config = {
            "feature_extractor": feature_extractor,
            "model": model,
            "tokenizer": tokenizer,
        }

    def caption_image(self, tmp_image_file: str) -> str:
        """Generate text caption of image.

        Args:
            tmp_image_file: Path to an image file readable by Pillow.

        Returns:
            The model-generated caption, stripped of surrounding whitespace.
        """
        from PIL import Image

        model = self.parser_config["model"]
        feature_extractor = self.parser_config["feature_extractor"]
        tokenizer = self.parser_config["tokenizer"]

        device = infer_torch_device()
        model.to(device)

        # Short max length + beam search keeps captions terse and stable.
        max_length = 16
        num_beams = 4
        gen_kwargs = {"max_length": max_length, "num_beams": num_beams}

        i_image = Image.open(tmp_image_file)
        # The ViT feature extractor expects 3-channel RGB input.
        if i_image.mode != "RGB":
            i_image = i_image.convert(mode="RGB")

        pixel_values = feature_extractor(
            images=[i_image], return_tensors="pt"
        ).pixel_values
        pixel_values = pixel_values.to(device)

        output_ids = model.generate(pixel_values, **gen_kwargs)

        preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        return preds[0].strip()

    def load_data(
        self,
        file: Path,
        extra_info: Optional[Dict] = None,
    ) -> List[Document]:
        """Parse file.

        Args:
            file: Path to the .pptx file.
            extra_info: Optional metadata attached to the returned Document.

        Returns:
            A single-element list with one Document containing all slide text
            and image captions.
        """
        from pptx import Presentation

        presentation = Presentation(file)
        result = ""
        for i, slide in enumerate(presentation.slides):
            result += f"\n\nSlide #{i}: \n"
            for shape in slide.shapes:
                if hasattr(shape, "image"):
                    image = shape.image
                    # get image "file" contents
                    image_bytes = image.blob
                    # Write to a unique temp file: a fixed "tmp_image.<ext>"
                    # name in the CWD collides under concurrent use and
                    # requires a writable working directory.
                    with tempfile.NamedTemporaryFile(
                        suffix=f".{image.ext}", delete=False
                    ) as tmp:
                        tmp.write(image_bytes)
                        image_filename = tmp.name
                    try:
                        result += f"\n Image: {self.caption_image(image_filename)}\n\n"
                    finally:
                        # Remove even if captioning raises, so no file leaks.
                        os.remove(image_filename)
                if hasattr(shape, "text"):
                    result += f"{shape.text}\n"

        return [Document(text=result, metadata=extra_info or {})]