# -*- coding: utf-8 -*-
"""aai_pdf_zusammenfassung_Version_rohfassung v2 angepasst.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1vu_OZyE0h-zyHsxB3vgVQBJpiEZV-yE3
"""

# pip install gradio
import gradio as gr
import numpy as np

# pip install PyPDF2
# pip install pdfminer.six
# pip3 install pdfplumber
# pip install pdf2image
# pip install Pillow
# pip install --upgrade pip
# pip install --upgrade transformers scipy
# apt-get install poppler-utils
# apt install tesseract-ocr
# apt install libtesseract-dev

# To read the PDF
import PyPDF2
# To analyze the PDF layout and extract text
from pdfminer.high_level import extract_pages, extract_text
from pdfminer.layout import LTTextContainer, LTChar, LTRect, LTFigure
# To extract text from tables in the PDF
import pdfplumber

# The following imports are only needed if the (currently commented-out)
# image-extraction and OCR step inside read_pdf() is re-enabled:
# from PIL import Image
# from pdf2image import convert_from_path
# import pytesseract
# To remove the additional files created along the way
# import os
# import scipy

from itertools import chain
import re

from transformers import pipeline
from IPython.display import Audio


# Extract the text and formatting of a single pdfminer text element
def text_extraction(element):
    # Extract the text from the in-line text element
    line_text = element.get_text()

    # Collect every format (font name and size) that appears in this line of text
    line_formats = []
    for text_line in element:
        if isinstance(text_line, LTTextContainer):
            # Iterate through each character in the line of text
            for character in text_line:
                if isinstance(character, LTChar):
                    # Append the font name of the character
                    line_formats.append(character.fontname)
                    # Append the font size of the character
                    line_formats.append(character.size)

    # Keep only the unique font names and sizes in the line
    format_per_line = list(set(line_formats))

    # Return a tuple with the text of the line and its formats
    return (line_text, format_per_line)
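
# read_pdf() below calls extract_table() and table_converter(), which are not
# defined anywhere in this notebook. The following is a minimal sketch of what
# they could look like, built on pdfplumber's extract_tables(); the exact
# shape of the table string is an assumption, not the original implementation.
def extract_table(pdf_path, page_num, table_num):
    # Open the PDF and pull the requested table from the requested page
    with pdfplumber.open(pdf_path) as pdf:
        table_page = pdf.pages[page_num]
        table = table_page.extract_tables()[table_num]
    return table


def table_converter(table):
    # Turn pdfplumber's nested row/cell lists into one pipe-separated line per row,
    # flattening line breaks inside cells and marking empty cells as 'None'
    table_string = ''
    for row in table:
        cleaned_row = [
            item.replace('\n', ' ') if item is not None else 'None'
            for item in row
        ]
        table_string += ('|' + '|'.join(cleaned_row) + '|' + '\n')
    # Drop the trailing newline
    return table_string[:-1]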

# Extract text, formats, and tables from every page of a PDF
def read_pdf(pdf_path):
    # Create a PDF file object
    pdfFileObj = open(pdf_path, 'rb')
    # Create a PDF reader object
    pdfReaded = PyPDF2.PdfReader(pdfFileObj)

    # Dictionary that will hold the extracted content of each page
    text_per_page = {}

    # Extract the pages from the PDF
    for pagenum, page in enumerate(extract_pages(pdf_path)):
        print("Elaborating Page_" + str(pagenum))

        # Initialize the variables needed for the text extraction from the page
        pageObj = pdfReaded.pages[pagenum]
        page_text = []
        line_format = []
        text_from_images = []
        text_from_tables = []
        page_content = []
        # Initialize the number of the examined tables
        table_num = 0
        first_element = True
        table_extraction_flag = False
        # Bounding box of the current table (set once the first table is found)
        lower_side = None
        upper_side = None

        # Open the PDF file with pdfplumber
        pdf = pdfplumber.open(pdf_path)
        # Find the examined page
        page_tables = pdf.pages[pagenum]
        # Find the tables on the page
        tables = page_tables.find_tables()

        # Find all the elements on the page
        page_elements = [(element.y1, element) for element in page._objs]
        # Sort all the elements as they appear on the page (top to bottom)
        page_elements.sort(key=lambda a: a[0], reverse=True)

        # Walk through the elements that compose the page
        for i, component in enumerate(page_elements):
            # Position of the top side of the element in the PDF
            pos = component[0]
            # The element of the page layout itself
            element = component[1]

            # Check if the element is a text element
            if isinstance(element, LTTextContainer):
                # Check whether the text appeared inside a table
                if table_extraction_flag == False:
                    # Extract the text and format of each text element
                    (line_text, format_per_line) = text_extraction(element)
                    # Append the text of each line to the page text
                    page_text.append(line_text)
                    # Append the format of each line containing text
                    line_format.append(format_per_line)
                    page_content.append(line_text)
                else:
                    # Omit the text that appeared inside a table
                    pass

            # Check the elements for images
            # if isinstance(element, LTFigure):
            #     # Crop the image from the PDF
            #     crop_image(element, pageObj)
            #     # Convert the cropped PDF page to an image
            #     convert_to_images('cropped_image.pdf')
            #     # Extract the text from the image via OCR
            #     image_text = image_to_text('PDF_image.png')
            #     text_from_images.append(image_text)
            #     page_content.append(image_text)
            #     # Add a placeholder in the text and format lists
            #     page_text.append('image')
            #     line_format.append('image')

            # Check the elements for tables
            if isinstance(element, LTRect):
                # If this is the first rectangular element of a new table
                if first_element == True and (table_num + 1) <= len(tables):
                    # Find the bounding box of the table
                    lower_side = page.bbox[3] - tables[table_num].bbox[3]
                    upper_side = element.y1
                    # Extract the information from the table
                    table = extract_table(pdf_path, pagenum, table_num)
                    # Convert the table information into a structured string
                    table_string = table_converter(table)
                    # Append the table string to the list
                    text_from_tables.append(table_string)
                    page_content.append(table_string)
                    # Set the flag to avoid extracting the same content again
                    table_extraction_flag = True
                    # The next rectangle is no longer the first element
                    first_element = False
                    # Add a placeholder in the text and format lists
                    page_text.append('table')
                    line_format.append('table')

                # Check if we have already extracted the tables from the page
                if lower_side is not None and element.y0 >= lower_side and element.y1 <= upper_side:
                    pass
                elif i + 1 < len(page_elements) and not isinstance(page_elements[i + 1][1], LTRect):
                    table_extraction_flag = False
                    first_element = True
                    table_num += 1

        # Create the key of the dictionary
        dctkey = 'Page_' + str(pagenum)
        # Add the lists as the value of the page key
        text_per_page[dctkey] = [page_text, line_format, text_from_images, text_from_tables, page_content]

    # Close the PDF file object
    pdfFileObj.close()

    # Delete the additional files created for the image extraction
    # os.remove('cropped_image.pdf')
    # os.remove('PDF_image.png')

    return text_per_page


pdf_path = 'Article 11 Hidden Technical Debt in Machine Learning Systems'

text_per_page = read_pdf(pdf_path)
text_per_page.keys()

# Collect the content of the nine pages of the article
page_0 = text_per_page['Page_0']
page_1 = text_per_page['Page_1']
page_2 = text_per_page['Page_2']
page_3 = text_per_page['Page_3']
page_4 = text_per_page['Page_4']
page_5 = text_per_page['Page_5']
page_6 = text_per_page['Page_6']
page_7 = text_per_page['Page_7']
page_8 = text_per_page['Page_8']

page_all = page_0 + page_1 + page_2 + page_3 + page_4 + page_5 + page_6 + page_7 + page_8

# Flatten the nested lists
flattened_page_all = list(chain.from_iterable(page_all))

# Convert the flattened list into a single string
page_all_string = ''.join(map(str, flattened_page_all))

# Use a regular expression to find the abstract text between its delimiters
match = re.search(r'Abstract\n(.*?)(?=\d+\nIntroduction)', page_all_string, re.DOTALL)

# Check if a match was found
if match:
    abstract_text = match.group(1)
    # print(abstract_text)
else:
    print("Abstract not found.")

# Initialize the summarization pipeline
summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")

# Get the summary of the abstract
summary_result = summarizer(abstract_text, max_length=100, min_length=30, do_sample=False)

# Extract the summary text from the result
summary_text = summary_result[0]['summary_text']

# Merge the first two sentences: replace the first ". " with " and " ...
merged_summary = summary_text.replace('. ', ' and ', 1)

# Find the index of the inserted "and"
and_index = merged_summary.find('and')

# ... and lowercase the first letter after "and"
if and_index != -1 and and_index + 4 < len(merged_summary):
    merged_summary = merged_summary[:and_index + 4] + merged_summary[and_index + 4].lower() + merged_summary[and_index + 5:]

# Print the merged summary
# print(merged_summary)
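
# A small self-contained illustration of the merge-and-lowercase step above,
# using a made-up two-sentence summary (hypothetical text, not actual model output):
example = "ML systems incur technical debt. Hidden costs appear later."
example_merged = example.replace('. ', ' and ', 1)
example_index = example_merged.find('and')
if example_index != -1 and example_index + 4 < len(example_merged):
    example_merged = (example_merged[:example_index + 4]
                      + example_merged[example_index + 4].lower()
                      + example_merged[example_index + 5:])
# example_merged is now:
# "ML systems incur technical debt and hidden costs appear later."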

# Quick standalone demo of the Bark text-to-speech pipeline.
# A very short test string keeps the synthesis fast; replace it with
# merged_summary to hear the full summary read aloud.
merged_summary_1 = "A"

synthesiser = pipeline("text-to-speech", "suno/bark")

speech = synthesiser(merged_summary_1, forward_params={"do_sample": True})

Audio(speech["audio"], rate=speech["sampling_rate"])


# Gradio callback: read the PDF, summarize its abstract, and return the summary as speech
def PDF_abstract(pdf_file):
    # Use the uploaded file if one was provided, otherwise fall back to the local article.
    # Depending on the Gradio version, the file input is either a path string
    # or a tempfile-like object with a .name attribute.
    if pdf_file is not None:
        pdf_path = pdf_file if isinstance(pdf_file, str) else pdf_file.name
    else:
        pdf_path = 'Article 11 Hidden Technical Debt in Machine Learning Systems'

    text_per_page = read_pdf(pdf_path)

    # Concatenate the content of all extracted pages
    page_all = []
    for key in text_per_page:
        page_all += text_per_page[key]

    # Flatten the nested lists and convert them into a single string
    flattened_page_all = list(chain.from_iterable(page_all))
    page_all_string = ''.join(map(str, flattened_page_all))

    # Use a regular expression to find the abstract text between its delimiters
    match = re.search(r'Abstract\n(.*?)(?=\d+\nIntroduction)', page_all_string, re.DOTALL)

    # Check if a match was found
    if match:
        abstract_text = match.group(1)
    else:
        # Without an abstract there is nothing to summarize
        print("Abstract not found.")
        return None

    # Initialize the summarization pipeline and summarize the abstract
    summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
    summary_result = summarizer(abstract_text, max_length=100, min_length=30, do_sample=False)
    summary_text = summary_result[0]['summary_text']

    # Merge the first two sentences of the summary ...
    merged_summary = summary_text.replace('. ', ' and ', 1)
    and_index = merged_summary.find('and')
    # ... and lowercase the first letter after "and"
    if and_index != -1 and and_index + 4 < len(merged_summary):
        merged_summary = merged_summary[:and_index + 4] + merged_summary[and_index + 4].lower() + merged_summary[and_index + 5:]

    # Convert the merged summary to speech with Bark
    synthesiser = pipeline("text-to-speech", "suno/bark")
    speech = synthesiser(merged_summary, forward_params={"do_sample": True})

    # Return the audio in the (sampling_rate, waveform) format expected by the audio output
    return (speech["sampling_rate"], np.squeeze(speech["audio"]))


demo = gr.Interface(
    PDF_abstract,
    inputs="file",
    outputs="audio",
    live=True
)
demo.launch()
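
# scipy is installed at the top of the notebook but never used. One plausible use
# is saving the Bark output to disk; a minimal sketch, reusing the `speech` dict
# from the demo cell above (the output file name is just an example):
import scipy.io.wavfile

scipy.io.wavfile.write(
    "bark_summary.wav",
    rate=speech["sampling_rate"],
    data=np.squeeze(speech["audio"]),  # Bark returns a (1, n_samples) float array
)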