Closed: Rahul6903 closed this issue 3 months ago.
You've chosen to report an unexpected problem or bug. Unless you already know its root cause, please include details by filling in the issue template. The following information is missing: "Instructions To Reproduce the Issue and Full Logs"; "Your Environment".
Requested information was not provided within 7 days, so we're closing this issue.
Please open a new issue if the information becomes available. Otherwise, use GitHub Discussions for free-form discussion.
import sys
sys.path.append("unilm")
sys.path.append("detectron2")

import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch

from unilm.dit.object_detection.ditod import add_vit_config

from detectron2.config import CfgNode as CN
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor

import gradio as gr
# Step 1: instantiate config
cfg = get_cfg()
add_vit_config(cfg)
cfg.merge_from_file("cascade_dit_base.yml")

# Step 2: add model weights to config
cfg.MODEL.WEIGHTS = "publaynet_dit-b_cascade.pth"

# Step 3: set device
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Step 4: define model
predictor = DefaultPredictor(cfg)
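To tune how confident a detection must be before it is kept, detectron2's standard score-threshold config key can be set here. A minimal sketch, assuming that key applies to the cascade heads in cascade_dit_base.yml; 0.3 is only an example value, and the threshold must be in place before DefaultPredictor is constructed, so the predictor is rebuilt:

# optional: adjust the detection confidence threshold (example value),
# then rebuild the predictor so the new threshold takes effect
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.3
predictor = DefaultPredictor(cfg)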
import json
import pdfplumber

def extract_text_from_pdf_with_coordinates(pdf_path, page_number, x_min, y_min, x_max, y_max):
    with pdfplumber.open(pdf_path) as pdf:
        page = pdf.pages[page_number - 1]  # adjust for 0-based indexing
        page_width = page.width
        page_height = page.height
        # crop the page to the bounding box and extract the text inside it;
        # pdfplumber expects (x0, top, x1, bottom) in PDF points
        cropped = page.crop((x_min, y_min, x_max, y_max))
        content = cropped.extract_text() or ""
    return content
pdf_path = "/Users/infx012941/Desktop/AI/pdf/AIM_chest-imaging.pdf"
page_number = 6
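A hypothetical call to the helper above, using the pdf_path and page_number just defined; the box coordinates are placeholder values in PDF points (pdfplumber's unit), not model output:

# hypothetical usage with placeholder coordinates
snippet = extract_text_from_pdf_with_coordinates(pdf_path, page_number, 72, 100, 540, 300)
print(snippet)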
def analyze_image(img):
    md = MetadataCatalog.get(cfg.DATASETS.TEST[0])
    if cfg.DATASETS.TEST[0] == 'icdar2019_test':
        md.set(thing_classes=["table"])
    else:
        md.set(thing_classes=["text", "title", "list", "table", "figure"])

    # run inference and draw the predicted layout regions on the image
    output = predictor(img)["instances"]
    v = Visualizer(img[:, :, ::-1], md, scale=1.0, instance_mode=ColorMode.SEGMENTATION)
    result = v.draw_instance_predictions(output.to("cpu"))
    return result.get_image()[:, :, ::-1]
title = "Interactive demo: Document Layout Analysis with DiT"
description = "Demo for Microsoft's DiT, the Document Image Transformer for state-of-the-art document understanding tasks. This particular model is fine-tuned on PubLayNet, a large dataset for document layout analysis (read more at the links below). To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
article = "
Paper | Github Repo
| HuggingFace doc"examples =[['publaynet_example.jpeg']]
css = ".output-image, .input-image, .image-preview {height: 600px !important}"
iface = gr.Interface(fn=analyze_image,
                     inputs=gr.Image(type="numpy", label="document image"),
                     outputs=gr.Image(type="numpy", label="annotated document"),
                     title=title,
                     description=description,
                     article=article,
                     examples=examples,
                     css=css)
obj = iface.launch(debug=True)
Is it possible to adjust the model's confidence threshold? I am unable to obtain the complete set of labeled coordinates along with the corresponding text.
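For reference, a minimal sketch of one way to list every labeled region with its coordinates and text, assuming the cfg, predictor, and extract_text_from_pdf_with_coordinates defined above. The rendered page image path is a placeholder, and pred_boxes are in image pixels, so they are rescaled to PDF points before the text lookup:

# minimal sketch: print label, score, box, and text for each detected region
img = cv2.imread("page6.png")  # placeholder: a rendering of the PDF page
instances = predictor(img)["instances"].to("cpu")
classes = ["text", "title", "list", "table", "figure"]

# rescale factors from image pixels to PDF points
h, w = img.shape[:2]
with pdfplumber.open(pdf_path) as pdf:
    page = pdf.pages[page_number - 1]
    scale_x, scale_y = page.width / w, page.height / h

for box, cls, score in zip(instances.pred_boxes.tensor.tolist(),
                           instances.pred_classes.tolist(),
                           instances.scores.tolist()):
    x0, y0, x1, y1 = box
    text = extract_text_from_pdf_with_coordinates(
        pdf_path, page_number,
        x0 * scale_x, y0 * scale_y, x1 * scale_x, y1 * scale_y)
    print(f"{classes[cls]} ({score:.2f}) at ({x0:.0f}, {y0:.0f}, {x1:.0f}, {y1:.0f})")
    print(text)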