PaddlePaddle / PaddleOCR

Awesome multilingual OCR toolkits based on PaddlePaddle (practical ultra lightweight OCR system, support 80+ languages recognition, provide data annotation and synthesis tools, support training and deployment among server, mobile, embedded and IoT devices)
Apache License 2.0
41.13k stars 7.54k forks source link

请教一下版面分析+OCR服务部署 #10567

Closed lycfight closed 1 month ago

lycfight commented 11 months ago

请提供下述完整信息以便快速定位问题 / Please provide the following information to quickly locate the problem

import logging import numpy as np import copy import cv2 import base64

from paddle_serving_app.reader import OCRReader

from ocr_reader import OCRReader, DetResizeForTest, ArgsParser from paddle_serving_app.reader import Sequential, ResizeByFactor from paddle_serving_app.reader import Div, Normalize, Transpose from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes

_LOGGER = logging.getLogger()

class DetOp(Op):
    """Detection op for the Paddle Serving pipeline.

    Decodes the base64 request image, runs the DB text-detection
    pre-processing before the model and the DB post-processing after it,
    and forwards the filtered boxes plus the raw image to the next op.
    """

    def init_op(self):
        # Resize + normalize pipeline matching the PP-OCR det config
        # (ImageNet mean/std, HWC -> CHW).
        self.det_preprocess = Sequential([
            DetResizeForTest(),
            Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            Transpose((2, 0, 1)),
        ])
        # Drop boxes smaller than 10x10 pixels.
        self.filter_func = FilterBoxes(10, 10)
        self.post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.6,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3,
        })

    def preprocess(self, input_dicts, data_id, log_id):
        """Decode the request image and build the detection model feed."""
        (_, input_dict), = input_dicts.items()
        data = base64.b64decode(input_dict["image"].encode('utf8'))
        self.raw_im = data
        # np.fromstring is deprecated; frombuffer is the supported equivalent
        # for reading raw bytes into a uint8 array.
        data = np.frombuffer(data, np.uint8)
        # Note: class variables (self.var) can only be used in process op mode
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        self.ori_h, self.ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        _, self.new_h, self.new_w = det_img.shape
        # Add the batch dimension expected by the model.
        return {"x": det_img[np.newaxis, :].copy()}, False, None, ""

    def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
        """Turn the raw detection output into filtered text boxes."""
        det_out = list(fetch_dict.values())[0]
        # Scale factors to map boxes back to the original image size.
        ratio_list = [
            float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
        ]
        dt_boxes_list = self.post_func(det_out, [ratio_list])
        dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
        out_dict = {"dt_boxes": dt_boxes, "image": self.raw_im}
        return out_dict, None, ""

class RecOp(Op):
    """Recognition op for the Paddle Serving pipeline.

    Receives the image and detected boxes from DetOp, crops each box,
    batches the crops through the recognizer, and returns the recognized
    text paired with its box.
    """

    def init_op(self):
        self.ocr_reader = OCRReader(
            char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt")
        self.get_rotate_crop_image = GetRotateCropImage()
        self.sorted_boxes = SortedBoxes()

    def preprocess(self, input_dicts, data_id, log_id):
        """Crop each detected box and pack crops into mini-batch feeds.

        Returns a list of feed dicts, one per mini-batch of at most
        ``max_batch_size`` crops.
        """
        (_, input_dict), = input_dicts.items()
        raw_im = input_dict["image"]
        data = np.frombuffer(raw_im, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        self.dt_list = input_dict["dt_boxes"]
        self.dt_list = self.sorted_boxes(self.dt_list)
        # deepcopy to save origin dt_boxes
        dt_boxes = copy.deepcopy(self.dt_list)
        feed_list = []
        # Default aspect ratio of the rec input (320x48).
        max_wh_ratio = 320 / 48.
        # Many mini-batches: the type of feed_data is list.
        max_batch_size = 6  # len(dt_boxes)

        # If max_batch_size is 0, skip the predict stage entirely.
        if max_batch_size == 0:
            return {}, True, None, ""
        boxes_size = len(dt_boxes)
        batch_size = boxes_size // max_batch_size
        rem = boxes_size % max_batch_size
        # bt_idx can never exceed batch_size inside range(batch_size + 1),
        # so the original unreachable error branch was removed.
        for bt_idx in range(0, batch_size + 1):
            if bt_idx == batch_size:
                if rem == 0:
                    continue
                boxes_num_in_one_batch = rem
            else:
                boxes_num_in_one_batch = max_batch_size

            start = bt_idx * max_batch_size
            end = start + boxes_num_in_one_batch
            img_list = []
            for box_idx in range(start, end):
                boximg = self.get_rotate_crop_image(im, dt_boxes[box_idx])
                img_list.append(boximg)
                h, w = boximg.shape[0:2]
                # Widen the target ratio to fit the widest crop seen so far.
                max_wh_ratio = max(max_wh_ratio, w * 1.0 / h)
            # resize_norm_img returns CHW, so unpack height then width
            # (the original code named these w/h, swapped).
            _, norm_h, norm_w = self.ocr_reader.resize_norm_img(
                img_list[0], max_wh_ratio).shape
            imgs = np.zeros(
                (boxes_num_in_one_batch, 3, norm_h, norm_w)).astype('float32')
            # 'idx' instead of 'id' to avoid shadowing the builtin.
            for idx, img in enumerate(img_list):
                imgs[idx] = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            feed_list.append({"x": imgs.copy()})
        return feed_list, False, None, ""

    def postprocess(self, input_dicts, fetch_data, data_id, log_id):
        """Decode recognizer output and pair each text with its box.

        Keeps only results whose confidence score is >= 0.5.
        """
        rec_list = []
        dt_num = len(self.dt_list)
        # fetch_data is a dict for a single batch, a list for many batches.
        if isinstance(fetch_data, dict):
            if len(fetch_data) > 0:
                rec_list.extend(
                    self.ocr_reader.postprocess(fetch_data, with_score=True))
        elif isinstance(fetch_data, list):
            for one_batch in fetch_data:
                rec_list.extend(
                    self.ocr_reader.postprocess(one_batch, with_score=True))
        result_list = []
        for i in range(dt_num):
            text = rec_list[i]
            dt_box = self.dt_list[i]
            # text is a (string, score) pair; drop low-confidence lines.
            if text[1] >= 0.5:
                result_list.append([text, dt_box.tolist()])
        res = {"result": str(result_list)}
        return res, None, ""

class OcrService(WebService): def get_pipeline_response(self, read_op): det_op = DetOp(name="det", input_ops=[read_op]) rec_op = RecOp(name="rec", input_ops=[det_op]) return rec_op

uci_service = OcrService(name="ocr") FLAGS = ArgsParser().parse_args() uci_service.prepare_pipeline_config(yml_dict=FLAGS.conf_dict) uci_service.run_service()


我现有一段推理的代码,先版面分析,然后将OCR的行聚合到文本块中,该如何将其写成服务呢?

```python
import os
import time
import cv2
from paddleocr import PPStructure, PaddleOCR, draw_ocr, draw_structure_result, save_structure_res
from paddleocr.ppstructure.recovery.recovery_to_doc import sorted_layout_boxes

def is_contained(bbox1, bbox2):
    """Return True if the center point of *bbox1* lies strictly inside *bbox2*.

    Both boxes are (left, top, right, bottom) tuples.
    """
    left1, top1, right1, bottom1 = bbox1
    left2, top2, right2, bottom2 = bbox2
    cx = (left1 + right1) / 2
    cy = (top1 + bottom1) / 2
    return left2 < cx < right2 and top2 < cy < bottom2

def find_contain_block(ocr_bbox, layout_blocks):
    """Return the first layout block whose bbox contains the center of
    *ocr_bbox*, or None if no block contains it."""
    return next(
        (blk for blk in layout_blocks
         if is_contained(ocr_bbox['bbox'], blk['bbox'])),
        None,
    )

def intersection_ratio(bbox1, bbox2):
    """Return the fraction of *bbox1*'s area covered by its intersection
    with *bbox2*, or 0.0 when the boxes do not overlap."""
    l1, t1, r1, b1 = bbox1
    l2, t2, r2, b2 = bbox2

    ix_left = max(l1, l2)
    ix_top = max(t1, t2)
    ix_right = min(r1, r2)
    ix_bottom = min(b1, b2)

    # Disjoint (or degenerate) intersection: no overlap to measure.
    if ix_right <= ix_left or ix_bottom <= ix_top:
        return 0.0
    intersection_area = (ix_right - ix_left) * (ix_bottom - ix_top)
    return intersection_area / ((r1 - l1) * (b1 - t1))

def find_max_intersection(ocr_line, layout_blocks):
    """Return the layout block covering more than 90% of *ocr_line*'s box,
    or None when no block overlaps it that strongly."""
    best_block = None
    best_ratio = 0.0
    for blk in layout_blocks:
        r = intersection_ratio(ocr_line['bbox'], blk['bbox'])
        if r > best_ratio:
            best_ratio, best_block = r, blk
    return best_block if best_ratio > 0.9 else None

def aggregate(ocr_results, layout_results):
    """Group OCR lines into their containing layout blocks.

    Each returned block carries its bbox, the lines assigned to it
    (sorted top-to-bottom by the line's top y), and their concatenated
    text in 'res'.
    """
    blocks = [
        {'bbox': blk['bbox'], 'res': '', 'lines': []}
        for blk in layout_results
    ]
    for line in ocr_results:
        target = find_contain_block(line, blocks)
        if target is not None:
            target['lines'].append(line)
    for blk in blocks:
        blk['lines'].sort(key=lambda ln: ln['bbox'][1])
        blk['res'] = ''.join(ln['res'] for ln in blk['lines'])
    return blocks

# --- Demo: layout analysis + OCR line aggregation on a single page ---
t_start = time.time()

# Layout model only; recognition is done separately by PaddleOCR below.
table_engine = PPStructure(show_log=False, ocr=False)
font_path = './fonts/simfang.ttf'  # font bundled with PaddleOCR (for drawing)

img_path = './pdf/JZDB622X-L-05说明书-A1_split/page-22.png'
img = cv2.imread(img_path)
layout_results = table_engine(img)
h, w, _ = img.shape
layout_results = sorted_layout_boxes(layout_results, w)
# Keep only text-like blocks; figures/tables do not receive OCR lines.
layout_results_filtered = [
    block for block in layout_results
    if block['type'] != 'figure' and block['type'] != 'table'
]

ocr = PaddleOCR(show_log=False, ocr_order_method='tb-yx')
ocr_results = ocr.ocr(img, cls=True)
# Flatten each line's top-left + bottom-right corners into [l, t, r, b].
ocr_results_convert = [
    {'bbox': line[0][0] + line[0][2], 'res': line[1][0]}
    for line in ocr_results[0]
]

results = aggregate(ocr_results_convert, layout_results_filtered)

t_end = time.time()
print('Running time of all pages: %s Seconds' % (t_end - t_start))
```

SWHL commented 11 months ago

可以看一下FastAPI这个工具来部署成服务

lycfight commented 11 months ago

可以看一下FastAPI这个工具来部署成服务

目前还在尝试将PPStructure封装的预处理和后处理拆出来写成op,用Paddle Serving自定义pipeline,如果还是有问题的话,那只能放弃Paddle Serving,教程文档可读性太差了,完全无法实现自己的服务。

FastAPI是能直接复用上面的推理代码,放在服务的特定方法里是吧

SWHL commented 11 months ago

这个可以参考我整理的RapidStructure

UserWangZz commented 1 month ago

该issue长时间未更新,暂将此issue关闭,如有需要可重新开启。