flet-dev / flet

Flet enables developers to easily build realtime web, mobile and desktop apps in Python. No frontend experience required.
https://flet.dev
Apache License 2.0

`Camera` control #1281

Open monkeycc opened 1 year ago

monkeycc commented 1 year ago

Please Describe The Problem To Be Solved

Allow the web or desktop client to directly call the camera.

(Optional): Suggest A Solution

camera https://pub.dev/packages/camera

A Flutter plugin for iOS, Android and Web allowing access to the device cameras.

Display live camera preview in a widget. Snapshots can be captured and saved to a file. Record video. Add access to the image stream from Dart.

Working with camera stream: https://www.youtube.com/watch?v=oqfCCWhNe7s https://pub.dev/documentation/camera/latest/camera/CameraController/startImageStream.html

ndonkoHenri commented 1 year ago

Is there perhaps any desktop plugin too?

monkeycc commented 1 year ago

Is there perhaps any desktop plugin too?

OpenCV? cv2.VideoCapture(0)
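
A minimal sketch of that idea for desktop (a workaround, not the proposed `Camera` control): grab frames with OpenCV in a background thread and push them into an `ft.Image` as base64-encoded JPEGs. The camera index 0 is an assumption.

```python
# Sketch: show an OpenCV camera feed in a Flet Image on desktop
# by streaming base64-encoded JPEG frames from a background thread.
import base64
import threading
import time

import cv2
import flet as ft


def main(page: ft.Page):
    img = ft.Image(src_base64="", width=640, height=480, fit=ft.ImageFit.CONTAIN)
    page.add(img)

    cap = cv2.VideoCapture(0)  # assumes a local camera at index 0

    def stream():
        while cap.isOpened():
            ok, frame = cap.read()
            if not ok:
                break
            ok, buf = cv2.imencode(".jpg", frame)
            if ok:
                img.src_base64 = base64.b64encode(buf.tobytes()).decode()
                page.update()
            time.sleep(0.03)  # roughly 30 fps

    threading.Thread(target=stream, daemon=True).start()


ft.app(target=main)
```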

Python-Zhao commented 11 months ago

I've been looking forward to the camera control; I don't know when it will be available to use.

Python-Zhao commented 11 months ago

Recently I used Flet for an information management project. I wanted to use the mobile phone to open the camera and scan a QR code to retrieve information, but it was not possible.

goncaze commented 10 months ago

@Python-Zhao Hello! Are you still unable to access the camera in your Flet app?

Python-Zhao commented 10 months ago

@goncaze Yes. I have been using Python to develop projects, and I want to use the camera for scan-to-query, scan-to-login, and photo recognition. I publish my project as a PWA and use `cap = cv2.VideoCapture(0)` to open the local camera; it works on Windows, but I have not been able to access the camera from the phone. Is there any solution? Otherwise I'm going to learn Dart and redevelop the app with Flutter.

glira commented 8 months ago

Is there perhaps any desktop plugin too?

OpenCV? cv2.VideoCapture(0)

This doesn't work on Android, only on Linux.

jibinjp commented 7 months ago

Why was the "platform:mobile" label removed? Also, when using cv2 from within the Flet framework there is no way to add a key interrupt to pause or stop the camera once it's started, unless the app itself is refreshed, which defeats the purpose.
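
One workaround sketch for the stop/pause problem (assuming you own the capture loop, as with the cv2 approach above): drive the loop from a threading.Event and toggle it from a Flet button instead of a key interrupt.

```python
# Sketch: stop a cv2 capture loop from a Flet button without refreshing the app.
# The loop below is your own code, not a framework feature.
import threading

import cv2
import flet as ft


def main(page: ft.Page):
    stop_event = threading.Event()

    def capture_loop():
        cap = cv2.VideoCapture(0)
        while not stop_event.is_set():
            ok, frame = cap.read()
            if not ok:
                break
            # ... encode `frame` and update an ft.Image here ...
        cap.release()

    def stop_camera(e):
        stop_event.set()  # capture_loop checks this flag and exits cleanly

    page.add(ft.ElevatedButton("Stop camera", on_click=stop_camera))
    threading.Thread(target=capture_loop, daemon=True).start()


ft.app(target=main)
```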

C69Xg3sg9PAvYK commented 7 months ago

@goncaze Yes. I have been using Python to develop projects, and I want to use the camera for scan-to-query, scan-to-login, and photo recognition. I publish my project as a PWA and use `cap = cv2.VideoCapture(0)` to open the local camera; it works on Windows, but I have not been able to access the camera from the phone. Is there any solution? Otherwise I'm going to learn Dart and redevelop the app with Flutter.

I'm in the same boat as you. Hoping this will be introduced soon.

ndonkoHenri commented 7 months ago

Why was the "platform:mobile" label removed?

Because the Camera control to be implemented is not just mobile-specific. It's for all platforms...

metrolunar commented 7 months ago

My use case is mobile-specific.

I just need to press a button to take a picture and store it locally for upload to a server for further processing.
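
For reference, on desktop that flow can be sketched roughly like this (one OpenCV frame per button click, saved locally and POSTed to a placeholder upload URL; on a phone it still fails for the reasons discussed above).

```python
# Sketch: capture one frame on button click, save it locally, upload to a server.
# The upload URL is a placeholder; cv2 cannot reach the phone camera, so this is desktop-only.
import cv2
import flet as ft
import requests


def main(page: ft.Page):
    def take_picture(e):
        cap = cv2.VideoCapture(0)
        ok, frame = cap.read()
        cap.release()
        if not ok:
            return
        path = "snapshot.jpg"
        cv2.imwrite(path, frame)  # store the picture locally
        with open(path, "rb") as f:
            # hypothetical endpoint; replace with your server's upload URL
            requests.post("https://example.com/upload", files={"photo": f})

    page.add(ft.ElevatedButton("Take picture", on_click=take_picture))


ft.app(target=main)
```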

Abduguru commented 7 months ago

Please, can I create an application with Flet that is not a web app and run it on Android?

HamaBTW commented 3 months ago

@ndonkoHenri @FeodorFitsner @monkeycc

I made a camera class for my Flet app; I don't know if it can help. It relies on OpenCV (cv2).

```python
from pyzbar import pyzbar
from PIL import Image
import flet as ft
import threading
import base64
import cv2
import io


class CameraMaster():
    def __init__(self, page: ft.Page, camera_img_control: ft.Image | ft.Container,
                 is_a_qr_reader: bool = False, qr_reader_callback: callable = None):
        self.page = page
        self.camera_img_control = camera_img_control
        self.is_a_qr_reader = is_a_qr_reader
        self.qr_reader_callback = qr_reader_callback
        # CryptoHandlerMaster is the author's own QR-validation helper (not included in this snippet)
        self.crypto_handler = CryptoHandlerMaster(page)
        self.camera_img_control.fit = ft.ImageFit.FILL

        # Find the available camera
        self.camera_index = self.find_available_camera()
        if self.camera_index is None:
            print("No available camera found.")
            return

        # Initialize the camera
        self.cap = cv2.VideoCapture(self.camera_index)

        # Create the image control
        # img = ft.Image(src_base64="", width=640, height=480)

        # Event to control the update loop
        self.stop_event = threading.Event()

        # Start the periodic image update in a separate thread
        self.update_thread = threading.Thread(target=self.update_image)
        self.update_thread.start()

        # Attach the cleanup function to the page close event
        self.page.on_close = self.on_close

        self.page.update()

    def is_mounted(self) -> bool:
        if self.page is None:
            return False
        for view in self.page.views:
            for control in view.controls:
                if self == control.content:
                    return True
        return False

    def find_available_camera(self, max_index=10):
        for index in range(max_index):
            cap = cv2.VideoCapture(index)
            if cap.isOpened():
                cap.release()
                return index
        return None

    def update_image(self):
        while not self.stop_event.is_set():
            self.ret, self.frame = self.cap.read()
            if self.ret:
                if self.is_a_qr_reader == True:
                    # Get frame dimensions
                    height, width, _ = self.frame.shape

                    # Define the rectangle dimensions (centered)
                    rect_width, rect_height = 400, 350
                    top_left = (width // 2 - rect_width // 2, height // 2 - rect_height // 2)
                    bottom_right = (width // 2 + rect_width // 2, height // 2 + rect_height // 2)
                    color = (255, 255, 255)  # White color in BGR
                    thickness = 2
                    style = 'dashed'

                    # Apply dark overlay outside the rectangle
                    self.frame = self.apply_overlay(self.frame, top_left, bottom_right)

                    # Draw the dashed rectangle on top of the frame
                    self.drawrect(self.frame, top_left, bottom_right, color, thickness, style)

                    # Scan QR codes within the rectangle
                    self.frame, valid_objects = self.scan_qr_codes_within_rect(self.frame, top_left, bottom_right)
                    if valid_objects and self.qr_reader_callback:
                        for obj in valid_objects:
                            if self.qr_reader_callback != None and obj['is_valid'] == True:
                                self.qr_reader_callback(obj['data'])

                # Convert the frame to RGB
                self.frame_rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                # Convert the frame to PIL Image
                self.pil_im = Image.fromarray(self.frame_rgb)
                # Save the image to a bytes buffer
                self.buf = io.BytesIO()
                self.pil_im.save(self.buf, format='PNG')
                # Encode the bytes buffer to base64
                self.img_str = base64.b64encode(self.buf.getvalue()).decode()
                # Update the Flet image control
                self.camera_img_control.src_base64 = self.img_str
                # self.camera_img_control.image_src_base64 = self.img_str
                try:
                    self.page.update()
                except Exception:
                    break

            # Sleep for 100 ms
            self.stop_event.wait(0.1)

    def on_close(self, e):
        # print("Camera closed")
        self.stop_event.set()
        self.cap.release()  # Release the camera
        self.update_thread.join()

    def set_camera_img_control(self, camera_img_control: ft.Image | ft.Container):
        self.camera_img_control = camera_img_control

    def drawline(self, img, pt1, pt2, color, thickness=1, style='dashed', dash_length=10, gap_length=10):
        dist = ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5
        num_dashes = int(dist / (dash_length + gap_length))

        for i in range(num_dashes):
            start = (
                int(pt1[0] + (pt2[0] - pt1[0]) * (i * (dash_length + gap_length)) / dist),
                int(pt1[1] + (pt2[1] - pt1[1]) * (i * (dash_length + gap_length)) / dist)
            )
            end = (
                int(pt1[0] + (pt2[0] - pt1[0]) * ((i * (dash_length + gap_length)) + dash_length) / dist),
                int(pt1[1] + (pt2[1] - pt1[1]) * ((i * (dash_length + gap_length)) + dash_length) / dist)
            )

            cv2.line(img, start, end, color, thickness)

    def drawpoly(self, img, pts, color, thickness=1, style='dashed'):
        s = pts[0]
        e = pts[0]
        pts.append(pts.pop(0))
        for p in pts:
            s = e
            e = p
            self.drawline(img, s, e, color, thickness, style)

    def drawrect(self, img, pt1, pt2, color, thickness=1, style='dashed'):
        # Draw the dashed rectangle on the frame
        pts = [pt1, (pt2[0], pt1[1]), pt2, (pt1[0], pt2[1])]
        self.drawpoly(img, pts, color, thickness, style)

    def apply_overlay(self, img, rect_pt1, rect_pt2):
        # Create a dark overlay
        overlay = img.copy()
        overlay[:] = (0, 0, 0)  # Dark color
        alpha = 0.4  # Overlay transparency

        # Draw a white rectangle to cover the area inside the dashed rectangle
        cv2.rectangle(overlay, rect_pt1, rect_pt2, (255, 255, 255), cv2.FILLED)

        # Blend the overlay with the original image
        img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
        return img

    def scan_qr_codes_within_rect(self, frame, rect_pt1, rect_pt2):
        decoded_objects = pyzbar.decode(frame)
        valid_objects = []
        list_of_valid_objects_to_return = []
        for obj in decoded_objects:

            # qr_data = obj.data.decode("utf-8")
            # qr_type = obj.type
            # text = f'{qr_data} ({qr_type})'
            is_valid, qr_data = self.crypto_handler.validate_data(obj.data.decode("utf-8"))
            color = (0, 255, 0) if is_valid else (0, 0, 255)
            text = f"({qr_data['name']})" if is_valid else "(Invalid)"

            object_to_return = {
                'data': qr_data,
                'is_valid': is_valid,
            }
            list_of_valid_objects_to_return.append(object_to_return)

            # Draw the bounding box and text on the frame
            points = obj.polygon
            if len(points) > 4:
                hull = cv2.convexHull(points)
                points = hull
            points = list(points)
            for j in range(len(points)):
                cv2.line(frame, tuple(points[j]), tuple(points[(j + 1) % len(points)]), color, 3)

            # Draw the text on the frame
            cv2.putText(frame, text, (points[0].x, points[0].y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            center_x = (obj.rect.left + obj.rect.left + obj.rect.width) // 2
            center_y = (obj.rect.top + obj.rect.top + obj.rect.height) // 2
            if rect_pt1[0] <= center_x <= rect_pt2[0] and rect_pt1[1] <= center_y <= rect_pt2[1]:
                valid_objects.append(obj)

        # return frame, valid_objects
        return frame, list_of_valid_objects_to_return

""" def main(page: ft.Page):

# Create the image control
img = ft.Image(src_base64="", src="test.test", width=640, height=480)
page.add(img)

page.update()

camera_master = CameraMaster(page, img, is_a_qr_reader=True, qr_reader_callback=lambda data: print(f"QR Code Data: {data}"))

ft.app(target=main) """

```