nischi / MMM-Face-Reco-DNN

Face recognition with opencv and deep neural network
MIT License

I want to show my code in MagicMirror #95

Closed kimyunho-cpu closed 3 years ago

kimyunho-cpu commented 3 years ago

I have OpenCV Python code, and it works very well on its own, without MagicMirror. But I want to run this OpenCV code inside MagicMirror. What do I have to do? Replace, change, or fix something? I have two pieces of code: the basic MagicMirror code and this OpenCV code. I want to put this code into MagicMirror and use it there. Please teach me in detail.

import cv2
import numpy as np
import sys
from PyQt5 import QtCore,QtGui
from PyQt5.QtWidgets import QPushButton,QWidget,QGridLayout,QLabel,QApplication
from PyQt5.QtCore import QThread
import os
import time

casc='haarcascade_frontalface_default.xml'
eye_cas='haarcascade_eye.xml'

faceCascade=cv2.CascadeClassifier(casc)
eyeCascade=cv2.CascadeClassifier(eye_cas)
cap=cv2.VideoCapture(0)
ret=cap.set(3,240)   # frame width
ret=cap.set(4,180)   # frame height

# paths of the 18 overlay images (1-15 = hair, 16-18 = glasses)
images=[]
for i in range(1,19):
    images.append('./images/'+str(i)+'.png')
print(images)

num=0

class MyApp(QWidget):

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        global images
        grid=QGridLayout()
        self.setLayout(grid)
        # build the 18 buttons in a loop (same behaviour as the original
        # btn1..btn18 / cbtn1..cbtn18 code, just without the repetition)
        self.btns=[]
        for i in range(18):
            btn=QPushButton(str(i+1))
            btn.resize(24,24)
            btn.setIcon(QtGui.QIcon(images[i]))
            btn.setIconSize(QtCore.QSize(24,24))
            # clicking button i selects overlay number i+1 and closes the picker
            btn.clicked.connect(lambda checked=False, n=i+1: self.pick(n))
            # five buttons per row: rows 0-2 hold 1-15, row 3 holds 16-18
            grid.addWidget(btn, i//5, i%5, 1, 1)
            self.btns.append(btn)
        self.setWindowTitle('set image')
        self.show()

    def pick(self, n):
        # remember the chosen overlay number in the global `num`, then close the window
        global num
        num=n
        self.close()
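If the goal is to run this under MagicMirror, the blocking PyQt picker window is probably the first thing to replace, since the mirror has no desktop UI for it. Below is a minimal sketch of taking the overlay number from a command-line argument instead; argparse and the --overlay flag are my own assumptions, not part of the original script:

# Sketch: choose the overlay from the command line instead of the PyQt window,
# so the script can run headless (e.g. started by a MagicMirror node_helper).
import argparse

def parse_overlay():
    parser=argparse.ArgumentParser(description='face overlay demo')
    parser.add_argument('--overlay',type=int,default=1,
                        help='overlay image number: 1-15 = hair, 16-18 = glasses')
    args=parser.parse_args()
    if not 1<=args.overlay<=18:
        parser.error('overlay must be between 1 and 18')
    return args.overlay

# num=str(parse_overlay())   # would replace the QApplication / MyApp block below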

app=QApplication(sys.argv)
ex=MyApp()
app.exec_()   # blocks until a button is clicked and the picker closes
print(num)
num=str(num)

while True:
    ret,frame=cap.read()
    frame=cv2.flip(frame,1)
    gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    faces=faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(100,100),
    )

    # keep only the largest detected face
    sz=0
    a,b,c,d=0,0,0,0
    x,y,w,h=0,0,0,0
    count=0
    for (x,y,w,h) in faces:
        if w>sz:
            sz=w
            a,b,c,d=(x,y,w,h)
            count=count+1
    if count>0:
        if True:  # when eyes are found (no eye check in this version)
            #cv2.imshow('video',frame)

            hair_url='./images/'+num+'.png'
            hair=cv2.imread(hair_url,cv2.IMREAD_UNCHANGED)
            height,width,channel=hair.shape

            # scale the overlay roughly to the detected face size
            origin_ratio=int(w/h)
            height=origin_ratio*width
            ratio=int(width/w)
            height=int(height/ratio)
            width=int(width/ratio)
            if w>0 and h>0:
                if int(num)>15:  # sunglasses overlay
                    print('3channel')
                    height=int(height/3)
                    y=y+int(height/2)
                    x=x-int(width/12)

                    hair=cv2.resize(hair,(width,height),interpolation=cv2.INTER_AREA)
                    cut_img=frame[y:y+height,x:x+width]
                    gray_hair=cv2.cvtColor(hair,cv2.COLOR_BGR2GRAY)

                    gray_hair=cv2.bitwise_not(gray_hair)
                    ret,mask=cv2.threshold(gray_hair,100,255,cv2.THRESH_BINARY)

                    #cv2.rectangle(frame,(x,y),(x+width,y+height),(0,2,255),2)
                    mask_inv=cv2.bitwise_not(mask)
                    fg=cv2.bitwise_and(hair,hair,mask=mask)
                    bg=cv2.bitwise_and(cut_img,cut_img,mask=mask_inv)
                    result=cv2.add(fg,bg)
                    frame[y:y+height,x:x+width]=result
                    cv2.imshow('result',frame)
                else:  # hair overlay
                    # note: cv2.IMREAD_UNCHANGED returns B,G,R,A, so these names are really b,g,r,a
                    r,g,b,a=cv2.split(hair)
                    r=cv2.bitwise_and(r,r,mask=a)
                    g=cv2.bitwise_and(g,g,mask=a)
                    b=cv2.bitwise_and(b,b,mask=a)
                    hair=cv2.merge((r,g,b))
                    height=height*2
                    width=width*3
                    if x>int(width/2) and y>int(height/2):
                        x=x-w
                        y=y-int(h/2)
                        #cv2.rectangle(frame,(x,y),(x+width,y+height),(0,2,255),2)
                        hair=cv2.resize(hair,(width,height),interpolation=cv2.INTER_AREA)

                        cut_img=frame[y:y+height,x:x+width]
                        gray_hair=cv2.cvtColor(hair,cv2.COLOR_BGR2GRAY)

                        ret,mask=cv2.threshold(gray_hair,10,255,cv2.THRESH_BINARY)
                        mask_inv=cv2.bitwise_not(mask)
                        fg=cv2.bitwise_and(hair,hair,mask=mask)
                        bg=cv2.bitwise_and(cut_img,cut_img,mask=mask_inv)
                        result=cv2.add(fg,bg)
                        frame[y:y+height,x:x+width]=result
                        cv2.imshow('result',frame)
                    else:
                        print('step back too close')

    if cv2.waitKey(1)&0xff==ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
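Regarding the actual question: MagicMirror's interface runs in Electron, so the cv2.imshow windows above will never appear inside the mirror. As far as I understand, the usual pattern (and roughly what MMM-Face-Reco-DNN itself does) is to have the module's node_helper spawn the Python script and read its output. The sketch below only illustrates that idea; report_face is a hypothetical helper, not part of this module's API:

# Sketch (my assumption, not part of the original script): print one JSON line per
# detection to stdout so a MagicMirror node_helper that spawned this script can
# parse it and send a notification to the front-end module.
import json

def report_face(x,y,w,h,overlay):
    # flush so the node_helper sees the line immediately
    print(json.dumps({'x':int(x),'y':int(y),'w':int(w),'h':int(h),
                      'overlay':int(overlay)}),flush=True)

# inside the while loop, after the largest face has been picked:
#     report_face(x,y,w,h,num)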

A second version of the same script, which asks for the overlay number on the console instead of using the PyQt window, and also tries to detect eyes:

import cv2
import numpy as np
import sys

casc='haarcascade_frontalface_default.xml'
eye_cas='haarcascade_eye.xml'

faceCascade=cv2.CascadeClassifier(casc)
eyeCascade=cv2.CascadeClassifier(eye_cas)
cap=cv2.VideoCapture(0)
ret=cap.set(3,240)   # frame width
ret=cap.set(4,180)   # frame height

# ask for the overlay number on the console
while True:
    num=input('input numbers 1~15= hair, 16~18 = glass')
    if int(num)>0 and int(num)<19:
        break

while True:
    ret,frame=cap.read()
    frame=cv2.flip(frame,1)
    gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    faces=faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(100,100),
    )

    # keep only the largest detected face
    sz=0
    a,b,c,d=0,0,0,0
    x,y,w,h=0,0,0,0
    count=0
    for (x,y,w,h) in faces:
        if w>sz:
            sz=w
            a,b,c,d=(x,y,w,h)
            count=count+1
    if count>0:
        (x,y,w,h)=a,b,c,d
        roi_gray=gray[y:y+h,x:x+w]
        roi_color=gray[y:y+h,x:x+w]
        eye=eyeCascade.detectMultiScale(roi_gray)
        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
        sz=0
        count=0
        eyes=[]
        for ex,ey,ew,eh in eye:
            if ey<int(h/2) and ew>sz:
                sz=ew
                a,b,c,d=(ex,ey,ew,eh)
                count=count+1
                eyes.append((a,b,c,d))
        eyes.sort(reverse=True)
        if count>1:  # eyes were found
            xi,yi,wi,hi=eyes[0]

            xxi,yyi,wwi,hhi=eyes[1]
            lx=0
            ly=0

            rx=0
            ry=0

            if xi<int(w/2):  # xi : left
                lx=x+xi+int(wi/2)
                ly=y+yi+int(hi/2)
                cv2.line(frame,(lx,ly),(lx,ly),(255,255,0),8,cv2.LINE_AA)
            else:
                rx=x+xi+int(wi/2)
                ry=y+yi+int(hi/2)
                cv2.line(frame,(rx,ry),(rx,ry),(255,0,0),8,cv2.LINE_AA)
            cv2.imshow('video',frame)

            hair_url='./images/'+num+'.png'
            hair=cv2.imread(hair_url,cv2.IMREAD_UNCHANGED)
            height,width,channel=hair.shape

            # scale the overlay roughly to the detected face size
            origin_ratio=int(w/h)
            height=origin_ratio*width
            ratio=int(width/w)
            height=int(height/ratio)
            width=int(width/ratio)
            if w>0 and h>0:
                if int(num)>15:  # sunglasses overlay
                    print('3channel')
                    height=int(height/3)
                    y=y+int(height/2)
                    x=x-int(width/12)

                    hair=cv2.resize(hair,(width,height),interpolation=cv2.INTER_AREA)
                    cut_img=frame[y:y+height,x:x+width]
                    gray_hair=cv2.cvtColor(hair,cv2.COLOR_BGR2GRAY)

                    gray_hair=cv2.bitwise_not(gray_hair)
                    ret,mask=cv2.threshold(gray_hair,100,255,cv2.THRESH_BINARY)

                    mask_inv=cv2.bitwise_not(mask)
                    fg=cv2.bitwise_and(hair,hair,mask=mask)
                    bg=cv2.bitwise_and(cut_img,cut_img,mask=mask_inv)
                    result=cv2.add(fg,bg)
                    frame[y:y+height,x:x+width]=result
                    cv2.imshow('result',frame)
                else:  # hair overlay
                    # note: cv2.IMREAD_UNCHANGED returns B,G,R,A, so these names are really b,g,r,a
                    r,g,b,a=cv2.split(hair)
                    r=cv2.bitwise_and(r,r,mask=a)
                    g=cv2.bitwise_and(g,g,mask=a)
                    b=cv2.bitwise_and(b,b,mask=a)
                    hair=cv2.merge((r,g,b))
                    height=height*2
                    width=width*3
                    if x>int(width/2) and y>int(height/2):
                        x=x-w
                        y=y-int(h/2)
                        cv2.rectangle(frame,(x,y),(x+width,y+height),(0,2,255),2)
                        hair=cv2.resize(hair,(width,height),interpolation=cv2.INTER_AREA)

                        cut_img=frame[y:y+height,x:x+width]
                        gray_hair=cv2.cvtColor(hair,cv2.COLOR_BGR2GRAY)

                        ret,mask=cv2.threshold(gray_hair,10,255,cv2.THRESH_BINARY)
                        mask_inv=cv2.bitwise_not(mask)
                        fg=cv2.bitwise_and(hair,hair,mask=mask)
                        bg=cv2.bitwise_and(cut_img,cut_img,mask=mask_inv)
                        result=cv2.add(fg,bg)
                        frame[y:y+height,x:x+width]=result
                        cv2.imshow('result',frame)
                    else:
                        print('step back too close')

    if cv2.waitKey(1)&0xff==ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
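If the goal is to show the composited frame itself on the mirror rather than just coordinates, another option would be to write it to an image file that a MagicMirror module can display. Both the output path and the approach below are my assumptions, not anything this module defines:

# Sketch: publish the composited frame as a file instead of calling cv2.imshow.
# '/tmp/overlay.jpg' is just an example path.
import os
import cv2

def publish_frame(frame,path='/tmp/overlay.jpg'):
    # write to a temporary name first, then rename, so a reader never sees a
    # half-written file (the extra .jpg keeps cv2.imwrite's format detection happy)
    tmp=path+'.tmp.jpg'
    cv2.imwrite(tmp,frame)
    os.replace(tmp,path)

# in the loops above, cv2.imshow('result',frame) could become:
#     publish_frame(frame)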