PeterWang512 / FALdetector

Code for the paper: Detecting Photoshopped Faces by Scripting Photoshop
https://peterwang512.github.io/FALdetector/
Apache License 2.0

I made a Google Colab of this #31

Open konsumer opened 3 years ago

konsumer commented 3 years ago

I don't know if it's helpful to anyone, but I made a Google Colab notebook so you can play around with it very easily, for free:

https://colab.research.google.com/drive/1AQ0XSKWjzJBhGXXJ0XrA4DckFdv6Ul5N?usp=sharing#scrollTo=2eYISfxy-8qe

Works great!

konsumer commented 3 years ago

It's much better to use your own Google Drive folder to hold the code & output. It's faster (after the initial download), and you can browse the files on Drive.

Here is some setup code that will mount your Google Drive, fetch the code & models, and wrap everything up so it's easy to work with:

# this is where you are keeping things on your Google Drive

# path to code
PATH = "MyDrive/FALdetector"

# where to store output files
OUT = "MyDrive/FALdetector/out"

# where to look for input files (if applicable)
IN = "MyDrive/FALdetector/in"

from google.colab import files, drive
from os import path, remove, chdir
import sys
import requests
from urllib.parse import urlparse
from shutil import unpack_archive, move
from IPython.display import display, Image as IM

def download(url, fname=None):
  if fname is None:
    a = urlparse(url)
    fname = path.basename(a.path)
  print(f"Downloading {url}")
  # clean up any previous download with the same name
  try:
    remove(fname)
  except FileNotFoundError:
    pass
  with open(fname, "wb") as handle:
    response = requests.get(url, stream=True)
    if not response.ok:
      print(response)
      return None
    for block in response.iter_content(1024):
      if not block:
        break
      handle.write(block)
  return fname
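# example (hypothetical URL): download("https://example.com/pic.jpg")
# saves ./pic.jpg in the current directory and returns "pic.jpg"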

# show an array of images
def show_images(images):
  for image in images:
    display(IM(image))

print("Setting things up...")

# you will need to auth to your own google drive:
drive.mount("/content/drive")
full_path = path.join("/content/drive", PATH)
full_out = path.join("/content/drive", OUT)
full_in = path.join("/content/drive", IN)

# go to root-dir
chdir("/content")

# grab the repo
if not path.exists(full_path):
  f = download("https://github.com/PeterWang512/FALdetector/archive/refs/heads/master.zip")
  unpack_archive(f)
  move("FALdetector-master", full_path)
  remove(f)

# download models
chdir(path.join(full_path, "weights"))
if not path.exists("global.pth"):
  download("https://www.dropbox.com/s/rb8zpvrbxbbutxc/global.pth?dl=1")
if not path.exists("local.pth"):
  download("https://www.dropbox.com/s/pby9dhpr6cqziyl/local.pth?dl=1")

# CLASSIFY

# they are structured a bit funny for import, so I have to do some extra stuff
chdir(full_path)
import global_classifier

import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
from networks.drn_seg import DRNSeg
from utils.tools import *
from utils.visualize import *

def global_classify(image, no_crop=False):
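  # note: this reloads the classifier weights from disk on every call;
  # if you're scoring many images, consider loading the model once and reusing it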
  model = global_classifier.load_classifier(path.join(full_path, "weights", "global.pth"), 0)
  return global_classifier.classify_fake(model, image, no_crop)

def local_classify(fname, crop=False, dest_folder=full_out):
  img_path = fname
  model_path = path.join(full_path, "weights", "local.pth")
  no_crop = not crop
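  # assumes a CUDA GPU is available; in Colab, enable one via Runtime > Change runtime type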
  device = 'cuda:{}'.format(0)
  model = DRNSeg(2)
  state_dict = torch.load(model_path, map_location=device)
  model.load_state_dict(state_dict['model'])
  model.to(device)
  model.eval()
  tf = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  ])
  im_w, im_h = Image.open(img_path).size
  if no_crop:
      face = Image.open(img_path).convert('RGB')
  else:
      faces = face_detection(img_path, verbose=False)
      if len(faces) == 0:
          print("no face detected by dlib, exiting")
          sys.exit()
      face, box = faces[0]
  face = resize_shorter_side(face, 400)[0]
  face_tens = tf(face).to(device)

  # Warping field prediction
  with torch.no_grad():
      flow = model(face_tens.unsqueeze(0))[0].cpu().numpy()
      flow = np.transpose(flow, (1, 2, 0))
      h, w, _ = flow.shape

  # Undoing the warps
  modified = face.resize((w, h), Image.BICUBIC)
  modified_np = np.asarray(modified)
  reverse_np = warp(modified_np, flow)
  reverse = Image.fromarray(reverse_np)

  finname = path.splitext(path.basename(fname))[0]
  finput = path.join(dest_folder, f'{finname}-cropped_input.jpg')
  fwarped = path.join(dest_folder, f'{finname}-warped.jpg')
  fheat = path.join(dest_folder, f'{finname}-heatmap.jpg')

  flow_magn = np.sqrt(flow[:, :, 0]**2 + flow[:, :, 1]**2)

  # Saving the results
  modified.save(finput, quality=90)
  reverse.save(fwarped, quality=90)
  save_heatmap_cv(modified_np, flow_magn, fheat)
  return (finput, fwarped, fheat)
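
To sanity-check the setup before wiring up any inputs, you can call the wrappers directly on an image already in your Drive (sample.jpg is a hypothetical filename, substitute one of your own):

test_image = path.join(full_out, "sample.jpg")  # hypothetical file on your Drive
prob = global_classify(test_image)
print("Probability of being modified by Photoshop FAL: {:.2f}%".format(prob*100))
show_images(local_classify(test_image))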

Put this at the bottom of the same code block to prompt for uploading files:

chdir(full_out)
print("Please upload the image(s) you'd like to analyze: ")
images = files.upload()

# output analysis
for image in images:
  print(f"Global: {image}")
  full_image = path.join(full_out, image)
  prob = global_classify(full_image)
  print("Probibility being modified by Photoshop FAL: {:.2f}%".format(prob*100))
  print(f"Local: {image}")
  show_images(local_classify(full_image))

Put this at the bottom instead, to download a list of URLs:

# INPUT URLS
chdir(full_out)

# put yours in here
urls = [
  "https://organicthemes.com/demo/profile/files/2018/05/profile-pic.jpg"
]

for url in urls:
  image = download(url)
  print(f"Global: {image}")
  full_image = path.join(full_out, image)
  prob = global_classify(full_image)
  print("Probibility being modified by Photoshop FAL: {:.2f}%".format(prob*100))
  print(f"Local: {image}")
  show_images(local_classify(full_image))

Put this at the bottom instead, to process all the images in your IN folder on Google Drive:

from glob import glob

for image in glob(path.join(full_in, '*')):
  # glob already returns full paths, so there is no need to join with full_out
  print(f"Global: {image}")
  prob = global_classify(image)
  print("Probability of being modified by Photoshop FAL: {:.2f}%".format(prob*100))
  print(f"Local: {image}")
  show_images(local_classify(image))

IN (defined at the top) should be a flat directory, with no sub-dirs, containing only images.
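
If you can't keep it flat, here is a sketch that only picks up regular files with common image extensions (the extension list is my own assumption, extend it as needed):

from glob import glob

IMAGE_EXTS = (".jpg", ".jpeg", ".png")  # assumed extensions, add any others you use

images = [f for f in glob(path.join(full_in, '*'))
          if path.isfile(f) and f.lower().endswith(IMAGE_EXTS)]
# then loop over `images` exactly as above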

In all three variants, you can drop the show_images() wrapper to just write the results to the OUT folder on Drive without displaying them inline:

local_classify(full_image)
Jean-Pierre-Polnareff commented 2 years ago

RuntimeError: Unable to open utils/dlib_face_detector/mmod_human_face_detector.dat for reading.

I am unable to run the code in Google Colab due to this runtime error. I am pretty sure I have the right .dat file. Hope I am not too late.

konsumer commented 2 years ago

I think you might need to use your own Google Drive folder to hold the code & output (I can't reproduce it).

Jean-Pierre-Polnareff commented 2 years ago

Got it! I just didn't give the path to the .dat file.
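
For anyone hitting the same thing: the repo looks for the detector at utils/dlib_face_detector/mmod_human_face_detector.dat relative to the repo root. Here is a rough sketch of fetching it into place (it reuses download() and full_path from konsumer's setup code above; the dlib.net URL is where dlib usually hosts this model):

import bz2
from os import makedirs

dat_dir = path.join(full_path, "utils", "dlib_face_detector")
dat_path = path.join(dat_dir, "mmod_human_face_detector.dat")
if not path.exists(dat_path):
  makedirs(dat_dir, exist_ok=True)
  f = download("http://dlib.net/files/mmod_human_face_detector.dat.bz2")
  # decompress the .bz2 archive into the path the detector expects
  with bz2.open(f, "rb") as src, open(dat_path, "wb") as dst:
    dst.write(src.read())
  remove(f)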

LinhVi commented 6 months ago

Got it! I just didn't give the path to the .dat file.

I got the same error. How did you fix that? Thanks!

zahra12638 commented 6 months ago

Received.

LinhVi commented 6 months ago

I think you might need to use your own Google Drive folder to hold the code & output (I can't reproduce it).

I already use my own Google Drive, and still got this error (screenshot attached as "Capture").

LinhVi commented 6 months ago

Hi, can you please help me sort this out? TIA. https://colab.research.google.com/drive/15wFDqZRprZlTh0T6WOl6wM1-g2qCwnGD?usp=sharing