KUCognitiveInformaticsLab / zhu_project_real_and_fake

Issues encountered when using the PsychoPy software

Colab - Python data analysis #5

Open conchincradle opened 1 year ago

conchincradle commented 1 year ago

Since I frequently revise the code, it's better to keep it here so it can be reused.

conchincradle commented 1 year ago
import matplotlib.pyplot as plt
paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]
plt.figure(figsize=(15, 9))
for i in range(9):
  plt.subplot(3,6,2*i+1)
  plt.imshow(plt.imread(paths[i]+".png"), cmap='gray')
  plt.title("Natural-IMAGE-0"+str(i+1),y=-0.2)
  plt.axis('off')
  plt.subplot(3,6,2*i+2)
  plt.imshow(plt.imread("u"+paths[i]+".png"), cmap='gray')
  plt.title("Unnatural-IMAGE-0"+str(i+1),y=-0.2)
  plt.axis('off')
# plt.savefig('01.eps')  # saved as EPS for inserting the figure in Overleaf

plt.show()

[attached figure: 3x6 grid of the nine natural and nine unnatural images]

conchincradle commented 1 year ago
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read each image as grayscale and plot its pixel-intensity histogram
paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]
fig, axs = plt.subplots(3, 3)
fig.set_size_inches(12, 12)
for i,path in zip(range(9),paths):
  image = cv2.imread(path+'.png', cv2.IMREAD_GRAYSCALE)
  plt.subplot(3,3,i+1)
  # plt.axis("off")

  plt.title("IMAGE-0"+str(i+1))
  plt.hist(image.flatten(), bins=50)
fig.supxlabel("pixel intensity")
fig.supylabel("pixel number")

[attached figure 08: pixel-intensity histograms of the nine natural images]

conchincradle commented 1 year ago
import cv2
# from google.colab.patches import cv2_imshow
paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]

for path in paths:
  # Read the image
  image = cv2.imread("u"+path+'.png')

  # Convert to grayscale
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

  # Adaptive thresholding
  block_size = 11
  C = 2
  thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, block_size, C)
  cv2.imwrite("u"+path+"_contour.png",thresh)

  # Show the thresholded image
  # cv2_imshow(thresh)
conchincradle commented 1 year ago
import matplotlib.pyplot as plt
paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]
plt.figure(figsize=(15, 9))
for i in range(9):
  plt.subplot(3,6,2*i+1)
  plt.imshow(plt.imread("u"+paths[i]+".png"), cmap='gray')
  plt.title("Unnatural-0"+str(i+1),y=-0.2)
  plt.axis('off')
  plt.subplot(3,6,2*i+2)
  plt.imshow(plt.imread("u"+paths[i]+"_contour.png"), cmap='gray')
  plt.title("Contoured unnatural-0"+str(i+1),y=-0.2)
  plt.axis('off')
# plt.savefig('01.eps')  # saved as EPS for inserting the figure in Overleaf

plt.show()

[attached figure 09: the unnatural images and their adaptive-threshold contour versions]

conchincradle commented 1 year ago
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
df1 = pd.read_csv("swirl_repeated.csv")
df2 = pd.read_csv("blur_repeated.csv")
df3 = pd.read_csv("noise_repeated.csv")
df = pd.DataFrame(df1)

# Exclude the first column
columns_to_divide = df.columns[1:]

# Compute the mean over all columns except the first
total_mean = df[columns_to_divide].values.mean()
print(total_mean)

The mean JND of each distortion: swirl 0.4188819432176951, blur 0.4389911994872622, noise 0.46012526069642695.

conchincradle commented 1 year ago

Divide everything except the first column by the mean, then merge them back together.

df = pd.DataFrame(df1)

# Exclude the first column
columns_to_divide = df.columns[1:]

# Compute the mean over all columns except the first
total_mean = df[columns_to_divide].values.mean()

# Divide all columns except the first by the overall mean
df_divided = df[columns_to_divide] / total_mean

# Re-attach the first column to the divided data
df_result1 = pd.concat([df.iloc[:, 0], df_divided], axis=1)
conchincradle commented 1 year ago
# Normalize each dataset by its own overall mean (computed over all columns
# except the first), then re-attach the first column
results = []
for each in [df1, df2, df3]:
  df = pd.DataFrame(each)
  columns_to_divide = df.columns[1:]
  total_mean = df[columns_to_divide].values.mean()
  df_divided = df[columns_to_divide] / total_mean
  results.append(pd.concat([df.iloc[:, 0], df_divided], axis=1))

df1, df2, df3 = results
conchincradle commented 1 year ago
data = []
columns = ["Naturalness","Distortion Type","Image Stimuli","Distortion Intensity"] # natural is 0, unnatural is 1
for index,each in zip(range(3), [df1,df2,df3]):
  df = pd.DataFrame(each)
  for i in range(9):
    path = "natural*0"+str(i+1)
    row = [0,index,i,df[path].mean()]
    data.append(row)
  for i in range(9):
    path = "unnatural*0"+str(i+1)
    row = [1,index,i,df[path].mean()]
    data.append(row)
df = pd.DataFrame(data, columns=columns)
df.to_csv('final_total_normalize.csv', index=False)

List all of them out into the final CSV.

conchincradle commented 1 year ago

MSE calculation (the helper below actually returns the RMSE of the normalized pixel difference)

def mse(A,B):
  # root-mean-squared error between two 256x256 images, with intensities scaled to [0, 1]
  return np.sqrt(np.sum(np.power((np.float32(A) - np.float32(B))/255, 2))/(256*256))
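
A minimal usage sketch for the helper (assuming a reference image animal.png and its JND-level swirled version animal_swirl.png, generated later in this thread, are in the working directory):

import cv2
import numpy as np

ref = cv2.imread("animal.png", 0)         # reference image, grayscale
dist = cv2.imread("animal_swirl.png", 0)  # distorted image at the JND level
print(mse(ref, dist))                     # RMSE with intensities scaled to [0, 1]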
conchincradle commented 1 year ago

MSE

Natural:   swirl 0.09582750283984548, blur 0.01667309639251033, noise 0.021269827988445327, total 0.04459014240693371
Unnatural: swirl 0.10179927925353327, blur 0.017531941042584983, noise 0.035051566858189796, total 0.05146092905143602

Overall: 0.04802553572918486
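
For reference, a minimal sketch of how per-distortion means like the ones above could be computed with the mse() helper. This assumes the JND-level distorted images are saved as <name>_swirl.png, <name>_blur.png, and <name>_noise.png, as in the generation code later in this thread, and is not necessarily the exact code used here.

import cv2
import numpy as np

paths = ["animal", "flower", "foliage", "fruit", "landscape", "manmade", "shadow", "texture", "winter"]
for dis in ["swirl", "blur", "noise"]:
  scores = []
  for path in paths:                                # natural images; prepend "u" for the unnatural set
    ref = cv2.imread(path + ".png", 0)
    dist = cv2.imread(path + "_" + dis + ".png", 0)
    scores.append(mse(ref, dist))
  print(dis, np.mean(scores))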

conchincradle commented 1 year ago

1 - SSIM

Natural:   swirl 0.34702679846021867, blur 0.022263401084476046, noise 0.09024006393220689, total 0.15317675449230053
Unnatural: swirl 0.4080379009246826, blur 0.030067377620273165, noise 0.13298674159579807, total 0.19036400671358464

Overall: 0.17177038060294259
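
For comparison, a minimal sketch of computing 1-SSIM for one image pair with scikit-image. This is only an illustration under that assumption; the thread installs IQA_pytorch later, so the actual SSIM implementation used for the numbers above may differ.

import cv2
from skimage.metrics import structural_similarity as ssim

ref = cv2.imread("animal.png", 0)           # reference image, grayscale
dist = cv2.imread("animal_swirl.png", 0)    # distorted image at the JND level
print(1 - ssim(ref, dist, data_range=255))  # larger value = more perceptual distortion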

conchincradle commented 1 year ago

NLPD

Natural:   swirl 0.6079553332593706, blur 0.054263080573744245, noise 0.12608186991678344, total 0.26276676124996606
Unnatural: swirl 0.6718173987335629, blur 0.06533221941855219, noise 0.18922264269656608, total 0.30879075361622704

Overall: 0.28577875743309655

conchincradle commented 1 year ago

1 - MS SSIM

Natural:   swirl 0.2899041838116116, blur 0.0025759670469495985, noise 0.010619748963250054, total 0.10103329994060377
Unnatural: swirl 0.33551422754923504, blur 0.003524906105465359, noise 0.016960499684015907, total 0.11866654444623877

Overall: 0.10984992219342127

conchincradle commented 1 year ago

LPIPS-VGG

Natural:   swirl 0.1553400812877549, blur 0.05963869848185115, noise 0.1887365930610233, total 0.13457179094354313
Unnatural: swirl 0.19328026307953727, blur 0.09829100428356065, noise 0.28026174207528437, total 0.19061100314612744

Overall: 0.16259139704483527
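
A hedged sketch of an LPIPS-VGG distance on one image pair using the standalone lpips package; this is an assumption for illustration (the thread installs IQA_pytorch, so the exact implementation and preprocessing behind the numbers above may differ).

import cv2
import torch
import lpips  # pip install lpips

loss_fn = lpips.LPIPS(net='vgg')  # LPIPS with the VGG backbone

def to_tensor(path):
  # read as RGB, scale to [-1, 1], shape (1, 3, H, W) as lpips expects
  img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
  t = torch.from_numpy(img).permute(2, 0, 1).float() / 255.0
  return (t * 2 - 1).unsqueeze(0)

d = loss_fn(to_tensor("animal.png"), to_tensor("animal_swirl.png"))
print(d.item())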

conchincradle commented 1 year ago

DISTS

Natural:   swirl 0.05956704749001397, blur 0.07730601893530951, noise 0.15820786820517646, total 0.09836031154349999
Unnatural: swirl 0.06402201784981622, blur 0.10157267252604167, noise 0.19075672295358445, total 0.11878380444314744

Overall: 0.10857205799332371

conchincradle commented 1 year ago

Combine the natural and unnatural predictions into CSVs of normalized data


columns = ["Naturalness","Distortion Type","Image Stimuli","Distortion Intensity"] # natural is 0, unnatural is 1
for model in ["MSE","SSIM","MS_SSIM","NLPD","LPIPS","DISTS"]:
  df1 = pd.read_csv("final_swirl_"+model+"_predicted.csv")
  df2 = pd.read_csv("final_blur_"+model+"_predicted.csv")
  df3 = pd.read_csv("final_noise_"+model+"_predicted.csv")
  data = []
  for index,each in zip(range(3), [df1,df2,df3]):
    df = pd.DataFrame(each)
    natural = df["natural"].values
    unnatural = df["unnatural"].values
    for i in range(9):
      row = [0,index,i,natural[i]]
      data.append(row)
    for i in range(9):
      row = [1,index,i,unnatural[i]]
      data.append(row)
  df = pd.DataFrame(data, columns=columns)
  df.to_csv(model+'_final_total_normalize.csv', index=False)

  data = []
  for index,each in zip(range(3), [df1,df2,df3]):
    df = pd.DataFrame(each)
    natural = df["natural"].values
    for i in range(9):
      row = [0,index,i,natural[i]]
      data.append(row)
  df = pd.DataFrame(data, columns=columns)
  df.to_csv(model+'_final_natural_normalize.csv', index=False)

  data = []
  for index,each in zip(range(3), [df1,df2,df3]):
    df = pd.DataFrame(each)
    unnatural = df["unnatural"].values
    for i in range(9):
      row = [1,index,i,unnatural[i]]
      data.append(row)
  df = pd.DataFrame(data, columns=columns)
  df.to_csv(model+'_final_unnatural_normalize.csv', index=False)
conchincradle commented 1 year ago

merge all the model and human normalized data

import pandas as pd
data = pd.read_csv("HUMAN_final_natural_normalize.csv")
df = pd.DataFrame(data)
df = df.rename(columns={'Distortion Intensity': 'HUMAN'})
models = ["MSE","SSIM","MS_SSIM","NLPD","LPIPS","DISTS"]
for model in models:
  data1 = pd.read_csv(model+"_final_natural_normalize.csv")
  df1 = pd.DataFrame(data1)
  df1 = df1.rename(columns={'Distortion Intensity': model})
  df = df.join(df1[model])

df.to_csv('merged_final_natural_normalize.csv', index=False)

data = pd.read_csv("HUMAN_final_unnatural_normalize.csv")
df = pd.DataFrame(data)
df = df.rename(columns={'Distortion Intensity': 'HUMAN'})
models = ["MSE","SSIM","MS_SSIM","NLPD","LPIPS","DISTS"]
for model in models:
  data1 = pd.read_csv(model+"_final_unnatural_normalize.csv")
  df1 = pd.DataFrame(data1)
  df1 = df1.rename(columns={'Distortion Intensity': model})
  df = df.join(df1[model])

df.to_csv('merged_final_unnatural_normalize.csv', index=False)

n =  pd.read_csv('merged_final_natural_normalize.csv')
u =  pd.read_csv('merged_final_unnatural_normalize.csv')
df1 =  pd.DataFrame(n)
df2 =  pd.DataFrame(u)
merge = pd.concat([df1, df2], axis=0)
merge.to_csv('merged_final_total_normalize.csv', index=False)
conchincradle commented 1 year ago

import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
ftotal = 'merged_final_total_normalize.csv'
fna = 'merged_final_natural_normalize.csv'
fun = 'merged_final_unnatural_normalize.csv'

dtotal = pd.read_csv(ftotal)
df_total = pd.DataFrame(dtotal)

dna = pd.read_csv(fna)
df_na = pd.DataFrame(dna)

dun = pd.read_csv(fun)
df_un = pd.DataFrame(dun)
data = df_total
models = ["MSE","SSIM","MS_SSIM","NLPD","LPIPS","DISTS"]
for model in models:
  hue = 'Naturalness'
  # hue = ""
  sns.jointplot(data=data, y=model, x='HUMAN',hue=hue)
  top = max(data[model])

  plt.plot([0, top], [0, top], color='gray', linestyle='--')
  plt.text(top, top, 'y=x', color='black', ha='right', va='bottom')
  plt.xlim(0,top+0.3)
  plt.ylim(0,top+0.3)
  plt.ylabel(model+" predicted value")
  plt.xlabel("distortion intensity at human JND")
  plt.savefig("00_"+model+"_total.png")

plt.figure(figsize=(8, 12))
for i,model in zip(range(6),models):
  plt.subplot(3,2,i+1)
  plt.imshow(plt.imread("00_"+model+"_total.png"), cmap='gray')
  plt.axis('off')

plt.show()
conchincradle commented 1 year ago

[attached figure: joint plots of each model's predictions against human JND]

conchincradle commented 1 year ago
df = data
human = "HUMAN"
for model in models:
  # Pearson / Spearman / Kendall correlations between each model and the human JND
  plcc = df[human].corr(df[model], method='pearson')
  srcc = df[human].corr(df[model], method='spearman')
  krcc = df[human].corr(df[model], method='kendall')
  error = df[model] - df[human]
  squared_error = error.multiply(error)
  ss = squared_error.sum()
  print("-------"+model+"-----------")
  print(round(plcc,4))
  print(round(srcc,4))
  print(round(krcc,4))
  print(round(ss,4))
conchincradle commented 1 year ago

Swirl every image with the same intensity (the mean swirl JND)

!pip install wand
!apt-get install libmagickwand-dev
conchincradle commented 1 year ago
from wand.image import Image
mid = 0.4188819432176951  # mean swirl JND
sigma = 1.3 * (10 ** (3 * mid))  # maps the normalized intensity to a swirl angle (about 23.5 degrees here)
paths = ["animal", "flower", "foliage", "fruit", "landscape", "manmade", "shadow", "texture", "winter"]
upaths = list(map(lambda x: "u" + x, paths))
for path in paths+upaths:
  filename512 = "512_"+path+".png"
  filename2 = path+"_swirl.png"
  with Image(filename=filename512) as img:
    img.swirl(degree=sigma)
    img.resize(256, 256)
    img.save(filename=filename2)
conchincradle commented 1 year ago

Plot representative example images

import matplotlib.pyplot as plt
paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]
plt.figure(figsize=(15, 9))
names = ["Natural-IMAGE-06","Distorted Natural-IMAGE-06",\
         "Unnatural-IMAGE-06","Distorted Unnatural-IMAGE-06",\
         "Natural-IMAGE-09","Distorted Natural-IMAGE-09",\
         "Unnatural-IMAGE-09","Distorted Unnatural-IMAGE-09"]

index = 1
for path in ["manmade","winter"]:
  for pre in ["","u"]:
    for suf in [".png","_swirl.png"]:
      plt.subplot(2,4,index)
      index += 1
      plt.imshow(plt.imread(pre+path+suf), cmap='gray')
      plt.title(names[index-2])
      plt.axis('off')

plt.show()
conchincradle commented 1 year ago

Plot all nine image sets

import matplotlib.pyplot as plt
paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]
plt.figure(figsize=(15, 27))
prefixes = []
names = ["Natural-IMAGE-","Distorted Natural-IMAGE-","Unnatural-IMAGE-","Distorted Unnatural-IMAGE-"]
names1 = [each+"01" for each in names ]+[each+"02" for each in names ]+[each+"03" for each in names ]
names2 = [each+"04" for each in names ]+[each+"05" for each in names ]+[each+"06" for each in names ]
names3 = [each+"07" for each in names ]+[each+"08" for each in names ]+[each+"09" for each in names ]  
names = names1+names2+names3   
index = 1
for path in paths:
  for pre in ["","u"]:
    for suf in [".png","_swirl.png"]:
      plt.subplot(9,4,index)
      index += 1
      plt.imshow(plt.imread(pre+path+suf), cmap='gray')
      plt.title(names[index-2])
      plt.axis('off')

plt.show()
conchincradle commented 1 year ago

[attached figure: 9x4 grid of natural/unnatural images and their swirled versions]

conchincradle commented 1 year ago

BLUR

import numpy as np
import cv2
mid = 0.7
sigma = 0.2 * (10 ** (mid))  # Gaussian blur sigma of roughly 1.0 pixel here

paths = ["animal", "flower", "foliage", "fruit", "landscape", "manmade", "shadow", "texture", "winter"]
upaths = list(map(lambda x: "u" + x, paths))
for path in paths+upaths:
  filename1 = path+".png"
  filename2 = path+"_blur.png"
  img1 = cv2.imread(filename1,0)
  blur = cv2.GaussianBlur(img1, (0, 0), sigmaX=sigma, sigmaY=0)
  cv2.imwrite(filename2,blur)
conchincradle commented 1 year ago

Coefficient of determination (R²), via SST and SSR


def func(df,human,model):
  # total sum of squares (SST), residual sum of squares (SSR), and R^2 of the model against human JND
  mean_value = df[human].mean()
  sst = ((df[human]-mean_value)**2).sum()
  ssr = ((df[model]-df[human])**2).sum()
  r2 = 1-(ssr/sst)
  return sst,ssr,r2

from sklearn.metrics import mean_squared_error, r2_score
df = data
human = "HUMAN"
for model in models:
  sst1,ssr1,r21 = func(df_total,human,model)
  sst2,ssr2,r22 = func(df_na,human,model)
  sst3,ssr3,r23 = func(df_un,human,model)

  # r3 = r2_score(df[human],df[model])
  # print(str(r2)+"==="+str(r3))

  if model=="MSE":
    model="RMSE"
  if model=="SSIM":
    model="1-SSIM"
  if model=="MS_SSIM":
    model="1-MS SSIM"
  print(model+"&"+str(round(ssr1,4))+"&"+str(round(sst1,4))+"&"+str(round(r21,4))+"&"+ \
        str(round(ssr2,4))+"&"+str(round(sst2,4))+"&"+str(round(r22,4))+"&"+ \
        str(round(ssr3,4))+"&"+str(round(sst3,4))+"&"+str(round(r23,4))+"\\\\")
conchincradle commented 1 year ago

Package the files into a tar archive

import os
import tarfile
from google.colab import files

def make_targz_one_by_one(output_filename, source_dir):
  tar = tarfile.open(output_filename,"w")  # plain tar archive (no gzip compression), despite the function name
  for root,dir_name,files_list in os.walk(source_dir):
    for file in files_list:
      pathfile = os.path.join(root, file)
      tar.add(pathfile)
  tar.close()

  files.download(output_filename)

make_targz_one_by_one('noise.tar', './distortion//')
conchincradle commented 1 year ago
import numpy as np
import cv2
from wand.image import Image
for i in range(0,11):
  # mid1 = 0.45
  # mid2 = 0.58
  # mid3 = 0.45
  mid1 = 0.08*i+0.2
  mid2 = 0.1*i+0.5
  mid3 = 0.06*i+0.2

  np.random.seed(1)
  sigma_noise = 0.001 * (10 ** (3 * mid1)) # noise
  sigma_blur = 0.2 * (10 ** (mid2)) # blur
  sigma_swirl = 1.3 * (10 ** (3 * mid3))
  paths = ["animal", "flower", "foliage", "fruit", "landscape", "manmade", "shadow", "texture", "winter"]
  upaths = list(map(lambda x: "u" + x, paths))
  for path in paths+upaths:
    filename1 = path+".png"
    filename_blur = path+"_blur"+str(i)+".png"
    filename_noise = path+"_noise"+str(i)+".png"
    img1 = cv2.imread(filename1,0)
    blur = cv2.GaussianBlur(img1, (0, 0), sigmaX=sigma_blur, sigmaY=0)
    imgTmp = img1 / 255
    np.random.seed(1)
    noise = np.random.normal(0, sigma_noise, img1.shape)
    gauss = np.clip(imgTmp + noise, 0, 1)
    gauss = np.uint8(gauss * 255)
    filename512 = "512_"+path+".png"
    filename2 = path+"_swirl"+str(i)+".png"
    with Image(filename=filename512) as img:
      img.swirl(degree=sigma_swirl)
      img.resize(256, 256)
      img.save(filename=filename2)

    cv2.imwrite(filename_blur,blur)
    cv2.imwrite(filename_noise,gauss)
conchincradle commented 1 year ago
!pip install wand
!apt-get install libmagickwand-dev
!pip install  IQA_pytorch
conchincradle commented 1 year ago
import matplotlib.pyplot as plt
paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]
plt.figure(figsize=(24, 12))
path = "animal"

# One row per distortion type; columns run from the original image up to high intensity
distortions = [("swirl", "swirl"), ("blur", "gaussian blur"), ("noise", "gaussian noise")]
for row, (suffix, label) in enumerate(distortions):
  titles = ["original image", label+": low", "->", "middle", "->", "high"]
  for i in range(6):
    plt.subplot(3, 6, 6*row + i + 1)
    name = path+"_"+suffix+str(2*i)+".png"
    plt.imshow(plt.imread(name), cmap='gray')
    plt.axis("off")
    plt.title(titles[i], fontsize=20)

plt.show()
conchincradle commented 1 year ago

[attached figure: swirl, blur, and noise intensity progressions for one image]

conchincradle commented 1 year ago

Random factor: reshape the JND data into long format (one row per subject, distortion, naturalness, and image)

import numpy as np
import pandas as pd
filename = "total_analysis_normalize.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
print(df["swirl*natural*01"][10])
ans = []

for distortion in ["swirl","blur","noise"]:
  for naturalness in ["natural","unnatural"]:
    for i in range(1,10):

      num = "0"+str(i)

      col = distortion + "*"+naturalness +"*"+ num
      for k in range(14):
        value = df[col][k]
        if k<9:
          subject = "Subject-0"+str(k+1)
        else:
          subject = "Subject-"+str(k+1)
        ans.append([subject,distortion,naturalness,num,value])

df = pd.DataFrame(ans, columns=["Subject","Distortion Type","Naturalness","Image Stimuli","Distortion Intensity"])

csv_name = "total_analysis_random_factor.csv"
df.to_csv(csv_name, index=False)
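
Presumably this long-format CSV feeds a mixed-effects analysis with Subject as the random factor. A minimal sketch with statsmodels, assuming that is the intended analysis (the formula below is illustrative, not taken from this thread):

import pandas as pd
import statsmodels.formula.api as smf

df = pd.read_csv("total_analysis_random_factor.csv")
# Fixed effects: naturalness and distortion type; random intercept per subject
md = smf.mixedlm("Q('Distortion Intensity') ~ Naturalness * Q('Distortion Type')",
                 data=df, groups=df["Subject"])
print(md.fit().summary())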
conchincradle commented 1 year ago

Generate the JND-level images one by one

from wand.image import Image
# swirl_mean = 0.4188819432176951

paths = ["animal", "flower", "foliage", "fruit", "landscape", "manmade", "shadow", "texture", "winter"]
upaths = list(map(lambda x: "u" + x, paths))
for index,path in zip(range(9),paths):
    num = "*0"+str(index+1)
    for i in range(14):
      mid = df["natural"+num][i]
      sigma = 1.3 * (10 ** (3 * mid))

      filename512 = "512_"+path+".png"
      filename2 = "random_jnd_swirl/"+str(i)+path+".png"
      with Image(filename=filename512) as img:
        img.swirl(degree=sigma)
        img.resize(256, 256)
        img.save(filename=filename2)
for index,path in zip(range(9),upaths):
    num = "*0"+str(index+1)
    for i in range(14):
      mid = df["unnatural"+num][i]
      sigma = 1.3 * (10 ** (3 * mid))

      filename512 = "512_"+path+".png"
      filename2 = "random_jnd_swirl/"+str(i)+path+".png"
      with Image(filename=filename512) as img:
        img.swirl(degree=sigma)
        img.resize(256, 256)
        img.save(filename=filename2)
conchincradle commented 1 year ago
filename = "blur_repeated.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
print(df["natural*01"])
conchincradle commented 1 year ago
import numpy as np
import cv2
import pandas as pd
filename = "blur_repeated.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
print(df["natural*01"])

paths = ["animal", "flower", "foliage", "fruit", "landscape", "manmade", "shadow", "texture", "winter"]
upaths = list(map(lambda x: "u" + x, paths))
for index,path in zip(range(9),paths):
    num = "*0"+str(index+1)
    for i in range(14):
      mid = df["natural"+num][i]
      sigma = 0.2 * (10 ** (mid))

      filename1 = path+".png"
      filename2 = "random_jnd_blur/"+str(i)+path+".png"
      img1 = cv2.imread(filename1,0)
      blur = cv2.GaussianBlur(img1, (0, 0), sigmaX=sigma, sigmaY=0)
      cv2.imwrite(filename2,blur)

for index,path in zip(range(9),upaths):
    num = "*0"+str(index+1)
    for i in range(14):
      mid = df["unnatural"+num][i]
      sigma = 0.2 * (10 ** (mid))

      filename1 = path+".png"
      filename2 = "random_jnd_blur/"+str(i)+path+".png"
      img1 = cv2.imread(filename1,0)
      blur = cv2.GaussianBlur(img1, (0, 0), sigmaX=sigma, sigmaY=0)
      cv2.imwrite(filename2,blur)
conchincradle commented 1 year ago
import numpy as np
import cv2
import pandas as pd
import random 
filename = "noise_repeated.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
print(df["natural*01"])

paths = ["animal", "flower", "foliage", "fruit", "landscape", "manmade", "shadow", "texture", "winter"]
upaths = list(map(lambda x: "u" + x, paths))
for index,path in zip(range(9),paths):
    num = "*0"+str(index+1)
    for i in range(14):
      mid = df["natural"+num][i]
      sigma = 0.001 * (10 ** (3 * mid)) # noise
      filename1 = path+".png"

      img1 = cv2.imread(filename1,0)
      imgTmp = img1 / 255
      for seed in range(1,11):
        np.random.seed(seed)
        noise = np.random.normal(0, sigma, img1.shape)
        gauss = np.clip(imgTmp + noise, 0, 1)
        gauss = np.uint8(gauss * 255)
        filename2 = "random_jnd_noise/"+str(i)+path+"_"+str(seed)+".png"
        cv2.imwrite(filename2,gauss)

for index,path in zip(range(9),upaths):
    num = "*0"+str(index+1)
    for i in range(14):
      mid = df["unnatural"+num][i]
      sigma = 0.001 * (10 ** (3 * mid)) # noise
      filename1 = path+".png"

      img1 = cv2.imread(filename1,0)
      imgTmp = img1 / 255
      for seed in range(1,11):
        np.random.seed(seed)
        noise = np.random.normal(0, sigma, img1.shape)
        gauss = np.clip(imgTmp + noise, 0, 1)
        gauss = np.uint8(gauss * 255)
        filename2 = "random_jnd_noise/"+str(i)+path+"_"+str(seed)+".png"
        cv2.imwrite(filename2,gauss)
conchincradle commented 1 year ago

Assemble the model predictions into one CSV per model

import pandas as pd

model_names = ["MSE", "SSIM", "NLPD", "MS_SSIM", "LPIPS", "DISTS"]
dis_types = ["swirl","blur", "noise"]
na_types = ["natural", "unnatural"]
img_types = ["0"+str(i+1) for i in range(9)]  # "01" ... "09"
columns = ["Subject"]
for i in dis_types:
  for j in na_types:
    for k in img_types:
      columns.append(i+"*"+j+"*"+k)
print(columns)

model_data = [[["Subject-0"+str(i+1) if i < 9 else "Subject-"+str(i+1)] for i in range(14)] for k in range(6)]

for dis in dis_types:
  for i in range(14):
    filename = dis+"/"+dis+"_"+"random_"+str(i)+".csv"
    data = pd.read_csv(filename)
    df =  pd.DataFrame(data)
    for k in range(6):
        nums = df.iloc[k][1:].tolist()
        # print(len(nums))
        model_data[k][i].extend(nums)
for index,model in zip(range(6),model_names):
  data = model_data[index]
  df = pd.DataFrame(data, columns=columns)

  csv_name = model+"_predict.csv"
  df.to_csv(csv_name, index=False)
conchincradle commented 1 year ago

Use the raw data

filename = "total_analysis_normalize.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
# df = df.transpose()
nums = df.iloc[13][:10].tolist()
print(nums)
conchincradle commented 1 year ago

human correlation figure

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

filename = "total_analysis_normalize.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
# df = df.transpose()
for subject in range(14):
  num1 = df.iloc[subject][1:].tolist()
  num2 = [0 for i in range(54)]
  for human in range(14):
    if human==subject:
      continue
    values = df.iloc[human][1:].tolist()
    for k in range(54):
      num2[k] += values[k]
  num2 = [each/13 for each in num2]

  # Data for this subject
  x = num1  # this subject's JNDs
  y = num2  # mean of the other subjects' JNDs
  c = []
  types = ["swirl-0","swirl-1","blur-0","blur-1","noise-0","noise-1"]
  for k in range(6):
    for i in range(9):
      c.append(types[k])

  categories = c  # category label for each point

  # categories = types  # (replace with your own category data)

  # Marker shapes and colors per category
  marker_map = dict(zip(types,["X","X","o","o","s","s"]))
  color_map = dict(zip(types,["darkorange","forestgreen"]*3))
  print(color_map)

  # Draw the scatter plot
  sns.scatterplot(x=x, y=y, hue=categories, style=categories,
                  markers=marker_map, palette=color_map)
  top = max(x+y)+0.01
  bottom = min(x+y)-0.01

  plt.plot([bottom, top], [bottom, top], color='gray', linestyle='--')

  # Show the legend
  plt.legend()
  title = "0"+str(subject+1) if subject<9 else str(subject+1)
  plt.title('Total: Subject-'+title)
  plt.xlabel("human JND")
  plt.ylabel("the mean of other subjects' human JND")

  plt.savefig("human_fig/"+str(subject)+"_human_corr.png")
  plt.show()
  # print(nums)
conchincradle commented 1 year ago

The same figure for the model predictions (MSE shown here)

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

filename = "total_analysis_normalize.csv"
filename2 = "MSE_predict.csv"
data2 = pd.read_csv(filename2)
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
df2 =  pd.DataFrame(data2)
# df = df.transpose()
for subject in range(14):
  num1 = df.iloc[subject][1:].tolist()
  num2 = df2.iloc[subject][1:].tolist()

  # num2 = [0 for i in range(54)]
  # for human in range(14):
  #   if human==subject:
  #     continue
  #   values = df.iloc[human][1:].tolist()
  #   for k in range(54):
  #     num2[k] += values[k]
  # num2 = [each/13 for each in num2]

  # Data for this subject
  x = num1  # this subject's JNDs
  y = num2  # MSE-predicted JNDs for this subject
  c = []
  types = ["swirl-0","swirl-1","blur-0","blur-1","noise-0","noise-1"]
  for k in range(6):
    for i in range(9):
      c.append(types[k])

  categories = c  # category label for each point

  # categories = types  # (replace with your own category data)

  # Marker shapes and colors per category
  marker_map = dict(zip(types,["X","X","o","o","s","s"]))
  color_map = dict(zip(types,["darkorange","forestgreen"]*3))
  print(color_map)

  # Draw the scatter plot
  sns.scatterplot(x=x, y=y, hue=categories, style=categories,
                  markers=marker_map, palette=color_map)
  top = max(x+y)+0.01
  bottom = min(x+y)-0.01

  plt.plot([bottom, top], [bottom, top], color='gray', linestyle='--')

  # Show the legend
  plt.legend()
  title = "0"+str(subject+1) if subject<9 else str(subject+1)
  plt.title('Total: Subject-'+title)
  plt.xlabel("human JND")
  plt.ylabel("MSE predicted JND")

  plt.savefig("mse_fig/"+str(subject)+"_mse_corr.png")
  plt.show()
  # print(nums)
conchincradle commented 1 year ago

PLCC

from scipy.stats import pearsonr, spearmanr, kendalltau

models = ["HUMAN","MSE","SSIM","MS_SSIM","NLPD","LPIPS","DISTS"]
for model in models:
  filename = "total_analysis_normalize.csv"
  if model != "HUMAN":
    filename2 = model+"_predict.csv"
    data2 = pd.read_csv(filename2)
    df2 =  pd.DataFrame(data2)

  data = pd.read_csv(filename)
  df =  pd.DataFrame(data)

  plccs = []
  srccs = []
  krccs = []
  n_plccs = []
  n_srccs = []
  n_krccs = []
  u_plccs = []
  u_srccs = []
  u_krccs = []
  for subject in range(14):
    num1 = df.iloc[subject][1:].tolist()

    if model=="HUMAN":
      num2 = [0 for i in range(54)]
      for human in range(14):
        if human==subject:
          continue
        values = df.iloc[human][1:].tolist()
        for k in range(54):
          num2[k] += values[k]

      num2 = [each/13 for each in num2]
    else:
      num2 = df2.iloc[subject][1:].tolist()

    plcc,_ = pearsonr(num1,num2)
    srcc,_ = spearmanr(num1,num2)
    krcc,_ = kendalltau(num1,num2)
    plccs.append(plcc)
    srccs.append(srcc)
    krccs.append(krcc)
    n_num1 = num1[:9]+num1[18:27]+num1[36:45]
    u_num1 = num1[9:18]+num1[27:36]+num1[45:54]
    n_num2 = num2[:9]+num2[18:27]+num2[36:45]
    u_num2 = num2[9:18]+num2[27:36]+num2[45:54]
    n_plcc,_ = pearsonr(n_num1,n_num2)
    n_srcc,_ = spearmanr(n_num1,n_num2)
    n_krcc,_ = kendalltau(n_num1,n_num2)
    n_plccs.append(n_plcc)
    n_srccs.append(n_srcc)
    n_krccs.append(n_krcc)
    u_plcc,_ = pearsonr(u_num1,u_num2)
    u_srcc,_ = spearmanr(u_num1,u_num2)
    u_krcc,_ = kendalltau(u_num1,u_num2)
    u_plccs.append(u_plcc)
    u_srccs.append(u_srcc)
    u_krccs.append(u_krcc)
  plcc = np.mean(plccs)
  srcc = np.mean(srccs)
  krcc = np.mean(krccs)
  n_plcc = np.mean(n_plccs)
  n_srcc = np.mean(n_srccs)
  n_krcc = np.mean(n_krccs)
  u_plcc = np.mean(u_plccs)
  u_srcc = np.mean(u_srccs)
  u_krcc = np.mean(u_krccs)
  if model=="SSIM" or model=="MS_SSIM":
    model = "1-"+model
  if model=="1-MS_SSIM":
    model = "1-MS\\_SSIM"

  # Print one LaTeX table row per model
  print(model+"&"+str(round(plcc,4))+"&"+str(round(srcc,4))+"&"+str(round(krcc,4))+ \
        "&"+str(round(n_plcc,4))+"&"+str(round(n_srcc,4))+"&"+str(round(n_krcc,4))+ \
        "&"+str(round(u_plcc,4))+"&"+str(round(u_srcc,4))+"&"+str(round(u_krcc,4))+"\\\\")
conchincradle commented 1 year ago

PLCC for swirl only

from scipy.stats import pearsonr, spearmanr, kendalltau

models = ["HUMAN","MSE","SSIM","MS_SSIM","NLPD","LPIPS","DISTS"]
for model in models:
  filename = "total_analysis_normalize.csv"
  if model != "HUMAN":
    filename2 = model+"_predict.csv"
    data2 = pd.read_csv(filename2)
    df2 =  pd.DataFrame(data2)

  data = pd.read_csv(filename)
  df =  pd.DataFrame(data)

  plccs = []
  srccs = []
  krccs = []
  n_plccs = []
  n_srccs = []
  n_krccs = []
  u_plccs = []
  u_srccs = []
  u_krccs = []
  for subject in range(14):
    num1 = df.iloc[subject][1:].tolist()

    if model=="HUMAN":
      num2 = [0 for i in range(54)]
      for human in range(14):
        if human==subject:
          continue
        values = df.iloc[human][1:].tolist()
        for k in range(54):
          num2[k] += values[k]

      num2 = [each/13 for each in num2]
    else:
      num2 = df2.iloc[subject][1:].tolist()

    num1 = num1[:18]
    num2 = num2[:18]

    plcc,_ = pearsonr(num1,num2)
    srcc,_ = spearmanr(num1,num2)
    krcc,_ = kendalltau(num1,num2)
    plccs.append(plcc)
    srccs.append(srcc)
    krccs.append(krcc)
    n_num1 = num1[:9]
    u_num1 = num1[9:18]
    n_num2 = num2[:9]
    u_num2 = num2[9:18]
    n_plcc,_ = pearsonr(n_num1,n_num2)
    n_srcc,_ = spearmanr(n_num1,n_num2)
    n_krcc,_ = kendalltau(n_num1,n_num2)
    n_plccs.append(n_plcc)
    n_srccs.append(n_srcc)
    n_krccs.append(n_krcc)
    u_plcc,_ = pearsonr(u_num1,u_num2)
    u_srcc,_ = spearmanr(u_num1,u_num2)
    u_krcc,_ = kendalltau(u_num1,u_num2)
    u_plccs.append(u_plcc)
    u_srccs.append(u_srcc)
    u_krccs.append(u_krcc)
  plcc = np.mean(plccs)
  srcc = np.mean(srccs)
  krcc = np.mean(krccs)
  n_plcc = np.mean(n_plccs)
  n_srcc = np.mean(n_srccs)
  n_krcc = np.mean(n_krccs)
  u_plcc = np.mean(u_plccs)
  u_srcc = np.mean(u_srccs)
  u_krcc = np.mean(u_krccs)
  if model=="SSIM" or model=="MS_SSIM":
    model = "1-"+model
  if model=="1-MS_SSIM":
    model = "1-MS\\_SSIM"

  # Print one LaTeX table row per model
  print(model+"&"+str(round(plcc,4))+"&"+str(round(srcc,4))+"&"+str(round(krcc,4))+ \
        "&"+str(round(n_plcc,4))+"&"+str(round(n_srcc,4))+"&"+str(round(n_krcc,4))+ \
        "&"+str(round(u_plcc,4))+"&"+str(round(u_srcc,4))+"&"+str(round(u_krcc,4))+"\\\\")
conchincradle commented 1 year ago
import pandas as pd
import os

# Containers for the per-session rows and the session-averaged rows
data_indivi = []

data_merge = []
# columns = ["Subject","Distortion Type","Naturalness","Image Stimuli","Distortion Intensity"]
# Folder containing the per-subject CSV files
for dis_type in ['swirl','blur','noise']:
# (Alternative approach, left commented out: iterate over every CSV in the folder
#  with os.listdir and concatenate them into one DataFrame with pd.concat.)

  f_pre = "./"+dis_type+"/"
  for i in range(1,15):
    f1 = f_pre+str(i)+"_"+"1.csv"
    f2 = f_pre+str(i)+"_"+"2.csv"
    df1 = pd.read_csv(f1)
    df2 = pd.read_csv(f2)
    df1 = pd.DataFrame(df1)
    df2 = pd.DataFrame(df2)
    # Group each session by imgType and average the thresholds
    df1 = df1.dropna(subset=['approxThreshold'])
    df2 = df2.dropna(subset=['approxThreshold'])

    # grouped = df.groupby('imgType')['approxThreshold'].agg('mean').to_list()
    group1 = df1.groupby('imgType').agg({'approxThreshold': 'mean'}).to_dict()['approxThreshold']
    group2 = df2.groupby('imgType').agg({'approxThreshold': 'mean'}).to_dict()['approxThreshold']

    # grouped_std = df.groupby('imgType').agg({'approxThreshold': 'std'}).to_dict()['approxThreshold']
    # print(grouped_std)
    # print(grouped_mean)
    paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]
    n_jnd1 = []
    u_jnd1 = []
    n_jnd2 = []
    u_jnd2 = []
    n_jnd3 = []
    u_jnd3 = []

    for path in paths:
      n_value1 = group1[path]
      u_value1 = group1["u"+path]

      n_value2 = group2[path]
      u_value2 = group2["u"+path]

      n_value3 = (n_value1+n_value2)/2
      u_value3 = (u_value1+u_value2)/2

      n_jnd1.append(n_value1)
      u_jnd1.append(u_value1)

      n_jnd2.append(n_value2)
      u_jnd2.append(u_value2)

      n_jnd3.append(n_value3)
      u_jnd3.append(u_value3)

    for k in range(9):
      image = "'0"+str(k+1)
      subject = "Subject-0"+str(i)
      # row1 = [subject,dis_type,"natural",image,n_jnd1[k]]
      row1 = [subject,"natural",image,n_jnd1[k]]
      row2 = [subject,"unnatural",image,u_jnd1[k]]

      row3 = [subject,"natural",image,n_jnd2[k]]
      row4 = [subject,"unnatural",image,u_jnd2[k]]

      row5 = [subject,"natural",image,n_jnd3[k]]
      row6 = [subject,"unnatural",image,u_jnd3[k]]

      data_indivi.extend([row1,row2,row3,row4])
      data_merge.extend([row5,row6])

  # df = pd.DataFrame(data_indivi, columns=["Subject","Distortion Type","Naturalness","Image Stimuli","Distortion Intensity"])
  df1 = pd.DataFrame(data_indivi, columns=["Subject","Naturalness","Image Stimuli","Distortion Intensity"])
  csv_name = dis_type+"_random_factor_twice.csv"
  df1.to_csv(csv_name, index=False)
conchincradle commented 1 year ago

merge all the distortion type JND together

import pandas as pd
import os

# Containers for the rows
data_indivi = []

data_merge = []
columns = ["Subject","Distortion Type","Naturalness","Image Stimuli","Distortion Intensity"]
# Mean JND of each distortion type, used to normalize the thresholds
jnd_mean = {"swirl":0.4188819432176951,"blur":0.4389911994872622, "noise":0.46012526069642695 }

for dis_type in ['swirl','blur','noise']:
# (Alternative approach, left commented out: iterate over every CSV in the folder
#  with os.listdir and concatenate them into one DataFrame with pd.concat.)

  f_pre = "./"+dis_type+"/"
  for i in range(1,15):
    f1 = f_pre+str(i)+"_"+"1.csv"
    f2 = f_pre+str(i)+"_"+"2.csv"
    df1 = pd.read_csv(f1)
    df2 = pd.read_csv(f2)
    df1 = pd.DataFrame(df1)
    df2 = pd.DataFrame(df2)
    # Group each session by imgType and average the thresholds
    df1 = df1.dropna(subset=['approxThreshold'])
    df2 = df2.dropna(subset=['approxThreshold'])

    # grouped = df.groupby('imgType')['approxThreshold'].agg('mean').to_list()
    group1 = df1.groupby('imgType').agg({'approxThreshold': 'mean'}).to_dict()['approxThreshold']
    group2 = df2.groupby('imgType').agg({'approxThreshold': 'mean'}).to_dict()['approxThreshold']

    # grouped_std = df.groupby('imgType').agg({'approxThreshold': 'std'}).to_dict()['approxThreshold']
    # print(grouped_std)
    # print(grouped_mean)
    paths =  ["animal","flower","foliage","fruit","landscape","manmade","shadow","texture","winter"]
    n_jnd1 = []
    u_jnd1 = []
    n_jnd2 = []
    u_jnd2 = []
    n_jnd3 = []
    u_jnd3 = []

    for path in paths:
      n_value1 = group1[path]
      u_value1 = group1["u"+path]

      n_value2 = group2[path]
      u_value2 = group2["u"+path]

      n_value3 = (n_value1+n_value2)/2
      u_value3 = (u_value1+u_value2)/2

      n_jnd1.append(n_value1)
      u_jnd1.append(u_value1)

      n_jnd2.append(n_value2)
      u_jnd2.append(u_value2)

      n_jnd3.append(n_value3)
      u_jnd3.append(u_value3)

    for k in range(9):
      image = str(k+1)
      subject = "Subject-0"+str(i)
      normalize = jnd_mean[dis_type]
      row1 = [subject,dis_type,"natural",image,n_jnd1[k]/normalize]
      row2 = [subject,dis_type,"unnatural",image,u_jnd1[k]/normalize]

      row3 = [subject,dis_type,"natural",image,n_jnd2[k]/normalize]
      row4 = [subject,dis_type,"unnatural",image,u_jnd2[k]/normalize]
      data_indivi.extend([row1,row2,row3,row4])

df = pd.DataFrame(data_indivi, columns=["Subject","Distortion Type","Naturalness","Image Stimuli","Distortion Intensity"])

csv_name = "total_analysis_random_factor_twice_normalize.csv"
df.to_csv(csv_name, index=False)
conchincradle commented 12 months ago

Naturalness Effect, JND difference prediction

from scipy.stats import pearsonr, spearmanr, kendalltau
import numpy as np
step = "_0.005"
models = ["HUMAN","MSE","SSIM","MS_SSIM","NLPD","LPIPS","DISTS"]
for model in models:
  filename = "total_analysis_normalize.csv"
  if model != "HUMAN":
    filename2 = model+step+"_predict.csv"
    data2 = pd.read_csv(filename2)
    df2 =  pd.DataFrame(data2)

  data = pd.read_csv(filename)
  df =  pd.DataFrame(data)

  plccs = []
  srccs = []
  krccs = []
  n_plccs = []
  n_srccs = []
  n_krccs = []
  u_plccs = []
  u_srccs = []
  u_krccs = []
  for subject in range(14):
    num1 = df.iloc[subject][1:].tolist()

    if model=="HUMAN":
      num2 = [0 for i in range(54)]
      for human in range(14):
        if human==subject:
          continue
        values = df.iloc[human][1:].tolist()
        for k in range(54):
          num2[k] += values[k]

      num2 = [each/13 for each in num2]
    else:
      num2 = df2.iloc[subject][1:].tolist()

    plcc,_ = pearsonr(num1,num2)

    srcc,_ = spearmanr(num1,num2)
    krcc,_ = kendalltau(num1,num2)
    plccs.append(plcc)
    srccs.append(srcc)
    krccs.append(krcc)
    # Only the last uncommented pair of slices below takes effect; keep the pair for the
    # distortion of interest (all / swirl / blur / noise) and comment out the rest
    n_num1 = num1[:9]+num1[18:27]+num1[36:45]
    u_num1 = num1[9:18]+num1[27:36]+num1[45:54]
    # swirl
    n_num1 = num1[:9]
    u_num1 = num1[9:18]
    # blur
    n_num1 = num1[18:27]
    u_num1 = num1[27:36]
    # noise
    n_num1 = num1[36:45]
    u_num1 = num1[45:54]

    minus1 = [u1-n1 for (u1,n1)in zip(u_num1,n_num1)]

    n_num2 = num2[:9]+num2[18:27]+num2[36:45]
    u_num2 = num2[9:18]+num2[27:36]+num2[45:54]
    # swirl
    n_num2 = num2[:9]
    u_num2 = num2[9:18]
    # blur
    n_num2 = num2[18:27]
    u_num2 = num2[27:36]
    # noise
    n_num2 = num2[36:45]
    u_num2 = num2[45:54]

    minus2 = [u2-n2 for (u2,n2)in zip(u_num2,n_num2)]
    minus_plcc,_ = pearsonr(minus1,minus2)

    n_plcc,_ = pearsonr(n_num1,n_num2)
    n_srcc,_ = spearmanr(n_num1,n_num2)
    n_krcc,_ = kendalltau(n_num1,n_num2)
    n_plccs.append(n_plcc)
    n_srccs.append(n_srcc)
    n_krccs.append(n_krcc)
    u_plcc,_ = pearsonr(u_num1,u_num2)
    u_srcc,_ = spearmanr(u_num1,u_num2)
    u_krcc,_ = kendalltau(u_num1,u_num2)
    u_plccs.append(u_plcc)
    u_srccs.append(u_srcc)
    u_krccs.append(u_krcc)
  plcc = np.mean(plccs)
  srcc = np.mean(srccs)
  krcc = np.mean(krccs)
  n_plcc = np.mean(n_plccs)
  n_srcc = np.mean(n_srccs)
  n_krcc = np.mean(n_krccs)
  u_plcc = np.mean(u_plccs)
  u_srcc = np.mean(u_srccs)
  u_krcc = np.mean(u_krccs)
  if model=="SSIM" or model=="MS_SSIM":
    model = "1-"+model
  if model=="1-MS_SSIM":
    model = "1-MS\\_SSIM"

  print( str(round(minus_plcc,4)))
conchincradle commented 12 months ago

Human PLCC figures for a single distortion (here noise; switch the slices and type labels for swirl or blur)

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

filename = "total_analysis_normalize.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
# df = df.transpose()
for subject in range(14):
  num1 = df.iloc[subject][1:].tolist()
  num2 = [0 for i in range(54)]
  for human in range(14):
    if human==subject:
      continue
    values = df.iloc[human][1:].tolist()
    for k in range(54):
      num2[k] += values[k]
  num2 = [each/13 for each in num2]

  # Data for this subject (noise slices only)
  x = num1[36:54]  # this subject's JNDs
  y = num2[36:54]  # mean of the other subjects' JNDs
  c = []
  # types = ["swirl-0","swirl-1","blur-0","blur-1","noise-0","noise-1"]
  types = ["swirl-0","swirl-1"]
  # types = ["blur-0","blur-1"]
  # types = ["noise-0","noise-1"]

  for k in range(2):
    for i in range(9):
      c.append(types[k])

  categories = c  # category label for each point

  # categories = types  # (replace with your own category data)

  # Marker shapes and colors per category
  # marker_map = dict(zip(types,["X","X","o","o","s","s"]))
  # color_map = dict(zip(types,["darkorange","forestgreen"]*3))
  marker_map = dict(zip(types,["X","X"]))
  color_map = dict(zip(types,["darkorange","forestgreen"]))
  print(color_map)

  # Draw the scatter plot
  sns.scatterplot(x=x, y=y, hue=categories, style=categories,
                  markers=marker_map, palette=color_map)
  top = max(x+y)+0.01
  bottom = min(x+y)-0.01

  plt.plot([bottom, top], [bottom, top], color='gray', linestyle='--')

  # Show the legend
  plt.legend()
  title = "0"+str(subject+1) if subject<9 else str(subject+1)
  plt.title('Total: Subject-'+title)
  plt.xlabel("human JND")
  plt.ylabel("the mean of other subjects' human JND")

  plt.savefig("human_fig/"+str(subject)+"_human_corr_noise.png")
  plt.show()
  # print(nums)
conchincradle commented 12 months ago

for each model correlation figure

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

filename = "total_analysis_normalize.csv"

for model in ["MSE", "SSIM", "NLPD", "MS_SSIM", "LPIPS", "DISTS"]:

  filename2 = "stepPredict/"+model+"_0.005_predict.csv"
  data2 = pd.read_csv(filename2)
  data = pd.read_csv(filename)
  df =  pd.DataFrame(data)
  df2 =  pd.DataFrame(data2)
  # df = df.transpose()
  for subject in range(14):
    num1 = df.iloc[subject][1:].tolist()
    num2 = df2.iloc[subject][1:].tolist()

    # num2 = [0 for i in range(54)]
    # for human in range(14):
    #   if human==subject:
    #     continue
    #   values = df.iloc[human][1:].tolist()
    #   for k in range(54):
    #     num2[k] += values[k]
    # num2 = [each/13 for each in num2]

    # Data for this subject
    x = num1  # this subject's JNDs
    y = num2  # model-predicted JNDs for this subject
    c = []
    types = ["swirl-0","swirl-1","blur-0","blur-1","noise-0","noise-1"]
    for k in range(6):
      for i in range(9):
        c.append(types[k])

    categories = c  # category label for each point

    # categories = types  # (replace with your own category data)

    # Marker shapes and colors per category
    marker_map = dict(zip(types,["X","X","o","o","s","s"]))
    color_map = dict(zip(types,["darkorange","forestgreen"]*3))
    print(color_map)

    # Draw the scatter plot
    sns.scatterplot(x=x, y=y, hue=categories, style=categories,
                    markers=marker_map, palette=color_map)
    top = max(x+y)+0.01
    bottom = min(x+y)-0.01

    plt.plot([bottom, top], [bottom, top], color='gray', linestyle='--')

    # Show the legend
    plt.legend()
    title = "0"+str(subject+1) if subject<9 else str(subject+1)
    plt.title('Total: Subject-'+title)
    plt.xlabel("human JND")
    model_small = model.lower()
    label = model
    if model=="SSIM" or model=="MS_SSIM":
      label = "1-"+model
    plt.ylabel(label+" predicted JND")

    plt.savefig(model_small+"_fig/"+str(subject)+"_"+model_small+"_corr_total.png")
    plt.show()
    # print(nums)
conchincradle commented 12 months ago

for swirl each model

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

filename = "total_analysis_normalize.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
# df = df.transpose()
for subject in range(14):
  num1 = df.iloc[subject][1:].tolist()
  num2 = [0 for i in range(54)]
  for human in range(14):
    if human==subject:
      continue
    values = df.iloc[human][1:].tolist()
    for k in range(54):
      num2[k] += values[k]
  num2 = [each/13 for each in num2]

  # Data for this subject
  x = num1  # this subject's JNDs
  y = num2  # mean of the other subjects' JNDs
  c = []
  types = ["swirl-0","swirl-1","blur-0","blur-1","noise-0","noise-1"]
  for k in range(6):
    for i in range(9):
      c.append(types[k])

  categories = c  # category label for each point

  # categories = types  # (replace with your own category data)

  # Marker shapes and colors per category
  marker_map = dict(zip(types,["X","X","o","o","s","s"]))
  color_map = dict(zip(types,["darkorange","forestgreen"]*3))
  print(color_map)

  # Draw the scatter plot
  sns.scatterplot(x=x, y=y, hue=categories, style=categories,
                  markers=marker_map, palette=color_map)
  top = max(x+y)+0.01
  bottom = min(x+y)-0.01

  plt.plot([bottom, top], [bottom, top], color='gray', linestyle='--')

  # Show the legend
  plt.legend()
  title = "0"+str(subject+1) if subject<9 else str(subject+1)
  plt.title('Total: Subject-'+title)
  plt.xlabel("human JND")
  plt.ylabel("the mean of other subjects' human JND")

  plt.savefig("human_fig/"+str(subject)+"_human_corr_total.png")
  plt.show()
  # print(nums)
conchincradle commented 12 months ago

Human JND difference between natural and unnatural images

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

filename = "total_analysis_normalize.csv"
data = pd.read_csv(filename)
df =  pd.DataFrame(data)
# df = df.transpose()
for subject in range(14):
  num1 = df.iloc[subject][1:].tolist()
  num2 = [0 for i in range(54)]
  for human in range(14):
    if human==subject:
      continue
    values = df.iloc[human][1:].tolist()
    for k in range(54):
      num2[k] += values[k]
  num2 = [each/13 for each in num2]

  # Data for this subject (x and y are overwritten below with the unnatural-minus-natural differences)
  x = num1
  x10 = num1[:9]
  x11= num1[9:18]
  x20=num1[18:27]
  x21 = num1[27:36]
  x30=num1[36:45]
  x31=num1[45:54]
  x = [x2-x1 for x1,x2 in zip(x10,x11)]+[x2-x1 for x1,x2 in zip(x20,x21)]+[x2-x1 for x1,x2 in zip(x30,x31)]

  y = num2  # overwritten below with the other subjects' mean differences
  num1 = num2
  x10 = num1[:9]
  x11= num1[9:18]
  x20=num1[18:27]
  x21 = num1[27:36]
  x30=num1[36:45]
  x31=num1[45:54]
  y = [x2-x1 for x1,x2 in zip(x10,x11)]+[x2-x1 for x1,x2 in zip(x20,x21)]+[x2-x1 for x1,x2 in zip(x30,x31)]

  c = []
  types = ["swirl","blur","noise"]
  for k in range(3):
    for i in range(9):
      c.append(types[k])

  categories = c  # category label for each point

  # categories = types  # (replace with your own category data)

  # Marker shapes and colors per distortion type
  marker_map = dict(zip(types,["X","o","s"]))
  color_map = dict(zip(types,["darkorange","forestgreen","slateblue"]))
  print(color_map)

  # Draw the scatter plot
  sns.scatterplot(x=x, y=y, hue=categories, style=categories,
                  markers=marker_map, palette=color_map)
  top = max(x+y)+0.01
  bottom = min(x+y)-0.01

  plt.plot([bottom, top], [bottom, top], color='gray', linestyle='--')

  # Show the legend
  plt.legend()
  title = "0"+str(subject+1) if subject<9 else str(subject+1)
  plt.title('Total: Subject-'+title)
  plt.xlabel("human JND difference")
  plt.ylabel("the mean of other subjects' human JND difference")

  plt.savefig("na_human_fig/"+str(subject)+"_human_corr_total.png")
  plt.show()
  # print(nums)