I am running koncept512_train_test_py3.ipynb, but I changed it to run in PyCharm, and I get this error: tensorflow.python.framework.errors_impl.InvalidArgumentError: Failed to create a directory: logs/koniq/KonCept512/bsz:16 i:1[384,512,3] l:MSE o:1[1]; Invalid argument [Op:CreateSummaryFileWriter]
Could you please help me? Is it related to how I pass the input data?
from ku import model_helper as mh
from ku import applications as apps
from ku import tensor_ops as ops
from ku import generic as gen
from ku import image_utils as iu
import pandas as pd, numpy as np, os
from matplotlib import pyplot as plt
from munch import Munch
I am running koncept512_train_test_py3.ipynb, but I changed it to run in PyCharm, and I get this error: tensorflow.python.framework.errors_impl.InvalidArgumentError: Failed to create a directory: logs/koniq/KonCept512/bsz:16 i:1[384,512,3] l:MSE o:1[1]; Invalid argument [Op:CreateSummaryFileWriter]
Could you please help me? Is it related to how I pass the input data?
This is how I changed the code:
Set up paths
drive_mount = "" drive_root = "" # persistent storage for dataset images data_root = "" # library install path
# Imports, one statement per line (the original fused several imports onto
# single lines, which is a SyntaxError), grouped stdlib / third-party / local.
import os
import shutil
import subprocess

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from munch import Munch

from ku import applications as apps
from ku import generic as gen
from ku import image_utils as iu
from ku import model_helper as mh
from ku import tensor_ops as ops
# google.colab only exists inside Colab; the script now runs in PyCharm,
# so guard the import instead of crashing with ImportError at startup.
try:
    from google.colab import drive
    # Inside Colab: mount Google Drive at the configured mount point.
    drive.mount(drive_mount)
except ImportError:
    drive = None  # running locally (e.g. PyCharm) — no Drive mount needed
Make directories
data_root = "C:/Users/sheyd/PycharmProjects/NRIQA_methods/Koniq512/koniq10k_512x384/"
Define the KonCept model
# Load the per-image metadata table; the columns used below are
# 'image_name', 'MOS', and the train/validation/test split column 'set'
# (presumably also score distributions — confirm against the CSV header).
ids = pd.read_csv(data_root + 'metadata/koniq10k_distributions_sets.csv')
from keras.models import Model
Build the scoring model
# Build the scoring model: InceptionResNet-V2 backbone (ImageNet weights)
# plus a fully-connected regression head ending in a single score output.
# (Original line 27 fused two statements onto one line — a SyntaxError.)
base_model, preprocess_fn = apps.get_model_imagenet(apps.InceptionResNetV2)
head = apps.fc_layers(base_model.output, name='fc',
                      fc_sizes=[2048, 1024, 256, 1],
                      dropout_rates=[0.25, 0.25, 0.5, 0],
                      batch_norm=2)
model = Model(inputs=base_model.input, outputs=head)
Parameters of the generator
# Pre-processing: horizontal-flip augmentation, then the backbone-specific
# ImageNet preprocessing. (Original line fused the lambda and the dict
# assignment onto one line — a SyntaxError.)
pre = lambda im: preprocess_fn(iu.ImageAugmenter(im, remap=False).fliplr().result)

# Parameters for the batch generator: reads 'image_name' files from the
# dataset folder and targets the 'MOS' column.
gen_params = dict(batch_size=16,
                  data_path=data_root + 'images/512x384/',
                  process_fn=pre,
                  input_shape=(384, 512, 3),
                  inputs=['image_name'],
                  outputs=['MOS'])
Wrapper for the model; helps with training and testing
# Wrapper that handles training/testing bookkeeping for the model.
# NOTE(review): ModelHelper derives a log sub-directory name from the
# generator parameters (e.g. "bsz:16 i:1[384,512,3] l:MSE o:1[1]");
# ":" and "[" are invalid in Windows directory names, which is the likely
# cause of the CreateSummaryFileWriter "Failed to create a directory"
# error reported above — confirm against the kutils ModelHelper source.
helper = mh.ModelHelper(
    model, 'KonCept512', ids,
    loss='MSE',
    metrics=["MAE", ops.plcc_tf],
    monitor_metric='val_loss',
    monitor_mode='min',
    multiproc=True,
    workers=5,
    logs_root=drive_root + 'logs/koniq/',
    models_root=drive_root + 'models/koniq/',
    gen_params=gen_params,
)
Do validation in memory
# Build one large validation batch so validation runs fully in memory.
# (Original line fused two statements onto one line — a SyntaxError.)
valid_set = ids[ids.set == 'validation']  # hoisted: the filter was computed twice
valid_gen = helper.make_generator(valid_set, batch_size=len(valid_set))
# NOTE(review): [0][0] keeps only the first element of the first batch —
# confirm that train(valid_gen=...) expects inputs only rather than an
# (inputs, labels) tuple here.
valid_data = valid_gen[0][0]
# Two-stage training: 40 epochs at lr=1e-4, then reload the best checkpoint
# and fine-tune for 20 more epochs at one fifth of the learning rate.
# (Original line fused three statements onto one line — a SyntaxError.)
helper.train(valid_gen=valid_data, lr=1e-4, epochs=40)
helper.load_model()
helper.train(valid_gen=valid_data, lr=1e-4 / 5, epochs=20)