bethgelab / foolbox

A Python toolbox to create adversarial examples that fool neural networks in PyTorch, TensorFlow, and JAX
https://foolbox.jonasrauber.de
MIT License

Foolbox predictions do not match tensorflow slim predictions #178

Closed Arkadeep-sophoIITG closed 6 years ago

Arkadeep-sophoIITG commented 6 years ago

Foolbox TensorFlow model predictions do not match TensorFlow slim model predictions on clean images. Is there some issue while restoring the model?


from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import time

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim

from config.parse_config import parse_config_file
from nets import nets_factory
from preprocessing import inputs
import foolbox
from scipy.misc import imsave
from tensorflow.python.training import saver as tf_saver

from PIL import Image
import cv2

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from foolbox.criteria import TopKMisclassification
import csv

def test(tfrecords, checkpoint_path, save_dir, max_iterations, eval_interval_secs, cfg, read_images=False):
    """
    Args:
        tfrecords (list)
        checkpoint_path (str)
        save_dir (str)
        max_iterations (int)
        eval_interval_secs (int)
        cfg (EasyDict)
        read_images (bool)
    """
    tf.logging.set_verbosity(tf.logging.DEBUG)

    graph = tf.Graph()
    with graph.as_default():

        global_step = slim.get_or_create_global_step()

        with tf.device('/cpu:0'):
            batch_dict = inputs.input_nodes(
                tfrecords=tfrecords,
                cfg=cfg.IMAGE_PROCESSING,
                num_epochs=1,
                batch_size=cfg.BATCH_SIZE,
                num_threads=cfg.NUM_INPUT_THREADS,
                shuffle_batch=cfg.SHUFFLE_QUEUE,
                random_seed=cfg.RANDOM_SEED,
                capacity=cfg.QUEUE_CAPACITY,
                min_after_dequeue=cfg.QUEUE_MIN,
                add_summaries=False,
                input_type='test',
                read_filenames=read_images
            )

            batched_one_hot_labels = slim.one_hot_encoding(batch_dict['labels'],
                                                           num_classes=cfg.NUM_CLASSES)

        arg_scope = nets_factory.arg_scopes_map[cfg.MODEL_NAME]()

        with slim.arg_scope(arg_scope):
            logits, end_points = nets_factory.networks_map[cfg.MODEL_NAME](
                images=batch_dict['inputs'],
                num_classes=cfg.NUM_CLASSES,
                is_training=False
            )

            predictions = end_points['Predictions']
            # labels = tf.squeeze(batch_dict['labels'])
            labels = batch_dict['labels']
            # Add the loss summary
            loss = tf.losses.softmax_cross_entropy(
                logits=logits, onehot_labels=batched_one_hot_labels, label_smoothing=0., weights=1.0)

        if 'MOVING_AVERAGE_DECAY' in cfg and cfg.MOVING_AVERAGE_DECAY > 0:
            variable_averages = tf.train.ExponentialMovingAverage(
                cfg.MOVING_AVERAGE_DECAY, global_step)
            variables_to_restore = variable_averages.variables_to_restore(
                slim.get_model_variables())
            variables_to_restore[global_step.op.name] = global_step
        else:
            variables_to_restore = slim.get_variables_to_restore()
            variables_to_restore.append(global_step)

        if variables_to_restore is not None:
            saver = tf_saver.Saver(variables_to_restore)
        # Define the metrics:
        metric_map = {
            'Accuracy': tf.metrics.accuracy(labels=labels, predictions=tf.argmax(predictions, 1)),
            # slim.metrics.streaming_accuracy(labels=labels, predictions=tf.argmax(predictions, 1)),
            loss.op.name: slim.metrics.streaming_mean(loss)
        }

        if len(cfg.ACCURACY_AT_K_METRIC) > 0:
            bool_labels = tf.ones([cfg.BATCH_SIZE], dtype=tf.bool)
            for k in cfg.ACCURACY_AT_K_METRIC:
                if k <= 1 or k > cfg.NUM_CLASSES:
                    continue
                in_top_k = tf.nn.in_top_k(predictions=predictions, targets=labels, k=k)
                metric_map['Accuracy_at_%s' % k] = tf.metrics.accuracy(labels=bool_labels,
                                                                       predictions=in_top_k)  # slim.metrics.streaming_accuracy(labels=bool_labels, predictions=in_top_k)

        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(metric_map)

        # Print the summaries to screen.
        print_global_step = True
        for name, value in names_to_values.items():
            summary_name = 'eval/%s' % name
            op = tf.summary.scalar(summary_name, value, collections=[])
            if print_global_step:
                op = tf.Print(op, [global_step], "Model Step ")
                print_global_step = False
            op = tf.Print(op, [value], summary_name)
            tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

        if max_iterations > 0:
            num_batches = max_iterations
        else:
            # This ensures that we make a single pass over all of the data.
            # We could use ceil if the batch queue is allowed to pad the last batch
            num_batches = int(np.floor(cfg.NUM_TEST_EXAMPLES / float(cfg.BATCH_SIZE)))

        sess_config = tf.ConfigProto(
            log_device_placement=cfg.SESSION_CONFIG.LOG_DEVICE_PLACEMENT,
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(
                per_process_gpu_memory_fraction=cfg.SESSION_CONFIG.PER_PROCESS_GPU_MEMORY_FRACTION
            ),
            intra_op_parallelism_threads=cfg.SESSION_CONFIG.INTRA_OP_PARALLELISM_THREADS if 'INTRA_OP_PARALLELISM_THREADS' in cfg.SESSION_CONFIG else None,
            inter_op_parallelism_threads=cfg.SESSION_CONFIG.INTER_OP_PARALLELISM_THREADS if 'INTER_OP_PARALLELISM_THREADS' in cfg.SESSION_CONFIG else None
        )

        if eval_interval_secs > 0:

            if not os.path.isdir(checkpoint_path):
                raise ValueError("checkpoint_path should be a path to a directory when " \
                                 "evaluating in a loop.")

            slim.evaluation.evaluation_loop(
                master='',
                checkpoint_dir=checkpoint_path,
                logdir=save_dir,
                num_evals=num_batches,
                initial_op=None,
                initial_op_feed_dict=None,
                eval_op=names_to_updates.values(),
                eval_op_feed_dict=None,
                final_op=None,
                final_op_feed_dict=None,
                summary_op=tf.summary.merge_all(),
                summary_op_feed_dict=None,
                variables_to_restore=variables_to_restore,
                eval_interval_secs=eval_interval_secs,
                max_number_of_evaluations=None,
                session_config=sess_config,
                timeout=None
            )

        else:
            if os.path.isdir(checkpoint_path):
                checkpoint_dir = checkpoint_path
                checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)

                if checkpoint_path is None:
                    raise ValueError("Unable to find a model checkpoint in the " \
                                     "directory %s" % (checkpoint_dir,))

            tf.logging.info('Evaluating %s' % checkpoint_path)

            slim.evaluation.evaluate_once(
                master='',
                checkpoint_path=checkpoint_path,
                logdir=save_dir,
                num_evals=num_batches,
                eval_op=tf.Print(list(names_to_updates.values()), [labels, predictions, tf.argmax(predictions,1)],
                                 message="original label   predictions       predicted label ", summarize=100),
                variables_to_restore=variables_to_restore,
                session_config=sess_config
            )

        with tf.Session() as sess:
            # init_fn(sess)
            saver.restore(sess, checkpoint_path)
            model = foolbox.models.TensorFlowModel(batch_dict['inputs'], logits, (0, 255))
            criteria = TopKMisclassification(k=1)
            attacks = ['deepfool', 'igrad', 'igradsign']
            criter1 = foolbox.criteria.TargetClassProbability(0, 0.6)
            criter2 = foolbox.criteria.TargetClassProbability(1, 0.6)
            attack_1 = foolbox.attacks.DeepFoolAttack(model)
            attack_2 = foolbox.attacks.IterativeGradientAttack(model)
            attack_3 = foolbox.attacks.IterativeGradientSignAttack(model)
            attack_4 = foolbox.attacks.ApproximateLBFGSAttack(model, criter1)
            attack_5 = foolbox.attacks.SinglePixelAttack(model)
            attack_6 = foolbox.attacks.AdditiveUniformNoiseAttack(model)
            path = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/chest_disease/'
            dirs = os.listdir(path)
            counter = 0
            header = ['image', 'clean predicted label', 'clean predicted label confidence', 'other label conf',
                      'adversarial label', 'adversarial predicted label confidence', 'other adv label confidence ']
            data = []
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    adversarial_1 = attack_1(image, 0, unpack=False)
                except:
                    # keep adversarial_1 from leaking in from a previous iteration
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite('/local-scratch/ada77/cedar-rm/scratch/adversary/deepfool/chest_disease/' + str(
                        counter) + '.png', adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])
                else:
                    # no adversarial found: record the clean prediction twice
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('deepfool.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                writer.writerow(header)
                for j in data:
                    writer.writerow(j)
            del data[:]

            adversarial_1 = None
            path2 = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/No_finding/'
            dirs = os.listdir(path2)
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path2 + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    adversarial_1 = attack_1(image, 0, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite(
                        '/local-scratch/ada77/cedar-rm/scratch/adversary/deepfool/No_finding/' + str(counter) + '.png',
                        adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])
                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('deepfool.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                # writer.writerow(header)
                for j in data:
                    writer.writerow(j)

            del data[:]

            adversarial_1 = None
            path = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/chest_disease/'
            dirs = os.listdir(path)
            counter = 0
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    adversarial_1 = attack_3(image, 0, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite('/local-scratch/ada77/cedar-rm/scratch/adversary/igradsignattack/chest_disease/' + str(
                        counter) + '.png', adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])
                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('igradsignattack.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                writer.writerow(header)
                for j in data:
                    writer.writerow(j)
            del data[:]

            adversarial_1 = None
            path2 = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/No_finding/'
            dirs = os.listdir(path2)
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path2 + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    adversarial_1 = attack_3(image, 1, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite('/local-scratch/ada77/cedar-rm/scratch/adversary/igradsignattack/No_finding/' + str(
                        counter) + '.png', adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])
                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('igradsignattack.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                # writer.writerow( header)
                for j in data:
                    writer.writerow(j)
            del data[:]

            adversarial_1 = None
            path = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/chest_disease/'
            dirs = os.listdir(path)
            counter = 0
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    # this block writes to igradattack/, so attack_2
                    # (IterativeGradientAttack) appears to be intended here, not attack_3
                    adversarial_1 = attack_2(image, 0, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite('/local-scratch/ada77/cedar-rm/scratch/adversary/igradattack/chest_disease/' + str(
                        counter) + '.png', adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])
                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('igradattack.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                writer.writerow(header)
                for j in data:
                    writer.writerow(j)
            del data[:]

            adversarial_1 = None
            path2 = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/No_finding/'
            dirs = os.listdir(path2)
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path2 + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    # as above: attack_2 appears to be intended for the igradattack runs
                    adversarial_1 = attack_2(image, 0, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite('/local-scratch/ada77/cedar-rm/scratch/adversary/igradattack/No_finding/' + str(
                        counter) + '.png', adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])
                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('igradattack.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                # writer.writerow(header)
                for j in data:
                    writer.writerow(j)
            del data[:]

            path = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/chest_disease/'
            dirs = os.listdir(path)
            counter = 0
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    adversarial_1 = attack_6(image, 0, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite(
                        '/local-scratch/ada77/cedar-rm/scratch/adversary/add_uniform_noise/chest_disease/' + str(
                            counter) + '.png',
                        adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])

                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('add_unif_noise.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                writer.writerow(header)
                for j in data:
                    writer.writerow(j)
            del data[:]

            adversarial_1 = None
            path2 = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/No_finding/'
            dirs = os.listdir(path2)
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path2 + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    adversarial_1 = attack_6(image, 0, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite('/local-scratch/ada77/cedar-rm/scratch/adversary/add_uniform_noise/No_finding/' + str(
                        counter) + '.png', adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])
                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('add_unif_noise.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                # writer.writerow(header)
                for j in data:
                    writer.writerow(j)
            del data[:]

            adversarial_1 = None
            path = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/chest_disease/'
            dirs = os.listdir(path)
            counter = 0
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    adversarial_1 = attack_4(image, 0, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite('/local-scratch/ada77/cedar-rm/scratch/adversary/approxlbfgs/chest_disease/' + str(
                        counter) + '.png', adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])

                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('approxlbfgs.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                writer.writerow(header)
                for j in data:
                    writer.writerow(j)
            del data[:]

            adversarial_1 = None
            path2 = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test/No_finding/'
            dirs = os.listdir(path2)
            for item in dirs:
                counter = counter + 1
                image = cv2.imread(path2 + item)
                print(item)
                example_label = np.argmax(model.predictions(image))
                print(example_label)
                try:
                    adversarial_1 = attack_4(image, 0, unpack=False)
                except:
                    adversarial_1 = None
                    print('Not found adversarials dammit.. or some other weird error')
                pred = foolbox.utils.softmax(model.predictions(image))[example_label]
                if adversarial_1 is not None and adversarial_1.image is not None:
                    cv2.imwrite('/local-scratch/ada77/cedar-rm/scratch/adversary/approxlbfgs/No_finding/' + str(
                        counter) + '.png', adversarial_1.image)
                    label_adv = np.argmax(model.predictions(adversarial_1.image))
                    print(label_adv)
                    pred_adv = foolbox.utils.softmax(model.predictions(adversarial_1.image))[label_adv]
                    data.append([item, example_label, pred, 1 - pred, label_adv, pred_adv, 1 - pred_adv])
                else:
                    data.append([item, example_label, pred, 1 - pred, example_label, pred, 1 - pred])
            with open('approxlbfgs.csv', 'a') as file:
                writer = csv.writer(file, delimiter=',')
                # writer.writerow(header)
                for j in data:
                    writer.writerow(j)
            del data[:]

def parse_args():
    parser = argparse.ArgumentParser(description='Test the person classifier')

    parser.add_argument('--tfrecords', dest='tfrecords',
                        help='Paths to tfrecords.', type=str,
                        nargs='+', required=True)

    parser.add_argument('--checkpoint_path', dest='checkpoint_path',
                        help='Path to a specific model to test against. If a directory, then the newest checkpoint file will be used.',
                        type=str,
                        required=True, default=None)

    parser.add_argument('--save_dir', dest='savedir',
                        help='Path to directory to store summary files.', type=str,
                        required=True)

    parser.add_argument('--config', dest='config_file',
                        help='Path to the configuration file.',
                        required=True, type=str)

    parser.add_argument('--eval_interval_secs', dest='eval_interval_secs',
                        help='Go into an evaluation loop, waiting this many seconds between evaluations. Default is to evaluate once.',
                        required=False, type=int, default=0)

    parser.add_argument('--batch_size', dest='batch_size',
                        help='The number of images in a batch.',
                        required=False, type=int, default=None)

    parser.add_argument('--batches', dest='batches',
                        help='Maximum number of iterations to run. Default is all records (modulo the batch size).',
                        required=False, type=int, default=0)

    parser.add_argument('--model_name', dest='model_name',
                        help='The name of the architecture to use.',
                        required=False, type=str, default=None)

    parser.add_argument('--read_images', dest='read_images',
                        help='Read the images from the file system using the `filename` field rather than using the `encoded` field of the tfrecord.',
                        action='store_true', default=False)

    args = parser.parse_args()
    return args

def main():
    args = parse_args()

    cfg = parse_config_file(args.config_file)

    if args.batch_size != None:
        cfg.BATCH_SIZE = args.batch_size

    if args.model_name != None:
        cfg.MODEL_NAME = args.model_name

    test(
        tfrecords=args.tfrecords,
        checkpoint_path=args.checkpoint_path,
        save_dir=args.savedir,
        max_iterations=args.batches,
        eval_interval_secs=args.eval_interval_secs,
        cfg=cfg,
        read_images=args.read_images
    )

if __name__ == '__main__':
    main()
jonasrauber commented 6 years ago

Could you please give a minimal example that shows how you evaluate your model directly and how you evaluate it using foolbox, including the predicted logits / probabilities in both cases.
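
Something along the lines of the following sketch would be enough (untested; build_network, load_image, the input shape, and the checkpoint path are stand-ins for your own code):

import tensorflow as tf
import foolbox

# build the graph on a feedable input with an explicit batch size of 1
images = tf.placeholder(tf.float32, (1, 331, 331, 3))
logits, _ = build_network(images)  # your slim network goes here

with tf.Session() as sess:
    tf.train.Saver().restore(sess, '/path/to/checkpoint')

    image = load_image()  # a single HxWxC float32 image in [0, 255]

    # 1) evaluate the model directly
    direct_logits = sess.run(logits, feed_dict={images: image[None]})[0]

    # 2) evaluate it through foolbox
    model = foolbox.models.TensorFlowModel(images, logits, (0, 255))
    foolbox_logits = model.predictions(image)

    # the two arrays should be identical
    print(direct_logits)
    print(foolbox_logits)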

Arkadeep-sophoIITG commented 6 years ago

I think I figured out the reason. What should the input to the TensorFlow model be? The whole test dataset, right? I think I am unable to provide the 4-D placeholder to the model as input. Can you please help me with that?

import cv2
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory
import foolbox
from PIL import Image
import os

graph = tf.Graph()
image = np.asarray(
    Image.open('/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test_nasnet/No_finding/00000820_021.jpeg'))

with graph.as_default():
    data_tf = tf.convert_to_tensor(image, np.float32)
    global_step = slim.get_or_create_global_step()
    arg_scope = nets_factory.arg_scopes_map['nasnet_large']()

    with slim.arg_scope(arg_scope):
        logits, end_points = nets_factory.networks_map['nasnet_large'](
            inputs=data_tf,
            num_classes=2,
            is_training=False
        )
    predictions = end_points['Predictions']
    labels = 1
    variables_to_restore = slim.get_variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    metric_map = {
        'Accuracy': tf.metrics.accuracy(labels=labels, predictions=tf.argmax(predictions, 1))
    }
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(metric_map)
    checkpoint_dir1 = '/local-scratch/ada77/07_JUne/final_model_scratch/final_model'
    checkpoint_dir = '/local-scratch/ada77/07_JUne/Generalized_tfclassifer/tf_classification_nasnet/experiment/logdir'
    checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
    num_batches = 1
    slim.evaluation.evaluate_once(
        master='',
        checkpoint_path=checkpoint_path,
        logdir='/',
        num_evals=num_batches,
        eval_op=tf.Print(list(names_to_updates.values()), [labels, predictions, tf.argmax(predictions, 1)],
                         message="original label   predictions       predicted label ", summarize=100),
        variables_to_restore=variables_to_restore
    )

    with tf.Session() as sess:
        saver.restore(sess, checkpoint_path)
        model = foolbox.models.TensorFlowModel(data_tf, logits, (0, 255))
        path = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test_nasnet/chest_disease/'
        dirs = os.listdir(path)
        counter = 0
        header = ['image', 'clean predicted label', 'clean predicted label confidence', 'other label conf',
                  'adversarial label', 'adversarial predicted label confidence', 'other adv label confidence ']
        data = []
        for item in dirs:
            counter = counter + 1
            image = cv2.imread(path + item)
            print(item)
            example_label = np.argmax(model.predictions(image))
            print(example_label)
            pred = foolbox.utils.softmax(model.predictions(image))[0]
            print(pred, 1 - pred)
        # Label is always 0, no matter what the image is, with a high confidence score
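
If I understand correctly, foolbox needs an input it can feed, so instead of converting one fixed image to a tensor, the graph should probably be built on a placeholder with an explicit batch dimension. A sketch of what I mean (untested; 331 is nasnet_large's default input size, adjust if your config differs):

images = tf.placeholder(tf.float32, shape=(1, 331, 331, 3))
with slim.arg_scope(arg_scope):
    logits, end_points = nets_factory.networks_map['nasnet_large'](
        inputs=images,
        num_classes=2,
        is_training=False
    )
# foolbox then feeds each image through the placeholder
model = foolbox.models.TensorFlowModel(images, logits, (0, 255))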
Arkadeep-sophoIITG commented 6 years ago

Here's another example where I rectified the previous error. But now there is a problem with the logits, and it throws the same error every time. Here's the code:

import numpy as np
import os
import cv2
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory
import foolbox
from PIL import Image

path = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test_nasnet/chest_disease/'
dirs = os.listdir(path)
images = []
counter = 0
for item in dirs:
    counter = counter + 1
    image = cv2.imread(path + item)
    img = np.asarray(image, np.float32)
    images.append(img)

graph = tf.Graph()
with graph.as_default():
    data_tf = tf.convert_to_tensor(np.array(images))
    global_step = slim.get_or_create_global_step()
    arg_scope = nets_factory.arg_scopes_map['nasnet_large']()
    with slim.arg_scope(arg_scope):
        logits, end_points = nets_factory.networks_map['nasnet_large'](
            inputs=data_tf,
            num_classes=2,
            is_training=False
        )
    predictions = end_points['Predictions']
    variable_averages = tf.train.ExponentialMovingAverage(
        0.9999, global_step)
    variables_to_restore = variable_averages.variables_to_restore(
        slim.get_model_variables())
    saver = tf.train.Saver(variables_to_restore)

    checkpoint_dir1 = '/local-scratch/ada77/07_JUne/final_model_scratch/final_model'
    checkpoint_dir = '/local-scratch/ada77/07_JUne/Generalized_tfclassifer/tf_classification_nasnet/experiment/logdir'
    checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
    with tf.Session() as sess:
        saver.restore(sess, checkpoint_path)
        model = foolbox.models.TensorFlowModel(data_tf, logits, (0, 255))
        path = '/local-scratch/ada77/cedar-rm/scratch/afresh/organised_test_nasnet/chest_disease/'
        dirs = os.listdir(path)
        counter = 0
        for item in dirs:
            counter = counter + 1
            image = cv2.imread(path + item)
            print(item)
            example_label = np.argmax(model.predictions(image))
            print(example_label)
            pred = foolbox.utils.softmax(model.predictions(image))[0]
            print(pred, 1 - pred)

Here's the traceback:

Traceback (most recent call last):
  File "fool_nasnet.py", line 45, in <module>
    model = foolbox.models.TensorFlowModel(data_tf, logits, (0, 255))
  File "/home/ada77/.local/lib/python2.7/site-packages/foolbox/models/tensorflow.py", line 55, in __init__
    self._logits = tf.squeeze(logits, axis=0)
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.8.0-GPU-PY2713/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 2630, in squeeze
    return gen_array_ops.squeeze(input, axis, name)
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.8.0-GPU-PY2713/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 7862, in squeeze
    "Squeeze", input=input, squeeze_dims=axis, name=name)
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.8.0-GPU-PY2713/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
    op_def=op_def)
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.8.0-GPU-PY2713/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3392, in create_op
    op_def=op_def)
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.8.0-GPU-PY2713/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1734, in __init__
    control_input_ops)
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.8.0-GPU-PY2713/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1570, in _create_c_op
    raise ValueError(str(e))
ValueError: Can not squeeze dim[0], expected a dimension of 1, got 93 for 'Squeeze' (op: 'Squeeze') with input shapes: [93,2].
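
Reading the traceback, foolbox squeezes axis 0 of the logits, so it seems to expect the graph to be built for a single image, i.e. a batch dimension of exactly 1, while data_tf here stacks all 93 images and the logits come out as [93, 2]. A sketch of the shape it appears to expect (again an assumption on my part):

# foolbox does: self._logits = tf.squeeze(logits, axis=0)
# tf.squeeze(..., axis=0) requires dim 0 to be 1, so the input must be a
# single-image batch, not the whole stacked dataset:
images = tf.placeholder(tf.float32, shape=(1, 331, 331, 3))  # not (93, ...)
# the resulting logits then have shape [1, 2], which squeezes to [2]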

wielandbrendel commented 6 years ago

It's very difficult to help you with this issue since we can't run your example (e.g., you are referring to checkpoints on your local hard drive). Please provide a self-contained example without references to files that we cannot access. Please reopen this issue if it's still relevant.