CIFAR-10 CLASSIFICATION #1

Open Kodee123 opened 5 months ago

Kodee123 commented 5 months ago

Implement and test a neural network model for classifying the CIFAR-10 dataset.

CIFAR-10 is a popular dataset of 60,000 color images of 32x32 pixels, divided into 10 classes, each representing a different object category such as cars, dogs, or ships.
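
For orientation, a quick sketch of loading the dataset and checking its size (the class-name list below is the standard CIFAR-10 label order, included only for readability):

import tensorflow as tf

# 50,000 training and 10,000 test images, each 32x32x3
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
print(x_train.shape, x_test.shape)  # (50000, 32, 32, 3) (10000, 32, 32, 3)

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']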

Suggested layers to use: Conv2D, Dropout, Dense, MaxPooling2D, Flatten.

Note: If the number of layers exceeds 10, add a kernel_initializer (e.g. He initialization) to improve the CNN's performance and help mitigate the vanishing gradient problem.
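
For reference, the initializer is passed per layer via the kernel_initializer argument; a minimal sketch (the deeper model in the comment below uses the same argument):

from tensorflow.keras import layers

# He-normal weight initialization helps keep gradients stable in deeper stacks
conv = layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal')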

The results of the network should be presented in a report including: training and validation accuracy/loss curves, the final test accuracy, and a confusion matrix.

Kodee123 commented 5 months ago

import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import numpy as np

# Load CIFAR-10 and scale pixel values to [0, 1]
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0

# Baseline CNN: three convolutional blocks followed by a small dense head
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax')
])
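
To document the architecture in the report, the layer output shapes and parameter counts could be printed; a minimal check:

# Print per-layer output shapes and parameter counts
model.summary()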

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

history = model.fit(train_images, train_labels, epochs=20, validation_data=(test_images, test_labels))

test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f"Test Accuracy: {test_acc:.4f}")

model.save('cifar10_model.keras')
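
The saved model can later be restored without retraining; a minimal sketch (the name restored_model is just for illustration):

restored_model = tf.keras.models.load_model('cifar10_model.keras')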

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

# Predicted vs. true class indices for the confusion matrix
predictions = model.predict(test_images)
y_pred = np.argmax(predictions, axis=1)
y_true = test_labels.flatten()

cm = confusion_matrix(y_true, y_pred)
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.title('Confusion Matrix')
plt.show()

disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=range(10))
disp.plot(cmap=plt.cm.Blues)
plt.show()
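
The issue only asks for a confusion matrix, but per-class precision and recall could complement it in the report; a short sketch using scikit-learn's classification_report:

from sklearn.metrics import classification_report

# Per-class precision, recall, and F1 from the same predictions
print(classification_report(y_true, y_pred, digits=3))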

def create_model(num_layers):
    # Deeper CNN with He-normal initialization, as suggested for 10+ layers.
    # 'same' padding and pooling only after every second conv layer keep the
    # 32x32 feature map from shrinking below 1x1 (a valid-padding conv plus a
    # pool after every layer would fail once the map reaches 2x2).
    model = models.Sequential()
    for i in range(num_layers):
        if i == 0:
            model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same',
                                    input_shape=(32, 32, 3), kernel_initializer='he_normal'))
        else:
            model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same',
                                    kernel_initializer='he_normal'))
        if i % 2 == 1 and i < 8:  # at most four 2x2 poolings: 32 -> 2
            model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10, activation='softmax'))
    return model

num_layers = 10
model = create_model(num_layers)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
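
The comment stops at compiling the deeper model; presumably it is then trained and evaluated the same way as the baseline. A sketch (variable names history_deep and deep_acc are just for illustration):

# Train and evaluate the 10-layer model with the same settings as the baseline
history_deep = model.fit(train_images, train_labels, epochs=20,
                         validation_data=(test_images, test_labels))
deep_loss, deep_acc = model.evaluate(test_images, test_labels)
print(f"Deep model test accuracy: {deep_acc:.4f}")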