How can I display the wrong predictions in the run/console/terminal of my deep-learning code?
I am trying to display all missed subjects (false positives/false negatives) on the run/terminal/console within the IDE. The reason is that I am using 100 subjects separated into two folders (testing and training), and when the last epoch finishes it displays the confusion matrix, which looks like this:
As you can see, the missed subjects are unreadable in the plot.
Question: How can I display the missed subjects on the run console?
# Importing libraries
import os
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import numpy as np
from keras.utils.np_utils import to_categorical
from image_dataset_loader import load
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
np.random.seed(1000)
# Instantiation
AlexNet = Sequential()
# 1st Convolutional Layer
AlexNet.add(Conv2D(filters=96, input_shape=(227, 227, 3), kernel_size=(11, 11), strides=(4, 4), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
# 2nd Convolutional Layer
AlexNet.add(Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
# 3rd Convolutional Layer
AlexNet.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
# 4th Convolutional Layer
AlexNet.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
# 5th Convolutional Layer
AlexNet.add(Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
# Passing it to a Fully Connected layer
AlexNet.add(Flatten())
# 1st Fully Connected Layer
AlexNet.add(Dense(4096, input_shape=(32, 32, 3,)))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
# Add Dropout to prevent overfitting
AlexNet.add(Dropout(0.4))
# 2nd Fully Connected Layer
AlexNet.add(Dense(4096))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
# Add Dropout
AlexNet.add(Dropout(0.4))
# 3rd Fully Connected Layer
AlexNet.add(Dense(1000))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
# Add Dropout
AlexNet.add(Dropout(0.4))
# Output Layer
AlexNet.add(Dense(100)) #LFW-CARE
#AlexNet.add(Dense(24)) #FERET
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('softmax'))
# Model Summary
AlexNet.summary()
# Compiling the model
AlexNet.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
# Josh Plan 1: FERET Pathway
#path = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_FERET/Plan1-DL/Images_AlexNet"
#imgPath = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_FERET/Plan1-DL/Images_AlexNet/Josh_Training_CHD_8I_24S_227"
#testPath = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_FERET/Plan1-DL/Images_AlexNet/Josh_Testing_CHD_24S_227"
#LFW-CARE Pathway
path = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_LFW-CARE/Plan1-DL-LFW-CARE"
imgPath = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_LFW-CARE/Plan1-DL-LFW-CARE/Josh_Training_SEN_8I_100S_227"
testPath = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_LFW-CARE/Plan1-DL-LFW-CARE/Josh_Testing_SEN_100S_227"
(x_train, y_train), (x_test, y_test) = load(path, [imgPath, testPath])
temp = []
for label in y_train:
    temp.append([label])
y_train = np.array(temp)
# print('-------------------------4')
# print(y_train)
temp = []
for label in y_test:
    temp.append([label])
y_test = np.array(temp)
# Train-validation-test split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=.12)
# Dimensions of the train/validation/test splits
print((x_train.shape, y_train.shape))
print((x_val.shape, y_val.shape))
print((x_test.shape, y_test.shape))
# Onehot Encoding the labels.
# from sklearn.utils.multiclass import unique_labels
# With 100 classes, shape[1] of y_train, y_val and y_test changes from 1 to 100
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
y_test = to_categorical(y_test)
# Verifying the dimension after one hot encoding
print((x_train.shape, y_train.shape))
print((x_val.shape, y_val.shape))
print((x_test.shape, y_test.shape))
# Image Data Augmentation
train_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
val_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
test_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
# Fitting the augmentation defined above to the data
train_generator.fit(x_train)
val_generator.fit(x_val)
test_generator.fit(x_test)
# Learning Rate Annealer
lrr = ReduceLROnPlateau(monitor='val_accuracy', factor=.01, patience=3, min_lr=1e-5)
# Defining the parameters
batch_size = 10
epochs = 50
learn_rate = .001
# Training the model
AlexNet.fit(train_generator.flow(x_train, y_train, batch_size=batch_size),
            epochs=epochs, steps_per_epoch=x_train.shape[0] // batch_size,
            validation_data=val_generator.flow(x_val, y_val, batch_size=batch_size),
            validation_steps=2, callbacks=[lrr], verbose=1)
# After successful training, we will visualize its performance.
# Plotting the training and validation loss
f, ax = plt.subplots(1, 1)  # Creates a single plot for the loss curves
# Assigning the first subplot to graph training loss and validation loss
ax.plot(AlexNet.history.history['loss'], color='b', label='Training Loss')
ax.plot(AlexNet.history.history['val_loss'], color='r', label='Validation Loss')
plt.legend()
plt.show()
f, ax = plt.subplots(1, 1)  # Creates a single plot for the accuracy curves
# Plotting the training accuracy and validation accuracy
ax.plot(AlexNet.history.history['accuracy'], color='b', label='Training Accuracy')
ax.plot(AlexNet.history.history['val_accuracy'], color='r', label='Validation Accuracy')
plt.legend()
plt.show()
# Defining function for confusion matrix plot
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    # Print Confusion matrix
    fig, ax = plt.subplots(figsize=(4, 4))
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center",
                    color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    return ax
np.set_printoptions(precision=2)
# Making prediction
y_pred = (AlexNet.predict_classes(x_test))
y_true = np.argmax(y_test, axis=1)
print(y_pred)
print(y_pred.shape)
# Plotting the confusion matrix
confusion_mtx = confusion_matrix(y_true, y_pred)
# class_names=['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
CLASS_NAMES = [f.name for f in os.scandir(imgPath) if f.is_dir()]
class_names = CLASS_NAMES
print(class_names)
print("ypred\n", y_pred)
print("ytrue", y_true)
# Plotting non-normalized confusion matrix
plot_confusion_matrix(y_true, y_pred, classes=class_names, title='AlexNet Confusion matrix, without normalization')
plt.show()
# Plotting normalized confusion matrix
# plot_confusion_matrix(y_true, y_pred, classes=class_names, normalize=True, title='Normalized confusion matrix')
# plt.show()
# Classification Metrics
acc_score = accuracy_score(y_true, y_pred)
print('\n\n\t\t Accuracy Score: ', str(round((100 * acc_score), 2)), '%')
prec_score = precision_score(y_true, y_pred, average='macro')
print(' Precision Score Macro: ', str(round((100 * prec_score), 2)), '%')
prec_score = precision_score(y_true, y_pred, average='micro')
print(' Precision Score Micro: ', str(round((100 * prec_score), 2)), '%')
prec_score = precision_score(y_true, y_pred, average='weighted')
print('Precision Score Weighted: ', str(round((100 * prec_score), 2)), '%')
rec_score = recall_score(y_true, y_pred, average='macro')
print('\t\t\tRecall Macro: ', str(round((100 * rec_score), 2)), '%')
rec_score = recall_score(y_true, y_pred, average='micro')
print('\t\t\tRecall Micro: ', str(round((100 * rec_score), 2)), '%')
rec_score = recall_score(y_true, y_pred, average='weighted')
print('\t\t Recall Weighted: ', str(round((100 * rec_score), 2)), '%')
f_score = f1_score(y_true, y_pred, average='macro')
print('\t\t F1 Score Macro: ', str(round((100 * f_score), 2)), '%')
f_score = f1_score(y_true, y_pred, average='micro')
print('\t\t F1 Score Micro: ', str(round((100 * f_score), 2)), '%')
f_score = f1_score(y_true, y_pred, average='weighted')
print('\t F1 Score Weighted: ', str(round((100 * f_score), 2)), '%')
Answer:
Just add a for loop at the end that compares y_true and y_pred and prints every place where the prediction is wrong.
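A minimal sketch of that loop, assuming the y_true, y_pred and class_names variables from the question's code (1-D integer label arrays and a list mapping each label index to a subject/folder name):

# Compare true vs. predicted label index and report every mismatch
for i in range(len(y_pred)):
    if y_pred[i] != y_true[i]:
        print(f"Sample {i}: true = {class_names[y_true[i]]}, "
              f"predicted = {class_names[y_pred[i]]}")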
This prints the labels that were misclassified; if you only want the unique values, you can append them to a set(), or store the associated index i and use it to print whatever information you need, as sketched below.
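For instance, a variation under the same assumptions that collects each missed subject only once:

# Collect each misclassified subject name only once
missed_subjects = set()
for i in range(len(y_pred)):
    if y_pred[i] != y_true[i]:
        missed_subjects.add(class_names[y_true[i]])
print("Missed subjects:", sorted(missed_subjects))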