I would like to train a model that can distinguish different cat food cans, but it does not seem to work, and I don't know why the model can't predict correctly.
We made the dataset ourselves and took photos from different angles.
I'm not sure which part is wrong.
Could you please help me see how I can get the model to work?
Here is my code:
import keras.backend
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Input, Dense, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
import matplotlib.pyplot as plt
from keras.models import Model
import tensorflow as tf
from keras.applications import Xception
train_path = './train'
test_path = './test'
batch_size = 16
image_size = (224,224)
epoch = 30
FREEZE_LAYERS = 2
model = Xception(include_top=False,
weights='imagenet',
input_shape=(224,224, 3))
x = model.output
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
predictions = Dense(26, activation='softmax')(x)
model = Model(inputs=model.input, outputs=predictions)
model.compile(optimizer=Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
estop = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
checkpoint = ModelCheckpoint('Xception_checkpoint.h5', verbose=1,
monitor='val_loss', save_best_only=True,
mode='max')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=5, mode='min', verbose=1,
min_lr=1e-4)
train_datagen = ImageDataGenerator(rescale= 1.0/255,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
channel_shift_range=10,
horizontal_flip=True,
fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale= 1.0/255)
train_generator = train_datagen.flow_from_directory(train_path,
target_size=image_size,
class_mode='categorical',
shuffle=True,
batch_size=batch_size)
valid_generator = val_datagen.flow_from_directory(test_path,
target_size=image_size,
class_mode='categorical',
shuffle=False,
batch_size=batch_size)
history = model.fit_generator(train_generator,
epochs=epoch, verbose=1,
steps_per_epoch=train_generator.samples//batch_size,
validation_data=valid_generator,
validation_steps=valid_generator.samples//batch_size,
callbacks=[checkpoint, estop, reduce_lr])
#class_weight=class_weights)
model.save('./Xception_retrained_v2.h5')
print('saved Xception_retrained_v2.h5')
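One thing to note in the script above: FREEZE_LAYERS is defined but never applied, so every Xception layer is trained from the start. If the intent was to fine-tune only the last FREEZE_LAYERS layers, a minimal sketch (my assumption about the intended use, not part of the original code) would be:
# Assumption: freeze everything except the last FREEZE_LAYERS layers,
# then recompile so the change in trainable flags takes effect.
for layer in model.layers[:-FREEZE_LAYERS]:
    layer.trainable = False
for layer in model.layers[-FREEZE_LAYERS:]:
    layer.trainable = True
model.compile(optimizer=Adam(lr=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])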
I have set up joblib for parallel computing; so far, I have been able to use it to compute several metrics. I intend to compute the G-mean after the ROC AUC. However, I am unable to retrieve y_test and svm_probs from the function; it gives an error when I try to retrieve them.
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.datasets import make_classification
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix, f1_score
from sklearn import svm
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import LinearSVC
from joblib import Parallel, delayed
from sklearn.datasets import load_breast_cancer
data = load_breast_cancer()
X = data.data
y = data.target
skf = StratifiedKFold(n_splits=5)
clf = svm.SVC(kernel='rbf', probability=True)
def train(train_index, test_index):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    clf.fit(X_train, y_train)
    r_probs = [0 for _ in range(len(y_test))]
    svm_probs = clf.predict_proba(X_test)
    svm_probs = svm_probs[:, 1]
    svm_auc = roc_auc_score(y_test, svm_probs)
    return dict(svm_auc=svm_auc)
out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
delayed(train)(train_index, test_index) for train_index, test_index in skf.split(X, y))
svm_auc = [d['svm_auc'] for d in out]
print(np.mean(svm_auc))
rf_fpr,rf_tpr, _ = roc_curve(y_test,svm_probs)
gmeans_rf = np.sqrt(rf_tpr * (1-rf_fpr))
ix_rf = np.argmax(gmeans_rf)
print("%.3f" % gmeans_rf[ix_rf])
Could you please take a look at the code below? I am trying to implement a simple sparse coding algorithm. I try to visualize the filters at the end, but it seems that the filters are not learnt at all.
In the code, phi and the weights should be learnt independently; I used the ISTA algorithm to learn phi.
I would appreciate it if you could take a look.
Thank you.
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('device is:', device)
# dataset definition
mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor()]))
mnist_testset = datasets.MNIST(root='./data', train=False, download=True, transform=transforms.Compose([transforms.ToTensor()]))
mnist_trainset.data = mnist_trainset.data[:10000]
mnist_testset.data = mnist_testset.data[:5000]
from torch.utils.data import DataLoader
train_dl = DataLoader(mnist_trainset, batch_size=32, shuffle=True)
test_dl = DataLoader(mnist_testset, batch_size=1024, shuffle=False)
from numpy import vstack
from sklearn.metrics import accuracy_score
from torch.optim import SGD
from torch.nn import Module
from torch.nn import Linear
from tqdm import tqdm
class MNIST_ISTA(Module):
    # define model elements
    def __init__(self, n_inputs):
        self.lambda_ = 0.5e-5
        super(MNIST_ISTA, self).__init__()
        # input to first hidden layer
        # self.sc = Scattering2D(shape=(28,28), J=2)
        # self.view = Vi
        self.neurons = 400
        self.receptive_field = 10
        self.output = Linear(self.neurons, 28*28)
        self.phi = None

    # forward propagate input
    def ista_(self, img_batch):
        self.phi = torch.zeros((img_batch.shape[0], 400), requires_grad=True)
        converged = False
        # optimizer = SGD(model.parameters(), lr=0.001, momentum=0.9)
        optimizer = torch.optim.SGD([{'params': self.phi, "lr": 0.1e-3},
                                     {'params': self.parameters(), "lr": 0.1e-3}])
        while not converged:
            phi_old = self.phi.clone().detach()
            pred = self.output(self.phi)
            loss = ((img_batch - pred)**2).sum() + torch.norm(self.phi, p=1)
            loss.backward()
            optimizer.step()
            self.zero_grad()
            self.phi.data = self.soft_thresholding_(self.phi, self.lambda_)
            converged = torch.norm(self.phi - phi_old) / torch.norm(phi_old) < 1e-1

    def soft_thresholding_(self, x, alpha):
        with torch.no_grad():
            rtn = F.relu(x - alpha) - F.relu(-x - alpha)
            return rtn.data

    def zero_grad(self):
        self.phi.grad.zero_()
        self.output.zero_grad()

    def forward(self, img_batch):
        self.ista_(img_batch)
        pred = self.output(self.phi)
        return pred
ista_model = MNIST_ISTA(400)
optim = torch.optim.SGD([{'params': ista_model.output.weight, "lr": 0.01}])
for epoch in range(100):
    running_loss = 0
    c = 0
    for img_batch in tqdm(train_dl, desc='training', total=len(train_dl)):
        img_batch = img_batch[0]
        img_batch = img_batch.reshape(img_batch.shape[0], -1)
        pred = ista_model(img_batch)
        loss = ((img_batch - pred) ** 2).sum()
        running_loss += loss.item()
        loss.backward()
        optim.step()
        # zero grad
        ista_model.zero_grad()

weight = ista_model.output.weight.data.numpy()
print(weight.shape)

import matplotlib.pyplot as plt
fig = plt.figure(figsize=(20, 20))
for i in range(20):
    for j in range(20):
        ax = fig.add_subplot(20, 20, i*20 + j + 1)
        ax.imshow(weight[:, 1].reshape((28, 28)))
        ax.axis('off')
# plt.close(fig)
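For reference, a plain ISTA update for the codes alone, with the dictionary held fixed, can be written in a few lines; here z plays the role of phi and D the role of output.weight.t(), and the function names and step-size choice are illustrative assumptions rather than part of the code above:
import torch
import torch.nn.functional as F

def soft_threshold(x, thr):
    # proximal operator of thr * ||.||_1
    return torch.sign(x) * F.relu(x.abs() - thr)

def ista(x, D, lam=0.1, n_steps=100):
    # minimise 0.5 * ||x - z @ D||^2 + lam * ||z||_1 over the codes z,
    # with x of shape (batch, 784) and dictionary D of shape (n_atoms, 784)
    L = torch.linalg.eigvalsh(D @ D.t()).max()   # Lipschitz constant of the gradient
    step = 1.0 / L
    z = torch.zeros(x.shape[0], D.shape[0])
    for _ in range(n_steps):
        grad = (z @ D - x) @ D.t()               # gradient of the quadratic term
        z = soft_threshold(z - step * grad, step * lam)
    return z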
I am working on an RNN. After training, I get high accuracy on the test data set. However, when I make predictions on some external data, it predicts very poorly. I also used the same data set, which has over 300,000 texts and 57 classes, with an artificial neural network, and it still predicts very poorly. When I tried the same data set on a classical machine learning model, it worked fine.
Here is my code:
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, LSTM, BatchNormalization
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
df = pd.read_excel("data.xlsx", usecols=["X", "y"])
df = df.sample(frac = 1)
X = np.array(df["X"])
y = np.array(df["y"])
le = LabelEncoder()
y = le.fit_transform(y)
y = y.reshape(-1,1)
encoder = OneHotEncoder(sparse=False)
y = encoder.fit_transform(y)
num_words = 100000
token = Tokenizer(num_words=num_words)
token.fit_on_texts(X)
seq = token.texts_to_sequences(X)
X = sequence.pad_sequences(seq, padding = "pre", truncating = "pre")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = Sequential()
model.add(Embedding(num_words, 96, input_length = X.shape[1]))
model.add(LSTM(108, activation='relu', dropout=0.1, recurrent_dropout = 0.2))
model.add(BatchNormalization())
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer="rmsprop", metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train, epochs=4, batch_size=64, validation_data = (X_test, y_test))
loss, accuracy = model.evaluate(X_test, y_test)
Here are the history plots of the model:
After doing some research, I realized that the model was actually working fine. The problem was that I was using the Keras Tokenizer incorrectly.
At the end of the code, I used the following:
sentence = ["Example Sentence to Make Prediction."]
token.fit_on_texts(sentence)  # <- This line is redundant.
seq = token.texts_to_sequences(sentence)
cx = sequence.pad_sequences(seq, maxlen = X.shape[1])
sx = np.argmax(model.predict(cx), axis=1)
The problem occurs when the Tokenizer is fitted again on the new data, so removing that line solved the problem for me.
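For completeness, a sketch of the corrected inference snippet (same variable names as above, without re-fitting the tokenizer):
sentence = ["Example Sentence to Make Prediction."]
# keep the vocabulary learned on the training texts; fitting the tokenizer
# again would change the word-to-index mapping the model was trained with
seq = token.texts_to_sequences(sentence)
cx = sequence.pad_sequences(seq, maxlen=X.shape[1])
predicted_class = np.argmax(model.predict(cx), axis=1)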
I am new to transfer learning and CNNs; I was just playing around with a CNN and got this error. I tried many solutions, but none of them works.
import numpy as np
import keras
from keras import backend as k
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import GlobalMaxPooling2D
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Model
from keras.applications import imagenet_utils
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
%matplotlib inline
mobile = keras.applications.mobilenet.MobileNet()
#mobile.summary()
train_path = 'chest_xray/train'
val_path = 'chest_xray/val'
test_path = 'chest_xray/test'
train_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
train_path,
target_size = (224,224),
batch_size = 10)
test_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
test_path,
target_size = (224,224),
batch_size = 10,
shuffle = False)
val_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
val_path,
target_size = (224,224),
batch_size = 10)
def prepare_image(file):
    image_path = ''
    img = image.load_img(image_path + file, target_size=(224, 224))
    img_array = image.img_to_array(img)
    img_array_dims = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(img_array_dims)
x = mobile.layers[-60].output
predictions = Dense(1,activation='softmax')(x)
model = Model(inputs = mobile.input,outputs = predictions)
print(mobile.input)
#model.summary()
for layer in model.layers[:-5]:
    layer.trainable = False
model.compile(Adam(lr=.0001),loss='categorical_crossentropy',metrics=['accuracy'])
model.fit_generator(train_batch,
steps_per_epoch=4,
validation_data=val_batch,
validation_steps=2,
epochs = 30)
I am using MobileNet for transfer learning, and this error appears every time. None of the solutions seems to work. I tried playing with Flatten() and GlobalMaxPooling2D(), but no results.
ERROR:
ValueError Traceback (most recent call last)
<ipython-input-187-08820ea8d15a> in <module>()
3 validation_data=val_batch,
4 validation_steps=2,
----> 5 epochs = 30)
ValueError: Error when checking target: expected dense_39 to have 4 dimensions, but got array with shape (10, 2)
The MobileNet layer at which you are chopping off the network (index -60) is conv_dw_5_relu, which has output shape (None, 28, 28, 256), so you will have to flatten it before connecting a Dense layer to it.
Working code
mobile = keras.applications.mobilenet.MobileNet()
x = mobile.layers[-60].output
x = Flatten()(x)
predictions = Dense(2,activation='softmax')(x)
model = Model(inputs = mobile.input,outputs = predictions)
#model.summary()
model.compile(Adam(lr=.0001),loss='categorical_crossentropy',metrics=['accuracy'])
model.fit(np.random.rand(10, 224, 224, 3), np.random.rand(10,2))
Thanks in advance to anyone who takes the time to answer this. I'm learning Keras and got stuck on a problem where I have 3 classes and the test-set accuracy climbs to 0.6667 and then stalls at exactly that number for 50 epochs. The accuracy is also much higher than it should be if it were computed correctly. This worked fine when I only had 2 classes.
What am I doing wrong here?
import pandas as pd
import numpy as np
import keras.utils
#Create train and test data
def create_Xt_Yt(X, y, percentage=0.8):
    p = int(len(X) * percentage)
    X_train = X[0:p]
    Y_train = y[0:p]
    X_test = X[p:]
    Y_test = y[p:]
    return X_train, X_test, Y_train, Y_test
df = pd.read_csv('data.csv', parse_dates=['Date'])
df.set_index(['Date'], inplace=True)
df.drop(['Volume'],1, inplace=True)
df.dropna(inplace=True)
data = df.loc[:, 'AMD-close'].tolist()
window = 30
forecast = 3
forecast_target_long = 1.015
forecast_target_short= 0.985
x_holder = []
y_holder = []
for i in range(len(data)):
    try:
        x_class = data[i:i+window]
        y_class = data[i+window+forecast]
        window_last_price = data[i+window]
        forecast_price = y_class
        if forecast_price > (window_last_price * forecast_target_long):
            y_class = [1]
        elif forecast_price < (window_last_price * forecast_target_short):
            y_class = [-1]
        else:
            y_class = [0]
        y_holder.append(y_class)
        x_holder.append(x_class)
    except Exception as e:
        print(e)
        break
normalize = [(np.array(i) - np.mean(i)) / np.std(i) for i in x_holder]
y_holder = keras.utils.to_categorical(y_holder, 3)
x_holder, y_holder = np.array(x_holder), np.array(y_holder)
X_train, X_test, Y_train, Y_test = create_Xt_Yt(x_holder, y_holder)
This is the model:
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.optimizers import RMSprop, Adam, SGD, Nadam
from keras.callbacks import ReduceLROnPlateau
from keras import regularizers
from keras import losses
model = Sequential()
model.add(Dense(64, input_dim=window, activity_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(16, activity_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(3))
model.add(Activation('sigmoid'))
reduce_learning_ontop = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=25, min_lr=0.000001, verbose=1)
model.compile(Adam(lr=.0001),loss='binary_crossentropy', metrics=['accuracy'])
myModel = model.fit(X_train, Y_train, batch_size=128, epochs=160, verbose=1, shuffle=True, validation_data=(X_test, Y_test))
So, two things here:
Change activation:
model.add(Activation('softmax'))
sigmoid is designed for binary classification; for multiclass classification, softmax is the standard activation.
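A quick numeric illustration (not from the original answer): sigmoid squashes each class score independently, so the three outputs do not form a distribution, while softmax normalises them to sum to 1:
import numpy as np
logits = np.array([2.0, 1.0, 0.1])
sigmoid = 1 / (1 + np.exp(-logits))                # independent per-class scores
softmax = np.exp(logits) / np.exp(logits).sum()    # a proper probability distribution
print(sigmoid, sigmoid.sum())   # [0.881 0.731 0.525], sum is about 2.14
print(softmax, softmax.sum())   # [0.659 0.242 0.099], sum is 1.0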
Change loss:
model.compile(
Adam(lr=.0001),
loss='categorical_crossentropy', metrics=['accuracy'])
binary_crossentropy is likewise designed for binary classification; its multiclass counterpart is categorical_crossentropy.
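Putting both changes together, the end of the model definition and the compile call would read (a sketch reusing the names from the question):
model.add(Dense(3))
model.add(Activation('softmax'))   # one probability per class, summing to 1
model.compile(Adam(lr=.0001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])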