I'm using a DNN to fit this data, with softmax to classify it into 2 classes; each sample has a dimensionality of 4040. Can someone with experience tell me what's wrong with my network?
It is strange that my initial loss is 7.6 and my initial error is 0.5524, and they basically don't change after that.
for train, test in kfold.split(data_pro, valence_labels):
    model = keras.Sequential()
    model.add(keras.layers.Dense(5000, activation='relu', input_shape=(4040,)))
    model.add(keras.layers.Dropout(rate=0.25))
    model.add(keras.layers.Dense(500, activation='relu'))
    model.add(keras.layers.Dropout(rate=0.5))
    model.add(keras.layers.Dense(1000, activation='relu'))
    model.add(keras.layers.Dropout(rate=0.5))
    model.add(keras.layers.Dense(2, activation='softmax'))
    model.add(keras.layers.Dropout(rate=0.5))

    model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001, rho=0.9),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    print('------------------------------------------------------------------------')
    print(f'Training for fold {fold_no} ...')

    log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

    # Fit data to model
    history = model.fit(data_pro[train], valence_labels[train],
                        batch_size=128,
                        epochs=50,
                        verbose=1,
                        callbacks=[tensorboard_callback])

    # Generate generalization metrics
    scores = model.evaluate(data_pro[test], valence_labels[test], verbose=0)
    print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
    acc_per_fold.append(scores[1] * 100)
    loss_per_fold.append(scores[0])

    # Increase fold number
    fold_no = fold_no + 1

# == Provide average scores ==
print('------------------------------------------------------------------------')
print('Score per fold')
for i in range(0, len(acc_per_fold)):
    print('------------------------------------------------------------------------')
    print(f'> Fold {i+1} - Loss: {loss_per_fold[i]} - Accuracy: {acc_per_fold[i]}%')
print('------------------------------------------------------------------------')
print('Average scores for all folds:')
print(f'> Accuracy: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')
print(f'> Loss: {np.mean(loss_per_fold)}')
print('------------------------------------------------------------------------')
You shouldn't add Dropout after the final Dense layer; delete the model.add(keras.layers.Dropout(rate=0.5)).
I also think your code may raise an error, because your labels have dimension 1 but your final Dense layer has 2 units. Change model.add(keras.layers.Dense(2, activation='softmax')) to model.add(keras.layers.Dense(1, activation='sigmoid')).
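For reference, a minimal sketch of what the end of the model would look like after both changes, assuming valence_labels is a 1-D array of 0/1 labels (the earlier layers stay as they are):

model.add(keras.layers.Dense(1000, activation='relu'))
model.add(keras.layers.Dropout(rate=0.5))
# final layer: one sigmoid unit, with no Dropout after it
model.add(keras.layers.Dense(1, activation='sigmoid'))

model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001, rho=0.9),
              loss='binary_crossentropy',
              metrics=['accuracy'])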
Read this to learn TensorFlow.
Update 1:
Change
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.00001, momentum=0.9, nesterov=True),
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])
to
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-4),
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])
And change
accAll = []
for epoch in range(1, 50):
    model.fit(train_data, train_labels,
              batch_size=50, epochs=5,
              validation_data=(val_data, val_labels))
    val_loss, val_Accuracy = model.evaluate(val_data, val_labels, batch_size=1)
    accAll.append(val_Accuracy)
to
accAll = model.fit(
    train_data, train_labels,
    batch_size=50, epochs=20,
    validation_data=(val_data, val_labels)
)
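Note that model.fit already records the per-epoch validation metrics, so accAll here is a History object. A small follow-up sketch (the key may be 'val_acc' instead of 'val_accuracy' on older Keras/TensorFlow versions):

# accAll is a History object; fit() already records per-epoch validation accuracy
val_acc_per_epoch = accAll.history['val_accuracy']   # 'val_acc' on older versions
best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1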
Related
I've written a snippet to classify Omniglot images. I calculate the training and validation losses in each epoch, where the latter is computed using images that were not seen by the network before. The two plots are as below:
Since the training loss decreases while the validation loss increases, I have concluded that my model overfits. I've tried several suggestions (e.g. here) to overcome this, including:
Increasing the size of the training set.
Shuffling the data.
Adding dropout layers (up to p=0.9).
Using a smaller model.
Altering the architecture.
Changing the learning rate.
Reducing the batch size.
Adding weight decay (a sketch of how dropout and weight decay can be wired in appears right after this list).
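For concreteness, a hypothetical sketch of the dropout and weight-decay items; this is not the exact code used in the question, and the layer sizes simply follow the model shown below:

import torch.nn as nn
import torch.optim as optim

# hypothetical: dropout inserted between the fully connected layers of the model below
fc_head = nn.Sequential(
    nn.Linear(256, 170), nn.Softplus(beta=10), nn.Dropout(p=0.5),
    nn.Linear(170, 50), nn.Softplus(beta=10), nn.Dropout(p=0.5),
    nn.Linear(50, 964),
)

# weight decay is passed to the optimizer (an L2 penalty on the parameters)
optimizer = optim.Adam(fc_head.parameters(), lr=1e-3, weight_decay=1e-4)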
However, the validation loss still increases. I wonder if there are any other suggestions to improve this behavior or if this is not overfitting, but the problem is something else. Below is the snippet used in this question.
import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn, optim
from torch.utils.data import DataLoader
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        dim_out = 964

        # -- embedding params
        self.cn1 = nn.Conv2d(1, 16, 7)
        self.cn2 = nn.Conv2d(16, 32, 4)
        self.cn3 = nn.Conv2d(32, 64, 3)
        self.pool = nn.MaxPool2d(2)
        self.bn1 = nn.BatchNorm2d(16)
        self.bn2 = nn.BatchNorm2d(32)
        self.bn3 = nn.BatchNorm2d(64)

        # -- prediction params
        self.fc1 = nn.Linear(256, 170)
        self.fc2 = nn.Linear(170, 50)
        self.fc3 = nn.Linear(50, dim_out)

        # -- non-linearity
        self.relu = nn.ReLU()
        self.Beta = 10
        self.sopl = nn.Softplus(beta=self.Beta)

    def forward(self, x):
        y1 = self.pool(self.bn1(self.relu(self.cn1(x))))
        y2 = self.pool(self.bn2(self.relu(self.cn2(y1))))
        y3 = self.relu(self.bn3(self.cn3(y2)))
        y3 = y3.view(y3.size(0), -1)
        y5 = self.sopl(self.fc1(y3))
        y6 = self.sopl(self.fc2(y5))
        return self.fc3(y6)


class Train:
    def __init__(self):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # -- data
        dim = 28
        batch_size = 400
        my_transforms = transforms.Compose([transforms.Resize((dim, dim)), transforms.ToTensor()])
        trainset = torchvision.datasets.Omniglot(root="./data/omniglot_train/", download=False, transform=my_transforms)
        validset = torchvision.datasets.Omniglot(root="./data/omniglot_train/", background=False, download=False,
                                                 transform=my_transforms)
        self.TrainDataset = DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True)
        self.ValidDataset = DataLoader(dataset=validset, batch_size=len(validset), shuffle=False)
        self.N_train = len(trainset)
        self.N_valid = len(validset)

        # -- model
        self.model = MyModel().to(self.device)

        # -- train
        self.epochs = 3000
        self.loss = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)

    def train_epoch(self):
        self.model.train()
        train_loss = 0
        for batch_idx, data_batch in enumerate(self.TrainDataset):
            # -- predict
            predict = self.model(data_batch[0].to(self.device))
            # -- loss
            loss = self.loss(predict, data_batch[1].to(self.device))
            # -- optimize
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
        return train_loss / (batch_idx + 1)

    def valid_epoch(self):
        with torch.no_grad():
            self.model.eval()
            for data_batch in self.ValidDataset:
                # -- predict
                predict = self.model(data_batch[0].to(self.device))
                # -- loss
                loss = self.loss(predict, data_batch[1].to(self.device))
        return loss.item()

    def __call__(self):
        for epoch in range(self.epochs):
            train_loss = self.train_epoch()
            valid_loss = self.valid_epoch()
            print('Epoch {}: Training loss = {:.5f}, Validation loss = {:.5f}.'.format(epoch, train_loss, valid_loss))
        torch.save(self.model.state_dict(), './model_stat.pth')


if __name__ == '__main__':
    my_train = Train()
    my_train()
If your training accuracy is good but your testing accuracy (on data not used in training) is bad, then you have an overfitting problem. I had the same problem with a CNN model. You can use two methods to overcome overfitting: the first is early stopping during training, and the second is regularization. Check the example below:
# L2 regularizers for layers
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(32, 32)),
    keras.layers.Reshape(target_shape=(32, 32, 1)),
    keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu, use_bias=True,
                        kernel_regularizer=tf.keras.regularizers.l2(0.01)),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(10, activation='softmax', use_bias=True)
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

# Early stopping (callbacks expects a list)
history = model.fit(X_train, Y_train,
                    validation_data=(X_dev, Y_dev),
                    epochs=4000,
                    callbacks=[EarlyStopping(monitor='val_loss')])
Do not forget the import for early stopping:
from tensorflow.keras.callbacks import EarlyStopping
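If the validation loss is noisy, it can also help to give EarlyStopping some patience and to roll back to the best weights. A small sketch of that variant:

# wait a few epochs without improvement before stopping, then restore the best weights
early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
history = model.fit(X_train, Y_train,
                    validation_data=(X_dev, Y_dev),
                    epochs=4000,
                    callbacks=[early_stop])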
I built a neural network model in PyTorch for a simple regression problem (w1*x1 + w2*x2 + w3*x3 = y), where I generated 2000 training records with random values for x1, x2, x3 and w1=4, w2=6, w3=2. I created a test dataset of 20 records with just values for x1, x2, x3, hoping to get predictions for y. But the model returns the same value for all 20 input rows, and I don't know where the issue is. Below is the code snippet.
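For context, the creation of df and test_data is not shown in the question; a plausible sketch consistent with the description above (2000 training rows, 20 test rows, w1=4, w2=6, w3=2) might look like this:

# hypothetical data generation; the column names follow the snippet below
import numpy as np
import pandas as pd

np.random.seed(0)
X = np.random.uniform(0, 10, size=(2000, 3))
df = pd.DataFrame(X, columns=['x1', 'x2', 'x3'])
df['y'] = 4 * df['x1'] + 6 * df['x2'] + 2 * df['x3']

test_data = pd.DataFrame(np.random.uniform(0, 10, size=(20, 3)), columns=['x1', 'x2', 'x3'])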
inputs = df[['x1', 'x2', 'x3']]
target = df['y']
inputs = torch.tensor(inputs.values).float()
target = torch.tensor(target.values).float()
test_data = torch.tensor(test_data.values).float()
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        hidden1 = 10
        hidden2 = 15
        self.fc1 = nn.Linear(3, hidden1)
        self.fc2 = nn.Linear(hidden1, hidden2)
        self.fc3 = nn.Linear(hidden2, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# instantiate the model
model = Net()
print(model)

criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

model.train()
# epochs
epochs = 500
for x in range(epochs):
    # initialize the training loss to 0
    train_loss = 0
    # clear out gradients
    optimizer.zero_grad()
    # calculate the output
    output = model(inputs)
    # calculate loss
    loss = criterion(output, target)
    # backpropagate
    loss.backward()
    # update parameters
    optimizer.step()
    if ((x % 5) == 0):
        print('Training Loss after epoch {:2d} is {:2.6f}'.format(x, loss))

# set the model in evaluation mode
model.eval()

# Test the model on unseen data
test_output = model(test_data)
print(test_output)
I am trying an LSTM model on this dataset: https://www.kaggle.com/rtatman/speech-accent-archive
This is the model that I am working on:
def train_lstm_model(X_train, y_train, X_validation, y_validation, EPOCHS, batch_size=128):
    # Get row, column, and class sizes
    rows = X_train[0].shape[0]
    cols = X_train[0].shape[1]
    val_rows = X_validation[0].shape[0]
    val_cols = X_validation[0].shape[1]
    num_classes = len(y_train[0])
    input_shape = (rows, cols)

    X_train = X_train.reshape(X_train.shape[0], rows, cols)
    X_validation = X_validation.reshape(X_validation.shape[0], val_rows, val_cols)

    lstm = Sequential()
    lstm.add(LSTM(64, return_sequences=True, stateful=False, input_shape=input_shape, activation='tanh'))
    lstm.add(LSTM(64, return_sequences=True, stateful=False, activation='tanh'))
    lstm.add(LSTM(64, stateful=False, activation='tanh'))

    # add dropout to control for overfitting
    lstm.add(Dropout(.25))

    # squash output onto number of classes in probability space
    lstm.add(Dense(num_classes, activation='softmax'))

    # adam = optimizers.adam(lr=0.0001)
    rmsprop = optimizers.adam(lr=0.002)
    lstm.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=["accuracy"])

    es = EarlyStopping(monitor='acc', min_delta=.005, patience=10, verbose=1, mode='auto')

    # Creates log file for graphical interpretation using TensorBoard
    tb = TensorBoard(log_dir=LOG_DIR, histogram_freq=0, batch_size=32, write_graph=True, write_grads=True,
                     write_images=True, embeddings_freq=0, embeddings_layer_names=None,
                     embeddings_metadata=None)

    lstm.fit(X_train, y_train, batch_size=batch_size,
             epochs=EPOCHS, validation_data=(X_validation, y_validation),
             callbacks=[es, tb])

    return lstm
And when I run it for 15 epochs, I get this loss curve for the validation data. https://imgur.com/a/hB4uK
And this is the accuracy on validation data.
https://imgur.com/a/9knGD
This is the training accuracy: https://imgur.com/a/HBfgF
And this is the training loss: https://imgur.com/a/JRdQ9
I've only used three classes from the dataset.
Any suggestions on what I can improve in the model?
These are the steps I followed (a rough sketch of steps 2-4 is shown after this list):
1. Read the wav files [only reading 90 samples per class].
2. Calculate the mel-spectrogram.
3. Split the mel-spectrogram into segments [this gives around 11k samples].
4. Normalize the mel-spectrogram.
5. Feed it into the network.
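For reference, a rough sketch of steps 2-4, assuming librosa; the exact file names, hop length, segment width, and normalization scheme used in the question are not stated, so the values below are placeholders:

import librosa
import numpy as np

signal, sr = librosa.load('speaker.wav', sr=22050)   # hypothetical file name
mel = librosa.feature.melspectrogram(y=signal, sr=sr, n_mels=128)
mel_db = librosa.power_to_db(mel, ref=np.max)

# split into fixed-width segments along the time axis (segment width is a placeholder)
seg_len = 128
segments = [mel_db[:, i:i + seg_len]
            for i in range(0, mel_db.shape[1] - seg_len + 1, seg_len)]

# per-segment min-max normalization
segments = [(s - s.min()) / (s.max() - s.min() + 1e-8) for s in segments]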
I clearly don't understand something (this is my first Keras toy example).
My inputs are x and y: x is a 1-D vector of real values and y is a scalar.
I want to predict whether y is positive or negative. One way is to encode it as one-hot and use categorical_crossentropy (which works); the other is a custom loss function that does the same thing (which doesn't work).
I'm training on 8 examples and checking that I can overfit. My custom loss function gets stuck at 0.56.
Here's the code:
import keras.backend as K

def custom_cross_entrophy(y_true, y_pred):
    '''expected return'''
    return -(K.log(y_pred[:, 0]) * K.cast(y_true <= 0, dtype='float32')
             + K.log(y_pred[:, 1]) * K.cast(y_true > 0, dtype='float32'))

def build_model(x_dim, unites, loss_fuc):
    model = Sequential()
    model.add(Dense(
        units=unites,
        activation='relu',
        input_shape=(x_dim,),
        # return_sequences=True
    ))
    model.add(Dense(units=2))
    model.add(Activation("softmax"))

    start = time.time()
    model.compile(loss=loss_fuc, optimizer="adam")
    print("Compilation Time : ", time.time() - start)
    return model
Now build and run the model with the custom loss:
model = build_model(X_train.shape[1], 20, custom_cross_entrophy)
model.fit(X_train, y_train,
          batch_size=8, epochs=10000,
          validation_split=0., verbose=0)
print model.evaluate(X_train, y_train, verbose=1)

# assert my custom_cross_entrophy behaves like categorical_crossentropy
pred = model.predict(X)
y_onehot = np.zeros((len(K.eval(y_true)), 2))
for i in range(len(K.eval(y_true))):
    y_onehot[i, int(K.eval(y_true)[i] > 0)] = 1

print K.eval(custom_cross_entrophy(K.variable(y_train), K.variable(pred)))
print K.eval(categorical_crossentropy(K.variable(y_onehot), K.variable(pred)))
output:
('Compilation Time : ', 0.06212186813354492)
8/8 [==============================] - 0s 52ms/step
0.562335193157
[ 1.38629234 0.28766826 1.38613474 0.28766349 0.28740349 0.28795806
0.28766707 0.28768104]
[ 1.38629234 0.28766826 1.38613474 0.28766349 0.28740349 0.28795806
0.28766707 0.28768104]
Now do the same with the Keras loss:
model = build_model(X_train.shape[1], 20, categorical_crossentropy)
model.fit(X_train, y_onehot,
          batch_size=8, epochs=10000,
          validation_split=0., verbose=0)
print model.evaluate(X_train, y_onehot, verbose=1)
output:
('Compilation Time : ', 0.04332709312438965)
8/8 [==============================] - 0s 34ms/step
4.22694138251e-05
How is this possible? The losses should be the same mathematically.
Thanks!
Off the top of my head, I'd say you're running two different evaluations:
print model.evaluate(X_train, y_train, verbose=1)
# ...
print model.evaluate(X_train, y, verbose=1)
but I don't know what's in y and y_train, so you might need to expand a bit more on what you're doing and how you're splitting the data.
Try and run:
print model.evaluate(X_train, y_onehot, verbose=1)
to see if it was just a typo.
Cheers
I implemented highway networks with Keras and with Lasagne, and the Keras version consistently underperforms the Lasagne version. I am using the same dataset and hyperparameters in both of them. Here is the Keras version's code:
X_train, y_train, X_test, y_test, X_all = hacking_script.load_all_data()

data_dim = 144
layer_count = 32
dropout = 0.04
hidden_units = 32
nb_epoch = 10

model = Sequential()
model.add(Dense(hidden_units, input_dim=data_dim))
model.add(Dropout(dropout))
for index in range(layer_count):
    model.add(Highway(activation='relu'))
    model.add(Dropout(dropout))
model.add(Dropout(dropout))
model.add(Dense(2, activation='softmax'))

print 'compiling...'
model.compile(loss='binary_crossentropy', optimizer='adagrad')
model.fit(X_train, y_train, batch_size=100, nb_epoch=nb_epoch,
          show_accuracy=True, validation_data=(X_test, y_test), shuffle=True, verbose=0)

predictions = model.predict_proba(X_test)
And here is the Lasagne version's code:
class MultiplicativeGatingLayer(MergeLayer):
    def __init__(self, gate, input1, input2, **kwargs):
        incomings = [gate, input1, input2]
        super(MultiplicativeGatingLayer, self).__init__(incomings, **kwargs)
        assert gate.output_shape == input1.output_shape == input2.output_shape

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]

    def get_output_for(self, inputs, **kwargs):
        return inputs[0] * inputs[1] + (1 - inputs[0]) * inputs[2]

def highway_dense(incoming, Wh=Orthogonal(), bh=Constant(0.0),
                  Wt=Orthogonal(), bt=Constant(-4.0),
                  nonlinearity=rectify, **kwargs):
    num_inputs = int(np.prod(incoming.output_shape[1:]))

    l_h = DenseLayer(incoming, num_units=num_inputs, W=Wh, b=bh, nonlinearity=nonlinearity)
    l_t = DenseLayer(incoming, num_units=num_inputs, W=Wt, b=bt, nonlinearity=sigmoid)

    return MultiplicativeGatingLayer(gate=l_t, input1=l_h, input2=incoming)

# ==== Parameters ====
num_features = X_train.shape[1]
epochs = 10
hidden_layers = 32
hidden_units = 32
dropout_p = 0.04

# ==== Defining the neural network shape ====
l_in = InputLayer(shape=(None, num_features))
l_hidden1 = DenseLayer(l_in, num_units=hidden_units)
l_hidden2 = DropoutLayer(l_hidden1, p=dropout_p)
l_current = l_hidden2
for k in range(hidden_layers - 1):
    l_current = highway_dense(l_current)
    l_current = DropoutLayer(l_current, p=dropout_p)
l_dropout = DropoutLayer(l_current, p=dropout_p)
l_out = DenseLayer(l_dropout, num_units=2, nonlinearity=softmax)

# ==== Neural network definition ====
net1 = NeuralNet(layers=l_out,
                 update=adadelta, update_rho=0.95, update_learning_rate=1.0,
                 objective_loss_function=categorical_crossentropy,
                 train_split=TrainSplit(eval_size=0), verbose=0, max_epochs=1)

net1.fit(X_train, y_train)
predictions = net1.predict_proba(X_test)[:, 1]
Now the Keras version barely outperforms logistic regression, while the Lasagne version is the best-scoring algorithm so far. Any ideas as to why?
Here are some suggestions (I'm not sure if they will actually close the performance gap you are observing):
According to the Keras documentation, the Highway layer is initialized with Glorot uniform weights, while in your Lasagne code you are using orthogonal weight initialization. Unless you set the weight initialization to orthogonal for the Keras Highway layer somewhere else in your code, this could be a source of the performance gap.
It also seems like you are using Adagrad for your Keras model, but Adadelta for your Lasagne model.
Also, I am not 100% sure about this, but you may also want to verify that your transform bias terms are initialized the same way (the Lasagne code sets bt=Constant(-4.0)).
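To make the comparison closer to apples-to-apples, one could try aligning those settings on the Keras side. A sketch only: it assumes an old-style Keras Highway layer that accepts init and transform_bias arguments, which you should verify against your version's documentation, and it reuses layer_count, dropout, and model from your snippet above.

# assumed arguments: init and transform_bias may not exist in every Keras version
for index in range(layer_count):
    model.add(Highway(activation='relu', init='orthogonal', transform_bias=-4))
    model.add(Dropout(dropout))

# match the Lasagne optimizer (Adadelta with rho=0.95, learning rate 1.0)
from keras.optimizers import Adadelta
model.compile(loss='binary_crossentropy', optimizer=Adadelta(lr=1.0, rho=0.95))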