Model Doesn't Learn When Used With Grid Search - deep-learning

I have built two models. The first works fine; its code is:
model = Sequential()
model.add(Dense(25, input_dim=8, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(25, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(25, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(1, activation='tanh'))
opt = SGD(lr=0.01, momentum=0.9)
model.compile(loss='mean_squared_error', optimizer=opt)
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=1000, verbose=1)
train_mse = model.evaluate(X_train, y_train, verbose=0)
test_mse = model.evaluate(X_test, y_test, verbose=0)
I believe the next model below, implemented using grid search, is identical, except that it doesn't seem to learn: the loss starts off astronomical and barely decreases. The code is:
def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(25, input_dim=8, activation='relu', kernel_initializer='he_uniform'))
    classifier.add(Dense(25, activation='relu', kernel_initializer='he_uniform'))
    classifier.add(Dense(25, activation='relu', kernel_initializer='he_uniform'))
    classifier.add(Dense(1, activation='tanh'))
    opt = SGD(lr=0.01, momentum=0.9)
    classifier.compile(loss='mean_squared_error', optimizer=opt, metrics=["accuracy"])
    return classifier

classifier = KerasClassifier(build_fn=build_classifier)
parameters = {'epochs': [1000]}
grid_search = GridSearchCV(estimator=classifier,
                           param_grid=parameters,
                           cv=5)
grid_search = grid_search.fit(X_train, y_train)
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_
I am aware that I am not actually tuning any hyperparameters here. I kept reducing and removing the hyperparameters I was tuning, in the hope that I could identify the problem.
I have categorical cross-entropy models using the same data that do not have this problem (making sure the model is reasonable with a train/test split, then rebuilding it with grid search to tune hyperparameters).
I cannot see what difference between the two models would allow the first to learn reasonably while the second does not work at all.
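For reference, here is a minimal sketch of the same setup wrapped with KerasRegressor rather than KerasClassifier. This is only an assumption worth checking, not a confirmed cause: the target here is continuous and trained with MSE, whereas the classifier wrapper treats y as class labels.

from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV

# Sketch (assumption, not a confirmed fix): use the regression wrapper and an
# MSE-based scorer, since the model above is a regression model.
regressor = KerasRegressor(build_fn=build_classifier)
parameters = {'epochs': [1000]}
grid_search = GridSearchCV(estimator=regressor,
                           param_grid=parameters,
                           scoring='neg_mean_squared_error',
                           cv=5)
grid_search = grid_search.fit(X_train, y_train)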


GAN: generate regression output from the real image, not from random noise

Is it possible to implement this concept with the GAN algorithm?
I want the GAN to generate a regression output (G-Value) of shape (4,) from the real image, not from random noise, and to discriminate the G-Value against the real regression value (R-Value) of the same shape (4,). The R-Value comes from the "y_train" dataset.
That is, if an image has a pattern such as a circle, it generally has four features: position x, y, z, and alpha. I call these the real values (R-Values), and I want the GAN to generate fake values (G-Values) that fool the discriminator.
I have tried to implement it as below.
class UTModel:
    def __init__(self):
        optimizer__ = Adam(2e-4)
        self.__dropout = .3
        self.optimizerGenerator = Adam(1e-4)
        self.optimizerDiscriminator = Adam(1e-4)
        self.generator, self.discriminator = self.build()

    def build(self):
        # build the generator
        g = Sequential()
        g.add(Conv2D(512, kernel_size=3, strides=2, input_shape=(128, 128, 1), padding='same'))
        g.add(BatchNormalization(momentum=0.8))
        g.add(LeakyReLU(alpha=0.2))
        g.add(Dropout(self.__dropout))
        g.add(Conv2D(256, kernel_size=3, strides=2, padding='same'))
        g.add(BatchNormalization(momentum=0.8))
        g.add(LeakyReLU(alpha=0.2))
        g.add(Dropout(self.__dropout))
        g.add(Conv2D(128, kernel_size=3, strides=2, padding='same'))
        g.add(BatchNormalization(momentum=0.8))
        g.add(LeakyReLU(alpha=0.2))
        g.add(Dropout(self.__dropout))
        g.add(Conv2D(64, kernel_size=3, strides=1, padding='same'))
        g.add(BatchNormalization(momentum=0.8))
        g.add(LeakyReLU(alpha=0.2))
        g.add(Dropout(self.__dropout))
        g.add(Flatten())
        g.add(Dense(4, activation='linear'))

        # build the discriminator
        d = Sequential()
        d.add(Dense(128, input_shape=(4,)))
        d.add(LeakyReLU(alpha=0.2))
        d.add(Dropout(self.__dropout))
        d.add(Dense(64))
        d.add(LeakyReLU(alpha=0.2))
        d.add(Dropout(self.__dropout))
        d.add(Dense(64))
        d.add(LeakyReLU(alpha=0.2))
        d.add(Dropout(self.__dropout))
        d.add(Dense(32))
        d.add(LeakyReLU(alpha=0.2))
        d.add(Dropout(self.__dropout))
        d.add(Dense(1, activation='sigmoid'))
        return g, d

    def computeLosses(self, rValid, fValid):
        bce = BinaryCrossentropy(from_logits=True)
        # Discriminator loss
        rLoss = bce(tf.ones_like(rValid), rValid)
        fLoss = bce(tf.zeros_like(fValid), fValid)
        dLoss = rLoss + fLoss
        # Generator loss
        gLoss = bce(tf.zeros_like(fValid), fValid)
        return dLoss, gLoss

    def train(self, images, rValues):
        with tf.GradientTape() as gTape, tf.GradientTape() as dTape:
            gValues = self.generator(images, training=True)
            rValid = self.discriminator(rValues, training=True)
            fValid = self.discriminator(gValues, training=True)
            dLoss, gLoss = self.computeLosses(rValid, fValid)
        dGradients = dTape.gradient(dLoss, self.discriminator.trainable_variables)
        gGradients = gTape.gradient(gLoss, self.generator.trainable_variables)
        self.optimizerDiscriminator.apply_gradients(zip(dGradients, self.discriminator.trainable_variables))
        self.optimizerGenerator.apply_gradients(zip(gGradients, self.generator.trainable_variables))
        print(dLoss, gLoss)
class UTTrainer:
    def __init__(self):
        self.env = 3DPatterns()
        self.model = UTModel()

    def start(self):
        if not self.env.available:
            return
        batch = 32
        for epoch in range(1):
            # set new episode
            while self.env.setEpisod():
                for i in range(0, self.env.episodelen, batch):
                    self.model.train(self.env.episode[i:i+batch], self.env.y[i:i+batch])
But the G-Values are not generated as valid values; they always converge to 1 or -1. The proper values should look like [-0.192798, 0.212887, -0.034519, -0.015000]. Please help me find the right way.
Thank you.
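For comparison, the conventional (non-saturating) GAN losses target ones for the generator's fake validity scores, and from_logits should match whether the discriminator's last layer already applies a sigmoid. Here is a minimal sketch of that standard formulation, offered as a reference point rather than a confirmed fix for the code above:

import tensorflow as tf
from tensorflow.keras.losses import BinaryCrossentropy

# Standard GAN losses, assuming the discriminator ends in a sigmoid (from_logits=False).
bce = BinaryCrossentropy(from_logits=False)

def discriminator_loss(r_valid, f_valid):
    r_loss = bce(tf.ones_like(r_valid), r_valid)   # real R-Values -> label 1
    f_loss = bce(tf.zeros_like(f_valid), f_valid)  # generated G-Values -> label 0
    return r_loss + f_loss

def generator_loss(f_valid):
    # The generator is rewarded when its fakes are judged real (label 1).
    return bce(tf.ones_like(f_valid), f_valid)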

A3C with LSTM using keras

I'm trying to implement an A3C model with LSTM using Keras. I began with this version of A3C without LSTM: "https://github.com/coreylynch/async-rl", and tried to modify only the network code, but I am struggling to compile the whole model.
Am I missing something?
This is my model:
state = tf.placeholder("float", [None, agent_history_length, resized_width, resized_height])
vision_model = Sequential()
vision_model.add(Conv2D(activation="relu", filters=16, kernel_size=(8, 8), name="conv1", padding="same", strides=(4, 4),input_shape=(agent_history_length,resized_width, resized_height)))
vision_model.add(Conv2D(activation="relu", filters=32, kernel_size=(4, 4), name="conv2", padding="same", strides=(2, 2)))
vision_model.add(Flatten())
vision_model.add(Dense(activation="relu", units=256, name="h1"))
# Now let's get a tensor with the output of our vision model:
state_input = Input(shape=(1,agent_history_length,resized_width,resized_height))
encoded_frame_sequence = TimeDistributed(vision_model)(state_input)
encoded_video = LSTM(256)(encoded_frame_sequence) # the output will be a vector
action_probs = Dense(activation="softmax", units=4, name="p")(encoded_video)
state_value = Dense(activation="linear", units=1, name="v")(encoded_video)
policy_network = Model(inputs=state_input, outputs=action_probs)
value_network = Model(inputs=state_input, outputs=state_value)
p_params = policy_network.trainable_weights
v_params = value_network.trainable_weights
policy_network.summary()
value_network.summary()
p_out = policy_network(state_input)
v_out = value_network(state_input)
The keras-rl examples library does NOT support input shapes with more than 2 dimensions!
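For reference, here is a minimal sketch (illustrative sizes, channels-last data format assumed; not the question's exact configuration) of how a TimeDistributed CNN feeding an LSTM expects its shapes to line up:

from keras.layers import Input, Conv2D, Flatten, Dense, LSTM, TimeDistributed
from keras.models import Model, Sequential

timesteps, height, width, channels = 4, 84, 84, 1  # illustrative values

# Frame encoder applied to a single frame.
vision = Sequential()
vision.add(Conv2D(16, (8, 8), strides=(4, 4), activation='relu',
                  padding='same', input_shape=(height, width, channels)))
vision.add(Conv2D(32, (4, 4), strides=(2, 2), activation='relu', padding='same'))
vision.add(Flatten())
vision.add(Dense(256, activation='relu'))

# TimeDistributed applies the CNN to every frame, so the outer input needs an
# explicit time axis: (timesteps, height, width, channels).
frames = Input(shape=(timesteps, height, width, channels))
encoded = TimeDistributed(vision)(frames)
summary = LSTM(256)(encoded)
action_probs = Dense(4, activation='softmax', name='p')(summary)
state_value = Dense(1, activation='linear', name='v')(summary)
model = Model(inputs=frames, outputs=[action_probs, state_value])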

How to reshape my input to feed it into 1D Convolutional layer for sequence classification?

I have a CSV file with 339732 rows and 30 columns:
the first 29 columns being feature values, i.e. X
the last column being a binary label value, i.e. Y
dataframe = pd.read_csv("features.csv", header = None)
dataset = dataframe.values
X = dataset[:, 0:29].astype(float)
Y = dataset[:,29]
X_train, X_test, y_train, y_test = train_test_split(X, Y, random_state=42)
I am trying to train it on a 1D convolutional layer:
model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(X_train.shape[0], 29)))
model.add(Conv1D(64, 3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 3, activation='relu'))
model.add(Conv1D(128, 3, activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=16, epochs=2)
score = model.evaluate(X_test, y_test, batch_size=16)
Since the Conv1D layer expects a 3-D input, I transformed my input as follows:
X_train = np.reshape(X_train, (1, X_train.shape[0], X_train.shape[1]))
X_test = np.reshape(X_test, (1, X_test.shape[0], X_test.shape[1]))
However, this still throws an error:
ValueError: Negative dimension size caused by subtracting 3 from 1 for 'conv1d_1/convolution/Conv2D' (op: 'Conv2D') with input shapes: [?,1,1,29], [1,3,29,64].
Is there any way to feed my input correctly?
As far as I know, a 1D convolution layer accepts inputs of the form Batchsize x Width x Channels. You are reshaping with
X_train = np.reshape(X_train, (1, X_train.shape[0], X_train.shape[1]))
but X_train.shape[0] is your batch size, I guess. I think the problem is somewhere here. Can you please tell us what the shape of X_train is before the reshape?
You have to think about whether your data has some progression relation across the 339732 entries or across the 29 features, i.e. whether the order matters. If not, I don't think a CNN is suitable for this case.
If the 29 features "indicate the progression of something" (a fuller sketch follows below):
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
If the 29 features are independent, then they are like the channels of an image, but it doesn't make much sense to convolve over a width of only 1:
X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
If you want to treat the 339732 entries as blocks where the order matters (clip the 339732 entries or add zero padding so the count is divisible by timesteps):
X_train = X_train.reshape((int(X_train.shape[0]/timesteps), timesteps, X_train.shape[1], 1))
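To make the first option concrete, here is a minimal runnable sketch that treats each row as an ordered sequence of length 29 with one channel; the dummy data stands in for the question's X and Y, and the key changes are reshaping without a leading batch dimension and passing the per-sample shape (29, 1) as input_shape:

import numpy as np
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D, Dropout, Dense

# Dummy data with the question's row shape (replace with the real X and Y).
X = np.random.rand(1000, 29).astype('float32')
Y = np.random.randint(0, 2, size=(1000,))

# (samples, timesteps, channels): each row becomes a length-29 sequence with 1 channel.
X = X.reshape((X.shape[0], 29, 1))

model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(29, 1)))  # per-sample shape, no batch dim
model.add(Conv1D(64, 3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 3, activation='relu'))
model.add(Conv1D(128, 3, activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X, Y, batch_size=16, epochs=2)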

How to Implement "Multidirectional" LSTMs?

I'm trying to implement this LSTM Architecture from the paper "Dropout improves Recurrent Neural Networks for Handwriting Recognition":
In the paper, the researchers defined Multidirectional LSTM Layers as "Four LSTM layers applied in parallel, each with a particular scanning direction"
Here's how (I think) the network looks in Keras:
from keras.layers import LSTM, Dropout, Input, Convolution2D, Merge, Dense, Activation, TimeDistributed
from keras.models import Sequential

def build_lstm_dropout(inputdim, outputdim, return_sequences=True, activation='tanh'):
    net_input = Input(shape=(None, inputdim))
    model = Sequential()
    lstm = LSTM(output_dim=outputdim, return_sequences=return_sequences, activation=activation)(net_input)
    model.add(lstm)
    model.add(Dropout(0.5))
    return model

def build_conv(nb_filter, nb_row, nb_col, net_input, border_mode='relu'):
    return TimeDistributed(Convolution2D(nb_filter, nb_row, nb_col, border_mode=border_mode, activation='relu')(net_input))

def build_lstm_conv(lstm, conv):
    model = Sequential()
    model.add(lstm)
    model.add(conv)
    return model

def build_merged_lstm_conv_layer(lstm_conv, mode='concat'):
    return Merge([lstm_conv, lstm_conv, lstm_conv, lstm_conv], mode=mode)

def build_model(feature_dim, loss='ctc_cost_for_train', optimizer='Adadelta'):
    net_input = Input(shape=(1, feature_dim, None))

    lstm = build_lstm_dropout(2, 6)
    conv = build_conv(64, 2, 4, net_input)
    lstm_conv = build_lstm_conv(lstm, conv)
    first_layer = build_merged_lstm_conv_layer(lstm_conv)

    lstm = build_lstm_dropout(10, 20)
    conv = build_conv(128, 2, 4, net_input)
    lstm_conv = build_lstm_conv(lstm, conv)
    second_layer = build_merged_lstm_conv_layer(lstm_conv)

    lstm = build_lstm_dropout(50, 1)
    fully_connected = Dense(1, activation='sigmoid')
    lstm_fc = Sequential()
    lstm_fc.add(lstm)
    lstm_fc.add(fully_connected)
    third_layer = Merge([lstm_fc, lstm_fc, lstm_fc, lstm_fc], mode='concat')

    final_model = Sequential()
    final_model.add(first_layer)
    final_model.add(Activation('tanh'))
    final_model.add(second_layer)
    final_model.add(Activation('tanh'))
    final_model.add(third_layer)
    final_model.compile(loss=loss, optimizer=optimizer, sample_weight_mode='temporal')
    return final_model
And here are my questions:
If my implementation of the architecture is correct, how do you implement the scanning directions for the four LSTM layers?
If my implementation is not correct, is it possible to implement such an architecture in Keras? If not, are there any other frameworks that can help me implement such an architecture?
You can check this for the implementation of a bidirectional LSTM. Basically, you just set go_backwards=True for the backward LSTM.
However, in your case, you have to write a "mirror" + reshape layer to reverse the rows. A mirror layer can look like this (I am using a Lambda layer here for convenience): Lambda(lambda x: x[:,::-1,:])
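Here is a minimal sketch of the go_backwards + mirror idea on a single sequence axis (Keras 2 functional API, illustrative sizes). This only covers two of the four scanning directions; the remaining two would additionally require reversing or transposing the other spatial axis of the 2D feature map:

from keras.layers import Input, LSTM, Lambda, concatenate
from keras.models import Model

inputs = Input(shape=(None, 64))          # (timesteps, features); sizes are illustrative
mirror = Lambda(lambda x: x[:, ::-1, :])  # reverses the time axis

forward = LSTM(32, return_sequences=True)(inputs)
# go_backwards scans right-to-left and returns the sequence reversed,
# so mirror it to re-align with the forward branch before merging.
backward = mirror(LSTM(32, return_sequences=True, go_backwards=True)(inputs))

merged = concatenate([forward, backward])
model = Model(inputs=inputs, outputs=merged)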

Highway networks in keras and lasagne - significant performance difference

I implemented highway networks with Keras and with Lasagne, and the Keras version consistently underperforms the Lasagne version. I am using the same dataset and hyperparameters in both of them. Here is the Keras version's code:
X_train, y_train, X_test, y_test, X_all = hacking_script.load_all_data()
data_dim = 144
layer_count = 32
dropout = 0.04
hidden_units = 32
nb_epoch = 10

model = Sequential()
model.add(Dense(hidden_units, input_dim=data_dim))
model.add(Dropout(dropout))
for index in range(layer_count):
    model.add(Highway(activation='relu'))
    model.add(Dropout(dropout))
model.add(Dropout(dropout))
model.add(Dense(2, activation='softmax'))

print 'compiling...'
model.compile(loss='binary_crossentropy', optimizer='adagrad')
model.fit(X_train, y_train, batch_size=100, nb_epoch=nb_epoch,
          show_accuracy=True, validation_data=(X_test, y_test), shuffle=True, verbose=0)
predictions = model.predict_proba(X_test)
And here is the lasagne version's code:
class MultiplicativeGatingLayer(MergeLayer):
    def __init__(self, gate, input1, input2, **kwargs):
        incomings = [gate, input1, input2]
        super(MultiplicativeGatingLayer, self).__init__(incomings, **kwargs)
        assert gate.output_shape == input1.output_shape == input2.output_shape

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]

    def get_output_for(self, inputs, **kwargs):
        return inputs[0] * inputs[1] + (1 - inputs[0]) * inputs[2]

def highway_dense(incoming, Wh=Orthogonal(), bh=Constant(0.0),
                  Wt=Orthogonal(), bt=Constant(-4.0),
                  nonlinearity=rectify, **kwargs):
    num_inputs = int(np.prod(incoming.output_shape[1:]))
    l_h = DenseLayer(incoming, num_units=num_inputs, W=Wh, b=bh, nonlinearity=nonlinearity)
    l_t = DenseLayer(incoming, num_units=num_inputs, W=Wt, b=bt, nonlinearity=sigmoid)
    return MultiplicativeGatingLayer(gate=l_t, input1=l_h, input2=incoming)

# ==== Parameters ====
num_features = X_train.shape[1]
epochs = 10
hidden_layers = 32
hidden_units = 32
dropout_p = 0.04

# ==== Defining the neural network shape ====
l_in = InputLayer(shape=(None, num_features))
l_hidden1 = DenseLayer(l_in, num_units=hidden_units)
l_hidden2 = DropoutLayer(l_hidden1, p=dropout_p)
l_current = l_hidden2
for k in range(hidden_layers - 1):
    l_current = highway_dense(l_current)
    l_current = DropoutLayer(l_current, p=dropout_p)
l_dropout = DropoutLayer(l_current, p=dropout_p)
l_out = DenseLayer(l_dropout, num_units=2, nonlinearity=softmax)

# ==== Neural network definition ====
net1 = NeuralNet(layers=l_out,
                 update=adadelta, update_rho=0.95, update_learning_rate=1.0,
                 objective_loss_function=categorical_crossentropy,
                 train_split=TrainSplit(eval_size=0), verbose=0, max_epochs=1)
net1.fit(X_train, y_train)
predictions = net1.predict_proba(X_test)[:, 1]
Now the Keras version barely outperforms logistic regression, while the Lasagne version is the best-scoring algorithm so far. Any ideas as to why?
Here are some suggestions (I'm not sure if they will actually close the performance gap you are observing):
According to the Keras documentation, the Highway layer is initialized with Glorot uniform weights, while in your Lasagne code you are using orthogonal weight initialization. Unless you set the weight initialization to orthogonal for the Keras Highway layer elsewhere in your code, this could be a source of the performance gap.
It also seems like you are using Adagrad for your Keras model, but Adadelta for your Lasagne model.
Also, I am not 100% sure about this, but you may want to verify that your transform bias terms are initialized the same way (the Lasagne code sets bt=Constant(-4.0)).
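Along those lines, here is a sketch of nudging the Keras side toward the Lasagne configuration. The init and transform_bias argument names for the old Keras Highway layer are assumptions from memory and should be checked against the documentation of the Keras version in use:

from keras.models import Sequential
from keras.layers import Dense, Dropout, Highway

data_dim = 144
layer_count = 32
dropout = 0.04
hidden_units = 32

model = Sequential()
model.add(Dense(hidden_units, input_dim=data_dim, init='orthogonal'))
model.add(Dropout(dropout))
for index in range(layer_count):
    # NOTE: 'init' and 'transform_bias' are assumed argument names; verify them.
    model.add(Highway(activation='relu', init='orthogonal', transform_bias=-4.0))
    model.add(Dropout(dropout))
model.add(Dropout(dropout))
model.add(Dense(2, activation='softmax', init='orthogonal'))
# Match the Lasagne update rule (Adadelta) instead of Adagrad.
model.compile(loss='binary_crossentropy', optimizer='adadelta')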