Cosine_proximity loss function problem when using model.fit - deep-learning

I'm trying to run this RNN model, and I want to use the cosine_proximity loss function. I should say that I'm coding in Google Colab. Please help me figure out the problem.
Here is the source code of the RNN model:
import tensorflow as tf
from tensorflow import keras
from keras import Sequential
from keras.layers import LSTM, Dropout, Dense

model = Sequential()
model.add(LSTM(units=512, input_shape=X_train.shape[1:], activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=128, activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=64, activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(Dense(units=10, activation='relu'))
model.add(Dropout(0.2))
model.compile(loss="cosine_proximity", optimizer='sgd', metrics=['accuracy'])
print(model.summary())
model.fit(X_train, y_train, epochs=1, verbose=1)
This is what I get when I run the cell:
Model: "sequential_9"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_27 (LSTM) (None, 523, 512) 1052672
lstm_28 (LSTM) (None, 523, 128) 328192
lstm_29 (LSTM) (None, 523, 64) 49408
=================================================================
Total params: 1,430,272
Trainable params: 1,430,272
Non-trainable params: 0
_________________________________________________________________
None
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-44-fc8e0b2a4cd4> in <module>()
13 print(model.summary())
14
---> 15 model.fit(X_train, y_train, epochs=1, verbose=1)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 184, in __call__
self.build(y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 133, in build
self._losses = tf.nest.map_structure(self._get_loss_object, self._losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 272, in _get_loss_object
loss = losses_mod.get(loss)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 2369, in get
return deserialize(identifier)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 2328, in deserialize
printable_module_name='loss function')
File "/usr/local/lib/python3.7/dist-packages/keras/utils/generic_utils.py", line 710, in deserialize_keras_object
f'Unknown {printable_module_name}: {object_name}. Please ensure '
ValueError: Unknown loss function: cosine_proximity. Please ensure this object is passed to the `custom_objects` argument. See https://www.tensorflow.org/guide/keras/save_and_serialize#registering_the_custom_object for details.
Any help fixing this problem would be appreciated.
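The traceback paths point at a TF 2.x runtime, where the old cosine_proximity alias was removed and the built-in loss is registered as cosine_similarity. A minimal sketch of the compile call under that assumption:

model.compile(loss="cosine_similarity",  # renamed from "cosine_proximity" in TF 2.x
              optimizer='sgd', metrics=['accuracy'])
# or, equivalently, pass the loss object directly:
model.compile(loss=tf.keras.losses.CosineSimilarity(axis=-1),
              optimizer='sgd', metrics=['accuracy'])

Keep in mind this loss lies in [-1, 1] and more-similar vectors drive it toward -1, so a loss decreasing below zero is expected behavior.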

Related

Mask equal None when using TimeDistributed Tensorflow 2

I have a model with an encoding layer, and I want to apply TimeDistributed to that encoding layer. But when I run the model with TimeDistributed, it fails inside the encoding layer at mask = tf.cast(mask, dtype=self.dtype), which means my mask is always None when using the TimeDistributed layer. How can I apply TimeDistributed to my encoding layer?
The encoding layer code:
class Encoding(Layer):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def build(self, input_shape):
        self.encoding = self.add_weight(shape=(input_shape[-2], input_shape[-1]),
                                        trainable=True,
                                        initializer=tf.initializers.Ones(),
                                        constraint=None,
                                        dtype=self.dtype, name='encoding')
        super().build(input_shape)

    def call(self, inputs, mask=None):
        mask = tf.cast(mask, dtype=self.dtype)
        mask = tf.expand_dims(mask, axis=-1)
        return tf.reduce_sum(mask * self.encoding * inputs, axis=-2)

    def compute_mask(self, inputs, mask=None):
        if mask is None:
            return None
        return tf.reduce_any(mask, axis=-1)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
If I build the model with the encoding in it like this:
encoding = Encoding(name='encoded')
story_encoded = encoding(story_embedded)
query_encoded = encoding(query_embedded)
it runs without error. But if I apply TimeDistributed to my encoding:
encoding = Encoding(name='encoded')
story_encoded = TimeDistributed(encoding, name='story_encoding')(story_embedded)
query_encoded = TimeDistributed(encoding, name='query_encoding')(query_embedded)
it raises this error:
ValueError Traceback (most recent call last)
<ipython-input-15-6c1bbfdcab4e> in <module>()
42 shuffle=True,
43 callbacks=callbacks,
---> 44 verbose=1
45 )
46
1 frames
/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 859, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
ValueError: Exception encountered when calling layer "encoded" (type Encoding).
in user code:
File "<ipython-input-4-9d7376af8f80>", line 26, in call *
mask = tf.cast(mask, dtype=self.dtype)
ValueError: None values not supported.
Call arguments received:
• inputs=tf.Tensor(shape=(30, 3, 80), dtype=float32)
• mask=None
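A possible workaround, as a sketch under the assumption that TimeDistributed is simply not forwarding the embedding's mask to the wrapped layer: make call tolerate mask=None by falling back to an all-ones mask.

def call(self, inputs, mask=None):
    if mask is None:
        # Assumption: treat every position as valid when no mask arrives
        # (e.g. when TimeDistributed does not propagate it).
        mask = tf.ones(tf.shape(inputs)[:-1], dtype=self.dtype)
    else:
        mask = tf.cast(mask, dtype=self.dtype)
    mask = tf.expand_dims(mask, axis=-1)
    return tf.reduce_sum(mask * self.encoding * inputs, axis=-2)

Note this silently disables masking for padded positions, so if the padding actually matters, the mask has to be threaded through explicitly instead.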

Input 0 of layer "fc-3" is incompatible with the layer

I have tried to add an additional layer to my Keras model and I'm getting this error:
ValueError: in user code:

    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 878, in train_function  *
        return step_function(self, iterator)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 867, in step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in run_step  **
        outputs = model.train_step(data)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 808, in train_step
        y_pred = self(x, training=True)
    File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
        raise e.with_traceback(filtered_tb) from None

    ValueError: Exception encountered when calling layer "side_ncf" (type SideNCF).

    in user code:

        File "", line 42, in call  *
            fc_4 = self.dense3(fc_3)
        File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler  **
            raise e.with_traceback(filtered_tb) from None
        File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 248, in assert_input_compatibility
            f'Input {input_index} of layer "{layer_name}" is '

        ValueError: Input 0 of layer "fc-3" is incompatible with the layer: expected axis -1 of input shape to have value 50, but received input with shape (None, 25)

    Call arguments received:
      • inputs=('tf.Tensor(shape=(None, 4), dtype=float32)', 'tf.Tensor(shape=(None, 3), dtype=int64)')
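The final ValueError says a layer named "fc-3" was built to expect 50 input features but is being fed a tensor with only 25. A minimal hypothetical reproduction (not the asker's SideNCF code) of this class of mismatch:

import tensorflow as tf
from tensorflow.keras import layers

fc_3 = layers.Dense(10, name="fc-3")
fc_3.build((None, 50))   # the layer now expects 50 input features
h = tf.zeros((4, 25))    # but the upstream layer produced only 25
out = fc_3(h)            # ValueError: expected axis -1 ... to have value 50

The usual fix is to make the units of the preceding layer (or the total width of a concatenation feeding fc-3) match the size fc-3 was defined against.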

Convolutional Neural Network: float() argument must be a string or a number?

I want to train my data with a convolutional neural network (CNN). I start with reshaping my data, then creating my model:
model = Sequential()
input_traces = Input(shape=(3253,))
model.add(Convolution1D(nb_filter=32, filter_length=3, border_mode='same',
                        activation='relu', input_dim=input_traces))
model.add(MaxPooling1D(pool_length=2))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(x_train, y_train, batch_size=15, nb_epoch=30, show_accuracy=True,
          validation_data=(x_test, y_test))
But this code gives me this error:
CNN_Based_Attack.py:139: UserWarning: Update your `Conv1D` call to the Keras 2 API: `Conv1D(activation="relu", input_shape=(None, /in..., padding="same", filters=32, kernel_size=3)`
model.add(Convolution1D(nb_filter=32, filter_length=3, border_mode='same', activation='relu',input_dim=input_traces))
Traceback (most recent call last):
File "CNN_Based_Attack.py", line 139, in <module>
model.add(Convolution1D(nb_filter=32, filter_length=3, border_mode='same', activation='relu',input_dim=input_traces))
File "/home/.local/lib/python2.7/site-packages/keras/models.py", line 430, in add
layer(x)
File "/home/.local/lib/python2.7/site-packages/keras/engine/topology.py", line 557, in __call__
self.build(input_shapes[0])
File "/home/.local/lib/python2.7/site-packages/keras/layers/convolutional.py", line 134, in build
constraint=self.kernel_constraint)
File "/home/.local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 88, in wrapper
return func(*args, **kwargs)
File "/home/.local/lib/python2.7/site-packages/keras/engine/topology.py", line 390, in add_weight
weight = K.variable(initializer(shape), dtype=dtype, name=name)
File "/home/.local/lib/python2.7/site-packages/keras/initializers.py", line 200, in __call__
scale /= max(1., float(fan_in + fan_out) / 2)
TypeError: float() argument must be a string or a number
I really don't understand this error. Could you please help me?
This is not how you should use Input. Input is a layer in Keras, and the input_shape parameter of Convolution1D is supposed to be a tuple of integers, not an Input layer. That is the cause of the error: the weight initializer tries to convert those integers to float, but you passed an Input object, which cannot be cast to float.
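A minimal sketch of a corrected model, assuming the Keras 2 API (which the warning suggests updating to) and traces of length 3253 with a single channel:

from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dense

model = Sequential()
# Pass a plain shape tuple; Conv1D expects 3D input (batch, steps, channels),
# so each 3253-sample trace gets one channel.
model.add(Conv1D(filters=32, kernel_size=3, padding='same',
                 activation='relu', input_shape=(3253, 1)))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

x_train would then need a matching reshape, e.g. x_train = x_train.reshape(-1, 3253, 1).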

Getting dimension error while passing the features extracted by CNN to LSTM

I am doing video classification using deep learning in Keras. I have extracted features using the VGG16 model, whose shape is (7, 7, 512); I have around 55,000 images. I passed this to an LSTM layer but am getting a dimension error.
Here is the code:
print len(train_data)
print train_data.shape[1:]
print train_data.shape
model = Sequential()
model.add(LSTM(128,input_shape=train_data.shape[1:]))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(5, activation='softmax'))
Here is the output:
55936
(7, 7, 512)
(55936, 7, 7, 512)
Traceback (most recent call last):
File "train_rnn.py", line 135, in <module>
model.add(LSTM(128,input_shape=train_data.shape[1:]))
File "/usr/local/lib/python2.7/site-packages/keras/models.py", line 430, in add
layer(x)
File "/usr/local/lib/python2.7/site- packages/keras/layers/recurrent.py", line 257, in __call__
return super(Recurrent, self).__call__(inputs, **kwargs)
File "/usr/local/lib/python2.7/site-packages/keras/engine/topology.py", line 534, in __call__
self.assert_input_compatibility(inputs)
File "/usr/local/lib/python2.7/site-packages/keras/engine/topology.py", line 433, in assert_input_compatibility
str(K.ndim(x)))
ValueError: Input 0 is incompatible with layer lstm_1: expected ndim=3, found ndim=4
Input shapes (from the LSTM docs): a 3D tensor with shape (batch_size, timesteps, input_dim), plus (optional) 2D tensors with shape (batch_size, output_dim).
Your input is 4D (including the batch dimension). Try reshaping it to 3D:
train_data = train_data.reshape(train_data.shape[0],
                                train_data.shape[1] * train_data.shape[2],
                                train_data.shape[3])
Example (this will raise a ValueError):
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense

X = np.zeros((5, 7, 7, 512))
model = Sequential()
model.add(LSTM(128, input_shape=(7, 7, 512)))
model.add(Dense(1, activation='softmax'))
model.compile(loss='binary_crossentropy', optimizer='sgd')
but this will not:
X = np.zeros((5, 7, 7, 512))
X = X.reshape(5, 49, 512)
model = Sequential()
model.add(LSTM(128, input_shape=(49, 512)))
model.add(Dense(1, activation='softmax'))
model.compile(loss='binary_crossentropy', optimizer='sgd')

Keras fit_generator throwing ValueError

So I'm trying to create a generator to iterate through a data set for use in training with Keras's fit_generator. Here's the definition of the generator, the model, and the call to fit_generator:
import numpy as np
from queue import Queue, deque
from keras.models import Sequential
from keras.layers import Dense

num_features = 40
len_data = 100
data = np.random.rand(len_data, num_features)

def train_generator(train_idxs):
    while True:
        i = train_idxs.get(block=False)
        training_example = data[i, :]
        training_example.shape = (1, len(training_example))
        yield (training_example, training_example)

layer0_size = num_features
layer1_size = layer0_size / 2
layer2_size = layer1_size / 2

layers = []
layers.append(
    Dense(input_dim=layer0_size, output_dim=layer1_size, activation='relu'))
layers.append(
    Dense(input_dim=layer1_size, output_dim=layer2_size, activation='relu'))
layers.append(
    Dense(input_dim=layer2_size, output_dim=layer1_size, activation='relu'))
layers.append(
    Dense(input_dim=layer1_size, output_dim=layer0_size, activation='sigmoid'))

model = Sequential()
for layer in layers:
    model.add(layer)
model.compile(optimizer='adam', loss='binary_crossentropy')

train_idxs = Queue()
train_idxs.queue = deque(range(len_data))
train_gen = train_generator(train_idxs)

max_q_size = 2
model.fit_generator(train_gen, samples_per_epoch=len(data), max_q_size=max_q_size, nb_epoch=1)
Keras then successfully trains on 98/100 training examples and throws this error:
98/100 [============================>.] - ETA: 0s - loss: 0.6930Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
self.run()
File "/usr/lib/python3.5/threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 429, in data_generator_task
generator_output = next(self._generator)
File "scrap.py", line 12, in train_generator
i = train_idxs.get(block=False)
File "/usr/lib/python3.5/queue.py", line 161, in get
raise Empty
queue.Empty
Traceback (most recent call last):
File "scrap.py", line 43, in <module>
model.fit_generator(train_gen, samples_per_epoch=len(data), max_q_size=max_q_size, nb_epoch=1)
File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 935, in fit_generator
initial_epoch=initial_epoch)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1528, in fit_generator
str(generator_output))
ValueError: output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: None
It seems like what's happening is that it popped off all of the train_idxs and is still trying to get more until Keras exhausts the training examples in its internal queue. Is there a way to get it to stop trying to pull more training examples from the generator?
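For context on the behavior: fit_generator expects the generator to yield indefinitely, and with max_q_size=2 the background thread prefetches batches beyond the 100 the epoch actually consumes, so a drainable queue raises queue.Empty near the end. A minimal sketch (not from the original post) of a generator that cycles through the data instead of draining a queue:

def train_generator():
    # Yield forever; samples_per_epoch, not the generator, bounds the epoch.
    while True:
        for i in range(len_data):
            training_example = data[i:i + 1, :]  # slicing keeps the batch axis
            yield (training_example, training_example)

With this, the prefetch thread can always pull another batch, and epoch boundaries are set purely by samples_per_epoch.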