'NoneType' has no len() when concatenating 2 neural networks - deep-learning

I'm coding a neural network that has an image input and numeric inputs; the image is handled by a CNN and the numeric data by a DNN, then they are concatenated and processed by fully connected layers. Here is the code:
img_input = Input(shape = (99,4,4))
rr_input = Input(shape = (1))
price_input = Input(shape = (20))
legal_action_input = Input(shape = (9))
numeric_input = concatenate([rr_input,price_input, legal_action_input])
cnn = Conv2D(99, (4,4), activation='relu',padding='same', input_shape = (99,4,4))(img_input),
cnn = Dropout(0.2)(cnn)
cnn = Conv2D(99, (4,4), strides = (2,2), activation = 'relu', padding = 'same')(cnn)
cnn = Dropout(0.2)(cnn)
cnn = Conv2D(99, (4,4), strides = (2,2), activation = 'relu', padding = 'same')(cnn)
cnn = Dropout(0.2)(cnn)
cnn = Conv2D(99, (4,4), strides = (2,2), activation = 'relu', padding = 'same')(cnn)
cnn = MaxPooling3D(pool_size = (1,1,1),strides = 3)(cnn)
cnn = Dropout(0.2)(cnn)
cnn = Dense(64, activation = 'relu')(cnn)
cnn = Flatten()(cnn)
cnn = Model(inputs=img_input, outputs=cnn)
dnn = Dense(128, activation='relu', input_shape = (30,))(numeric_input)
dnn = Dropout(0.2)(dnn)
dnn = Dense(128, activation='relu')(dnn)
dnn = Dropout(0.2)(dnn)
dnn = Dense(64, activation='relu')(dnn)
#dnn = Flatten()(dnn)
dnn = Model(inputs=numeric_input, outputs=dnn)
merged = Concatenate()([cnn,dnn])
model = Dense(128, activation='relu')(merged)
model = Dropout(0.2)(model)
model = Dense(64, activation='relu')(model)
model = Dropout(0.2)(model)
model = Dense(9, activation='linear')(model)
model.compile(loss="mse", optimizer=Adam(args.lr))
but when I run it, the console prints:
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[7], line 28
     25 #dnn = Flatten()(dnn)
     26 dnn = Model(inputs=numeric_input, outputs=dnn)
---> 28 merged = Concatenate()([cnn,dnn])
     30 model = Dense(128, activation='relu')(merged)
     31 model = Dropout(0.2)(model)

File ~/miniconda3/lib/python3.9/site-packages/keras/utils/traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
     67 filtered_tb = _process_traceback_frames(e.__traceback__)
     68 # To get the full stack trace, call:
     69 # tf.debugging.disable_traceback_filtering()
---> 70 raise e.with_traceback(filtered_tb) from None
     71 finally:
     72   del filtered_tb

File ~/miniconda3/lib/python3.9/site-packages/keras/layers/merging/concatenate.py:97, in Concatenate.build(self, input_shape)
     94 @tf_utils.shape_type_conversion
     95 def build(self, input_shape):
     96     # Used purely for shape validation.
---> 97     if len(input_shape) < 1 or not isinstance(input_shape[0], tuple):
     98         raise ValueError(
     99             "A Concatenate layer should be called on a list of "
    100             f"at least 1 input. Received: input_shape={input_shape}"
    101         )
    102     if all(shape is None for shape in input_shape):

TypeError: object of type 'NoneType' has no len()
Where is the error?
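For what it's worth, the traceback points at the merge: Concatenate() is being called on two Model objects (cnn and dnn after the Model(...) lines) instead of on tensors, so Keras cannot infer an input shape, input_shape arrives as None, and len(None) raises the TypeError. Two smaller problems sit nearby: the trailing comma after the first Conv2D call turns cnn into a tuple, and compile is called on the output tensor of the last Dense rather than on a Model. A minimal sketch of the rewiring, reusing the names above (a sketch under those assumptions, not a verified drop-in fix):

# Drop the two intermediate Model(...) wrappers so cnn and dnn stay tensors,
# and remove the stray comma after the first Conv2D call.
merged = Concatenate()([cnn, dnn])
x = Dense(128, activation='relu')(merged)
x = Dropout(0.2)(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.2)(x)
out = Dense(9, activation='linear')(x)

# The Model must span all four Input layers; compile is a Model method.
model = Model(inputs=[img_input, rr_input, price_input, legal_action_input],
              outputs=out)
model.compile(loss="mse", optimizer=Adam(args.lr))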

Related

Binary_classification_using_BERT_Pytorch: KeyError

I am trying to modify a multi-label classification network into a binary classifier using the PyTorch library and the BERT model from Hugging Face. I am getting a KeyError while training. I have been trying to debug it, but no luck so far.
Error:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3360 try:
-> 3361 return self._engine.get_loc(casted_key)
3362 except KeyError as err:
/opt/conda/lib/python3.7/site-packages/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
/opt/conda/lib/python3.7/site-packages/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item()
KeyError: 2047
The above exception was the direct cause of the following exception:
KeyError Traceback (most recent call last)
/tmp/ipykernel_1998/2908429342.py in <module>
78 LR = 1e-6
79
---> 80 train(model, df_train, df_val, LR, EPOCHS)
/tmp/ipykernel_1998/2908429342.py in train(model, train_data, val_data, learning_rate, epochs)
25 total_loss_train = 0
26
---> 27 for train_input, train_label in train_dataloader:
28
29 # train_label = train_label.to(device)
/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py in __next__(self)
528 if self._sampler_iter is None:
529 self._reset()
--> 530 data = self._next_data()
531 self._num_yielded += 1
532 if self._dataset_kind == _DatasetKind.Iterable and \
/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py in _next_data(self)
568 def _next_data(self):
569 index = self._next_index() # may raise StopIteration
--> 570 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
571 if self._pin_memory:
572 data = _utils.pin_memory.pin_memory(data)
/opt/conda/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
47 def fetch(self, possibly_batched_index):
48 if self.auto_collation:
---> 49 data = [self.dataset[idx] for idx in possibly_batched_index]
50 else:
51 data = self.dataset[possibly_batched_index]
/opt/conda/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py in <listcomp>(.0)
47 def fetch(self, possibly_batched_index):
48 if self.auto_collation:
---> 49 data = [self.dataset[idx] for idx in possibly_batched_index]
50 else:
51 data = self.dataset[possibly_batched_index]
/tmp/ipykernel_1998/3019722733.py in __getitem__(self, idx)
21
22 batch_texts = self.get_batch_texts(idx)
---> 23 batch_y = self.get_batch_labels(idx)
24
25 return batch_texts, batch_y
/tmp/ipykernel_1998/3019722733.py in get_batch_labels(self, idx)
13
14 def get_batch_labels(self, idx):
---> 15 return np.array(self.labels[idx])
16
17 def get_batch_texts(self, idx):
/opt/conda/lib/python3.7/site-packages/pandas/core/series.py in __getitem__(self, key)
940
941 elif key_is_scalar:
--> 942 return self._get_value(key)
943
944 if is_hashable(key):
/opt/conda/lib/python3.7/site-packages/pandas/core/series.py in _get_value(self, label, takeable)
1049
1050 # Similar to Index.get_value, but we do not fall back to positional
-> 1051 loc = self.index.get_loc(label)
1052 return self.index._get_values_for_loc(self, loc, label)
1053
/opt/conda/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3361 return self._engine.get_loc(casted_key)
3362 except KeyError as err:
-> 3363 raise KeyError(key) from err
3364
3365 if is_scalar(key) and isna(key) and not self.hasnans:
KeyError: 2047
Here is the dataset class:
class Dataset(torch.utils.data.Dataset):
    def __init__(self, df):
        self.labels = df['target']
        self.texts = [tokenizer(text, padding='max_length', max_length=512,
                                truncation=True, return_tensors="pt")
                      for text in df['text']]

    def classes(self):
        return self.labels

    def __len__(self):
        return len(self.labels)

    def get_batch_labels(self, idx):
        return np.array(self.labels[idx])

    def get_batch_texts(self, idx):
        return self.texts[idx]

    def __getitem__(self, idx):
        batch_texts = self.get_batch_texts(idx)
        batch_y = self.get_batch_labels(idx)
        return batch_texts, batch_y
And here are the model and training loop:
from torch import nn
from transformers import BertModel

class BertClassifier(nn.Module):
    def __init__(self, dropout=0.5):
        super(BertClassifier, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-cased')
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(768, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_id, mask):
        _, pooled_output = self.bert(input_ids=input_id, attention_mask=mask, return_dict=False)
        dropout_output = self.dropout(pooled_output)
        linear_output = self.linear(dropout_output)
        final_layer = self.sigmoid(linear_output)
        return final_layer
from torch.optim import Adam
from tqdm import tqdm

def train(model, train_data, val_data, learning_rate, epochs):
    train, val = Dataset(train_data), Dataset(val_data)
    train_dataloader = torch.utils.data.DataLoader(train, batch_size=2, shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val, batch_size=2)

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    criterion = nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=learning_rate)

    # if use_cuda:
    #     model = model.cuda()
    #     criterion = criterion.cuda()

    for epoch_num in range(epochs):
        total_acc_train = 0
        total_loss_train = 0
        for train_input, train_label in tqdm(train_dataloader):
            # train_label = train_label.to(device)
            mask = train_input['attention_mask']
            input_id = train_input['input_ids'].squeeze(1)
            output = model(input_id, mask)
            train_label = train_label.unsqueeze(1)
            print(output)
            print(train_label)
            batch_loss = criterion(output, train_label.float())
            total_loss_train += batch_loss.item()
            acc = (output.argmax(dim=1) == train_label).sum().item()
            total_acc_train += acc
            model.zero_grad()
            batch_loss.backward()
            optimizer.step()

        total_acc_val = 0
        total_loss_val = 0
        with torch.no_grad():
            for val_input, val_label in tqdm(val_dataloader):
                val_label = val_label.to(device)
                mask = val_input['attention_mask']
                input_id = val_input['input_ids'].squeeze(1)
                output = model(input_id, mask)
                val_label = val_label.unsqueeze(1)
                batch_loss = criterion(output, val_label.float())
                total_loss_val += batch_loss.item()
                acc = (output.argmax(dim=1) == val_label).sum().item()
                total_acc_val += acc

        print(
            f'Epochs: {epoch_num + 1} | Train Loss: {total_loss_train / len(train_data): .3f} \
            | Train Accuracy: {total_acc_train / len(train_data): .3f} \
            | Val Loss: {total_loss_val / len(val_data): .3f} \
            | Val Accuracy: {total_acc_val / len(val_data): .3f}')

EPOCHS = 5
model = BertClassifier()
LR = 1e-6

train(model, df_train, df_val, LR, EPOCHS)
I have tried both a sigmoid at the final layer with CrossEntropyLoss and no sigmoid with BCELoss. Pardon me if this is a blunderous mistake, as I am a beginner and don't have much experience! :)
This KeyError is caused by your dataframe: its index is not contiguous, so a positional index like 2047 may simply not exist as an index label. You should call df.reset_index(drop=True) before instantiating Dataset, or add .to_list() in Dataset.__init__, like self.labels = df['target'].to_list().
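A minimal sketch of both fixes (df_train/df_val as in the question):

# Option 1: rebuild a contiguous 0..N-1 index after the train/val split
df_train = df_train.reset_index(drop=True)
df_val = df_val.reset_index(drop=True)

# Option 2: inside Dataset.__init__, store a plain list, which is indexed
# positionally rather than by pandas index label
self.labels = df['target'].to_list()

On the side question about losses: nn.CrossEntropyLoss expects raw logits over two or more classes, so with a single sigmoid output the usual pairing is nn.BCELoss, or nn.BCEWithLogitsLoss with the final Sigmoid removed.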

What causes the model to be unable to predict using other data?

I am using ResNet50 here to create a regression model. I ran into a problem when I wanted to test the model using other data. The length of the dataset is 2050. I separate it into training and testing data: 1500 samples as training data and the remaining 550 as test data. During training I had good results and could predict quite accurately, but when I test the model on the testing data, the prediction results are bad.
Below is the model loss result:
The code:
Insole = pd.read_csv('1119_Rwalk40s1_list.txt', header=None, low_memory=False)
SIData = np.asarray(Insole)
df = pd.read_csv('1119_Rwalk40s1.csv', low_memory=False)
columns = ['Fx','Fy','Fz','Mx','My','Mz']
selected_df = df[columns]
FCDatas = selected_df[:2050]
SmartInsole = np.array(SIData)
FCData = np.array(FCDatas)
xX = SmartInsole
yY = FCData
scaler_x = MinMaxScaler(feature_range=(0, 1))
scaler_x.fit(xX)
xscale = scaler_x.transform(xX)
scaler_y = MinMaxScaler(feature_range=(0, 1))
scaler_y.fit(yY)
yscale = scaler_y.transform(yY)
SIDataPCA = xscale
pca = PCA(n_components=12)
pca.fit(SIDataPCA)
SIdata_pca = pca.transform(SIDataPCA)
#For Training
trainX = SIdata_pca[:1500]
trainY = yscale[:1500]
#For Testing
testX = SIdata_pca[1500:]
testY = yscale[1500:]
X_train, X_test, y_train, y_test = train_test_split(trainX, trainY, test_size=0.20, random_state=2)
Below is my ResNet model structure.
Below is the identity block:
def identity_block(input_tensor, units):
    x = layers.Dense(units)(input_tensor)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units)(x)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units)(x)
    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Below is dens_block:
def dens_block(input_tensor, units):
    x = layers.Dense(units)(input_tensor)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units)(x)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units)(x)
    shortcut = layers.Dense(units)(input_tensor)
    x = layers.add([x, shortcut])
    x = layers.Activation('relu')(x)
    return x
Resnet50 model:
def ResNet50Regression():
    Res_input = layers.Input(shape=(12,))
    width = 32

    x = dens_block(Res_input, width)
    x = identity_block(x, width)
    x = identity_block(x, width)

    x = dens_block(x, width)
    x = identity_block(x, width)
    x = identity_block(x, width)

    x = dens_block(x, width)
    x = identity_block(x, width)
    x = identity_block(x, width)

    x = layers.Dense(6, activation="sigmoid")(x)
    model = models.Model(inputs=Res_input, outputs=x)
    return model
model = ResNet50Regression()
model.compile(loss='mse',
              optimizer=Adam(),
              metrics=['mse'])
history = model.fit(X_train, y_train,
                    batch_size=32,
                    epochs=50,
                    validation_data=(X_test, y_test),
                    verbose=2)
model.save('Resnet50-1203.h5')
ypred = model.predict(trainX)

x = []
colors = ['red','green','brown','teal','gray','black','maroon','orange','purple']
colors2 = ['green','red','orange','black','maroon','teal','blue','gray','brown']
x = np.arange(0, 1500) * 40 / 1500

for i in range(0, 6):
    plt.figure(figsize=(15, 6))
    plt.plot(x, trainY[0:1500, i], color=colors[i])
    plt.plot(x, ypred[0:1500, i], markerfacecolor='none', color=colors2[i])
    plt.title('Result for ResNet Regression (Training Data)')
    plt.ylabel(columns[i])
    plt.xlabel('Time(s)')
    plt.legend(['FP Data', 'SI Prediction'], loc='best')
    # plt.savefig('Regression Result.png'[i])
    plt.show()
Code for testing the model using other data:
new_model = load_model('Resnet50-1203.h5')
model.evaluate(testX, testY)
Test_xX_model = new_model.predict(testX)

x = []
colors = ['red','green','brown','teal','gray','black','maroon','orange','purple']
colors2 = ['green','red','orange','black','maroon','teal','blue','gray','brown']
x = np.arange(0, 550) * 40 / 550

for i in range(0, 6):
    plt.figure(figsize=(15, 6))
    plt.plot(x, testY[0:550, i], color=colors[i])
    plt.plot(x, Test_xX_model[0:550, i], markerfacecolor='none', color=colors2[i])
    plt.title('Result for ResNet Regression (Testing Data)')
    plt.ylabel(columns[i])
    plt.xlabel('Time(s)')
    plt.legend(['FP Data', 'SI Prediction'], loc='best')
    # plt.savefig('Regression Result.png'[i])
    plt.show()
One of the training data prediction results:
One of the testing data prediction results:
What should I do in this case?
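A few things worth checking here (observations from the code above, not a confirmed fix). The MinMaxScaler and PCA are fit on all 2050 rows, so the preprocessing has already seen the test rows (leakage), while the model itself has only ever seen the first 1500 rows of what looks like a continuous recording; the final 550 rows may simply lie outside the range it learned. Also, model.evaluate(testX, testY) evaluates the in-memory model rather than the loaded new_model. A sketch of leakage-free preprocessing, reusing the names above:

# Fit the scalers and PCA on the training rows only, then apply everywhere.
scaler_x = MinMaxScaler(feature_range=(0, 1)).fit(xX[:1500])
xscale = scaler_x.transform(xX)
scaler_y = MinMaxScaler(feature_range=(0, 1)).fit(yY[:1500])
yscale = scaler_y.transform(yY)

pca = PCA(n_components=12).fit(xscale[:1500])
SIdata_pca = pca.transform(xscale)

# Evaluate the model that was actually loaded from disk.
new_model = load_model('Resnet50-1203.h5')
new_model.evaluate(SIdata_pca[1500:], yscale[1500:])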

ValueError: Error when checking input: expected dense_1_input to have 4 dimensions, but got array with shape (20593, 4, 1)

I am trying to follow sentdex's game AI bot tutorial (https://www.youtube.com/watch?v=G-KvpNGudLw), but instead of tflearn I am trying to use Keras for the same implementation.
Model Function
def neural_network_model(input_size):
    network = Sequential()
    network.add(Dense(units=128, activation='relu', kernel_initializer='uniform', input_shape=[None, input_size, 1]))
    network.add(Dropout(0.2))
    network.add(Dense(units=256, activation='relu', kernel_initializer='uniform'))
    network.add(Dropout(0.2))
    network.add(Dense(units=512, activation='relu', kernel_initializer='uniform'))
    network.add(Dropout(0.2))
    network.add(Dense(units=256, activation='relu', kernel_initializer='uniform'))
    network.add(Dropout(0.2))
    network.add(Dense(units=128, activation='relu', kernel_initializer='uniform'))
    network.add(Dropout(0.2))
    network.add(Dense(units=2, activation='softmax', kernel_initializer='uniform'))
    adam = optimizers.Adam(lr=LR, decay=0.0)
    network.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return network
Model Training Function
def train_model(training_data, model=False):
    X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]), 1)
    Y = [i[1] for i in training_data]
    if not model:
        model = neural_network_model(len(X[0]))
    model.fit(X, Y, epochs=5)
    return model
where the training data comes from:
def initial_population():
    training_data = []  # Observations and the move made; append only when score > 50
    scores = []
    accepted_scores = []
    for x in range(initial_games):
        score = 0
        game_memory = []
        prev_observation = []
        for x in range(goal_steps):
            action = random.randrange(0, 2)  # 0's and 1's
            observation, reward, done, info = env.step(action)
            if len(prev_observation) > 0:
                game_memory.append([prev_observation, action])
            prev_observation = observation
            score += reward
            if done:
                break
        if score >= score_requirement:
            accepted_scores.append(score)
            for data in game_memory:
                if data[1] == 1:
                    output = [0, 1]
                if data[1] == 0:
                    output = [1, 0]
                training_data.append([data[0], output])
        env.reset()
        scores.append(score)
    training_data_save = np.array(training_data)
    np.save('saved.npy', training_data_save)
    print('Average accepted score : ', mean(accepted_scores))
    print('Median accepted scores : ', median(accepted_scores))
    print(Counter(accepted_scores))
    return training_data

training_data = initial_population()
The error I am getting is in the title. I am new to deep learning and I don't have a good grasp yet on the reshaping part.
So after a bit of tweaking, I finally got the network to work. If anyone is interested, I fixed it by doing the following.
I changed the first Dense layer to:
network.add(Dense(units=128, activation='relu', kernel_initializer='uniform', input_dim=input_size))
and in the model training function, I changed the shape of the input to 2D instead of 3D:
def train_model(training_data, model=False):
    X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]))
    Y = np.array([i[1] for i in training_data])
    if not model:
        model = neural_network_model(len(X[0]))
    model.fit(X, Y, epochs=5)
    return model
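For anyone wondering why this works: a Dense layer only consumes a (batch, features) matrix, so input_dim = input_size declares one flat feature vector per sample. The trailing 1 axis in the original reshape follows tflearn's convolution-style convention, which Keras then tried to read as a 3-D sample. A quick sanity check (shapes taken from the error in the title):

X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]))
print(X.shape)  # now (20593, 4): 2-D, matching input_dim, instead of (20593, 4, 1)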

How to read data from csv file in tensorflow?

I want to read data from a CSV file in TensorFlow, so I've been trying out different ways of reading a CSV file with 2000 lines, each line having 93 features, and I hope to get one-hot values.
My dataset is like this:
the first column is the data of 93 features, and the second column is the label of 16 one-hot values.
This is my code:
import tensorflow as tf

# data_input = pd.read_csv('ans_string.csv')
# data_train = pd.read_csv('ans_result.csv')

x = tf.placeholder(tf.float32, [None, 93])
W = tf.Variable(tf.zeros([93, 16]))
b = tf.Variable(tf.zeros([16]))

sess = tf.InteractiveSession()
filename_queue = tf.train.string_input_producer(["dataset.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# _, csv_row = reader.read(filename_queue)
# data = tf.decode_csv(csv_row, record_defaults=rDefaults)

record_defaults_key = [[1]] * 93    # one default per feature column
record_defaults_value = [[1]] * 16  # one default per label column
list_result_key = tf.decode_csv(key, record_defaults=record_defaults_key)
list_result_value = tf.decode_csv(value, record_defaults=record_defaults_value)
features = tf.stack(list_result_key)
labels = tf.stack(list_result_value)

y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 16])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

with tf.Session() as sess:
    # something happened
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    tf.global_variables_initializer().run()
    for _ in range(1000):
        example, label = sess.run([features, labels])
        print(sess.run(example, label))
        sess.run(train_step, feed_dict={x: example, y_: label})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy.eval({x: example, y_: label})))
    coord.request_stop()
    coord.join(threads)
I want to train my model, but I got an error like this. How can I fix it?
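The error message itself did not survive the post, but one problem is visible in the code (an observation, not a guaranteed fix): the key returned by TextLineReader is just the string "filename:line_number", so tf.decode_csv(key, record_defaults=record_defaults_key) with 93 defaults cannot parse it; the whole CSV row, features and labels alike, lives in value. Assuming each line really holds 93 feature numbers followed by 16 one-hot label numbers, a sketch of the decoding would be:

# Decode the whole row from `value`, then slice features from labels.
record_defaults = [[1.0]] * (93 + 16)  # one default per column
columns = tf.decode_csv(value, record_defaults=record_defaults)
features = tf.stack(columns[:93])
labels = tf.stack(columns[93:])

Note that example and label fetched this way are single rows of shape (93,) and (16,); the [None, 93] placeholder expects a batch, so you would feed something like example.reshape(1, 93), or batch the reads with tf.train.batch.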

Keras Input layer (None, 200, 3): why is there a None? Input expected to have 3 dimensions, but got array with shape (200, 3)

The acc and gyro arrays in model.fit are (200 × 3), and the Input layer shape is (200, 3). Why is there such a problem: "Error when checking input: expected acc_input to have 3 dimensions, but got array with shape (200, 3)"? This is a visualization of my model.
Here's my code:
WIDE = 20
FEATURE_DIM = 30
CHANNEL = 1
CONV_NUM = 64
CONV_LEN = 3
CONV_LEN_INTE = 3#4
CONV_LEN_LAST = 3#5
CONV_NUM2 = 64
CONV_MERGE_LEN = 8
CONV_MERGE_LEN2 = 6
CONV_MERGE_LEN3 = 4
rnn_size=128
acc_input_tensor = Input(shape=(200,3),name = 'acc_input')
gyro_input_tensor = Input(shape=(200,3),name= 'gyro_input')
Acc_input_tensor = Reshape(target_shape=(20,30,1))(acc_input_tensor)
Gyro_input_tensor = Reshape(target_shape=(20,30,1))(gyro_input_tensor)
acc_conv1 = Conv2D(CONV_NUM,(1, 1*3*CONV_LEN),strides= (1,1*3),padding='valid',activation=None)(Acc_input_tensor)
acc_conv1 = BatchNormalization(axis=1)(acc_conv1)
acc_conv1 = Activation('relu')(acc_conv1)
acc_conv1 = Dropout(0.2)(acc_conv1)
acc_conv2 = Conv2D(CONV_NUM,(1,CONV_LEN_INTE),strides= (1,1),padding='valid',activation=None)(acc_conv1)
acc_conv2 = BatchNormalization(axis=1)(acc_conv2)
acc_conv2 = Activation('relu')(acc_conv2)
acc_conv2 = Dropout(0.2)(acc_conv2)
acc_conv3 = Conv2D(CONV_NUM,(1,CONV_LEN_LAST),strides=(1,1),padding='valid',activation=None)(acc_conv2)
acc_conv3 = BatchNormalization(axis=1)(acc_conv3)
acc_conv3 = Activation('relu')(acc_conv3)
acc_conv3 = Dropout(0.2)(acc_conv3)
gyro_conv1 = Conv2D(CONV_NUM,(1, 1*3*CONV_LEN),strides=(1,1*3),padding='valid',activation=None)(Gyro_input_tensor)
gyro_conv1 = BatchNormalization(axis=1)(gyro_conv1)
gyro_conv1 = Activation('relu')(gyro_conv1)
gyro_conv1 = Dropout(0.2)(gyro_conv1)
gyro_conv2 = Conv2D(CONV_NUM,(1, CONV_LEN_INTE),strides=(1,1),padding='valid',activation=None)(gyro_conv1)
gyro_conv2 = BatchNormalization(axis=1)(gyro_conv2)
gyro_conv2 = Activation('relu')(gyro_conv2)
gyro_conv2 = Dropout(0.2)(gyro_conv2)
gyro_conv3 = Conv2D(CONV_NUM,(1, CONV_LEN_LAST),strides=(1,1),padding='valid',activation=None)(gyro_conv2)
gyro_conv3 = BatchNormalization(axis=1)(gyro_conv3)
gyro_conv3 = Activation('relu')(gyro_conv3)
gyro_conv3 = Dropout(0.2)(gyro_conv3)
sensor_conv_in = concatenate([acc_conv3, gyro_conv3], 2)
sensor_conv_in = Dropout(0.2)(sensor_conv_in)
sensor_conv1 = Conv2D(CONV_NUM2,kernel_size=(2, CONV_MERGE_LEN),padding='SAME')(sensor_conv_in)
sensor_conv1 = BatchNormalization(axis=1)(sensor_conv1)
sensor_conv1 = Activation('relu')(sensor_conv1)
sensor_conv1 = Dropout(0.2)(sensor_conv1)
sensor_conv2 = Conv2D(CONV_NUM2,kernel_size=(2, CONV_MERGE_LEN2),padding='SAME')(sensor_conv1)
sensor_conv2 = BatchNormalization(axis=1)(sensor_conv2)
sensor_conv2 = Activation('relu')(sensor_conv2)
sensor_conv2 = Dropout(0.2)(sensor_conv2)
sensor_conv3 = Conv2D(CONV_NUM2,kernel_size=(2, CONV_MERGE_LEN3),padding='SAME')(sensor_conv2)
sensor_conv3 = BatchNormalization(axis=1)(sensor_conv3)
sensor_conv3 = Activation('relu')(sensor_conv3)
conv_shape = sensor_conv3.get_shape()
print(conv_shape)
x1 = Reshape(target_shape=(int(conv_shape[1]), int(conv_shape[2]*conv_shape[3])))(sensor_conv3)
x1 = Dense(64, activation='relu')(x1)
gru_1 = GRU(rnn_size, return_sequences=True, init='he_normal', name='gru1')(x1)
gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, init='he_normal', name='gru1_b')(x1)
gru1_merged = merge([gru_1, gru_1b], mode='sum')
gru_2 = GRU(rnn_size, return_sequences=True, init='he_normal', name='gru2')(gru1_merged)
gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, init='he_normal', name='gru2_b')(gru1_merged)
x = merge([gru_2, gru_2b], mode='concat')
x = Dropout(0.25)(x)
n_class=2
x = Dense(n_class)(x)
model = Model(input=[acc_input_tensor,gyro_input_tensor], output=x)
model.compile(loss='mean_squared_error',optimizer='adam')
model.fit(inputs=[acc, gyro], outputs=labels, batch_size=1,
          validation_split=0.2, epochs=2, verbose=1, shuffle=False)
Shape (None, 200, 3) is used in Keras for batches: None stands for the batch size, which may still be unknown at the time the input arrays are created or reshaped. If you use batch_size = 128, your batch input matrix will have shape (128, 200, 3).
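In other words, the model expects arrays of shape (n_samples, 200, 3), but a single (200, 3) sample is being passed. A minimal sketch of the usual fix (assuming acc and gyro hold one or more (200, 3) samples); note also that in the Keras versions I know, fit takes positional x and y arguments, not inputs=/outputs= keywords:

import numpy as np

acc = np.asarray(acc).reshape(-1, 200, 3)    # add the leading sample axis
gyro = np.asarray(gyro).reshape(-1, 200, 3)
model.fit([acc, gyro], labels, batch_size=1, validation_split=0.2,
          epochs=2, verbose=1, shuffle=False)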