RuntimeError: mat1 and mat2 shapes cannot be multiplied (25x340 and 360x1)

I get this error message and I'm not sure why. My input is (batch, 1, 312) from tabular data, and this CNN is built for a regression prediction. I worked out the shapes for each step with the formula (input + 2*padding - filter size)/stride + 1, as in the comments below. The problem appears to occur at x = self.fc(x) and I can't figure out why. Your help is greatly appreciated. Thank you.
class CNNWeather(nn.Module):
    # input (batch, 1, 312)
    def __init__(self):
        super(CNNWeather, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=8, kernel_size=9, stride=1, padding='valid')   # (312+2*0-9)/1 + 1 = 304
        self.pool1 = nn.AvgPool1d(kernel_size=2, stride=2)                                                 # 304/2 = 302
        self.conv2 = nn.Conv1d(in_channels=8, out_channels=12, kernel_size=3, stride=1, padding='valid')  # (302-3)/1+1 = 300
        self.pool2 = nn.AvgPool1d(kernel_size=2, stride=2)                                                 # 300/2 = 150
        self.conv3 = nn.Conv1d(in_channels=12, out_channels=16, kernel_size=3, stride=1, padding='valid') # (150-3)/1+1 = 76
        self.pool3 = nn.AvgPool1d(kernel_size=2, stride=2)                                                 # 76/2 = 38
        self.conv4 = nn.Conv1d(in_channels=16, out_channels=20, kernel_size=3, stride=1, padding='valid') # (38-3)/1+1 = 36
        self.pool4 = nn.AvgPool1d(kernel_size=2, stride=2)                                                 # 36/2 = 18  (batch, 20, 18)
        self.fc = nn.Linear(in_features=20*18, out_features=1)

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = self.pool3(F.relu(self.conv3(x)))
        x = self.pool4(F.relu(self.conv4(x)))
        print(x.size())
        x = x.view(x.size(0), -1)  # flatten (batch, 20*18)
        x = self.fc(x)
        return x

The problem seems to be related to the input size of your FC layer:
self.fc = nn.Linear(in_features=20*18, out_features=1)
The output of the previous layer is 340, so you must use in_features=340.
These are the shapes of the output for the third and fourth layers.
torch.Size([5, 16, 73]) conv3 out
torch.Size([5, 16, 36]) pool3 out
torch.Size([5, 20, 34]) conv4 out
torch.Size([5, 20, 17]) pool4 out
Notice that the "pool4" layer outputs 20x17, i.e. 340 elements.
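One way to avoid working this out by hand (a sketch, assuming the CNNWeather definition above) is to run a dummy input through the network once and read off the flattened size:

import torch
import torch.nn as nn

model = CNNWeather()
model.fc = nn.Identity()                 # temporarily bypass the linear layer
with torch.no_grad():
    out = model(torch.randn(1, 1, 312))  # dummy (batch, channels, length) input
print(out.shape)                         # torch.Size([1, 340])

With that number in hand, the layer becomes self.fc = nn.Linear(in_features=20*17, out_features=1). Recent PyTorch versions also offer nn.LazyLinear(out_features=1), which infers in_features on the first forward pass.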

Related

PyTorch: How to calculate output size of the CNN?

I went through this PyTorch CNN implementation available here: https://machinelearningknowledge.ai/pytorch-conv2d-explained-with-examples/
I am unable to understand how they replace the '?' with concrete values. What is the formula for calculating the output size of a CNN layer?
In PyTorch this has to be calculated by hand, unlike in TensorFlow/Keras. If there is another blog that explains this well, please drop it in the comments.
# Implementation of CNN/ConvNet Model
class CNN(torch.nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # L1 ImgIn shape=(?, 28, 28, 1)
        # Conv -> (?, 28, 28, 32)
        # Pool -> (?, 14, 14, 32)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),
            torch.nn.Dropout(p=1 - keep_prob))
        # L2 ImgIn shape=(?, 14, 14, 32)
        # Conv ->(?, 14, 14, 64)
        # Pool ->(?, 7, 7, 64)
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),
            torch.nn.Dropout(p=1 - keep_prob))
        # L3 ImgIn shape=(?, 7, 7, 64)
        # Conv ->(?, 7, 7, 128)
        # Pool ->(?, 4, 4, 128)
        self.layer3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
            torch.nn.Dropout(p=1 - keep_prob))
        # L4 FC 4x4x128 inputs -> 625 outputs
        self.fc1 = torch.nn.Linear(4 * 4 * 128, 625, bias=True)
        torch.nn.init.xavier_uniform(self.fc1.weight)
        self.layer4 = torch.nn.Sequential(
            self.fc1,
            torch.nn.ReLU(),
            torch.nn.Dropout(p=1 - keep_prob))
        # L5 Final FC 625 inputs -> 10 outputs
        self.fc2 = torch.nn.Linear(625, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc2.weight)  # initialize parameters

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = out.view(out.size(0), -1)  # Flatten them for FC
        out = self.fc1(out)
        out = self.fc2(out)
        return out

# instantiate CNN model
model = CNN()
model
Thanks!
I assume your calculation is wrong because:
PyTorch expects images in C * H * W format (e.g. 3x32x32, not 32x32x3).
The first dimension is always the batch dimension and must be omitted from the calculation, because all nn.Modules handle it by default.
So if you want to calculate the input size for the first Linear layer, you can use this trick:
conv = nn.Sequential(self.layer1, self.layer2, self.layer3, nn.Flatten())
out = conv(torch.randn(1, im_height, im_width).unsqueeze(0))
# fc_layer_in_channels = out.shape[1]
self.fc1 = torch.nn.Linear(out.shape[1], 625, bias=True)
but only if you know im_height and im_width.
The best practice is to use torch.nn.AdaptiveAvgPool2d.
With this layer you can always get an output of fixed spatial size.
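For illustration, a minimal sketch of that idea (the layer sizes and the 4x4 pooled output here are arbitrary choices, not taken from the model above):

import torch
import torch.nn as nn

conv = nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.AdaptiveAvgPool2d((4, 4)),   # output is always (batch, 32, 4, 4), whatever the input H and W
    nn.Flatten(),
)
print(conv(torch.randn(1, 1, 28, 28)).shape)   # torch.Size([1, 512])
print(conv(torch.randn(1, 1, 100, 70)).shape)  # torch.Size([1, 512])

The first fully connected layer can then safely be nn.Linear(32 * 4 * 4, 625), independent of the input image size.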

Neural Network cannot overfit even one sample

I am using a neural network for a regression task.
My input is a gray image of size 100x70x1.
The gray area has a single unique value, 60.
The input goes through a preprocessing layer, which multiplies every pixel value by 1./255.
My output is just three double numbers: [0.87077969, 0.98989031, 0.98888382].
I used a ResNet152 model, as shown below:
class Bottleneck(tf.keras.Model):
    expansion = 4

    def __init__(self, in_channels, out_channels, strides=1):
        super(Bottleneck, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(out_channels, 1, 1, use_bias=False)
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv2D(out_channels, 3, strides, padding="same", use_bias=False)
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.conv3 = tf.keras.layers.Conv2D(out_channels*self.expansion, 1, 1, use_bias=False)
        self.bn3 = tf.keras.layers.BatchNormalization()
        if strides != 1 or in_channels != self.expansion * out_channels:
            self.shortcut = tf.keras.Sequential([
                tf.keras.layers.Conv2D(self.expansion*out_channels, kernel_size=1,
                                       strides=strides, use_bias=False),
                tf.keras.layers.BatchNormalization()]
            )
        else:
            self.shortcut = lambda x, _: x

    def call(self, x, training=False):
        out = tf.nn.elu(self.bn1(self.conv1(x), training))
        out = tf.nn.elu(self.bn2(self.conv2(out), training))
        out = self.bn3(self.conv3(out), training)
        out += self.shortcut(x, training)
        return tf.nn.elu(out)


class ResNet(tf.keras.Model):
    def __init__(self, block, num_blocks):
        super(ResNet, self).__init__()
        self.in_channels = 64
        self.conv1 = tf.keras.layers.Conv2D(64, 7, 2, padding="same", use_bias=False)        # 60x60
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=2, padding='same')  # 30x30
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avg_pool2d = tf.keras.layers.GlobalAveragePooling2D()
        self.flatten = tf.keras.layers.Flatten()

    def _make_layer(self, block, out_channels, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels * block.expansion
        return tf.keras.Sequential(layers)

    def call(self, x, training=False):
        out = self.pool1(tf.nn.elu(self.bn1(self.conv1(x), training)))
        out = self.layer1(out, training=training)
        out = self.layer2(out, training=training)
        out = self.layer3(out, training=training)
        out = self.layer4(out, training=training)
        # For classification
        out = self.flatten(out)
        # out = tf.keras.layers.Reshape((out.shape[-1],))(out)
        # out = self.linear(out)
        return out

    def model(self):
        x = tf.keras.layers.Input(shape=(100, 70, 1))
        return tf.keras.Model(inputs=[x], outputs=self.call(x))


def ResNet152():
    return ResNet(Bottleneck, [3, 8, 36, 3])
I used elu as the activation function and replaced the GlobalAveragePooling layer at the end of the ResNet with a Flatten layer.
Before the output I stack two Dense layers (2048 units and 3 units) on top of the ResNet model.
For training I used the Adam optimizer with an initial learning rate of 1e-4, which is reduced by a factor of 10 when val_loss does not decrease for 3 epochs.
The loss is just the MSE error.
After early stopping, with the learning rate down to 1e-8, the MSE loss is still very high: 8.6225.
The prediction is [2.92318237, 5.53124916, 3.00686643], which is far from the ground truth [0.87077969, 0.98989031, 0.98888382].
I don't know why such a deep network cannot overfit even a single sample.
Is the reason that my input image carries too little information? Could someone help me?
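For reference, the training setup described above corresponds roughly to the following (a sketch reconstructed from the description; the head activation, the callback arguments, and the data variable names are assumptions):

import tensorflow as tf

backbone = ResNet152()
model = tf.keras.Sequential([
    tf.keras.layers.Rescaling(1./255, input_shape=(100, 70, 1)),  # preprocessing: scale pixels by 1/255
    backbone,
    tf.keras.layers.Dense(2048, activation='elu'),  # head activation is an assumption
    tf.keras.layers.Dense(3),                       # three regression targets
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss='mse')

callbacks = [
    tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3),
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10),  # patience is an assumption
]
# model.fit(x_train, y_train, validation_data=(x_val, y_val), callbacks=callbacks)  # placeholder data names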

RuntimeError: Expected 3-dimensional input for 3-dimensional weight [64, 512, 1], but got 2-dimensional input of size [4, 512] instead

Hello, below is the PyTorch model I am trying to run, but I am getting an error. I have posted the error trace as well. It was running fine until I added the convolution layers. I am still new to deep learning and PyTorch, so I apologize if this is a silly question. I am using conv1d, so why does conv1d expect a 3-dimensional input, and why is it getting a 2-dimensional input anyway, which also seems odd?
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(CROP_SIZE*CROP_SIZE*3, 512)
        self.conv1d1 = nn.Conv1d(in_channels=512, out_channels=64, kernel_size=1, stride=2)
        self.fc2 = nn.Linear(64, 128)
        self.conv1d2 = nn.Conv1d(in_channels=128, out_channels=64, kernel_size=1, stride=2)
        self.fc3 = nn.Linear(64, 256)
        self.conv1d3 = nn.Conv1d(in_channels=256, out_channels=64, kernel_size=1, stride=2)
        self.fc4 = nn.Linear(64, 256)
        self.fc4 = nn.Linear(256, 128)
        self.fc5 = nn.Linear(128, 64)
        self.fc6 = nn.Linear(64, 32)
        self.fc7 = nn.Linear(32, 64)
        self.fc8 = nn.Linear(64, frame['landmark_id'].nunique())

    def forward(self, x):
        x = F.relu(self.conv1d1(self.fc1(x)))
        x = F.relu(self.conv1d2(self.fc2(x)))
        x = F.relu(self.conv1d3(self.fc3(x)))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        x = self.fc8(x)
        return F.log_softmax(x, dim=1)


net = Net()

import torch.optim as optim

loss_function = nn.CrossEntropyLoss()
net.to(torch.device('cuda:0'))

for epoch in range(3):  # 3 full passes over the data
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    for data in tqdm(train_loader):  # `data` is a batch of data
        X = data['image'].to(device)      # X is the batch of features
        y = data['landmarks'].to(device)  # y is the batch of targets.
        optimizer.zero_grad()  # sets gradients to 0 before loss calc. You will do this likely every step.
        output = net(X.view(-1, CROP_SIZE*CROP_SIZE*3))  # pass in the reshaped batch
        # print(np.argmax(output))
        # print(y)
        loss = F.nll_loss(output, y)  # calc and grab the loss value
        loss.backward()   # apply this loss backwards thru the network's parameters
        optimizer.step()  # attempt to optimize weights to account for loss/gradients
    print(loss)  # print loss. We hope loss (a measure of wrong-ness) declines!
Error trace
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-42-f5ed7999ce57> in <module>
5 y = data['landmarks'].to(device) # y is the batch of targets.
6 optimizer.zero_grad() # sets gradients to 0 before loss calc. You will do this likely every step.
----> 7 output = net(X.view(-1,CROP_SIZE*CROP_SIZE*3)) # pass in the reshaped batch
8 # print(np.argmax(output))
9 # print(y)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
548 result = self._slow_forward(*input, **kwargs)
549 else:
--> 550 result = self.forward(*input, **kwargs)
551 for hook in self._forward_hooks.values():
552 hook_result = hook(self, input, result)
<ipython-input-37-6d3e34d425a0> in forward(self, x)
16
17 def forward(self, x):
---> 18 x = F.relu(self.conv1d1(self.fc1(x)))
19 x = F.relu(self.conv1d2(self.fc2(x)))
20 x = F.relu(self.conv1d3(self.fc3(x)))
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
548 result = self._slow_forward(*input, **kwargs)
549 else:
--> 550 result = self.forward(*input, **kwargs)
551 for hook in self._forward_hooks.values():
552 hook_result = hook(self, input, result)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/conv.py in forward(self, input)
210 _single(0), self.dilation, self.groups)
211 return F.conv1d(input, self.weight, self.bias, self.stride,
--> 212 self.padding, self.dilation, self.groups)
213
214
RuntimeError: Expected 3-dimensional input for 3-dimensional weight [64, 512, 1], but got 2-dimensional input of size [4, 512] instead
You should learn how convolutions work (e.g. see this answer) and some neural network basics (this tutorial from PyTorch).
Basically, Conv1d expects inputs of shape [batch, channels, features] (where features can be some timesteps and can vary, see example).
nn.Linear expects shape [batch, features] as it is fully connected and each input feature is connected to each output feature.
You can verify those shapes by yourself, for torch.nn.Linear:
import torch
layer = torch.nn.Linear(20, 10)
data = torch.randn(64, 20) # [batch, in_features]
layer(data).shape # [64, 10], [batch, out_features]
For Conv1d:
layer = torch.nn.Conv1d(in_channels=20, out_channels=10, kernel_size=3, padding=1)
data = torch.randn(64, 20, 15) # [batch, channels, timesteps]
layer(data).shape # [64, 10, 15], [batch, out_channels, timesteps]
layer(torch.randn(32, 20, 25)).shape # [32, 10, 25]
BTW. As you are working with images, you should use torch.nn.Conv2d instead.
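To illustrate the Conv2d suggestion (a minimal sketch; the 3-channel 64x64 image size is an arbitrary assumption, not taken from the question):

layer = torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, padding=1)
data = torch.randn(4, 3, 64, 64)  # [batch, channels, height, width]
layer(data).shape                 # [4, 16, 64, 64], [batch, out_channels, height, width]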
Most PyTorch functions work on batched data, i.e. they accept input of size (batch_size, shape). @Szymon Maszke has already posted an answer related to that.
So in your case, you can use the unsqueeze and squeeze functions to add and remove the extra dimension.
Here's the sample code:
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(100, 512)
        self.conv1d1 = nn.Conv1d(in_channels=512, out_channels=64, kernel_size=1, stride=2)
        self.fc2 = nn.Linear(64, 128)

    def forward(self, x):
        x = self.fc1(x)
        x = x.unsqueeze(dim=2)    # (batch, 512) -> (batch, 512, 1), so Conv1d sees [batch, channels, length]
        x = F.relu(self.conv1d1(x))
        x = x.squeeze()           # (batch, 64, 1) -> (batch, 64)
        x = self.fc2(x)
        return x


net = Net()
bsize = 4
inp = torch.randn((bsize, 100))
out = net(inp)
print(out.shape)

Tensorflow CNN shape mismatch

def load_data(data_path, batch_size, num_workers=2):
    t_m = transforms.Compose(
        [transforms.Grayscale(num_output_channels=1),
         transforms.Resize((400, 400)),
         transforms.ToTensor(),
         # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    dataset = torchvision.datasets.ImageFolder(root=data_path, transform=t_m)
    # print (np.shape(dataset))

    # split
    train, test = torch.utils.data.random_split(dataset, [int(len(dataset) * 0.7), len(dataset) - int(len(dataset) * 0.7)])
    trainloader = torch.utils.data.DataLoader(train, batch_size=batch_size,
                                              shuffle=True, num_workers=num_workers, drop_last=True)
    testloader = torch.utils.data.DataLoader(test, batch_size=batch_size,
                                             shuffle=False, num_workers=num_workers, drop_last=False)
    return dataset, trainloader, testloader


import torch.nn as nn

model = torch.nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=5, padding=2),
    nn.MaxPool2d(2, 2),
    nn.Conv2d(32, 64, kernel_size=5, padding=2),
    nn.MaxPool2d(2, 2),
    nn.Linear(7 * 7 * 64, 1000),
    nn.Linear(1000, 600),
    nn.Linear(600, 200),
    nn.Linear(200, 10)
)

# Training
total_epochs = 5
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adadelta(model.parameters())

for epoch in tqdm(range(total_epochs)):
    # initialize
    batch_count = 0
    gc.collect()
    loop_loss = 0.0
    for img in (trainloader):
        input_, label_ = img
        # print (input_.shape)
        out = model(input_)
        out = nn.functional.relu(out)
        loss = criterion(out, label_)
        loss.backward()
        optimizer.zero_grad()
        optimizer.step()
        loop_loss = loop_loss + loss.item()
        batch_count = batch_count + 1
        print('batch_loss: ', str(loss.item()))
    print('Epochs completed:', epoch+1, '\n')
    print('epoch_loss = ' + loop_loss/float(batch_count))
size mismatch, m1: [25600 x 100], m2: [3136 x 1000] at /pytorch/aten/src/TH/generic/THTensorMath.cpp:41
Please explain where the shapes went wrong. How should I fix this?
I am new to this, so this might not be a good question, but any detail would help.
The input images are resized to 400x400 and converted from RGB to grayscale.
Your problem is in the first Linear layer. Always structure your code like this so that you can debug it yourself:
class MyModel(nn.Module):
    def __init__(self, params):
        super(MyModel, self).__init__()
        self.conv1 = nn.Conv2d(...)
        self.fc = nn.Linear(...)

    def forward(self, x):
        x = self.conv1(x)
        import pdb; pdb.set_trace()
        x = self.fc(x)
        return x
This way you can put the pdb breakpoint wherever you want and check the shapes with x.shape. Your problem is a mismatch between the shape of the conv layers' output and the expected input of your first Linear layer.
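As a concrete sketch (my own calculation, assuming the 400x400 grayscale input from the question): with kernel_size=5 and padding=2 each conv keeps the spatial size, and each MaxPool2d(2, 2) halves it, so the tensor entering the first Linear layer is 64 x 100 x 100 per sample. The Sequential model also needs an explicit flatten step between the conv and linear parts, for example:

model = torch.nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=5, padding=2),   # (1, 400, 400) -> (32, 400, 400)
    nn.MaxPool2d(2, 2),                           # -> (32, 200, 200)
    nn.Conv2d(32, 64, kernel_size=5, padding=2),  # -> (64, 200, 200)
    nn.MaxPool2d(2, 2),                           # -> (64, 100, 100)
    nn.Flatten(),                                 # -> 64 * 100 * 100 = 640000 features per sample
    nn.Linear(64 * 100 * 100, 1000),
    nn.Linear(1000, 600),
    nn.Linear(600, 200),
    nn.Linear(200, 10)
)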

Custom max_pool layer: ValueError: The channel dimension of the inputs should be defined. Found `None`

I am working with TensorFlow 2 and I am trying to implement max unpooling with indices in order to implement SegNet.
When I run it I get the error in the title. I define MaxUnpooling2D and then call it in the model. I suppose the problem is caused by the fact that updates and mask have shape (None, H, W, ch).
def MaxUnpooling2D(updates, mask):
    size = 2
    mask = tf.cast(mask, 'int32')
    input_shape = tf.shape(updates, out_type='int32')
    # calculation new shape
    output_shape = (
        input_shape[0],
        input_shape[1]*size,
        input_shape[2]*size,
        input_shape[3])
    # calculation indices for batch, height, width and feature maps
    one_like_mask = tf.ones_like(mask, dtype='int32')
    batch_shape = tf.concat(
        [[input_shape[0]], [1], [1], [1]],
        axis=0)
    batch_range = tf.reshape(
        tf.range(output_shape[0], dtype='int32'),
        shape=batch_shape)
    b = one_like_mask * batch_range
    y = mask // (output_shape[2] * output_shape[3])
    x = (mask // output_shape[3]) % output_shape[2]
    feature_range = tf.range(output_shape[3], dtype='int32')
    f = one_like_mask * feature_range
    updates_size = tf.size(updates)
    indices = K.transpose(K.reshape(
        tf.stack([b, y, x, f]),
        [4, updates_size]))
    values = tf.reshape(updates, [updates_size])
    return tf.scatter_nd(indices, values, output_shape)
def segnet_conv(
        inputs,
        kernel_size=3,
        kernel_initializer='glorot_uniform',
        batch_norm=False,
        **kwargs):
    conv1 = Conv2D(
        filters=64,
        kernel_size=kernel_size,
        padding='same',
        activation=None,
        kernel_initializer=kernel_initializer,
        name='conv_1'
    )(inputs)
    if batch_norm:
        conv1 = BatchNormalization(name='bn_1')(conv1)
    conv1 = LeakyReLU(alpha=0.3, name='activation_1')(conv1)
    conv1 = Conv2D(
        filters=64,
        kernel_size=kernel_size,
        padding='same',
        activation=None,
        kernel_initializer=kernel_initializer,
        name='conv_2'
    )(conv1)
    if batch_norm:
        conv1 = BatchNormalization(name='bn_2')(conv1)
    conv1 = LeakyReLU(alpha=0.3, name='activation_2')(conv1)
    pool1, mask1 = tf.nn.max_pool_with_argmax(
        input=conv1,
        ksize=2,
        strides=2,
        padding='SAME'
    )
def segnet_deconv(
        pool1,
        mask1,
        kernel_size=3,
        kernel_initializer='glorot_uniform',
        batch_norm=False,
        **kwargs
        ):
    dec = MaxUnpooling2D(pool5, mask5)
    dec = Conv2D(
        filters=512,
        kernel_size=kernel_size,
        padding='same',
        activation=None,
        kernel_initializer=kernel_initializer,
        name='upconv_13'
    )(dec)
def classifier(
        dec,
        ch_out=2,
        kernel_size=3,
        final_activation=None,
        batch_norm=False,
        **kwargs
        ):
    dec = Conv2D(
        filters=64,
        kernel_size=kernel_size,
        activation='relu',
        padding='same',
        name='dec_out1'
    )(dec)
#tf.function
def segnet(
        inputs,
        ch_out=2,
        kernel_size=3,
        kernel_initializer='glorot_uniform',
        final_activation=None,
        batch_norm=False,
        **kwargs
        ):
    pool5, mask1, mask2, mask3, mask4, mask5 = segnet_conv(
        inputs,
        kernel_size=3,
        kernel_initializer='glorot_uniform',
        batch_norm=False
    )
    dec = segnet_deconv(
        pool5,
        mask1,
        mask2,
        mask3,
        mask4,
        mask5,
        kernel_size=kernel_size,
        kernel_initializer=kernel_initializer,
        batch_norm=batch_norm
    )
    output = classifier(
        dec,
        ch_out=2,
        kernel_size=3,
        final_activation=None,
        batch_norm=batch_norm
    )
    return output


inputs = Input(shape=(*params['image_size'], params['num_channels']), name='input')
outputs = segnet(inputs, n_labels=2, kernel=3, pool_size=(2, 2), output_mode=None)
# we define our U-Net to output logits
model = Model(inputs, outputs)
Can you please help me with this problem?
I have solved the problem. In case someone needs it, here is the code for MaxUnpooling2D:
def MaxUnpooling2D(pool, ind, output_shape, batch_size, name=None):
    """
    Unpooling layer after max_pool_with_argmax.
    Args:
        pool: max pooled output tensor
        ind: argmax indices
        ksize: ksize is the same as for the pool
    Return:
        unpool: unpooling tensor
    :param batch_size:
    """
    with tf.compat.v1.variable_scope(name):
        pool_ = tf.reshape(pool, [-1])
        batch_range = tf.reshape(tf.range(batch_size, dtype=ind.dtype), [tf.shape(pool)[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [-1, 1])
        ind_ = tf.reshape(ind, [-1, 1])
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=[batch_size, output_shape[1] * output_shape[2] * output_shape[3]])
        # The reason we use tf.scatter_nd: if we use tf.sparse_tensor_to_dense, then the gradient is None,
        # which will cut off the network. But if we use tf.scatter_nd, the gradients for all the trainable
        # variables will be tensors instead of None.
        # The usage of tf.scatter_nd is: create a new tensor by applying sparse UPDATES (the pooling values)
        # to individual values or slices within a zero tensor of the given shape (FLAT_OUTPUT_SHAPE) according
        # to the indices (ind_). If we use the original code, the only thing we need to change is: switch from
        # tf.sparse_tensor_to_dense(sparse_tensor) to tf.sparse_add(tf.zeros((output_shape)), sparse_tensor),
        # which will give us the gradients!
        ret = tf.reshape(ret, [tf.shape(pool)[0], output_shape[1], output_shape[2], output_shape[3]])
        return ret
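A hypothetical usage sketch (the shapes, batch size, and scope name here are illustrative assumptions): the indices come from tf.nn.max_pool_with_argmax, and output_shape is the shape of the tensor before pooling:

conv = tf.random.normal([8, 64, 64, 32])  # [batch, H, W, ch] before pooling
pool, ind = tf.nn.max_pool_with_argmax(conv, ksize=2, strides=2, padding='SAME')
unpooled = MaxUnpooling2D(pool, ind,
                          output_shape=tf.shape(conv),  # [8, 64, 64, 32]
                          batch_size=8,
                          name='unpool_1')
print(unpooled.shape)  # (8, 64, 64, 32)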