Printing only the first weights of a neural network - deep-learning

I have a model (a VGG16, but that is not important). I want to inspect only some of the network's parameters, for example the first ones.
To do this I call list(model.parameters()), and it prints all the parameters.
Now, considering that a VGG has this shape:
VGG16(
  (block_1): Sequential(
    (0): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
    (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (5): ReLU()
    (6): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  ...
If I want only the weights of the first convolution, I run list(model.block_1[0].parameters()) and it prints this:
[Parameter containing:
tensor([[[[-0.3215, -0.0771,  0.4429],
          [-0.6455, -0.0827, -0.4266],
          [-0.2029, -0.2288,  0.1696]]],
        [[[ 0.5323, -0.2418, -0.1031],
          [ 0.5917,  0.2669, -0.5630],
          [ 0.3064, -0.4984, -0.1288]]],
        [[[ 0.3804,  0.0906, -0.2116],
          [ 0.2659, -0.3325, -0.1873],
          [-0.5044,  0.0900,  0.1386]]],
Now, these lists are always enormous. How can I print only the first values, for example, the first matrix?
[[[[-0.3215, -0.0771,  0.4429],
   [-0.6455, -0.0827, -0.4266],
   [-0.2029, -0.2288,  0.1696]]]

You can treat it like a NumPy array once you have pulled out the underlying tensor. In your example, this should work:
from torchvision import models
model = models.vgg16()
first_param = list(model.features[0].parameters())[0].data
first_param will then hold the tensor:
tensor([[[[-0.3215, -0.0771,  0.4429],
          [-0.6455, -0.0827, -0.4266],
          [-0.2029, -0.2288,  0.1696]]],
        [[[ 0.5323, -0.2418, -0.1031],
          [ 0.5917,  0.2669, -0.5630],
          [ 0.3064, -0.4984, -0.1288]]],
        [[[ 0.3804,  0.0906, -0.2116],
          [ 0.2659, -0.3325, -0.1873],
          [-0.5044,  0.0900,  0.1386]]]
Then just index it as you would a NumPy array:
print(first_param[0])
>> tensor([[[[-0.3215, -0.0771, 0.4429],
[-0.6455, -0.0827, -0.4266],
[-0.2029, -0.2288, 0.1696]]])
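As an additional sketch (assuming the torchvision VGG16 from above), you can also index the layer's weight attribute directly instead of going through parameters():

from torchvision import models

model = models.vgg16()
conv0 = model.features[0]        # first Conv2d layer of the feature extractor
first_kernel = conv0.weight[0]   # first output filter of that layer
print(first_kernel.detach())     # .detach() drops the autograd bookkeeping before printing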

You can slice TensorFlow tensors with the same syntax as Python lists. For example:
import tensorflow as tf

tensor = tf.constant([[[[-0.3215, -0.0771,  0.4429],
                        [-0.6455, -0.0827, -0.4266],
                        [-0.2029, -0.2288,  0.1696]]],
                      [[[ 0.5323, -0.2418, -0.1031],
                        [ 0.5917,  0.2669, -0.5630],
                        [ 0.3064, -0.4984, -0.1288]]],
                      [[[ 0.3804,  0.0906, -0.2116],
                        [ 0.2659, -0.3325, -0.1873],
                        [-0.5044,  0.0900,  0.1386]]]])
print(tensor[0, :])
This will give you the first matrix from your example, together with related shape information. If you want to get rid of this shape information, you could, for instance, convert the sliced tensor into a numpy array with print(np.array(tensor[0, :])).
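A minimal sketch of that last step (assuming TensorFlow 2.x eager execution, where tensors expose a .numpy() method):

import numpy as np

first = tensor[0, :]      # still a tf.Tensor, printed with dtype/shape metadata
print(first.numpy())      # plain NumPy array, no extra metadata
# equivalently: print(np.array(first))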

Related

Changing Learning Rate According to Layer Width in PyTorch

I am trying to train a network where the learning rate for each layer scales with 1/(layer width). Is there a way to do this in PyTorch? I tried changing the learning rate in the optimizer and including it in my training loop, but that didn't work. I've seen some people talk about this with Adam, but I am using SGD to train. Here are the chunks where I defined my model and training, if that's any help.
class ConvNet2(nn.Module):
    def __init__(self):
        super(ConvNet2, self).__init__()
        self.network = nn.Sequential(
            nn.Conv2d(3, 8, 3),
            nn.ReLU(),
            nn.Conv2d(8, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(800, 10)
        )

    def forward(self, x):
        return self.network(x)
net2 = ConvNet2().to(device)
def train(network, number_of_epochs):
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(network.parameters(), lr=learning_rate)
    for epoch in range(number_of_epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(trainloader):
            # get the inputs
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = network(inputs)
            loss = criterion(outputs, labels)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = network(inputs)
            loss.backward()
            optimizer.step()
In the documentation you can see that you can specify "per-parameter options". Assuming you only want to specify the learning rate for the Conv2d layers (this is easily customizable in the code below) you could do something like this:
import torch
from torch import nn
from torch import optim
from pprint import pprint
class ConvNet2(nn.Module):
    def __init__(self):
        super(ConvNet2, self).__init__()
        self.network = nn.Sequential(
            nn.Conv2d(3, 8, 3),
            nn.ReLU(),
            nn.Conv2d(8, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(800, 10)
        )

    def forward(self, x):
        return self.network(x)
net2 = ConvNet2()
def getParameters(model):
    getWidthConv2D = lambda layer: layer.out_channels
    parameters = []
    for layer in model.children():
        paramdict = {'params': layer.parameters()}
        if isinstance(layer, nn.Conv2d):
            paramdict['lr'] = getWidthConv2D(layer) * 0.1  # specify the learning rate for Conv2d here
        parameters.append(paramdict)
    return parameters
optimizer = optim.SGD(getParameters(net2.network), lr=0.05)
print(optimizer)
You can do that by passing the relevant parameters with associated learning rates.
optimizer = optim.SGD(
    [
        {"params": network.layer[0].parameters(), "lr": 1e-1},
        {"params": network.layer[1].parameters(), "lr": 1e-2},
        ...
    ],
    lr=1e-3,
)
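For the 1/(layer width) scaling the question asks about, a hedged sketch (my assumption: "width" means out_channels for Conv2d layers and out_features for Linear layers, reusing net2 from the code above) could build the parameter groups like this:

from torch import nn, optim

def width_scaled_groups(model, base_lr):
    groups = []
    for layer in model.network.children():
        params = list(layer.parameters())
        if not params:                     # skip ReLU, MaxPool2d, Flatten, ...
            continue
        group = {'params': params}
        if isinstance(layer, nn.Conv2d):
            group['lr'] = base_lr / layer.out_channels   # lr ~ 1 / width
        elif isinstance(layer, nn.Linear):
            group['lr'] = base_lr / layer.out_features
        groups.append(group)
    return groups

optimizer = optim.SGD(width_scaled_groups(net2, base_lr=0.1), lr=0.1)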

'Sequential' object has no attribute 'features' while extracting vgg19 pytorch features

I'm trying to extract image features using a VGG19 network (the output should have shape [1, 7, 7, 512] per frame).
Here is the code I have used:
deep_net = models.vgg19(pretrained=True).cuda()
deep_net = nn.Sequential(*list(deep_net.children())[:-2])
deep_net.eval()

save_file_sample_path = '/media/data1/out.npy'
input_image = torch.zeros(1, 3, 224, 224)
output_feat = np.zeros(shape=[1, 49, 512])

with torch.no_grad():
    im = default_loader('/media/data1/images/frame612.jpg')
    im = transform(im)
    input_image[0, :, :] = im
    input_image = input_image.cuda()
    output_feat = deep_net(input_image)
    output_feat = output_feat.features[:-2].view(1, 512, 49).transpose(1, 2)
But I get the following error:
AttributeError: 'Sequential' object has no attribute 'features'
At the line:
output_feat = output_feat.features[:-2].view(1, 512, 49).transpose(1, 2)
Any idea why this does not work anymore, and how to fix it?
Thanks!
It's because you are rebuilding deep_net with nn.Sequential, so it loses the features attribute.
deep_net = models.vgg19(pretrained=True)
deep_net.features
Sequential(
  (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (1): ReLU(inplace=True)
  ...
  (36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
deep_net = nn.Sequential(*list(deep_net.children())[:-2])
deep_net.features
AttributeError: 'Sequential' object has no attribute 'features'
The equivalent you want now is this:
list(deep_net.children())[0][:-2]
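As a follow-up sketch (assuming the torchvision VGG19 whose top-level children are features, avgpool and classifier, so [:-2] keeps only the convolutional features), the rebuilt deep_net already returns the feature map, and the last line of the question's code can simply reshape its output:

output_feat = deep_net(input_image)                           # shape (1, 512, 7, 7) for a 224x224 input
output_feat = output_feat.view(1, 512, 49).transpose(1, 2)    # shape (1, 49, 512)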

Keras: model.predict for a single image

I'd like to make a prediction for a single image with Keras. I've trained my model so I'm just loading the weights.
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import numpy as np
import cv2

# dimensions of our images.
img_width, img_height = 150, 150

def create_model():
    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)

    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model

img = cv2.imread('./test1/1.jpg')
model = create_model()
model.load_weights('./weight.h5')
model.predict(img)
I'm loading the image using:
img = cv2.imread('./test1/1.jpg')
And using the predict function of the model:
model.predict(img)
But I get the error:
ValueError: Error when checking : expected conv2d_1_input to have 4 dimensions, but got array with shape (499, 381, 3)
How should I proceed to get a prediction for a single image?
Since you trained your model on mini-batches, your input is a tensor of shape [batch_size, image_width, image_height, number_of_channels].
When predicting, you have to respect this shape even if you have only one image. Your input should be of shape: [1, image_width, image_height, number_of_channels].
You can do this in numpy easily. Let's say you have a single 5x5x3 image:
>>> x = np.random.randint(0, 10, (5, 5, 3))
>>> x.shape
(5, 5, 3)
>>> x = np.expand_dims(x, axis=0)
>>> x.shape
(1, 5, 5, 3)
Now x is a rank 4 tensor!
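Putting that together for the question's code, a minimal sketch (resizing with OpenCV, which the question already imports, and assuming the 150x150 training size) could look like:

img = cv2.imread('./test1/1.jpg')               # shape (H, W, 3)
img = cv2.resize(img, (img_width, img_height))  # match the model's input size: (150, 150, 3)
img = np.expand_dims(img, axis=0)               # add the batch dimension: (1, 150, 150, 3)
prediction = model.predict(img)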
Even though this doesn't solve your error, make sure you also rescale your image if you did so during training. For instance, my training generator looks like:
train_datagen = ImageDataGenerator(
    rotation_range=40,
    zoom_range=[0.7, 0.9],
    horizontal_flip=True,
    rescale=1./255
)
So when I go to predict a single image:
from PIL import Image
import numpy as np
from skimage import transform
def load(filename):
    np_image = Image.open(filename)
    np_image = np.array(np_image).astype('float32') / 255
    np_image = transform.resize(np_image, (256, 256, 3))
    np_image = np.expand_dims(np_image, axis=0)
    return np_image
image = load('my_file.jpg')
model.predict(image)
I also have to rescale it by 255, just as during training.
You can load the image with desired width and height, convert it to a numpy array with the shape of (image_width, image_height, number_of_channels) and then change the shape of the array to (1, image_width, image_height, number_of_channels). (batch_size =1)
import numpy as np
from keras.preprocessing import image
img_width, img_height = 150, 150
img = image.load_img('image_path/image_name.jpg', target_size = (img_width, img_height))
img = image.img_to_array(img)
img = np.expand_dims(img, axis = 0)
model.predict(img)
You can also add the batch dimension when predicting on a single test sample:
single_test = model.predict(np.expand_dims(X_test[i], axis=0))
Or simply try indexing with None to add the batch dimension:
model.predict(img[None, ...])

Reshaping image data in Keras to match CNN requirements

I've created a CNN designed to recognize objects.
from keras.preprocessing.image import img_to_array, load_img
img = load_img('newimage.jpg')
x = img_to_array(img)
x = x.reshape( (1,) + x.shape )
scores = model.predict(x, verbose=1)
print(scores)
However I'm getting:
expected convolution2d_input_1 to have shape (None, 3, 108, 192) but got array with shape (1, 3, 192, 108)
My model:
def create_model():
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, input_shape=(3, img_width, img_height)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Dense(3, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
I've looked at related answers and the documentation, but I'm at a loss as to how to reshape the array to match what's expected.
I guess the problem is with setting up the image width and height. As the error says:
expected convolution2d_input_1 to have shape (None, 3, 108, 192) # expected width = 108 and height = 192
but got array with shape (1, 3, 192, 108) # width = 192, height = 108
Update: I tested your code with a small change and it worked!
Here are just the changed lines:
img_width, img_height = 960, 717
model.add(Convolution2D(32, 3, 3, input_shape=(img_height, img_width, 3)))
This is the main change - input_shape=(img_height, img_width, 3)
The image I used to run this code had width = 960 and height = 717. I have updated my previous answer, as part of it was wrong. Sorry for that.
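If you prefer to keep the original channels-first model instead, a hedged alternative (assuming the mismatch is only the height/width order) is to force the expected size when loading, since load_img's target_size takes (height, width):

from keras.preprocessing.image import img_to_array, load_img

img = load_img('newimage.jpg', target_size=(108, 192))  # the (height, width) the model expects
x = img_to_array(img)                                    # (3, 108, 192) with a channels-first data format
x = x.reshape((1,) + x.shape)                            # add the batch dimension: (1, 3, 108, 192)
scores = model.predict(x, verbose=1)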

Keras ImageDataGenerator not working as expected

I'm trying to build an autoencoder using Keras, based on this example from the docs. Because my data is large, I'd like to use a generator to avoid loading it all into memory.
My model looks like:
model = Sequential()
model.add(Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(3, 256, 256)))
model.add(MaxPooling2D((2, 2), border_mode='same'))
model.add(Convolution2D(8, 3, 3, activation='relu', border_mode='same'))
model.add(MaxPooling2D((2, 2), border_mode='same'))
model.add(Convolution2D(8, 3, 3, activation='relu', border_mode='same'))
model.add(MaxPooling2D((2, 2), border_mode='same'))
model.add(Convolution2D(8, 3, 3, activation='relu', border_mode='same'))
model.add(UpSampling2D((2, 2)))
model.add(Convolution2D(8, 3, 3, activation='relu', border_mode='same'))
model.add(UpSampling2D((2, 2)))
model.add(Convolution2D(16, 3, 3, activation='relu'))
model.add(UpSampling2D((2, 2)))
model.add(Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same'))
model.compile(optimizer='adadelta', loss='binary_crossentropy')
My generator:
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory('IMAGE DIRECTORY', color_mode='rgb', class_mode='binary', batch_size=32, target_size=(256, 256))
And then fitting the model:
model.fit_generator(
    train_generator,
    samples_per_epoch=1,
    nb_epoch=1,
    verbose=1,
)
I'm getting this error:
Exception: Error when checking model target: expected convolution2d_76 to have 4 dimensions, but got array with shape (32, 1)
That looks like the size of my batch rather than a sample. What am I doing wrong?
The error is most likely due to class_mode='binary'. It makes the generator produce binary class labels, so its output has shape (batch_size, 1), while your model produces a four-dimensional output (since the last layer is a convolution).
I guess that you want your label to be the image itself. Based on the source of the flow_from_directory and the DirectoryIterator it uses, it is impossible to do by just changing the class_mode. A possible solution would be along the lines of:
train_iterator_ = train_datagen.flow_from_directory('IMAGE DIRECTORY', color_mode='rgb', class_mode=None, batch_size=32, target_size=(256, 256))

def train_generator():
    for x in train_iterator_:
        yield x, x
Note that I set class_mode to None, which makes the generator return just the image instead of an (image, label) tuple. I then define a new generator that returns the image as both the input and the label.
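A hedged usage sketch, reusing the (old Keras 1 style) fit_generator call from the question with the wrapping generator:

model.fit_generator(
    train_generator(),        # note the call: we pass the generator object, not the function
    samples_per_epoch=32,     # assumption: adjust to the number of images per epoch
    nb_epoch=1,
    verbose=1,
)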