CNN deep model using too much RAM - deep-learning

I'm building a CNN for detection of pneumonia in chest x-ray images. The problem I have is that when training, RAM usage just blows up: I'm using a Kaggle Notebook, and RAM usage slowly grows until it hits the 30 GB limit and the kernel shuts down, even before printing a single epoch.
I tried casting the data from float64 to float32, but it made no major difference; it only slowed the rate at which the 30 GB limit is reached.
I don't really know where the problem is, because I've seen similar code trained on the same dataset in Kaggle Notebooks, so I know the data itself isn't the problem. My dataset has 5216 training images and just 16 validation images. Here's the code:
image_size = (128,128)
data_train = tf.keras.utils.image_dataset_from_directory(train_path, color_mode ='grayscale', image_size = image_size)
data_train = data_train.map(lambda x, y: (tf.cast(x, tf.float32), y))
data_train_iterator = data_train.as_numpy_iterator()
batch_data_train = data_train_iterator.next()
data_val = tf.keras.utils.image_dataset_from_directory(val_path,color_mode ='grayscale', image_size = image_size)
data_val = data_val.map(lambda x, y: (tf.cast(x, tf.float32), y))
data_val_iterator = data_val.as_numpy_iterator()
batch_data_val = data_val_iterator.next()
data_augmentation = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    vertical_flip=False,
    zoom_range=0.2,
    fill_mode='nearest'
)
data_aug_train = data_augmentation.flow_from_directory(train_path, target_size=image_size, batch_size=32, class_mode='categorical')
data_aug_val = data_augmentation.flow_from_directory(val_path, target_size=image_size, batch_size=32, class_mode='categorical')
def pneumonia_model():
    model = Sequential()
    model.add(Conv2D(16, (3, 3), activation='relu', padding="same", input_shape=(128, 128, 1)))
    model.add(BatchNormalization())
    model.add(Conv2D(16, (3, 3), padding="same", activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding="same", input_shape=(128, 128, 1)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3), padding="same", activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding="same"))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), padding="same", activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(96, (3, 3), dilation_rate=(2, 2), activation='relu', padding="same"))
    model.add(BatchNormalization())
    model.add(Conv2D(96, (3, 3), padding="valid", activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), dilation_rate=(2, 2), activation='relu', padding="same"))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), padding="valid", activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(2, activation='softmax'))
    print(model.summary())
    return model
model = pneumonia_model()
model.compile('adam', loss=tf.losses.BinaryCrossentropy(), metrics=['accuracy'])
hist = model.fit(data_aug_train, epochs=20, validation_data=tuple(data_aug_val))

Related

TimeDistributedImageDataGenerator for many-to-one segmentation problem

I'm trying to implement this segmentation problem:
https://user-images.githubusercontent.com/91024790/178153192-040ab44c-7b9f-4cfd-8e11-a3cdca2070e9.png
My dataset is composed of images and masks.
In order to create sequences of images and feed them into the network, I used TimeDistributedImageDataGenerator (https://github.com/kivijoshi/TimeDistributedImageDataGenerator/blob/master/TimeDistributedImageDataGenerator/TimeDistributedImageDataGenerator.py).
Here is my code:
seed = 42
from keras.preprocessing.image import ImageDataGenerator

img_data_gen_args = dict(rescale=1./255,
                         rotation_range=90,
                         zoom_range=0.2,
                         brightness_range=[0.3, 0.9],
                         width_shift_range=0.3,
                         height_shift_range=0.3,
                         shear_range=0.5,
                         time_steps=3,
                         horizontal_flip=True,
                         vertical_flip=True,
                         fill_mode='constant')

mask_data_gen_args = dict(rotation_range=90,
                          zoom_range=0.2,
                          brightness_range=[0.3, 0.9],
                          width_shift_range=0.3,
                          height_shift_range=0.3,
                          shear_range=0.5,
                          time_steps=1,
                          horizontal_flip=True,
                          vertical_flip=True,
                          fill_mode='constant',
                          preprocessing_function=lambda x: np.where(x > 0, 1, 0).astype(x.dtype))  # binarize the output again
image_data_generator = TimeDistributedImageDataGenerator(**img_data_gen_args)
mask_data_generator = TimeDistributedImageDataGenerator(**mask_data_gen_args)

image_generator = image_data_generator.flow_from_directory(train_img_path,
                                                            seed=seed,
                                                            batch_size=batch_size,
                                                            color_mode='grayscale',
                                                            target_size=(256, 256),
                                                            class_mode=None)  # very important, otherwise it returns multiple numpy arrays thinking class_mode is binary

mask_generator = mask_data_generator.flow_from_directory(train_mask_path,
                                                         seed=seed,
                                                         batch_size=batch_size,
                                                         color_mode='grayscale',  # read masks in grayscale
                                                         target_size=(256, 256),
                                                         class_mode=None)

valid_img_generator = image_data_generator.flow_from_directory(val_img_path,
                                                               seed=seed,
                                                               batch_size=batch_size,
                                                               color_mode='grayscale',
                                                               target_size=(256, 256),
                                                               class_mode=None)  # default batch size is 32 if not specified here

valid_mask_generator = mask_data_generator.flow_from_directory(val_mask_path,
                                                               seed=seed,
                                                               batch_size=batch_size,
                                                               target_size=(256, 256),
                                                               color_mode='grayscale',  # read masks in grayscale
                                                               class_mode=None)  # default batch size is 32 if not specified here

train_generator = zip(image_generator, mask_generator)
val_generator = zip(valid_img_generator, valid_mask_generator)
I used time_steps=3 for image_generator and time_steps=1 for mask_generator, since I want to predict just the last image of a sequence of three images (as the picture suggests).
My image generator now yields batches of shape (3, 3, 256, 256, 1), while the mask generator yields (3, 1, 256, 256, 1), where the first dimension is the batch size, the second is time_steps, and the last three are height, width and channels.
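As a quick sanity check (just a sketch, assuming the generators defined above), you can pull one batch from the zipped training generator and print the shapes described here:
imgs, masks = next(train_generator)
print(imgs.shape)   # expected: (batch_size, 3, 256, 256, 1)
print(masks.shape)  # expected: (batch_size, 1, 256, 256, 1)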
Then I built my segmentation model:
input_l = layers.Input(shape=(input_shape))
x = (layers.TimeDistributed(layers.Conv2D( 64, kernel_size=(3, 3),padding='same',strides=(1,1),activation='relu',kernel_initializer='he_normal' ) )) (input_l)
conv2 = layers.TimeDistributed( layers.Conv2D( 64, kernel_size=(3, 3),padding='same',strides=(1,1),activation='relu' ,kernel_initializer='he_normal' ) ) (x)
x=layers.TimeDistributed(layers.MaxPooling2D(pool_size=(2,2)))(conv2)
x = layers.TimeDistributed( layers.Conv2D( 128, kernel_size=(3, 3),padding='same',strides=(1,1),activation='relu' ,kernel_initializer='he_normal' ) ) (x)
conv5 = layers.TimeDistributed( layers.Conv2D( 128, kernel_size=(3, 3),padding='same',strides=(1,1),activation='relu' ,kernel_initializer='he_normal') ) (x)
x=layers.TimeDistributed(layers.MaxPooling2D(pool_size=(2,2)))(conv5)
x = layers.TimeDistributed( layers.Conv2D( 256, kernel_size=(3, 3),padding='same',strides=(1,1) ,activation='relu' ,kernel_initializer='he_normal' ) ) (x)
conv8 = layers.TimeDistributed( layers.Conv2D( 256, kernel_size=(3, 3),padding='same',strides=(1,1) ,activation='relu',kernel_initializer='he_normal' ) ) (x)
x=layers.TimeDistributed(layers.MaxPooling2D(pool_size=(2,2)))(conv8)
x=layers.Bidirectional(layers.ConvLSTM2D(256,kernel_size=(3,3),padding='same',strides=(1,1),return_sequences=True,recurrent_dropout=0.2))(x)
up1 = layers.TimeDistributed( layers.Conv2DTranspose( 512,kernel_size=(3,3),padding='same',strides=(2,2)))(x)
concat1 = layers.concatenate([up1, conv8])
x = layers.TimeDistributed( layers.Conv2D( 256, kernel_size=(3, 3),padding='same',strides=(1,1) ,activation='relu' ,kernel_initializer='he_normal' ) ) (concat1)
x = layers.TimeDistributed( layers.Conv2D( 256, kernel_size=(3, 3),padding='same',strides=(1,1) ,activation='relu' ,kernel_initializer='he_normal' ) ) (x)
up2 = layers.TimeDistributed( layers.Conv2DTranspose( 256,kernel_size=(3,3),padding='same',strides=(2,2)))(x)
concat2 = layers.concatenate([up2, conv5])
x = layers.TimeDistributed( layers.Conv2D( 128, kernel_size=(3, 3),padding='same',strides=(1,1),activation='relu',kernel_initializer='he_normal' ) ) (concat2)
x = layers.TimeDistributed( layers.Conv2D( 128, kernel_size=(3, 3),padding='same',strides=(1,1) ,activation='relu',kernel_initializer='he_normal' ) ) (x)
up3 = layers.TimeDistributed( layers.Conv2DTranspose( 128,kernel_size=(3,3),padding='same',strides=(2,2)))(x)
concat3 = layers.concatenate([up3, conv2])
x = layers.TimeDistributed( layers.Conv2D( 64, kernel_size=(3, 3),padding='same',strides=(1,1),activation='relu' ,kernel_initializer='he_normal' ) ) (concat3)
x=layers.Bidirectional(layers.ConvLSTM2D(32,kernel_size=(3,3),padding='same',strides=(1,1),return_sequences=False,recurrent_dropout=0.2))(x)
x=tf.expand_dims(x,axis=1)
out = layers.Conv2D( 1, kernel_size=(1, 1),padding='same',strides=(1,1), activation='sigmoid' ) (x)
model = models.Model(inputs=input_l, outputs=out)
model.summary()
Model: "model_1"
Layer (type)                           Output Shape                Param #    Connected to
input_2 (InputLayer)                   [(None, 3, 256, 256, 1)]    0          []
time_distributed_17 (TimeDistributed)  (None, 3, 256, 256, 64)     640        ['input_2[0][0]']
time_distributed_18 (TimeDistributed)  (None, 3, 256, 256, 64)     36928      ['time_distributed_17[0][0]']
time_distributed_19 (TimeDistributed)  (None, 3, 128, 128, 64)     0          ['time_distributed_18[0][0]']
time_distributed_20 (TimeDistributed)  (None, 3, 128, 128, 128)    73856      ['time_distributed_19[0][0]']
time_distributed_21 (TimeDistributed)  (None, 3, 128, 128, 128)    147584     ['time_distributed_20[0][0]']
time_distributed_22 (TimeDistributed)  (None, 3, 64, 64, 128)      0          ['time_distributed_21[0][0]']
time_distributed_23 (TimeDistributed)  (None, 3, 64, 64, 256)      295168     ['time_distributed_22[0][0]']
time_distributed_24 (TimeDistributed)  (None, 3, 64, 64, 256)      590080     ['time_distributed_23[0][0]']
time_distributed_25 (TimeDistributed)  (None, 3, 32, 32, 256)      0          ['time_distributed_24[0][0]']
bidirectional_2 (Bidirectional)        (None, 3, 32, 32, 512)      9439232    ['time_distributed_25[0][0]']
time_distributed_26 (TimeDistributed)  (None, 3, 64, 64, 512)      2359808    ['bidirectional_2[0][0]']
concatenate_3 (Concatenate)            (None, 3, 64, 64, 768)      0          ['time_distributed_26[0][0]', 'time_distributed_24[0][0]']
time_distributed_27 (TimeDistributed)  (None, 3, 64, 64, 256)      1769728    ['concatenate_3[0][0]']
time_distributed_28 (TimeDistributed)  (None, 3, 64, 64, 256)      590080     ['time_distributed_27[0][0]']
time_distributed_29 (TimeDistributed)  (None, 3, 128, 128, 256)    590080     ['time_distributed_28[0][0]']
concatenate_4 (Concatenate)            (None, 3, 128, 128, 384)    0          ['time_distributed_29[0][0]', 'time_distributed_21[0][0]']
time_distributed_30 (TimeDistributed)  (None, 3, 128, 128, 128)    442496     ['concatenate_4[0][0]']
time_distributed_31 (TimeDistributed)  (None, 3, 128, 128, 128)    147584     ['time_distributed_30[0][0]']
time_distributed_32 (TimeDistributed)  (None, 3, 256, 256, 128)    147584     ['time_distributed_31[0][0]']
concatenate_5 (Concatenate)            (None, 3, 256, 256, 192)    0          ['time_distributed_32[0][0]', 'time_distributed_18[0][0]']
time_distributed_33 (TimeDistributed)  (None, 3, 256, 256, 64)     110656     ['concatenate_5[0][0]']
bidirectional_3 (Bidirectional)        (None, 256, 256, 64)        221440     ['time_distributed_33[0][0]']
tf.expand_dims_1 (TFOpLambda)          (None, 1, 256, 256, 64)     0          ['bidirectional_3[0][0]']
conv2d_23 (Conv2D)                     (None, 1, 256, 256, 1)      65         ['tf.expand_dims_1[0][0]']
Total params: 16,963,009
Trainable params: 16,963,009
Non-trainable params: 0
Everything works, however the dice coefficient is very low. I think the main problem is a mismatch between masks and images: is it possible that using sequences of different lengths results in the images and masks not matching? Any ideas? Thank you in advance.

PyTorch - RuntimeError: Sizes of tensors must match except in dimension 2. Got 55 and 54 (The offending index is 0)

I used a 3D U-Net with residual blocks to segment a CT image with an input of torch size [1, 1, 96, 176, 176], but it throws the following error:
RuntimeError: Sizes of tensors must match except in dimension 2. Got 55 and 54 (The offending index is 0)
So I traced back and found that the error comes from
outputs = self.decoder_stage2(torch.cat([short_range6, long_range3], dim=1)) + short_range6
short_range6 has torch.Size([1, 64, 24, 55, 40]) while long_range3 has torch.Size([1, 128, 24, 54, 40]). I think this is because some dimension is not a power of 2, but I cannot find where to modify it.
Below is the complete structure of the network; thanks a lot for any help!
class ResUNet(nn.Module):
    def __init__(self, in_channel=1, out_channel=2, training=True):
        super().__init__()
        self.training = training
        self.dorp_rate = 0.2
        self.encoder_stage1 = nn.Sequential(
            nn.Conv3d(in_channel, 16, 3, 1, padding=1),
            nn.PReLU(16),
            nn.Conv3d(16, 16, 3, 1, padding=1),
            nn.PReLU(16),
        )
        self.encoder_stage2 = nn.Sequential(
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
        )
        self.encoder_stage3 = nn.Sequential(
            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),
            nn.Conv3d(64, 64, 3, 1, padding=2, dilation=2),
            nn.PReLU(64),
            nn.Conv3d(64, 64, 3, 1, padding=4, dilation=4),
            nn.PReLU(64),
        )
        self.encoder_stage4 = nn.Sequential(
            nn.Conv3d(128, 128, 3, 1, padding=3, dilation=3),
            nn.PReLU(128),
            nn.Conv3d(128, 128, 3, 1, padding=4, dilation=4),
            nn.PReLU(128),
            nn.Conv3d(128, 128, 3, 1, padding=5, dilation=5),
            nn.PReLU(128),
        )
        self.decoder_stage1 = nn.Sequential(
            nn.Conv3d(128, 256, 3, 1, padding=1),
            nn.PReLU(256),
            nn.Conv3d(256, 256, 3, 1, padding=1),
            nn.PReLU(256),
            nn.Conv3d(256, 256, 3, 1, padding=1),
            nn.PReLU(256),
        )
        self.decoder_stage2 = nn.Sequential(
            nn.Conv3d(128 + 64, 128, 3, 1, padding=1),
            nn.PReLU(128),
            nn.Conv3d(128, 128, 3, 1, padding=1),
            nn.PReLU(128),
            nn.Conv3d(128, 128, 3, 1, padding=1),
            nn.PReLU(128),
        )
        self.decoder_stage3 = nn.Sequential(
            nn.Conv3d(64 + 32, 64, 3, 1, padding=1),
            nn.PReLU(64),
            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),
            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),
        )
        self.decoder_stage4 = nn.Sequential(
            nn.Conv3d(32 + 16, 32, 3, 1, padding=1),
            nn.PReLU(32),
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
        )
        self.down_conv1 = nn.Sequential(
            nn.Conv3d(16, 32, 2, 2),
            nn.PReLU(32)
        )
        self.down_conv2 = nn.Sequential(
            nn.Conv3d(32, 64, 2, 2),
            nn.PReLU(64)
        )
        self.down_conv3 = nn.Sequential(
            nn.Conv3d(64, 128, 2, 2),
            nn.PReLU(128)
        )
        self.down_conv4 = nn.Sequential(
            nn.Conv3d(128, 256, 3, 1, padding=1),
            nn.PReLU(256)
        )
        self.up_conv2 = nn.Sequential(
            nn.ConvTranspose3d(256, 128, 2, 2),
            nn.PReLU(128)
        )
        self.up_conv3 = nn.Sequential(
            nn.ConvTranspose3d(128, 64, 2, 2),
            nn.PReLU(64)
        )
        self.up_conv4 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, 2, 2),
            nn.PReLU(32)
        )
        # 256*256
        self.map4 = nn.Sequential(
            nn.Conv3d(32, out_channel, 1, 1),
            nn.Upsample(scale_factor=(1, 1, 1), mode='trilinear', align_corners=False),
            nn.Softmax(dim=1)
        )
        # 128*128
        self.map3 = nn.Sequential(
            nn.Conv3d(64, out_channel, 1, 1),
            nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear', align_corners=False),
            nn.Softmax(dim=1)
        )
        # 64*64
        self.map2 = nn.Sequential(
            nn.Conv3d(128, out_channel, 1, 1),
            nn.Upsample(scale_factor=(4, 4, 4), mode='trilinear', align_corners=False),
            nn.Softmax(dim=1)
        )
        # 32*32
        self.map1 = nn.Sequential(
            nn.Conv3d(256, out_channel, 1, 1),
            nn.Upsample(scale_factor=(8, 8, 8), mode='trilinear', align_corners=False),
            nn.Softmax(dim=1)
        )

    def forward(self, inputs):
        long_range1 = self.encoder_stage1(inputs) + inputs
        short_range1 = self.down_conv1(long_range1)
        long_range2 = self.encoder_stage2(short_range1) + short_range1
        long_range2 = F.dropout(long_range2, self.dorp_rate, self.training)
        short_range2 = self.down_conv2(long_range2)
        long_range3 = self.encoder_stage3(short_range2) + short_range2
        long_range3 = F.dropout(long_range3, self.dorp_rate, self.training)
        short_range3 = self.down_conv3(long_range3)
        long_range4 = self.encoder_stage4(short_range3) + short_range3
        long_range4 = F.dropout(long_range4, self.dorp_rate, self.training)
        short_range4 = self.down_conv4(long_range4)
        outputs = self.decoder_stage1(long_range4) + short_range4
        outputs = F.dropout(outputs, self.dorp_rate, self.training)
        output1 = self.map1(outputs)
        short_range6 = self.up_conv2(outputs)
        outputs = self.decoder_stage2(torch.cat([short_range6, long_range3], dim=1)) + short_range6
        outputs = F.dropout(outputs, self.dorp_rate, self.training)
        output2 = self.map2(outputs)
        short_range7 = self.up_conv3(outputs)
        outputs = self.decoder_stage3(torch.cat([short_range7, long_range2], dim=1)) + short_range7
        outputs = F.dropout(outputs, self.dorp_rate, self.training)
        output3 = self.map3(outputs)
        short_range8 = self.up_conv4(outputs)
        outputs = self.decoder_stage4(torch.cat([short_range8, long_range1], dim=1)) + short_range8
        output4 = self.map4(outputs)
        if self.training is True:
            return output1, output2, output3, output4
        else:
            return output4
You can pad your image's dimensions to be multiples of 32. By doing this, you won't have to change the 3D U-Net's parameters.
Here is a simple snippet to show you the way.
# I assume that you named your input image as img
import math
import torch.nn as nn

padding1_mult = math.floor(img.shape[3] / 32) + 1
padding2_mult = math.floor(img.shape[4] / 32) + 1
pad1 = (32 * padding1_mult) - img.shape[3]
pad2 = (32 * padding2_mult) - img.shape[4]
# ReplicationPad3d matches the 5-D (N, C, D, H, W) input and the 6-value padding tuple
padding = nn.ReplicationPad3d((0, pad2, pad1, 0, 0, 0))
img = padding(img)
After this operation, your image shape should be torch.Size([1, 1, 96, 192, 192]).
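Equivalently, here is a small sketch of my own (a hypothetical helper, not from your code) that computes the same padding with torch.nn.functional.pad and works for any input whose height and width are not yet multiples of 32:
import torch
import torch.nn.functional as F

def pad_hw_to_multiple_of_32(img):
    # img is a 5-D volume (N, C, D, H, W); pad H and W up to the next multiple of 32
    pad_h = (32 - img.shape[3] % 32) % 32   # 0 if already a multiple of 32
    pad_w = (32 - img.shape[4] % 32) % 32
    # F.pad orders padding from the last dimension: (W_left, W_right, H_top, H_bottom, D_front, D_back)
    return F.pad(img, (0, pad_w, 0, pad_h, 0, 0), mode='replicate')

img = torch.randn(1, 1, 96, 176, 176)
print(pad_hw_to_multiple_of_32(img).shape)  # torch.Size([1, 1, 96, 192, 192])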

Error showing: 'ResNet' object has no attribute 'classifier'

I downloaded a ResNet18 model to train. When I type
model
it shows:
ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer3): Sequential(
(0): BasicBlock(
(conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
(fc): Linear(in_features=512, out_features=1000, bias=True)
(classifer): Sequential(
(fc1): Linear(in_features=512, out_features=256, bias=True)
(relu): ReLU()
(fc5): Linear(in_features=128, out_features=2, bias=True)
(output): LogSoftmax()
)
)
As you can see, it clearly shows the classifier,
but when I do
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
it shows an error
AttributeError: 'ResNet' object has no attribute 'classifier'
I don't know what mistake I'm making. If you can help, that would be great; I can provide some extra details if you want.
Remove classifier and use model.parameters() only.
optimizer = optim.Adam(model.parameters(), lr=0.001)
To construct an Optimizer you have to give it an iterable containing the parameters to optimize.
Assuming you want to train only the classifier, you can freeze the parameters you don't want to change.
For your case, you can do
for name, param in model.named_parameters():
    param.requires_grad = False
    if name.startswith('classifier'):
        param.requires_grad = True
This will freeze all the parameters except those of the classifier.
And then you can do what the other answer suggests i.e. pass all parameters to the optimizer.
optimizer = optim.Adam(model.parameters(), lr=0.001)
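For reference, a minimal sketch of the usual transfer-learning setup, assuming the model is torchvision's resnet18 (whose final layer attribute is named fc, not classifier); the custom head below is only illustrative:
import torch.nn as nn
import torch.optim as optim
from torchvision import models

model = models.resnet18(pretrained=True)
for param in model.parameters():
    param.requires_grad = False              # freeze the pretrained backbone

# attach a new head under the attribute the network actually has: `fc`
model.fc = nn.Sequential(
    nn.Linear(512, 256),
    nn.ReLU(),
    nn.Linear(256, 2),
    nn.LogSoftmax(dim=1),
)
optimizer = optim.Adam(model.fc.parameters(), lr=0.001)  # optimize only the new head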

caffe: What is ReLU split

Here are my blob shapes:
data 4096 4.10e+03 (1, 2, 1, 2048)
Convolution1 130944 1.31e+05 (1, 64, 1, 2046)
ReLU1 130944 1.31e+05 (1, 64, 1, 2046)
Convolution2 130816 1.31e+05 (1, 64, 1, 2044)
ReLU2 130816 1.31e+05 (1, 64, 1, 2044)
ReLU2_ReLU2_0_split_0 130816 1.31e+05 (1, 64, 1, 2044)
ReLU2_ReLU2_0_split_1 130816 1.31e+05 (1, 64, 1, 2044)
Pooling1 65408 6.54e+04 (1, 64, 1, 1022)
Convolution3 130560 1.31e+05 (1, 128, 1, 1020)
ReLU3 130560 1.31e+05 (1, 128, 1, 1020)
Convolution4 130304 1.30e+05 (1, 128, 1, 1018)
ReLU4 130304 1.30e+05 (1, 128, 1, 1018)
ReLU4_ReLU4_0_split_0 130304 1.30e+05 (1, 128, 1, 1018)
ReLU4_ReLU4_0_split_1 130304 1.30e+05 (1, 128, 1, 1018)
Pooling2 65152 6.52e+04 (1, 128, 1, 509)
What are the two lines "ReLU2_ReLU2_0_split_0" and "ReLU2_ReLU2_0_split_1"? Where do they come from?
Your ReLU layer's output is used as a "bottom" by two layers. Therefore, Caffe automatically adds a "Split" layer that creates two copies of the ReLU output and feeds each copy to one of the top layers. These two copies are the ReLU2_ReLU2_0_split_0 and ReLU2_ReLU2_0_split_1 blobs you see.

Keras with Theano: Loss decrease but accuracy not changing

This is my code. I tried to build a VGG 11-layer network, with a mix of ReLU and ELU activations and many regularizations on kernels and activities. The result is really confusing: the code is at the 10th epoch, my loss on both train and val has decreased from 2000 to 1.5, but my accuracy on both train and val has stayed at 50%. Can somebody explain this to me?
# VGG 11
from keras.regularizers import l2
from keras.layers.advanced_activations import ELU
from keras.optimizers import Adam
model = Sequential()
model.add(Conv2D(64, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.0001),
input_shape=(1, 96, 96), activation='relu'))
model.add(Conv2D(64, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.0001),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001),activity_regularizer=l2(0.0001),
activation='relu'))
model.add(Conv2D(128, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.0001),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.0001),
activation='relu'))
model.add(Conv2D(256, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.0001),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.0001),
activation='relu'))
model.add(Conv2D(512, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.0001),
activation='relu'))
model.add(Conv2D(512, (3, 3), kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.0001),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# convert convolutional filters to flat so they can be feed to fully connected layers
model.add(Flatten())
model.add(Dense(2048, kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.01)))
model.add(ELU(alpha=1.0))
model.add(Dropout(0.5))
model.add(Dense(1024, kernel_initializer='he_normal',
kernel_regularizer=l2(0.0001), activity_regularizer=l2(0.01)))
model.add(ELU(alpha=1.0))
model.add(Dropout(0.5))
model.add(Dense(2))
model.add(Activation('softmax'))
adammo = Adam(lr=0.0008, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=adammo, metrics=['accuracy'])
hist = model.fit(X_train, y_train, batch_size=48, epochs=20, verbose=1, validation_data=(X_val, y_val))
This is not a defect; in fact, it is entirely possible!
Categorical cross-entropy loss does not require that accuracy go up as the loss decreases. This is not a bug in Keras or Theano, but rather a network or data problem.
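As a toy illustration (made-up numbers, not taken from your run), the cross-entropy can keep dropping while the argmax prediction, and therefore the accuracy, never changes:
import numpy as np

y_true = np.array([[1, 0], [1, 0]])               # both samples belong to class 0
p_early = np.array([[0.10, 0.90], [0.95, 0.05]])  # early in training
p_later = np.array([[0.45, 0.55], [0.99, 0.01]])  # later: better loss, same argmax

def cce(y, p):
    return -np.mean(np.sum(y * np.log(p), axis=1))

def acc(y, p):
    return np.mean(y.argmax(axis=1) == p.argmax(axis=1))

print(cce(y_true, p_early), acc(y_true, p_early))  # ~1.18 loss, 0.5 accuracy
print(cce(y_true, p_later), acc(y_true, p_later))  # ~0.40 loss, still 0.5 accuracy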
This network structure is probably over-complicated for what you might be trying to do. You should remove some of your regularization, use only ReLU, use fewer layers, use the standard Adam optimizer, a larger batch size, etc. Try first using one of Keras' default models, like VGG16.
If you want to see their implementation and edit it into a different, VGG11-like structure, it is here:
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
You can see it is much simpler. It only uses ReLU (which has become popular these days), has no regularization, a different convolution structure, etc. Modify that to your needs!
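For example, one possible adaptation (just a sketch, assuming the VGG_16 function above and an otherwise standard Keras Sequential API; not tested on your data) is to keep the convolutional base but swap the 1000-way output for your two classes:
from keras.layers import Dense

model = VGG_16()
model.pop()                                  # drop the Dense(1000, softmax) head
model.add(Dense(2, activation='softmax'))    # two-class output instead
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])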