This is my code for instance segmentation on a custom dataset using TensorFlow.
I am creating the model:
import tensorflow as tf
# Define the model architecture
model = tf.keras.models.Sequential()
# Add the convolutional layers
model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same', input_shape=(224, 224, 3)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(32, (3, 3)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Conv2D(64, (3, 3), padding='same'))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(64, (3, 3)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Conv2D(128, (3, 3), padding='same'))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(128, (3, 3)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
# Flatten the output and add the fully connected layers
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.5))
# Reshape the output of the fully connected layer to a 1x1 feature map with 512 channels
model.add(tf.keras.layers.Reshape((1, 1, 512)))
# Add a convolutional layer with a kernel size of 1x1 and sigmoid activation
model.add(tf.keras.layers.Conv2D(1, (1, 1), activation='sigmoid'))
# Flatten the mask to a 2D array
model.add(tf.keras.layers.Reshape((224*224,1)))
I try compiling it but it gives an error. What is it that I am doing wrong? I have little knowledge of computer vision but I am trying to learn more and more, so please help me. Here is the error; it points at the flattening (Reshape) layer:
ValueError Traceback (most recent call last)
<ipython-input-8-ad4d85d788ab> in <module>
40
41 # Flatten the mask to a 2D array
---> 42 model.add(tf.keras.layers.Reshape((224*224,1)))
43 # Compile the model with the Adam optimizer and
44 model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
D:\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
528 self._self_setattr_tracking = False # pylint: disable=protected-access
529 try:
--> 530 result = method(self, *args, **kwargs)
531 finally:
532 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
D:\Anaconda\envs\tensorflow\lib\site-packages\keras\engine\sequential.py in add(self, layer)
215 # If the model is being built continuously on top of an input layer:
216 # refresh its output.
--> 217 output_tensor = layer(self.outputs[0])
218 if len(tf.nest.flatten(output_tensor)) != 1:
219 raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
D:\Anaconda\envs\tensorflow\lib\site-packages\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
975 if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
976 return self._functional_construction_call(inputs, args, kwargs,
--> 977 input_list)
978
979 # Maintains info about the `Layer.call` stack.
D:\Anaconda\envs\tensorflow\lib\site-packages\keras\engine\base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
1113 # Check input assumptions set after layer building, e.g. input shape.
1114 outputs = self._keras_tensor_symbolic_call(
-> 1115 inputs, input_masks, args, kwargs)
1116
1117 if outputs is None:
D:\Anaconda\envs\tensorflow\lib\site-packages\keras\engine\base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
846 return tf.nest.map_structure(keras_tensor.KerasTensor, output_signature)
847 else:
--> 848 return self._infer_output_signature(inputs, args, kwargs, input_masks)
849
850 def _infer_output_signature(self, inputs, args, kwargs, input_masks):
D:\Anaconda\envs\tensorflow\lib\site-packages\keras\engine\base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
886 self._maybe_build(inputs)
887 inputs = self._maybe_cast_inputs(inputs)
--> 888 outputs = call_fn(inputs, *args, **kwargs)
889
890 self._handle_activity_regularization(inputs, outputs)
D:\Anaconda\envs\tensorflow\lib\site-packages\keras\layers\core.py in call(self, inputs)
537 # Set the static shape for the result since it might lost during array_ops
538 # reshape, eg, some `None` dim in the result could be inferred.
--> 539 result.set_shape(self.compute_output_shape(inputs.shape))
540 return result
541
D:\Anaconda\envs\tensorflow\lib\site-packages\keras\layers\core.py in compute_output_shape(self, input_shape)
528 output_shape = [input_shape[0]]
529 output_shape += self._fix_unknown_dimension(input_shape[1:],
--> 530 self.target_shape)
531 return tf.TensorShape(output_shape)
532
D:\Anaconda\envs\tensorflow\lib\site-packages\keras\layers\core.py in _fix_unknown_dimension(self, input_shape, output_shape)
516 output_shape[unknown] = original // known
517 elif original != known:
--> 518 raise ValueError(msg)
519 return output_shape
520
ValueError: total size of new array must be unchanged, input_shape = [1, 1, 1], output_shape = [50176, 1]
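What the traceback is saying: after Reshape((1, 1, 512)) the 1x1 convolution leaves a single value per image (shape (1, 1, 1)), and one value cannot be reshaped into 224*224 = 50176 values, hence input_shape = [1, 1, 1] vs output_shape = [50176, 1]. A minimal sketch of a head that does produce a per-pixel mask, assuming a 224x224 binary mask is wanted (not your full pipeline, just the smallest change that makes the element counts agree):
# sketch: replace the Reshape/Conv2D head that follows Dense(512) and Dropout(0.5) with
model.add(tf.keras.layers.Dense(224 * 224, activation='sigmoid'))  # one value per output pixel
model.add(tf.keras.layers.Reshape((224, 224, 1)))                  # back to an image-shaped mask
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
A fully convolutional encoder-decoder (e.g. Conv2DTranspose upsampling or a U-Net) is the usual choice for segmentation, but the Dense head above is enough to get past this particular error.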
Related
I'm trying to create the base pre-trained model.
I used the following code:
# imports assumed from tf.keras for the snippet below
from tensorflow.keras.applications import DenseNet121
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model

base_model = DenseNet121(weights='/Users/awabe/Desktop/Project/PapilaDB/ClinicalData/densenet121_weights_tf_dim_ordering_tf_kernels.h5', include_top=False)
x = base_model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(x)
# and a logistic layer
predictions = Dense(len(labels), activation="sigmoid")(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam', loss=get_weighted_loss(pos_weights, neg_weights))
It gives me an error with message:
ValueError Traceback (most recent call last)
Cell In[73], line 2
1 # create the base pre-trained model
----> 2 base_model = DenseNet121(weights='/Users/awabe/Desktop/Project/PapilaDB/ClinicalData/densenet121_weights_tf_dim_ordering_tf_kernels.h5', include_top=False)
4 x = base_model.output
6 # add a global spatial average pooling layer
File /opt/anaconda3/envs/tensorflow/lib/python3.10/site-packages/keras/applications/densenet.py:358, in DenseNet121(include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation)
345 @keras_export(
346 "keras.applications.densenet.DenseNet121", "keras.applications.DenseNet121"
347 )
(...)
355 classifier_activation="softmax",
356 ):
357 """Instantiates the Densenet121 architecture."""
--> 358 return DenseNet(
359 [6, 12, 24, 16],
360 include_top,
361 weights,
362 input_tensor,
363 input_shape,
364 pooling,
365 classes,
366 classifier_activation,
367 )
File /opt/anaconda3/envs/tensorflow/lib/python3.10/site-packages/keras/applications/densenet.py:340, in DenseNet(blocks, include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation)
338 model.load_weights(weights_path)
339 elif weights is not None:
--> 340 model.load_weights(weights)
342 return model
File /opt/anaconda3/envs/tensorflow/lib/python3.10/site-packages/keras/utils/traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.traceback)
68 # To get the full stack trace, call:
69 # tf.debugging.disable_traceback_filtering()
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File /opt/anaconda3/envs/tensorflow/lib/python3.10/site-packages/keras/saving/hdf5_format.py:817, in load_weights_from_hdf5_group(f, model)
815 layer_names = filtered_layer_names
816 if len(layer_names) != len(filtered_layers):
--> 817 raise ValueError(
818 f"Layer count mismatch when loading weights from file. "
819 f"Model expected {len(filtered_layers)} layers, found "
820 f"{len(layer_names)} saved layers."
821 )
823 # We batch weight value assignments in a single backend call
824 # which provides a speedup in TensorFlow.
825 weight_value_tuples = []
ValueError: Layer count mismatch when loading weights from file. Model expected 241 layers, found 242 saved layers.
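A likely cause, judging only from the filename: densenet121_weights_tf_dim_ordering_tf_kernels.h5 is the weight file for the full model including the classification top, while include_top=False builds the network without that top, so the layer counts disagree (241 vs 242). A sketch of two consistent combinations; the _notop.h5 path below is an assumption about which file is available locally:
# headless weights into a headless model (hypothetical local path)
base_model = DenseNet121(weights='densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5', include_top=False)
# ...or let Keras download matching ImageNet weights itself
base_model = DenseNet121(weights='imagenet', include_top=False)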
In this model I am receiving a runtime error of
RuntimeError: Given groups=1, weight of size [32, 4, 3, 3], expected input[8, 3, 320, 320] to have 4 channels, but got 3 channels instead
The other issue is that I would like the segmentation to display in color, not grayscale.
Install Segmentation Models, Albumentations, OpenCV Contrib
!pip install segmentation-models-pytorch
!pip install -U git+https://github.com/albumentations-team/albumentations
!pip install --upgrade opencv-contrib-python
Import os and set path directory
import os, sys, torch
sys.path.append('/data/users/mtdata/fastai_seg/RUGD_files')
Print the versions
print('python v. : ', sys.version)
print('pytorch v. :', torch.__version__)
print('cuda v. :', torch.version.cuda)
python v. : 3.6.8 (default, Aug 13 2020, 07:46:32)
[GCC 4.8.5 20150623 (Red Hat 4.8.5-39)]
pytorch v. : 1.10.1+cu102
cuda v. : 10.2
import the necessary toolkits
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import helper
System configurations
CSV_FILE = 'files_path.csv'
DATA_DIR = '/fastai/RUGD_files'
DEVICE = 'cuda'
EPOCHS = 25
LR = 0.003
IMAGE_SIZE = 320
BATCH_SIZE = 8
ENCODER = 'timm-efficientnet-b1'
WEIGHTS = 'imagenet'
look at the data in the csv file
df = pd.read_csv(CSV_FILE)
df.head()
Look at one row from the CSV file
row = df.iloc[0]
image_path = row.images
mask_path = row.masks
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
view the images and labels
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
ax1.set_title('IMAGE')
ax1.imshow(image)
ax2.set_title('GROUND_TRUTH')
ax2.imshow(mask, cmap = 'gray')
Image and label
split the training set
# Split dataset to train and valid set
train_df, valid_df = train_test_split(df, test_size = 0.2, random_state = 42)
print the size of the training set
print(len(train_df))
print(len(valid_df))
5943
1486
import albumentations as A
Perform augmentation
def get_train_augs():
    return A.Compose([
        A.Resize(IMAGE_SIZE, IMAGE_SIZE),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5)
    ])

def get_valid_augs():
    return A.Compose([
        A.Resize(IMAGE_SIZE, IMAGE_SIZE)
    ])
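As a quick sanity check, a sketch (assuming the image and mask arrays loaded from the first CSV row above) of applying the pipeline to one sample:
augs = get_train_augs()
data = augs(image=image, mask=mask)       # albumentations returns a dict
aug_image, aug_mask = data['image'], data['mask']
print(aug_image.shape, aug_mask.shape)    # both resized to IMAGE_SIZE x IMAGE_SIZE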
Create a Custom Dataset
import torch utils
from torch.utils.data import Dataset
build the SegmentationDataset class
class SegmentationDataset(Dataset):
    def __init__(self, df, augmentations):
        self.df = df
        self.augmentations = augmentations

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]

        image_path = row.images
        mask_path = row.masks

        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        #mask = cv2.imread(mask_path)
        #mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)  # this has 2 dims (h, w)
        # we need to add a new dim -> (h, w, c)
        mask = np.expand_dims(mask, axis=-1)

        if self.augmentations:
            data = self.augmentations(image=image, mask=mask)
            image = data['image']
            mask = data['mask']

        # (h, w, c) -> (c, h, w)
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        mask = np.transpose(mask, (2, 0, 1)).astype(np.float32)

        # convert from np to tensor
        image = torch.Tensor(image) / 255.0
        mask = torch.round(torch.Tensor(mask) / 255.0)

        return image, mask
Build the trainset and validset
trainset = SegmentationDataset(train_df, get_train_augs())
validset = SegmentationDataset(valid_df, get_valid_augs())
View the size
print(f"Size of Trainset : {len(trainset)}")
print(f"Size of Validset : {len(validset)}")
Size of Trainset : 5943
Size of Validset : 1486
Show the image and the mask
idx = 21
image, mask = trainset[idx]
helper.show_image(image, mask)
Image and mask
from torch.utils.data import DataLoader
create batches and dataloader
trainloader = DataLoader(trainset, batch_size= BATCH_SIZE, shuffle= True)
validloader = DataLoader(validset, batch_size= BATCH_SIZE)
print(f"Total number of batches in trainloader : {len(trainloader)}")
print (f"Total number of batches in validloader : {len(validloader)}")
Total number of batches in trainloader : 743
Total number of batches in validloader : 186
for image, mask in trainloader:
    break

print(f"One batch image shape : {image.shape}")
print(f"One batch mask shape : {mask.shape}")
One batch image shape : torch.Size([8, 3, 320, 320])
One batch mask shape : torch.Size([8, 1, 320, 320])
Create Segmentation Model
from torch import nn
import segmentation_models_pytorch as smp
from segmentation_models_pytorch.losses import DiceLoss
Define the SegmentationModel class
class SegmentationModel(nn.Module):
    def __init__(self):
        super(SegmentationModel, self).__init__()
        self.arc = smp.Unet(
            encoder_name=ENCODER,
            encoder_weights=WEIGHTS,
            in_channels=3,
            classes=25,
            activation=None
        )

    def forward(self, images, masks=None):
        logits = self.arc(images)

        if masks != None:
            loss1 = DiceLoss(mode='binary')(logits, masks)
            loss2 = nn.BCEWithLogitsLoss()(logits, masks)
            return logits, loss1 + loss2

        return logits
define model
model = SegmentationModel()
model.to(DEVICE);
Create Train and Validation Function
# Create the train and validation functions used to train and evaluate the model
def train_fn(data_loader, model, optimizer):
    model.train()
    total_loss = 0.0

    for images, masks in tqdm(data_loader):
        images = images.to(DEVICE)
        masks = masks.to(DEVICE)

        optimizer.zero_grad()
        logits, loss = model(images, masks)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    return total_loss / len(data_loader)
Define eval function
# Unlike train_fn above, there is no optimizer and no gradient updates here
def eval_fn(data_loader, model):
    model.eval()
    total_loss = 0.0

    with torch.no_grad():
        for images, masks in tqdm(data_loader):
            images = images.to(DEVICE)
            masks = masks.to(DEVICE)

            logits, loss = model(images, masks)
            total_loss += loss.item()

    return total_loss / len(data_loader)
Define the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr = LR)
Train the model
from pandas.core.algorithms import mode
best_valid_loss = np.Inf
for i in range(EPOCHS):
    train_loss = train_fn(trainloader, model, optimizer)
    valid_loss = eval_fn(validloader, model)

    if valid_loss < best_valid_loss:
        torch.save(model.state_dict(), 'best_model.pt')
        print("saved model")
        best_valid_loss = valid_loss

    print(f"Epoch : {i+1} Train_loss : {train_loss} valid_loss : {valid_loss}")
This is the error message:
0%| | 0/743 [00:00<?, ?it/s]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-29-708661468242> in <module>
4 for i in range(EPOCHS):
5
----> 6 train_loss = train_fn(trainloader, model, optimizer)
7 valid_loss = eval_fn(validloader, model)
8
<ipython-input-26-f0794e25b184> in train_fn(data_loader, model, optimizer)
11
12 optimizer.zero_grad()
---> 13 logits, loss = model(images, masks)
14 loss.backward()
15 optimizer.step()
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
<ipython-input-24-acd2753da2f8> in forward(self, images, masks)
13 def forward(self, images, masks = None):
14
---> 15 logits = self.arc(images)
16
17 if masks != None:
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
~/.local/lib/python3.6/site-packages/segmentation_models_pytorch/base/model.py in forward(self, x)
13 def forward(self, x):
14 """Sequentially pass `x` trough model`s encoder, decoder and heads"""
---> 15 features = self.encoder(x)
16 decoder_output = self.decoder(*features)
17
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
~/.local/lib/python3.6/site-packages/segmentation_models_pytorch/encoders/timm_efficientnet.py in forward(self, x)
117 features = []
118 for i in range(self._depth + 1):
--> 119 x = stages[i](x)
120 features.append(x)
121
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
~/.local/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
139 def forward(self, input):
140 for module in self:
--> 141 input = module(input)
142 return input
143
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
~/.local/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
444
445 def forward(self, input: Tensor) -> Tensor:
--> 446 return self._conv_forward(input, self.weight, self.bias)
447
448 class Conv3d(_ConvNd):
~/.local/lib/python3.6/site-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight, bias)
441 _pair(0), self.dilation, self.groups)
442 return F.conv2d(input, weight, bias, self.stride,
--> 443 self.padding, self.dilation, self.groups)
444
445 def forward(self, input: Tensor) -> Tensor:
RuntimeError: Given groups=1, weight of size [32, 4, 3, 3], expected input[8, 3, 320, 320] to have 4 channels, but got 3 channels instead
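Reading the error: the weight shape [32, 4, 3, 3] means the encoder's first convolution was built for 4 input channels, while the dataloader feeds RGB batches of shape [8, 3, 320, 320]. A small sketch to confirm which side is wrong, assuming the variable names used above:
# sketch: inspect the first Conv2d of the encoder vs. the batch it receives
first_conv = next(m for m in model.arc.encoder.modules() if isinstance(m, nn.Conv2d))
print(first_conv.in_channels)    # should be 3 to match the RGB input
print(image.shape)               # torch.Size([8, 3, 320, 320]) from the loader
# if it prints 4, rebuild smp.Unet with in_channels=3 (or feed 4-channel input)
As for showing the segmentation in color rather than grayscale, a single-channel prediction can be passed through a matplotlib colormap, e.g. plt.imshow(pred_mask, cmap='jet'), or mapped to an RGB palette per class.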
In a cat-and-dog classification problem I got an error in:
cnn.fit(training_set, validation_data = test_set, batch_size=32, epochs = 30)
TypeError Traceback (most recent call last)
<ipython-input-20-ee0b03f0e8d6> in <module>
1 # training training set and evaluate test set
----> 2 cnn.fit(training_set, validation_data = test_set, batch_size=32, epochs = 30)
~\anaconda\envs\TF\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1145 use_multiprocessing=use_multiprocessing,
1146 model=self,
-> 1147 steps_per_execution=self._steps_per_execution)
1148
1149 # Container that configures and calls `tf.keras.Callback`s.
~\anaconda\envs\TF\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in get_data_handler(*args, **kwargs)
1362 if getattr(kwargs["model"], "_cluster_coordinator", None):
1363 return _ClusterCoordinatorDataHandler(*args, **kwargs)
-> 1364 return DataHandler(*args, **kwargs)
1365
1366
~\anaconda\envs\TF\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in __init__(self, x, y, sample_weight, batch_size, steps_per_epoch, initial_epoch, epochs, shuffle, class_weight, max_queue_size, workers, use_multiprocessing, model, steps_per_execution, distribute)
1164 use_multiprocessing=use_multiprocessing,
1165 distribution_strategy=ds_context.get_strategy(),
-> 1166 model=model)
1167
1168 strategy = ds_context.get_strategy()
~\anaconda\envs\TF\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in __init__(self, x, y, sample_weights, shuffle, workers, use_multiprocessing, max_queue_size, model, **kwargs)
937 max_queue_size=max_queue_size,
938 model=model,
--> 939 **kwargs)
940
941 @staticmethod
~\anaconda\envs\TF\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in __init__(self, x, y, sample_weights, workers, use_multiprocessing, max_queue_size, model, **kwargs)
807 # Since we have to know the dtype of the python generator when we build the
808 # dataset, we have to look at a batch to infer the structure.
--> 809 peek, x = self._peek_and_restore(x)
810 peek = self._standardize_batch(peek)
811 peek = _process_tensorlike(peek)
~\anaconda\envs\TF\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in _peek_and_restore(x)
941 @staticmethod
942 def _peek_and_restore(x):
--> 943 return x[0], x
944
945 def _handle_multiprocessing(self, x, workers, use_multiprocessing,
~\anaconda\envs\TF\lib\site-packages\keras_preprocessing\image\iterator.py in __getitem__(self, idx)
63 index_array = self.index_array[self.batch_size * idx:
64 self.batch_size * (idx + 1)]
---> 65 return self._get_batches_of_transformed_samples(index_array)
66
67 def __len__(self):
~\anaconda\envs\TF\lib\site-packages\keras_preprocessing\image\iterator.py in _get_batches_of_transformed_samples(self, index_array)
229 target_size=self.target_size,
230 interpolation=self.interpolation)
--> 231 x = img_to_array(img, data_format=self.data_format)
232 # Pillow images should be closed after `load_img`,
233 # but not PIL images.
~\anaconda\envs\TF\lib\site-packages\keras_preprocessing\image\utils.py in img_to_array(img, data_format, dtype)
307 # or (channel, height, width)
308 # but original PIL image has format (width, height, channel)
--> 309 x = np.asarray(img, dtype=dtype)
310 if len(x.shape) == 3:
311 if data_format == 'channels_first':
~\anaconda\envs\TF\lib\site-packages\numpy\core\_asarray.py in asarray(a, dtype, order)
81 UPDATEIFCOPY : False
82
---> 83 >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
84 >>> y.flags
85 C_CONTIGUOUS : False
TypeError: __array__() takes 1 positional argument but 2 were given
I think there is some problem with numpy or with Pillow, but I am not sure. I am using tensorflow = 2.5.0, numpy = 1.21.0 and cuda = 11.0. Please check whether I am using the right versions, or whether the problem could be in the CUDA version.
I am having the same question. My env is cuda 10.1, tensorflow 1.13.1, numpy 1.21.0. I thought maybe something was wrong with numpy, so I tested that, but it was not the issue. I searched again and found the problem may be with Pillow; I fixed it with pip install pillow==8.2.0.
It seems something is wrong with the Pillow package.
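A quick sketch to check which versions are actually installed before and after pinning Pillow (the pin below is the one from the comment above):
import PIL, numpy, tensorflow as tf
print(PIL.__version__, numpy.__version__, tf.__version__)
# if Pillow is too new for this stack, pin it:
# !pip install pillow==8.2.0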
Hello, below is the PyTorch model I am trying to run, but I am getting an error; I have posted the error trace as well. It was running very well until I added the convolution layers. I am still new to deep learning and PyTorch, so I apologize if this is a silly question. I am using Conv1d, so why does Conv1d expect a 3-dimensional input? And it is getting a 2D input, which also seems odd.
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(CROP_SIZE*CROP_SIZE*3, 512)
        self.conv1d1 = nn.Conv1d(in_channels=512, out_channels=64, kernel_size=1, stride=2)
        self.fc2 = nn.Linear(64, 128)
        self.conv1d2 = nn.Conv1d(in_channels=128, out_channels=64, kernel_size=1, stride=2)
        self.fc3 = nn.Linear(64, 256)
        self.conv1d3 = nn.Conv1d(in_channels=256, out_channels=64, kernel_size=1, stride=2)
        self.fc4 = nn.Linear(64, 256)
        self.fc4 = nn.Linear(256, 128)
        self.fc5 = nn.Linear(128, 64)
        self.fc6 = nn.Linear(64, 32)
        self.fc7 = nn.Linear(32, 64)
        self.fc8 = nn.Linear(64, frame['landmark_id'].nunique())

    def forward(self, x):
        x = F.relu(self.conv1d1(self.fc1(x)))
        x = F.relu(self.conv1d2(self.fc2(x)))
        x = F.relu(self.conv1d3(self.fc3(x)))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        x = self.fc8(x)
        return F.log_softmax(x, dim=1)
net = Net()
import torch.optim as optim
loss_function = nn.CrossEntropyLoss()
net.to(torch.device('cuda:0'))
for epoch in range(3):  # 3 full passes over the data
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    for data in tqdm(train_loader):  # `data` is a batch of data
        X = data['image'].to(device)  # X is the batch of features
        y = data['landmarks'].to(device)  # y is the batch of targets.
        optimizer.zero_grad()  # sets gradients to 0 before loss calc. You will do this likely every step.
        output = net(X.view(-1, CROP_SIZE*CROP_SIZE*3))  # pass in the reshaped batch
        # print(np.argmax(output))
        # print(y)
        loss = F.nll_loss(output, y)  # calc and grab the loss value
        loss.backward()  # apply this loss backwards thru the network's parameters
        optimizer.step()  # attempt to optimize weights to account for loss/gradients
    print(loss)  # print loss. We hope loss (a measure of wrong-ness) declines!
Error trace
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-42-f5ed7999ce57> in <module>
5 y = data['landmarks'].to(device) # y is the batch of targets.
6 optimizer.zero_grad() # sets gradients to 0 before loss calc. You will do this likely every step.
----> 7 output = net(X.view(-1,CROP_SIZE*CROP_SIZE*3)) # pass in the reshaped batch
8 # print(np.argmax(output))
9 # print(y)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
548 result = self._slow_forward(*input, **kwargs)
549 else:
--> 550 result = self.forward(*input, **kwargs)
551 for hook in self._forward_hooks.values():
552 hook_result = hook(self, input, result)
<ipython-input-37-6d3e34d425a0> in forward(self, x)
16
17 def forward(self, x):
---> 18 x = F.relu(self.conv1d1(self.fc1(x)))
19 x = F.relu(self.conv1d2(self.fc2(x)))
20 x = F.relu(self.conv1d3(self.fc3(x)))
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
548 result = self._slow_forward(*input, **kwargs)
549 else:
--> 550 result = self.forward(*input, **kwargs)
551 for hook in self._forward_hooks.values():
552 hook_result = hook(self, input, result)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/conv.py in forward(self, input)
210 _single(0), self.dilation, self.groups)
211 return F.conv1d(input, self.weight, self.bias, self.stride,
--> 212 self.padding, self.dilation, self.groups)
213
214
RuntimeError: Expected 3-dimensional input for 3-dimensional weight [64, 512, 1], but got 2-dimensional input of size [4, 512] instead
You should learn how convolutions work (e.g. see this answer) and some neural network basics (this tutorial from PyTorch).
Basically, Conv1d expects inputs of shape [batch, channels, features] (where features can be some timesteps and can vary, see example).
nn.Linear expects shape [batch, features] as it is fully connected and each input feature is connected to each output feature.
You can verify those shapes by yourself, for torch.nn.Linear:
import torch
layer = torch.nn.Linear(20, 10)
data = torch.randn(64, 20) # [batch, in_features]
layer(data).shape # [64, 10], [batch, out_features]
For Conv1d:
layer = torch.nn.Conv1d(in_channels=20, out_channels=10, kernel_size=3, padding=1)
data = torch.randn(64, 20, 15) # [batch, channels, timesteps]
layer(data).shape # [64, 10, 15], [batch, out_channels, timesteps]
layer(torch.randn(32, 20, 25)).shape # [32, 10, 25]
BTW. As you are working with images, you should use torch.nn.Conv2d instead.
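For completeness, the same shape check for Conv2d (shapes here are only illustrative):
layer = torch.nn.Conv2d(in_channels=3, out_channels=10, kernel_size=3, padding=1)
data = torch.randn(64, 3, 28, 28)  # [batch, channels, height, width]
layer(data).shape  # [64, 10, 28, 28], [batch, out_channels, height, width]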
Most of the PyTorch functions work on batch data, i.e. they accept input of size (batch_size, shape). @Szymon Maszke already posted an answer related to that.
So in your case, you can use the unsqueeze and squeeze functions for adding and removing the extra dimension.
Here's the sample code:
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(100, 512)
        self.conv1d1 = nn.Conv1d(in_channels=512, out_channels=64, kernel_size=1, stride=2)
        self.fc2 = nn.Linear(64, 128)

    def forward(self, x):
        x = self.fc1(x)
        x = x.unsqueeze(dim=2)
        x = F.relu(self.conv1d1(x))
        x = x.squeeze()
        x = self.fc2(x)
        return x
net = Net()
bsize = 4
inp = torch.randn((bsize, 100))
out = net(inp)
print(out.shape)
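With bsize = 4 this prints torch.Size([4, 128]): fc1 maps the 100 input features to 512, unsqueeze adds the length-1 timestep dimension that Conv1d expects, the kernel-size-1 convolution reduces 512 channels to 64, squeeze drops the extra dimension again, and fc2 maps 64 to 128.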
I'm learning TensorFlow 2.0 from the official tutorials. I can understand the result of the code below.
def square_if_positive(x):
    return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
# result
[-5, -4, -3, -2, -1, 0, 1, 4, 9, 16]
But if I change the input to a tensor instead of a Python range, like this:
def square_if_positive(x):
    return [i ** 2 if i > 0 else i for i in x]
square_if_positive(tf.range(-5, 5))
I get the error below:
OperatorNotAllowedInGraphError Traceback (most recent call last)
<ipython-input-39-6c17f29a3443> in <module>
2 def square_if_positive(x):
3 return [i**2 if i > 0 else i for i in x]
----> 4 square_if_positive(tf.range(10))
5 # measure_graph_size(square_if_positive, range(10))
~/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
437 # This is the first call of __call__, so we have to initialize.
438 initializer_map = {}
--> 439 self._initialize(args, kwds, add_initializers_to=initializer_map)
440 if self._created_variables:
441 try:
~/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
380 self._concrete_stateful_fn = (
381 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 382 *args, **kwds))
383
384 def invalid_creator_scope(*unused_args, **unused_kwds):
~/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1793 if self.input_signature:
1794 args, kwargs = None, None
-> 1795 graph_function, _, _ = self._maybe_define_function(args, kwargs)
1796 return graph_function
1797
~/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2093 graph_function = self._function_cache.primary.get(cache_key, None)
2094 if graph_function is None:
-> 2095 graph_function = self._create_graph_function(args, kwargs)
2096 self._function_cache.primary[cache_key] = graph_function
2097 return graph_function, args, kwargs
~/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
1984 arg_names=arg_names,
1985 override_flat_arg_shapes=override_flat_arg_shapes,
-> 1986 capture_by_value=self._capture_by_value),
1987 self._function_attributes,
1988 # Tell the ConcreteFunction to clean up its graph once it goes out of
~/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
851 converted_func)
852
--> 853 func_outputs = python_func(*func_args, **func_kwargs)
854
855 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
323 # __wrapped__ allows AutoGraph to swap in a converted function. We give
324 # the function a weak reference to itself to avoid a reference cycle.
--> 325 return weak_wrapped_fn().__wrapped__(*args, **kwds)
326 weak_wrapped_fn = weakref.ref(wrapped_fn)
327
~/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py in wrapper(*args, **kwargs)
841 except Exception as e: # pylint:disable=broad-except
842 if hasattr(e, "ag_error_metadata"):
--> 843 raise e.ag_error_metadata.to_exception(type(e))
844 else:
845 raise
OperatorNotAllowedInGraphError: in converted code:
<ipython-input-37-6c17f29a3443>:3 square_if_positive *
return [i**2 if i > 0 else i for i in x]
/Users/zhangpan/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py:547 __iter__
self._disallow_iteration()
/Users/zhangpan/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py:540 _disallow_iteration
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
/Users/zhangpan/tf2_workspace/tf2.0/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py:518 _disallow_when_autograph_enabled
" decorating it directly with #tf.function.".format(task))
OperatorNotAllowedInGraphError: iterating over `tf.Tensor` is not allowed: AutoGraph did not convert this function. Try decorating it directly with #tf.function.
I can't find any specifications about this error. I think the real reason is not "iterating over tf.Tensor is not allowed", because I can write it like this:
@tf.function
def square_if_positive(x):
    for i in x:
        if i > 0:
            tf.print(i**2)
        else:
            tf.print(i)

square_if_positive(tf.range(10))
I iterate over a tensor just like in the code above.
So my question is: what is the real reason for this error? Any suggestions will help me. I really can't understand this error even though I have read a lot of material.
The root cause is that autograph doesn't yet support list comprehensions (primarily because it's difficult to determine the dtype of the result in all cases)
As a workaround, you can use tf.map_fn for the comprehension:
return tf.map_fn(lambda i: i ** 2 if i > 0 else i, x)
For more information please take a look at this issue
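Another workaround, as a sketch, is to avoid per-element Python control flow altogether and use a vectorized op:
@tf.function
def square_if_positive(x):
    # tf.where selects element-wise between the two branches, so the tensor is never iterated in Python
    return tf.where(x > 0, x ** 2, x)

square_if_positive(tf.range(-5, 5))  # [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16]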
In case it helps someone: I had the same problem with code that did:
for index, image in enumerate(inputs):
    ... My code ...
The solution was just to do:
index = 0
for image in inputs:
    ... My code ...
    index += 1
I had a similar issue when using tf.range() instead of Python's range() for a list comprehension inside a TensorFlow graph function. I was training a 3D segmentation neural net and had to use range() for the code to work.
Check the pseudo-code below:
Y = ...          # [Batch, Height, Width, Depth, Channels]
y_predict = ...  # [B, H, W, D, C, MC_Runs]; MC_Runs = Monte Carlo runs

@tf.function
def train_loss(Y, y_predict):
    # calculate loss and return a scalar value
    ...

@tf.function
def train_step():
    loss = [train_loss(Y, y_predict[:, :, :, :, :, id_]) for id_ in range(MC_RUNS)]
    loss = tf.math.reduce_mean(loss)