I am writing a TensorFlow program for dogs vs. cats prediction. I got an error in this part of the code when I tried to execute this cell in a Jupyter notebook.
Below is the code:
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tensorflow as tf
tf.reset_default_graph()
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1],name='input')
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
The error for the above code is below (can anyone help resolve this error?):
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-6-2ef14dea0c38> in <module>()
----> 1 import tflearn
2 from tflearn.layers.conv import conv_2d, max_pool_2d
3 from tflearn.layers.core import input_data, dropout, fully_connected
4 from tflearn.layers.estimator import regression
5
/home/aravind/anaconda3/lib/python3.6/site-packages/tflearn/__init__.py in <module>()
19
20 # Predefined ops
---> 21 from .layers import normalization
22 from . import metrics
23 from . import activations
/home/aravind/anaconda3/lib/python3.6/site-packages/tflearn/layers/__init__.py in <module>()
8 from .normalization import batch_normalization, local_response_normalization
9 from .estimator import regression
---> 10 from .recurrent import lstm, gru, simple_rnn, bidirectional_rnn, \
11 BasicRNNCell, BasicLSTMCell, GRUCell
12 from .embedding_ops import embedding
/home/aravind/anaconda3/lib/python3.6/site-packages/tflearn/layers/recurrent.py in <module>()
6 import tensorflow as tf
7 from tensorflow.python.ops import array_ops
----> 8 from tensorflow.contrib.rnn.python.ops.core_rnn import static_rnn as _rnn, \
9 static_bidirectional_rnn as _brnn
10 from tensorflow.python.ops.rnn import rnn_cell_impl as _rnn_cell, \
ModuleNotFoundError: No module named 'tensorflow.contrib.rnn.python.ops.core_rnn'
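From the traceback, tflearn is trying to import tensorflow.contrib.rnn.python.ops.core_rnn, a module path that does not exist in the installed TensorFlow (tensorflow.contrib was reorganized across the 1.x releases and removed entirely in 2.x), so the installed tflearn and TensorFlow versions are incompatible. As one alternative that avoids tflearn entirely, here is a rough tf.keras sketch of the same convnet, assuming IMG_SIZE and LR are defined elsewhere in the notebook (this is a sketch, not the original tflearn code):
# Rough tf.keras sketch of the same architecture; assumes IMG_SIZE and LR exist.
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 5, activation='relu',
                           input_shape=(IMG_SIZE, IMG_SIZE, 1)),
    tf.keras.layers.MaxPooling2D(5),
    tf.keras.layers.Conv2D(64, 5, activation='relu'),
    tf.keras.layers.MaxPooling2D(5),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1024, activation='relu'),
    tf.keras.layers.Dropout(0.2),   # tflearn's dropout(0.8) keeps 80%, i.e. drops 20%
    tf.keras.layers.Dense(2, activation='softmax'),
])
model.compile(optimizer=tf.keras.optimizers.Adam(LR),
              loss='categorical_crossentropy',
              metrics=['accuracy'])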
Unable to import catboost in a Jupyter notebook. Getting an ImportError.
ImportError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_27572/4101664180.py in <module>
----> 1 import catboost as ctb
~\Anaconda3\Lib\site-packages\catboost\__init__.py in <module>
----> 1 from .core import (
2 FeaturesData, EFstrType, EShapCalcType, EFeaturesSelectionAlgorithm, Pool, CatBoost,
3 CatBoostClassifier, CatBoostRegressor, CatBoostRanker, CatBoostError, cv, train, sum_models, _have_equal_features,
4 to_regressor, to_classifier, to_ranker, MultiRegressionCustomMetric, MultiRegressionCustomObjective
5 ) # noqa
~\Anaconda3\Lib\site-packages\catboost\core.py in <module>
41 _typeof = type
42
---> 43 from .plot_helpers import save_plot_file, try_plot_offline
44 from . import _catboost
45 from .metrics import BuiltinMetric
~\Anaconda3\Lib\site-packages\catboost\plot_helpers.py in <module>
1 import warnings
2
----> 3 from . import _catboost
4 fspath = _catboost.fspath
5
ImportError: DLL load failed while importing _catboost: The specified module could not be found.
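The "DLL load failed while importing _catboost" message means Python found the catboost package but could not load its compiled extension. A common first check (an assumption, not a confirmed fix) is whether the notebook kernel is actually running from the same Anaconda environment the package was installed into:
# Hedged diagnostic sketch: confirm which interpreter the notebook kernel uses,
# since installing catboost into one environment and running the kernel from
# another is a common cause of this kind of import failure on Windows.
import sys
print(sys.executable)  # path of the Python interpreter running this kernel
print(sys.version)     # its version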
An error occurs when trying to execute a custom activation function: all the commands work until the last one, which hits an error.
TensorFlow version: 2.9.1
Keras version: 2.9.0
Thanks in advance.
The code
import tensorflow
from tensorflow.keras.datasets import mnist
from tensorflow.keras import backend as K
from keras.utils.generic_utils import get_custom_objects
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
# Custom activation function
def custom_activation(x):
    return K.cast(K.x**2) # I also tried the Square(x)
# Before creating the model, I update Keras' custom objects:
get_custom_objects().update({'custom_activation': Activation(custom_activation)})
# Model configuration
img_width, img_height = 28, 28
batch_size = 32
no_epochs = 5
no_classes = 10
verbosity = 1
# Load MNIST dataset
(input_train, target_train), (input_test, target_test) = mnist.load_data()
# Reshape data
input_train = input_train.reshape(input_train.shape[0], img_width, img_height, 1)
input_test = input_test.reshape(input_test.shape[0], img_width, img_height, 1)
input_shape = (img_width, img_height, 1)
# Parse numbers as floats
input_train = input_train.astype('float32')
input_test = input_test.astype('float32')
# Normalize data: [0, 1].
input_train = input_train / 255
input_test = input_test / 255
# Convert target vectors to categorical targets
target_train = tensorflow.keras.utils.to_categorical(target_train, no_classes)
target_test = tensorflow.keras.utils.to_categorical(target_test, no_classes)
# Create the model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation=custom_activation, input_shape=((input_shape))))
The Error
When trying to execute the following line:
model.add(Conv2D(32, kernel_size=(3, 3), activation=custom_activation, input_shape=((input_shape))))
This error appears:
AttributeError: Exception encountered when calling layer "conv2d_4" (type Conv2D).
module 'keras.api._v2.keras.backend' has no attribute 'x'
Call arguments received by layer "conv2d_4" (type Conv2D):
• inputs=tf.Tensor(shape=(None, 28, 28, 1), dtype=float32)
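The error comes from the body of custom_activation: the backend module K has no attribute x, so K.x fails the first time the layer calls the activation. Assuming the intent is to square the input tensor, a minimal sketch is:
# Minimal sketch, assuming the intended activation is the square of the input;
# operate on the argument x, not on the backend module K.
def custom_activation(x):
    return K.square(x)   # equivalently: x ** 2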
I created a function build_model to tune hyperparameters. However, the function fails to create the objects within it, such as the rlr object (ReduceLROnPlateau). I know the function has run because I tested it by inserting some print statements. Why are the objects in the function not being created?
NameError: name 'rlr' is not defined
#error:
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-34-00e7981884ae> in <module>()
56 validation_freq=1,
57 epochs=1, #run 1 EPOCH TRIAL FIRST! originally 50
---> 58 callbacks=[rlr,ckpt,es])
59
60 # save weights
NameError: name 'rlr' is not defined
#My Code:
from tensorflow.keras.callbacks import ReduceLROnPlateau,ModelCheckpoint,EarlyStopping
from keras.models import Sequential
from tensorflow import keras
from tensorflow.keras.applications import EfficientNetB0
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from kerastuner.tuners import RandomSearch
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras import layers  # needed for the layers.Dense calls below
model_fn = EfficientNetB0(include_top=False, input_shape= (224,224,3), pooling='avg') # , we
def build_model(hp):
    model = keras.Sequential()
    model.add(model_fn)
    #for i in range(hp.Int('num_layers', 2, 20)):
    model.add(layers.Dense(units=hp.Int('units_' + str(i),
                                        min_value=32,
                                        max_value=512,
                                        step=32),
                           activation='relu'))
    model.add(keras.layers.Dropout(0.4))
    model.add(layers.Dense(2, activation='linear'))
    model.summary()
    patience = hp.Int('patience', 1, 3, default=1)
    callbacks = tf.keras.callbacks.ReduceLROnPlateau(patience=patience)
    rlr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                            patience=5, min_lr=0.00001, min_delta=0.001)
    ckpt = ModelCheckpoint('models/checkpoint_female', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20, min_delta=0.0001)
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
        loss='mean_squared_error',
        metrics=['mean_absolute_error'])
    return model

tuner = RandomSearch(
    build_model,
    objective='val_mean_absolute_error',
    max_trials=2, #5
    executions_per_trial=2, #3
    directory='tuner',
    project_name='Tuner Output')
tuner.search_space_summary()
tuner.search(train_generator_F, steps_per_epoch=200, epochs=2, validation_data=valid_generator_F)
TModel = tuner.get_best_models(num_models=1)[0]
#summary of best model
TModel.summary()
history = TModel.fit_generator(generator=train_generator_F,
                               steps_per_epoch=STEP_SIZE_TRAIN_F,
                               validation_data=valid_generator_F,
                               validation_steps=STEP_SIZE_VALID_F,
                               validation_freq=1,
                               epochs=1,
                               callbacks=[rlr,ckpt,es])
TModel.save_weights('models/TunedEnet100v1.h5')
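Regarding the NameError itself: rlr, ckpt, and es are created inside build_model, so they are local to that function and disappear when it returns; the fit_generator call at module level cannot see them. A minimal sketch of one workaround, assuming the same callbacks are wanted for the final fit, is to also define them at module level before calling fit_generator:
# Minimal sketch (assumption: the same callbacks should be reused for the final fit).
# Names defined inside build_model are local to it; defining them here, at module
# level, makes them visible to the fit_generator call.
rlr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                        patience=5, min_lr=0.00001, min_delta=0.001)
ckpt = ModelCheckpoint('models/checkpoint_female', monitor='val_loss',
                       verbose=1, save_best_only=True, mode='min')
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
                   patience=20, min_delta=0.0001)
# ...then callbacks=[rlr, ckpt, es] in fit_generator resolves without a NameError.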
I am new to PyTorch and neural networks in general. I was trying to implement the ResNet-50 model from torchvision on the CIFAR-10 dataset.
import torchvision
import torch
import torch.nn as nn
from torch import optim
import os
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt
transformations=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))])
trainset=torchvision.datasets.CIFAR10(root='./CIFAR10',download=True,transform=transformations,train=True)
testset=torchvision.datasets.CIFAR10(root='./CIFAR10',download=True,transform=transformations,train=False)
trainloader=DataLoader(dataset=trainset,batch_size=4)
testloader=DataLoader(dataset=testset,batch_size=4)
inputs,labels=next(iter(trainset))
inputs.size()
resnet=torchvision.models.resnet50(pretrained=True)
if torch.cuda.is_available():
    resnet = resnet.cuda()
    inputs, labels = inputs.cuda(), torch.Tensor(labels).cuda()
outputs=resnet(inputs)
OUTPUT
--------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-904acb410fe4> in <module>()
6 inputs,labels=inputs.cuda(),torch.Tensor(labels).cuda()
7
----> 8 outputs=resnet(inputs)
5 frames
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight)
344 _pair(0), self.dilation, self.groups)
345 return F.conv2d(input, weight, self.bias, self.stride,
--> 346 self.padding, self.dilation, self.groups)
347
348 def forward(self, input):
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [64, 3, 7, 7], but got 3-dimensional input of size [3, 32, 32] instead
Is there a problem with the dataset for some reason, and if not, how do I give a 4-dimensional input? Is the torchvision implementation of ResNet-50 not usable for CIFAR-10?
Currently you are iterating over the dataset, which is why you are getting a (3-dimensional) single image. You actually need to iterate over the dataloader to get a 4-dimensional image batch. Therefore, you just need to change the following line:
inputs,labels=next(iter(trainset))
to
inputs,labels=next(iter(trainloader))
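As a quick sanity check (with the batch_size=4 used above), the batch that comes out of the dataloader is already 4-dimensional:
# Sanity check: the DataLoader yields a batch with a leading batch dimension.
inputs, labels = next(iter(trainloader))
print(inputs.size())   # torch.Size([4, 3, 32, 32])
print(labels.size())   # torch.Size([4]); labels is already a tensor here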
I am new to transfer learning and CNNs. I was just playing around with a CNN and got this error. I tried many solutions, but none of them work.
import numpy as np
import keras
from keras import backend as k
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import GlobalMaxPooling2D
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Model
from keras.applications import imagenet_utils
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
%matplotlib inline
mobile = keras.applications.mobilenet.MobileNet()
#mobile.summary()
train_path = 'chest_xray/train'
val_path = 'chest_xray/val'
test_path = 'chest_xray/test'
train_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    train_path,
    target_size=(224,224),
    batch_size=10)
test_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    test_path,
    target_size=(224,224),
    batch_size=10,
    shuffle=False)
val_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    val_path,
    target_size=(224,224),
    batch_size=10)
def prepare_image(file):
    image_path = ''
    img = image.load_img(image_path+file, target_size=(224,224))
    img_array = image.img_to_array(img)
    img_array_dims = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(img_array_dims)
x = mobile.layers[-60].output
predictions = Dense(1,activation='softmax')(x)
model = Model(inputs = mobile.input,outputs = predictions)
print(mobile.input)
#model.summary()
for layer in model.layers[:-5]:
    layer.trainable = False
model.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_batch,
                    steps_per_epoch=4,
                    validation_data=val_batch,
                    validation_steps=2,
                    epochs=30)
I am using MobileNet for transfer learning, and the error appears every time. None of the solutions seems to work. I tried playing with Flatten() followed by 2D max pooling, but no results.
ERROR:
ValueError Traceback (most recent call last)
<ipython-input-187-08820ea8d15a> in <module>()
3 validation_data=val_batch,
4 validation_steps=2,
----> 5 epochs = 30)
ValueError: Error when checking target: expected dense_39 to have 4 dimensions, but got array with shape (10, 2)
The MobileNet layer at which you are chopping off (-60) is conv_dw_5_relu, which has output dimensions (None, 28, 28, 256). So you will have to flatten it before connecting a Dense layer to it.
Working code
mobile = keras.applications.mobilenet.MobileNet()
x = mobile.layers[-60].output
x = Flatten()(x)
predictions = Dense(2,activation='softmax')(x)
model = Model(inputs = mobile.input,outputs = predictions)
#model.summary()
model.compile(Adam(lr=.0001),loss='categorical_crossentropy',metrics=['accuracy'])
model.fit(np.random.rand(10, 224, 224, 3), np.random.rand(10,2))
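Since GlobalMaxPooling2D is already imported in the question, a pooling layer is an alternative to Flatten here; a sketch of the same fix with pooling instead (the (None, 28, 28, 256) feature map is reduced to (None, 256) before the Dense layer):
# Alternative sketch: GlobalMaxPooling2D (already imported above) instead of Flatten.
x = mobile.layers[-60].output
x = GlobalMaxPooling2D()(x)                      # (None, 28, 28, 256) -> (None, 256)
predictions = Dense(2, activation='softmax')(x)
model = Model(inputs=mobile.input, outputs=predictions)
model.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy'])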