An error occurs when I try to execute a custom activation function. All the commands work until the last one, which raises an error!
TensorFlow version: 2.9.1
Keras version: 2.9.0
Thanks in advance.
The code
import tensorflow
from tensorflow.keras.datasets import mnist
from tensorflow.keras import backend as K
from keras.utils.generic_utils import get_custom_objects
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
# Custom activation function
def custom_activation(x):
    return K.cast(K.x**2)  # I also tried the Square(x)
# Before creating the model, I update Keras' custom objects:
get_custom_objects().update({'custom_activation': Activation(custom_activation)})
# Model configuration
img_width, img_height = 28, 28
batch_size = 32
no_epochs = 5
no_classes = 10
verbosity = 1
# Load MNIST dataset
(input_train, target_train), (input_test, target_test) = mnist.load_data()
# Reshape data
input_train = input_train.reshape(input_train.shape[0], img_width, img_height, 1)
input_test = input_test.reshape(input_test.shape[0], img_width, img_height, 1)
input_shape = (img_width, img_height, 1)
# Parse numbers as floats
input_train = input_train.astype('float32')
input_test = input_test.astype('float32')
# Normalize data: [0, 1].
input_train = input_train / 255
input_test = input_test / 255
# Convert target vectors to categorical targets
target_train = tensorflow.keras.utils.to_categorical(target_train, no_classes)
target_test = tensorflow.keras.utils.to_categorical(target_test, no_classes)
# Create the model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation=custom_activation, input_shape=((input_shape))))
The Error
When trying to execute the following line:
model.add(Conv2D(32, kernel_size=(3, 3), activation=custom_activation, input_shape=((input_shape))))
This error appears:
AttributeError: Exception encountered when calling layer "conv2d_4" (type Conv2D).
module 'keras.api._v2.keras.backend' has no attribute 'x'
Call arguments received by layer "conv2d_4" (type Conv2D):
• inputs=tf.Tensor(shape=(None, 28, 28, 1), dtype=float32)
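The traceback states the problem directly: K is the Keras backend module and has no attribute x, so K.x**2 fails before anything is squared (and K.cast would also need a dtype argument). A minimal sketch of the likely intent, squaring the input tensor per the Square(x) comment, is to operate on the function's own argument:
# a sketch, assuming the goal is f(x) = x^2
def custom_activation(x):
    return K.square(x)  # equivalently: x ** 2
get_custom_objects().update({'custom_activation': Activation(custom_activation)})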
Related
I tried running an example of SHAP DeepExplainer from this link using the Titanic dataset. This is the code from the example:
# import package
import shap
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras import optimizers
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
import os
# load data
os.chdir('/titanic/')
train_data = pd.read_csv('./train.csv', index_col=0)
test_data = pd.read_csv('./test.csv', index_col=0)
train_data.head()
def data_preprocessing(df):
    df = df.drop(columns=['Name', 'Ticket', 'Cabin'])
    # fill na
    df[['Age']] = df[['Age']].fillna(value=df[['Age']].mean())
    df[['Embarked']] = df[['Embarked']].fillna(value=df['Embarked'].value_counts().idxmax())
    df[['Fare']] = df[['Fare']].fillna(value=df[['Fare']].mean())
    # categorical features into numeric
    df['Sex'] = df['Sex'].map({'female': 1, 'male': 0}).astype(int)
    # one-hot encoding
    embarked_one_hot = pd.get_dummies(df['Embarked'], prefix='Embarked')
    df = df.drop('Embarked', axis=1)
    df = df.join(embarked_one_hot)
    return df
# train data processing
train_data = data_preprocessing(train_data)
train_data.isnull().sum()
# create data for training
x_train = train_data.drop(['Survived'], axis=1).values
# Check test data
test_data.isnull().sum()
# scale
scale = StandardScaler()
x_train = scale.fit_transform(x_train)
# prepare y_train
y_train = train_data['Survived'].values
test_data = data_preprocessing(test_data)
x_test = test_data.values.astype(float)
# scaling
x_test = scale.transform(x_test)
# Check test data
test_data.isnull().sum()
# build mlp
model = Sequential()
model.add(Dense(32, input_dim=x_train.shape[1], activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(8, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(2, activation='softmax'))
# compile model
model.compile(loss='sparse_categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
# fit model
model.fit(x_train, y_train, epochs=10, batch_size=64)
# compute SHAP values
explainer = shap.DeepExplainer(model, x_train)
shap_values = explainer.shap_values(x_test)
shap.summary_plot(shap_values[0], plot_type = 'bar', feature_names = test_data.columns)
shap.initjs()
shap.force_plot(explainer.expected_value[0].numpy(), shap_values[0][0], features = test_data.columns)
shap.decision_plot(explainer.expected_value[0].numpy(), shap_values[0][0], features = test_data.iloc[0,:], feature_names = test_data.columns.tolist())
shap.plots._waterfall.waterfall_legacy(explainer.expected_value[0].numpy(), shap_values[0][0], feature_names = test_data.columns)
There is no code for generating a beeswarm plot in the example, but I used
shap.summary_plot(shap_values[0], feature_names = test_data.columns)
and got a beeswarm plot. From my understanding, the color of the dots displays the original value of each feature, falling along a gradient of blue to red. However, the plot I got only has blue dots and doesn't have a gradient ruler on the side.
Here is the plot I got:
And here is what I expected (photo from https://shap.readthedocs.io/en/latest/example_notebooks/api_examples/plots/beeswarm.html):
Any suggestions on what could have caused this and what I can do to get the colors would be greatly appreciated. Thank you!
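One hedged guess, based on the shap.summary_plot API rather than this exact notebook: the dots are colored by feature value only when the feature matrix itself is passed as the second (features) argument; given shap_values alone, the plot has nothing to map onto the blue-to-red gradient. Passing the scaled test matrix may bring the colors back:
# sketch: supply the feature matrix so SHAP can color each dot by its feature value
shap.summary_plot(shap_values[0], x_test, feature_names = test_data.columns)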
I am new to transfer learning and CNNs, and was just playing around with a CNN when I got this error. I tried many solutions but none of them works.
import numpy as np
import keras
from keras import backend as k
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import GlobalMaxPooling2D
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Model
from keras.applications import imagenet_utils
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
%matplotlib inline
mobile = keras.applications.mobilenet.MobileNet()
#mobile.summary()
train_path = 'chest_xray/train'
val_path = 'chest_xray/val'
test_path = 'chest_xray/test'
train_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    train_path,
    target_size=(224, 224),
    batch_size=10)
test_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    test_path,
    target_size=(224, 224),
    batch_size=10,
    shuffle=False)
val_batch = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    val_path,
    target_size=(224, 224),
    batch_size=10)
def prepare_image(file):
    image_path = ''
    img = image.load_img(image_path + file, target_size=(224, 224))
    img_array = image.img_to_array(img)
    img_array_dims = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(img_array_dims)
x = mobile.layers[-60].output
predictions = Dense(1,activation='softmax')(x)
model = Model(inputs = mobile.input,outputs = predictions)
print(mobile.input)
#model.summary()
for layer in model.layers[:-5]:
    layer.trainable = False
model.compile(Adam(lr=.0001),loss='categorical_crossentropy',metrics=['accuracy'])
model.fit_generator(train_batch,
                    steps_per_epoch=4,
                    validation_data=val_batch,
                    validation_steps=2,
                    epochs=30)
I am using MobileNet for transfer learning and the error appears every time. None of the solutions seems to work; I tried playing with Flatten() and GlobalMaxPooling2D(), but no results.
ERROR:
ValueError Traceback (most recent call last)
<ipython-input-187-08820ea8d15a> in <module>()
3 validation_data=val_batch,
4 validation_steps=2,
----> 5 epochs = 30)
ValueError: Error when checking target: expected dense_39 to have 4 dimensions, but got array with shape (10, 2)
The MobileNet layer at which you are chopping off (-60) is conv_dw_5_relu, which has output dimensions (None, 28, 28, 256). So you will have to flatten it before connecting a Dense layer to it.
Working code
mobile = keras.applications.mobilenet.MobileNet()
x = mobile.layers[-60].output
x = Flatten()(x)
predictions = Dense(2,activation='softmax')(x)
model = Model(inputs = mobile.input,outputs = predictions)
#model.summary()
model.compile(Adam(lr=.0001),loss='categorical_crossentropy',metrics=['accuracy'])
model.fit(np.random.rand(10, 224, 224, 3), np.random.rand(10,2))
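Since the question mentions trying a pooling layer as well, here is an equivalent head using GlobalMaxPooling2D, offered as a sketch under the same setup: it collapses the 28x28 spatial grid into a 256-vector instead of flattening to 28*28*256 features, so the Dense layer has far fewer weights.
x = mobile.layers[-60].output
x = GlobalMaxPooling2D()(x)  # (None, 256) instead of (None, 200704)
predictions = Dense(2, activation='softmax')(x)
model = Model(inputs=mobile.input, outputs=predictions)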
Using the code below, I'm attempting to encode images from MNIST into a lower-dimensional representation:
import warnings
warnings.filterwarnings('ignore')
import datetime
from ast import literal_eval
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.preprocessing import MultiLabelBinarizer, scale
import seaborn as sns
sns.set_style("darkgrid")
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
%matplotlib inline
low_dim_rep = 32
epochs = 2
cuda = torch.cuda.is_available() # True if cuda is available, False otherwise
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
print('Training on %s' % ('GPU' if cuda else 'CPU'))
# Loading the MNIST data set
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
                                            torchvision.transforms.Normalize((0.1307,), (0.3081,))])
mnist = torchvision.datasets.MNIST(root='../data/', train=True, transform=transform, download=True)
# Loader to feed the data batch by batch during training.
batch = 100
data_loader = torch.utils.data.DataLoader(mnist, batch_size=batch, shuffle=True)
encoder = nn.Sequential(
    # Encoder
    nn.Linear(28 * 28, 64),
    nn.PReLU(64),
    nn.BatchNorm1d(64),
    # Low-dimensional representation
    nn.Linear(64, low_dim_rep),
    nn.PReLU(low_dim_rep),
    nn.BatchNorm1d(low_dim_rep))
decoder = nn.Sequential(
    # Decoder
    nn.Linear(low_dim_rep, 64),
    nn.PReLU(64),
    nn.BatchNorm1d(64),
    nn.Linear(64, 28 * 28))
autoencoder = nn.Sequential(encoder, decoder)
encoder = encoder.type(FloatTensor)
decoder = decoder.type(FloatTensor)
autoencoder = autoencoder.type(FloatTensor)
optimizer = torch.optim.Adam(params=autoencoder.parameters(), lr=0.00001)
data_size = int(mnist.train_labels.size()[0])
print('data_size' , data_size)
for i in range(epochs):
    for j, (images, _) in enumerate(data_loader):
        images = images.view(images.size(0), -1)  # from (batch, 1, 28, 28) to (batch, 784)
        images = Variable(images).type(FloatTensor)
        autoencoder.zero_grad()
        reconstructions = autoencoder(images)
        loss = torch.dist(images, reconstructions)
        loss.backward()
        optimizer.step()
    print('Epoch %i/%i loss %.2f' % (i + 1, epochs, loss.data[0]))
print('Optimization finished.')
# Get the encoded images here
encoded_images = []
for j, (images, _) in enumerate(data_loader):
    images = images.view(images.size(0), -1)
    images = Variable(images).type(FloatTensor)
    encoded_images.append(encoder(images))
Upon completion of this code,
len(encoded_images) is 600, while I expect the length to match the number of images in MNIST: len(mnist) = 60,000.
How do I encode the images into a lower-dimensional representation of size 32 (low_dim_rep = 32)? Have I defined the network parameters incorrectly?
You have 60,000 images in MNIST and your batch = 100. That is why len(encoded_images) = 600: generating the encoded images takes 60000/100 = 600 iterations, so you end up with a list of 600 elements, each of shape [100, 32]. You can do the following instead:
encoded_images = torch.zeros(len(mnist), 32)
for j, (images, _) in enumerate(data_loader):
    images = images.view(images.size(0), -1)
    images = Variable(images).type(FloatTensor)
    encoded_images[j * batch : (j + 1) * batch] = encoder(images)
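Equivalently, the batches can be collected and concatenated in one go; on newer PyTorch versions it is also worth disabling gradient tracking while encoding. A sketch, not part of the original answer:
with torch.no_grad():  # no gradients are needed for inference-only encoding
    encoded_images = torch.cat([encoder(images.view(images.size(0), -1).type(FloatTensor))
                                for images, _ in data_loader])
print(encoded_images.shape)  # expected: torch.Size([60000, 32])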
Using the Keras DL library with a TensorFlow backend, I'm attempting to extract features from an intermediate layer of VGG-16 to perform binary classification on my task of interest.
The data set contains 1000 training samples (500 per class) and 200 test samples (100 per class). I'm training a small fully connected model on top of the extracted features. On running the code, I can see that train_data has shape (10000, 6, 6, 512), validation_data is (2000, 6, 6, 512) (float32), train_labels is (1000, 2), and validation_labels is (200, 2) (int32).
Here is the code:
########################load libraries#########################################
import numpy as np
import time
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model
from keras import applications
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
#########################image characteristics#################################
img_rows=100 #dimensions of image, to be varied suiting the input requirements of the pre-trained model
img_cols=100
channel = 3 #RGB
num_classes = 2
batch_size = 10
nb_epoch = 10
###############################################################################
''' This code uses VGG-16 as a feature extractor'''
feature_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(img_rows, img_cols, 3))
#get the model summary
feature_model.summary()
#extract feature from the intermediate layer
feature_model = Model(input=feature_model.input, output=feature_model.get_layer('block5_conv2').output)
#get the model summary
feature_model.summary()
#declaring image data generators
train_datagen = ImageDataGenerator()
val_datagen = ImageDataGenerator()
generator = train_datagen.flow_from_directory(
    'f1_100/train',
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)
train_data = feature_model.predict_generator(generator, 1000)
train_labels = np.array([[1, 0]] * 500 + [[0, 1]] * 500)
generator = val_datagen.flow_from_directory(
    'f1_100/test',
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)
validation_data = feature_model.predict_generator(generator, 200)
validation_labels = np.array([[1,0]] * 100 + [[0,1]] * 100)
###############################################################################
#addding the top layers and training them on the extracted features
from keras.models import Sequential
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print('-'*30)
print('Start Training the top layers on the extracted features...')
print('-'*30)
#measure the time and train the model
t=time.time()
hist = model.fit(train_data, train_labels, nb_epoch=nb_epoch, batch_size=batch_size,
                 validation_data=(validation_data, validation_labels),
                 verbose=2)
#print the history of the trained model
print(hist.history)
print('Training time: %s' % (time.time()-t))
###############################################################################
However, on running the code, I'm getting the following error:
Traceback (most recent call last):
  File "<ipython-input-14-cc5b1b34cc67>", line 46, in <module>
    verbose=2)
  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\models.py", line 960, in fit
    validation_steps=validation_steps)
  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1581, in fit
    batch_size=batch_size)
  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1426, in _standardize_user_data
    _check_array_lengths(x, y, sample_weights)
  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 250, in _check_array_lengths
    'and ' + str(list(set_y)[0]) + ' target samples.')
ValueError: Input arrays should have the same number of samples as target arrays. Found 10000 input samples and 1000 target samples.
You see, you have a batch_size of 10.
feature_model.predict_generator() draws steps batches (1000 in your case) of batch_size (10) samples each, so a total of 1000 * 10 = 10000 training samples are generated.
But in the next line, you declare only 1000 labels (500 [1, 0]s and 500 [0, 1]s).
So you have two options:
1) Either change the steps in predict_generator() like this (which I believe is what you want, to generate 1000 samples in train and 200 samples in validation):
train_data = feature_model.predict_generator(generator, 100)
validation_data = feature_model.predict_generator(generator, 20)
2) Or you can change the numbers in labels:
train_labels = np.array([[1, 0]] * 5000 + [[0, 1]] * 5000)
validation_labels = np.array([[1,0]] * 1000 + [[0,1]] * 1000)
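Either way, a quick sanity check before calling fit catches this class of mismatch early; this is a sketch, not part of the original code:
# input and target sample counts must agree before fitting
assert train_data.shape[0] == train_labels.shape[0], (train_data.shape, train_labels.shape)
assert validation_data.shape[0] == validation_labels.shape[0], (validation_data.shape, validation_labels.shape)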
I have trained LeNet for MNIST using Caffe and now I would like to export this model to be used within Keras.
To this end I tried to extract weights from caffe.Net and use them to initialize Keras's network. However, I received different predictions from the two models. So I have tried to debug them layer by layer, starting with the first one. The code I have tested is as follows:
# import caffe and load caffe.Net from prototxt and caffemodel files
import sys
sys.path.append('/opt/caffe/python')
import caffe
net = caffe.Net('lenet_train_test.prototxt', 'save/mnist_iter_500000.caffemodel', caffe.TEST)
# this should be the kernel weights and bias for the first convolution layer 'conv1'
c1_w = net.params['conv1'][0].data
c1_b = net.params['conv1'][1].data
# import Keras and build a convolution layer using the same parameters as in lenet_train_test.prototxt and instantiate it with the weights above
import keras
from keras.layers import Convolution2D
conv1 = Convolution2D(20, 5, 5, border_mode='valid', input_shape=(1, 28, 28), weights=[c1_w,c1_b], activation='linear')
from keras.models import Sequential
model=Sequential()
model.add(conv1)
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# load MNIST data and do scaling like I did when training Caffe model
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
X_test = X_test.astype('float32')
X_test *= 0.00392157
# set the first test example and get the prediction
net.blobs['data'].data[0] = X_test[0][0]
net.forward()
out0 = net.blobs['conv1'].data[0] # this is the prediction for 20 kernels
m0 = out0[0] # just consider the first one
m0.shape # >>> (24,24)
# pass the same example through the conv1 layer in Keras model
import numpy as np
a = np.zeros( (1,1,28,28) )
a[0] = X_test[0]
out1 = model.predict_on_batch(a) # this is the prediction for 20 kernels
m1 = out1[0][0] # just consider the first one
m1.shape # >>> (24,24) the same size
# I get lots of 'False'
m0 == m1
Have I done anything wrong in the layer construction, or do Caffe and Keras implement Convolution2D differently?
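Two things are worth checking here, offered as assumptions rather than a verified diagnosis. First, exact floating-point equality via == will almost always report False even for matching implementations, so np.allclose is the better comparison. Second, Caffe computes cross-correlation while a Theano-backed Keras Convolution2D performs true convolution, so the Caffe kernels may need to be flipped along both spatial axes before being handed to Keras:
import numpy as np
# flip the Caffe kernels spatially (assumes Theano (out, in, h, w) ordering);
# .copy() avoids handing Keras a negative-stride view
c1_w_flipped = c1_w[:, :, ::-1, ::-1].copy()
conv1 = Convolution2D(20, 5, 5, border_mode='valid', input_shape=(1, 28, 28),
                      weights=[c1_w_flipped, c1_b], activation='linear')
# rebuild the model, recompute m1, then compare with a tolerance instead of ==
print(np.allclose(m0, m1, atol=1e-5))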