The built-in VGG16 network in MxNet is not working - deep-learning

I would like to test the trained built-in VGG16 network in MxNet. The experiment is to feed the network with an image from ImageNet. Then, I would like to see whether the result is correct.
However, the results are always wrong! How stupid can the network be? Well, that cannot be true; I must be doing something wrong.
from mxnet.gluon.model_zoo.vision import vgg16
from mxnet.image import color_normalize
import mxnet as mx
import numpy as np
import cv2
path='http://data.mxnet.io/models/imagenet-11k/'
data_dir = 'F:/Temps/Models_tmp/'
k = 'synset.txt'
#gluon.utils.download(path+k, data_dir+k)
img_dir = 'F:/Temps/DataSets/ImageNet/'
img = cv2.imread(img_dir + 'cat.jpg')
img = mx.nd.array(img)
img,_ = mx.image.center_crop(img,(224,224))
img = img/255
img = color_normalize(img,mean=mx.nd.array([0.485, 0.456, 0.406]),std=mx.nd.array([0.229, 0.224, 0.225]))
img = mx.nd.transpose(img, axes=(2, 0, 1))
img = img.expand_dims(axis=0)
with open(data_dir + 'synset.txt', 'r') as f:
    labels = [l.rstrip() for l in f]
aVGG = vgg16(pretrained=True, root='F:/Temps/Models_tmp/')
features = aVGG.forward(img)
features = mx.ndarray.softmax(features)
features = features.asnumpy()
features = np.squeeze(features)
a = np.argsort(features)[::-1]
for i in a[0:5]:
    print('probability=%f, class=%s' % (features[i], labels[i]))
The outputs from color_normalize do not seem right, since the absolute values of some numbers are greater than one.
This is my picture of a cat, downloaded from ImageNet.
These are my outputs:
probability=0.218258, class=n01519563 cassowary
probability=0.172373, class=n01519873 emu, Dromaius novaehollandiae, Emu novaehollandiae
probability=0.128973, class=n01521399 rhea, Rhea americana
probability=0.105253, class=n01518878 ostrich, Struthio camelus
probability=0.051424, class=n01517565 ratite, ratite bird, flightless bird

Reading your code:
path='http://data.mxnet.io/models/imagenet-11k/'
I think you might be using the synset of the ImageNet 11k (11000 classes) rather than the 1k (1000) classes. That would explain the mismatch.
The correct synset is here: http://data.mxnet.io/models/imagenet/synset.txt
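As a quick sanity check, here is a minimal sketch (reusing your local data_dir; adjust the path as needed) that downloads the 1k synset and confirms it has 1000 entries, matching the model's output dimension, before re-running your top-5 loop:
import mxnet as mx

data_dir = 'F:/Temps/Models_tmp/'   # same local folder as in the question
url = 'http://data.mxnet.io/models/imagenet/synset.txt'
mx.gluon.utils.download(url, data_dir + 'synset_1k.txt', overwrite=True)

with open(data_dir + 'synset_1k.txt', 'r') as f:
    labels = [l.rstrip() for l in f]

print(len(labels))   # should print 1000 for the Gluon model zoo VGG16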

Related

How to increase Emotion Detection Validation Accuracy on VGG16 model? [Transfer Learning]

import pandas as pd
import numpy as np
import keras
import tensorflow
from keras.models import Model
from keras.layers import Dense
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="path",target_size=(224,224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="path", target_size=(224,224))
from keras.applications.vgg16 import VGG16
vggmodel = VGG16(weights='imagenet', include_top=True)
vggmodel.summary()
for layers in (vggmodel.layers)[:19]:
    print(layers)
    layers.trainable = False
#flatten_out = tensorflow.keras.layers.Flatten()(vggmodel.output)
#fc1 = tensorflow.keras.layers.Dense(units=4096,activation="relu")(flatten_out)
#fc2 = tensorflow.keras.layers.Dense(units=4096,activation="relu")(fc1)
#fc3 = tensorflow.keras.layers.Dense(units=256,activation="relu")(fc2)
#predictions = tensorflow.keras.layers.Dense(units=3, activation="softmax")(fc3)
X= vggmodel.layers[-2].output
predictions = Dense(units=3, activation="softmax")(X)
model_final = Model(vggmodel.input, predictions)
model_final.compile(loss = "categorical_crossentropy", optimizer = optimizers.SGD(lr=0.001, momentum=0.9), metrics=["accuracy"])
model_final.summary()
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=40, verbose=1, mode='auto')
model_final.fit_generator(generator= traindata, steps_per_epoch= 95, epochs= 100, validation_data= testdata, validation_steps=7, callbacks=[checkpoint,early])
I am classifying emotion into positive, negative, and neutral.
I am using a VGG16 transfer learning model.
However, I am still not getting better validation accuracy.
Things I've tried:
increasing the amount of training data
layers.trainable = False/True
learning rate: 0.0001, 0.001, 0.01
activation function: relu/softmax
batch size: 64
optimizers: adam/sgd
loss function: categorical_crossentropy / sparse_categorical_crossentropy
momentum: 0.09 / 0.9
Also, I tried converting my dataset to grayscale, and somehow it gave better accuracy than the previous color images, but it is still not satisfactory.
I also changed my code and added dropout layers, but still no progress.
I tried the FER2013 dataset, and it gave me pretty decent accuracy.
These are the results on the FER dataset:
accuracy: 0.9997 - val_accuracy: 0.7105
But on my own dataset (which is pretty good), validation accuracy does not increase beyond 66%.
What else can I do to increase val_accuracy?
I think your model is more complex than necessary. I would remove the fc1 and fc2 layers, include regularization in the fc3 layer, and add a dropout layer after fc3. In your early-stopping callback, change patience to 4. I also recommend the Keras ReduceLROnPlateau callback. Full recommendations are in the code below.
from keras import regularizers
from keras.layers import Dropout
from keras.callbacks import ReduceLROnPlateau
# keep only one fully connected layer (the regularized fc3) plus dropout on top of
# the VGG16 feature vector, then the 3-class softmax
X = vggmodel.layers[-2].output
fc3 = Dense(units=256, kernel_regularizer=regularizers.l2(l=0.016),
            activity_regularizer=regularizers.l1(0.006),
            bias_regularizer=regularizers.l1(0.006), activation='relu')(X)
x = Dropout(rate=.4, seed=123)(fc3)
predictions = Dense(units=3, activation="softmax")(x)
model_final = Model(vggmodel.input, predictions)
model_final.compile(loss="categorical_crossentropy",
                    optimizer=optimizers.SGD(lr=0.001, momentum=0.9),
                    metrics=["accuracy"])
rlronp = ReduceLROnPlateau(monitor='val_loss', factor=0.4, patience=2, verbose=0, mode='auto')
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=4, verbose=1, mode='auto')
callbacks = [rlronp, checkpoint, early]
model_final.fit_generator(generator=traindata, steps_per_epoch=95, epochs=100,
                          validation_data=testdata, validation_steps=7, callbacks=callbacks)
I do not like VGG: it is a very large model, and it is a bit old and slow. I think you will get better and faster results using EfficientNet models; EfficientNetB3 should work fine.
If you want to try that, get rid of all the VGG code and use:
import tensorflow as tf
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adamax

lr = .001
class_count = 3            # positive, negative, neutral
img_size = (256, 256)
img_shape = img_size + (3,)
base_model = tf.keras.applications.efficientnet.EfficientNetB3(include_top=False,
                                                               weights="imagenet",
                                                               input_shape=img_shape,
                                                               pooling='max')
base_model.trainable = True
x = base_model.output
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
x = Dense(256, kernel_regularizer=regularizers.l2(l=0.016),
          activity_regularizer=regularizers.l1(0.006),
          bias_regularizer=regularizers.l1(0.006), activation='relu')(x)
x = Dropout(rate=.4, seed=123)(x)
output = Dense(class_count, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=output)
model.compile(Adamax(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy'])
NOTE: EfficientNet models expect pixels in the range 0 to 255, so don't rescale the pixels. Also note that I make the base model trainable. They tell you NOT to do that, but in many experiments I find that training the base model from the outset leads to faster convergence and a lower net validation loss.
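Below is a rough sketch of how the model above could be trained; the directory paths, epoch count, and generator settings are just placeholders for illustration, and it reuses the model compiled above with raw 0 to 255 pixels (no rescale argument):
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_size = (256, 256)
# no rescale argument: EfficientNet expects raw pixel values
traindata = ImageDataGenerator().flow_from_directory('path/to/train', target_size=img_size)
testdata = ImageDataGenerator().flow_from_directory('path/to/valid', target_size=img_size)

rlronp = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.4, patience=2)
early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=4,
                                         restore_best_weights=True)

history = model.fit(traindata, validation_data=testdata, epochs=40,
                    callbacks=[rlronp, early])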

How to normalize pytorch model output to be in range [0,1]

Let's say I have a model called UNet
output = UNet(input)
That output is a batch of grayscale images with shape (batch_size, 1, 128, 128).
What I want to do is to normalize each image to be in range [0,1].
I did it like this:
for i in range(batch_size):
    output[i,:,:,:] = output[i,:,:,:] / torch.amax(output, dim=(1,2,3))[i]
Now every image in the output is normalized, but when I train such a model, PyTorch claims it cannot calculate the gradients in this procedure, and I understand why.
My question is: what is the right way to normalize the images without killing the backpropagation flow?
something like
output = UNet(input)
output = output.normalize
output2 = some_model(output)
loss = ..
loss.backward()
optimize.step()
My only option right now is adding a sigmoid activation at the end of the UNet, but I don't think it's a good idea.
Update: code (gen2 and disc are the UNet and discriminator models; est_bias is some output).
Update 2: code:
with torch.no_grad():
    est_bias_for_disc = gen2(input_img)
    est_bias_for_disc /= est_bias_for_disc.amax(dim=(1,2,3), keepdim=True)
disc_fake_hat = disc(est_bias_for_disc.detach())
disc_fake_loss = BCE(disc_fake_hat, torch.zeros_like(disc_fake_hat))
disc_real_hat = disc(bias_ref)
disc_real_loss = BCE(disc_real_hat, torch.ones_like(disc_real_hat))
disc_loss = (disc_fake_loss + disc_real_loss) / 2
if epoch <= epochs_till_gen2_stop:
    disc_loss.backward(retain_graph=True) # Update gradients
    opt_disc.step() # Update optimizer
Then there's separate training:
opt_gen2.zero_grad()
est_bias = gen2(input_img)
est_bias /= est_bias.amax(dim=(1,2,3), keepdim=True)
disc_fake = disc(est_bias)
ADV_loss = BCE(disc_fake, torch.ones_like(disc_fake))
gen2_loss = ADV_loss
gen2_loss.backward()
opt_gen2.step()
You can use the normalize function:
>>> import torch
>>> import torch.nn.functional as F
>>> x = torch.tensor([[3.,4.],[5.,6.],[7.,8.]])
>>> x = F.normalize(x, dim = 0)
>>> print(x)
tensor([[0.3293, 0.3714],
        [0.5488, 0.5571],
        [0.7683, 0.7428]])
This will give a differentiable tensor as long as the out argument is not used.
You are overwriting the tensor's value because of the indexing on the batch dimension. Instead, you can perform the operation in vectorized form:
output = output / output.amax(dim=(1,2,3), keepdim=True)
The keepdim=True argument keeps the number of dimensions of torch.Tensor.amax's output equal to that of its input, so the division broadcasts correctly over each image in the batch.
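As a small sketch to confirm the vectorized scaling keeps the computation graph intact (the random tensor below just stands in for UNet(input)):
import torch

output = torch.rand(4, 1, 128, 128, requires_grad=True)   # stand-in for UNet(input)
normalized = output / output.amax(dim=(1, 2, 3), keepdim=True)

print(normalized.grad_fn is not None)   # True: the division is differentiable
normalized.sum().backward()             # backward runs without error
print(output.grad is not None)          # True: gradients reach the model output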

How to feed an LSTM/GRU model multiple independent Time Series?

To explain it simply: I have measurements from 53 oil-producing wells. Each well has been measured every day for 6 years, and we recorded multiple variables (pressure, water production, gas production, etc.); the main component (the one we want to study and forecast) is the oil production rate. How can I use all the data to train my LSTM/GRU model, knowing that the oil wells are independent and that the measurements were taken at the same time for each one?
The knowledge that "the measurements were taken at the same time for each [well]" is not necessary if you want to assume that the wells are independent. (Why do you think that knowledge is useful?)
So if the wells are considered independent, treat them as individual samples. Split them into a training set, validation set, and test set, as usual. Train a usual LSTM or GRU on the training set.
By the way, you might want to use the attention mechanism instead of recurrent networks. It is easier to train and usually yields comparable results.
Even convolutional networks might be good enough. See methods like WaveNet if you suspect long-range correlations.
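Here is a minimal sketch of that first idea in Keras; the array names, the 30-day window, and the split into training and validation by whole wells are assumptions for illustration, and the random array stands in for your real measurements:
import numpy as np
from keras.models import Sequential
from keras.layers import GRU, Dense

n_wells, n_days, n_features = 53, 6 * 365, 5          # last feature = oil production rate
wells = np.random.rand(n_wells, n_days, n_features)   # placeholder for the real data

def make_windows(well_array, window=30):
    X, y = [], []
    for well in well_array:                   # each well is an independent sample
        for t in range(window, well.shape[0]):
            X.append(well[t - window:t, :])   # all variables over the preceding window
            y.append(well[t, -1])             # next-day oil production rate
    return np.array(X), np.array(y)

train_wells, val_wells = wells[:43], wells[43:]        # hold out whole wells for validation
X_train, y_train = make_windows(train_wells)
X_val, y_val = make_windows(val_wells)

model = Sequential([GRU(64, input_shape=(30, n_features)), Dense(1)])
model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=5, batch_size=128)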
These well measurements sound like specific and independent events. I work in the finance sector. We always look at different stocks, and each stock's specific time series, using LSTM, but not 10 stocks mashed up together. Here's some code to analyze a specific stock. Modify the code to suit your needs.
from pandas_datareader import data as wb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
from sklearn.preprocessing import MinMaxScaler
start = '2019-06-30'
end = '2020-06-30'
tickers = ['GOOG']
thelen = len(tickers)
price_data = []
for ticker in tickers:
    prices = wb.DataReader(ticker, start = start, end = end, data_source='yahoo')[['Open','Adj Close']]
    price_data.append(prices.assign(ticker=ticker)[['ticker', 'Open', 'Adj Close']])
#names = np.reshape(price_data, (len(price_data), 1))
df = pd.concat(price_data)
df.reset_index(inplace=True)
for col in df.columns:
    print(col)
#used for setting the output figure size
rcParams['figure.figsize'] = 20,10
#to normalize the given input data
scaler = MinMaxScaler(feature_range=(0, 1))
#to read input data set (place the file name inside ' ') as shown below
df['Adj Close'].plot()
plt.legend(loc=2)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
ntrain = 80
df_train = df.head(int(len(df)*(ntrain/100)))
ntest = -80
# df.tail() with a negative n keeps all rows except the first |n|, i.e. the last ~20% here
df_test = df.tail(int(len(df)*(ntest/100)))
#importing the packages
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
#dataframe creation
seriesdata = df.sort_index(ascending=True, axis=0)
new_seriesdata = pd.DataFrame(index=range(0,len(df)),columns=['Date','Adj Close'])
length_of_data=len(seriesdata)
for i in range(0,length_of_data):
    new_seriesdata['Date'][i] = seriesdata['Date'][i]
    new_seriesdata['Adj Close'][i] = seriesdata['Adj Close'][i]
#setting the index again
new_seriesdata.index = new_seriesdata.Date
new_seriesdata.drop('Date', axis=1, inplace=True)
#creating train and test sets; this comprises the entire data present in the dataset
myseriesdataset = new_seriesdata.values
totrain = myseriesdataset[0:255,:]
tovalid = myseriesdataset[255:,:]
#converting dataset into x_train and y_train
scalerdata = MinMaxScaler(feature_range=(0, 1))
scale_data = scalerdata.fit_transform(myseriesdataset)
x_totrain, y_totrain = [], []
length_of_totrain=len(totrain)
for i in range(60,length_of_totrain):
    x_totrain.append(scale_data[i-60:i,0])
    y_totrain.append(scale_data[i,0])
x_totrain, y_totrain = np.array(x_totrain), np.array(y_totrain)
x_totrain = np.reshape(x_totrain, (x_totrain.shape[0],x_totrain.shape[1],1))
#LSTM neural network
lstm_model = Sequential()
lstm_model.add(LSTM(units=50, return_sequences=True, input_shape=(x_totrain.shape[1],1)))
lstm_model.add(LSTM(units=50))
lstm_model.add(Dense(1))
lstm_model.compile(loss='mean_squared_error', optimizer='adadelta')
lstm_model.fit(x_totrain, y_totrain, epochs=10, batch_size=1, verbose=2)
#predicting next data stock price
myinputs = new_seriesdata[len(new_seriesdata) - (len(tovalid)+1) - 60:].values
myinputs = myinputs.reshape(-1,1)
myinputs = scalerdata.transform(myinputs)
tostore_test_result = []
for i in range(60,myinputs.shape[0]):
    tostore_test_result.append(myinputs[i-60:i,0])
tostore_test_result = np.array(tostore_test_result)
tostore_test_result = np.reshape(tostore_test_result,(tostore_test_result.shape[0],tostore_test_result.shape[1],1))
myclosing_priceresult = lstm_model.predict(tostore_test_result)
myclosing_priceresult = scalerdata.inverse_transform(myclosing_priceresult)
totrain = df_train
tovalid = df_test
#predicting next data stock price
myinputs = new_seriesdata[len(new_seriesdata) - (len(tovalid)+1) - 60:].values
# Printing the next day's predicted stock price
print(len(tostore_test_result))
print(myclosing_priceresult)
Final result:
1
[[1396.532]]

Train Caffe CNN to output multidimensional features

I'd like to build a feature extractor using Caffe's CNNs and I already have a large sample of input features and desired output features.
Now I need to train some convolutional layers to learn how to transform the input features into the output.
My question is: How can I achieve this on Caffe?
As a minimal example, suppose I wanted to train a CNN that inverts the values of a 2D array.
For example, if my input is
[[0,1,0],
[1,1,1],
[0,1,0]]
the CNN should output
[[1,0,1],
[0,0,0],
[1,0,1]].
For
[[0,0,0],
[0,1,0],
[0,0,0]]
the output should be
[[1,1,1],
[1,0,1],
[1,1,1]]
and so on.
Of course this is just a minimal example to share, the actual problem is nearly impossible to tackle without the use of multiple convolutions.
I was able to create this code for this problem. I used the Euclidean Loss at the end, but unfortunately the CNN is not learning anything.
ROOT_DIR = '/home'
from os.path import join
import numpy as np
import h5py
from itertools import product
import caffe
from caffe import layers
from caffe.proto import caffe_pb2
#%% GENERATE DATA
data_in = np.array([np.array(seq).reshape(1,3,3) for seq in product([0,1], repeat=9)])
data_out = np.array([-1*array+1 for array in data_in])
with open(join(ROOT_DIR, 'data.txt'), 'w') as ftxt:
    with h5py.File(join(ROOT_DIR, 'data.hdf5'), 'w') as fhdf5:
        fhdf5['data'] = data_in.astype(np.float32)
        fhdf5['label'] = data_out.astype(np.float32)
    ftxt.write(join(ROOT_DIR, 'data.hdf5'))
#%%DEFINE NET
net = caffe.NetSpec()
net.data, net.label = layers.HDF5Data(batch_size=64, source=join(ROOT_DIR, 'data.txt'), ntop=2)
net.conv1 = layers.Convolution(net.data, kernel_size=1, num_output=128)
net.relu1 = layers.ReLU(net.conv1, in_place=True)
net.conv2 = layers.Convolution(net.relu1, kernel_size=1, num_output=1)
net.relu2 = layers.ReLU(net.conv2, in_place=True)
net.loss = layers.EuclideanLoss(net.relu2, net.label)
net.to_proto()
with open(join(ROOT_DIR, 'invert_net.prototxt'), 'w') as f:
    f.write(str(net.to_proto()))
#%% DEFINE SOLVER
solver = caffe_pb2.SolverParameter()
solver.train_net = join(ROOT_DIR, 'invert_net.prototxt')
solver.max_iter = 10000
solver.base_lr = 0.01
solver.lr_policy = 'fixed'
with open(join(ROOT_DIR, 'solver.prototxt'), 'w') as f:
    f.write(str(solver))
#%% TRAIN NET
caffe.set_mode_cpu()
solver = caffe.SGDSolver(join(ROOT_DIR, 'solver.prototxt'))
solver.solve()

FiPy Simple Convection

I am trying to understand how FiPy works by working through an example. In particular, I would like to solve the following simple convection equation with periodic boundary conditions:
$$\partial_t u + \partial_x u = 0$$
If initial data is given by $u(x, 0) = F(x)$, then the analytical solution is $u(x, t) = F(x - t)$. I do get a solution, but it is not correct.
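(A quick sanity check on that claim: with $u(x,t) = F(x-t)$ we get $\partial_t u = -F'(x-t)$ and $\partial_x u = F'(x-t)$, so $\partial_t u + \partial_x u = 0$ as required.)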
What am I missing? Is there a better resource for understanding FiPy than the documentation? It is very sparse...
Here is my attempt
from fipy import *
import numpy as np
# Generate mesh
nx = 20
dx = 2*np.pi/nx
mesh = PeriodicGrid1D(nx=nx, dx=dx)
# Generate solution object with initial discontinuity
phi = CellVariable(name="solution variable", mesh=mesh)
phiAnalytical = CellVariable(name="analytical value", mesh=mesh)
phi.setValue(1.)
phi.setValue(0., where=mesh.x > 1.)
# Define the pde
D = [[-1.]]
eq = TransientTerm() == ConvectionTerm(coeff=D)
# Set discretization so analytical solution is exactly one cell translation
dt = 0.01*dx
steps = 2*int(dx/dt)
# Set the analytical value at the end of simulation
phiAnalytical.setValue(np.roll(phi.value, 1))
for step in range(steps):
    eq.solve(var=phi, dt=dt)
print(phi.allclose(phiAnalytical, atol=1e-1))
As addressed on the FiPy mailing list, FiPy is not great at handling convection-only PDEs (pure hyperbolic, with no diffusion term), as it is missing higher-order convection schemes. It is better to use CLAWPACK for this class of problem.
FiPy does have one second order scheme that might help with this problem, the VanLeerConvectionTerm, see an example.
If the VanLeerConvectionTerm is used in the above problem, it does do a better job of preserving the shock.
import numpy as np
import fipy
# Generate mesh
nx = 20
dx = 2*np.pi/nx
mesh = fipy.PeriodicGrid1D(nx=nx, dx=dx)
# Generate solution object with initial discontinuity
phi = fipy.CellVariable(name="solution variable", mesh=mesh)
phiAnalytical = fipy.CellVariable(name="analytical value", mesh=mesh)
phi.setValue(1.)
phi.setValue(0., where=mesh.x > 1.)
# Define the pde
D = [[-1.]]
eq = fipy.TransientTerm() == fipy.VanLeerConvectionTerm(coeff=D)
# Set discretization so analytical solution is exactly one cell translation
dt = 0.01*dx
steps = 2*int(dx/dt)
# Set the analytical value at the end of simulation
phiAnalytical.setValue(np.roll(phi.value, 1))
viewer = fipy.Viewer(phi)
for step in range(steps):
    eq.solve(var=phi, dt=dt)
    viewer.plot()
raw_input('stopped')
print(phi.allclose(phiAnalytical, atol=1e-1))