I am working on an RNN text classifier. After training, I get high accuracy on the test set, but when I make predictions on external data the model performs very poorly. I also tried the same data set, which has over 300,000 texts and 57 classes, with a plain feed-forward neural network, and it still predicts very poorly on external data; a classical machine-learning model on the same data works fine.
Here is my code:
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, LSTM, BatchNormalization
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
df = pd.read_excel("data.xlsx", usecols=["X", "y"])
df = df.sample(frac = 1)
X = np.array(df["X"])
y = np.array(df["y"])
le = LabelEncoder()
y = le.fit_transform(y)
y = y.reshape(-1,1)
encoder = OneHotEncoder(sparse=False)
y = encoder.fit_transform(y)
num_words = 100000
token = Tokenizer(num_words=num_words)
token.fit_on_texts(X)
seq = token.texts_to_sequences(X)
X = sequence.pad_sequences(seq, padding = "pre", truncating = "pre")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = Sequential()
model.add(Embedding(num_words, 96, input_length = X.shape[1]))
model.add(LSTM(108, activation='relu', dropout=0.1, recurrent_dropout = 0.2))
model.add(BatchNormalization())
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer="rmsprop", metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train, epochs=4, batch_size=64, validation_data = (X_test, y_test))
loss, accuracy = model.evaluate(X_test, y_test)
Here are the history plots of the model:
After doing some research, I realized that the model was actually working fine; the problem was that I was using the Keras Tokenizer incorrectly.
At the end of the code, I had the following:
sentence = ["Example Sentence to Make Prediction."]
token.fit_on_texts(sentence) # <- This line is the problem.
seq = token.texts_to_sequences(sentence)
cx = sequence.pad_sequences(seq, maxlen = X.shape[1])
sx = np.argmax(model.predict(cx), axis=1)
The problem occurs when the Tokenizer is fitted again on the new data: re-fitting rebuilds the word index, so the integer sequences no longer match the vocabulary the Embedding layer saw during training. Removing that line solved the problem for me.
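For completeness, here is a minimal inference sketch that reuses the already-fitted tokenizer (token, le, model, and the padded matrix X all come from the code above):
# Inference: reuse the tokenizer and label encoder fitted on the training data.
sentence = ["Example Sentence to Make Prediction."]
seq = token.texts_to_sequences(sentence)              # no fit_on_texts here
cx = sequence.pad_sequences(seq, maxlen=X.shape[1])
class_index = np.argmax(model.predict(cx), axis=1)
# The one-hot columns follow the LabelEncoder's integer order,
# so the argmax maps straight back to the original label.
print(le.inverse_transform(class_index))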
I am trying to use a custom Keras loss function that, apart from the usual signature (y_true, y_pred), takes another parameter sigma (which is also produced by the last layer of the network).
The training works fine, but I am not sure how to perform forward propagation and return sigma as well (mu is what the model.predict method returns).
This is the code I am using, which features a custom layer GaussianLayer that returns the list [mu, sigma].
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Dense, Layer, Dropout
from keras.models import Model
from keras.initializers import glorot_normal
import numpy as np
def custom_loss(sigma):
    def gaussian_loss(y_true, y_pred):
        return tf.reduce_mean(0.5*tf.log(sigma) + 0.5*tf.div(tf.square(y_true - y_pred), sigma)) + 10
    return gaussian_loss

class GaussianLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(GaussianLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel_1 = self.add_weight(name='kernel_1',
                                        shape=(30, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.kernel_2 = self.add_weight(name='kernel_2',
                                        shape=(30, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.bias_1 = self.add_weight(name='bias_1',
                                      shape=(self.output_dim, ),
                                      initializer=glorot_normal(),
                                      trainable=True)
        self.bias_2 = self.add_weight(name='bias_2',
                                      shape=(self.output_dim, ),
                                      initializer=glorot_normal(),
                                      trainable=True)
        super(GaussianLayer, self).build(input_shape)

    def call(self, x):
        output_mu = K.dot(x, self.kernel_1) + self.bias_1
        output_sig = K.dot(x, self.kernel_2) + self.bias_2
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        return [output_mu, output_sig_pos]

    def compute_output_shape(self, input_shape):
        return [(input_shape[0], self.output_dim), (input_shape[0], self.output_dim)]
# This returns a tensor
inputs = Input(shape=(1,))
x = Dense(30, activation='relu')(inputs)
x = Dropout(0.3)(x)
x = Dense(30, activation='relu')(x)
x = Dense(40, activation='relu')(x)
x = Dropout(0.3)(x)
x = Dense(30, activation='relu')(x)
mu, sigma = GaussianLayer(1)(x)
model = Model(inputs, mu)
model.compile(loss=custom_loss(sigma), optimizer='adam')
model.fit(train_x, train_y, epochs=150)
Since your model returns two tensors as output, you also need to pass a list of two arrays as the targets when calling the fit() method. That's essentially what the error is trying to convey:
Error when checking model target: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 2 array(s), but instead got the following list of 1 arrays:
So the error is in the targets (i.e. labels): the model expects two target arrays but receives only one.
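In other words, a model built on both outputs expects one target array per output. A hypothetical sketch (train_y_mu and train_y_sigma are placeholder target arrays, and the loss is a placeholder; neither is part of the original code):
# Hypothetical sketch: a two-output model needs one target array per output.
two_output_model = Model(inputs, [mu, sigma])
two_output_model.compile(loss='mean_squared_error', optimizer='adam')  # placeholder loss
two_output_model.fit(train_x, [train_y_mu, train_y_sigma], epochs=150)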
I may have found the answer in the Keras FAQ.
It turns out it is possible to retrieve the output of an intermediate layer using the code snippet below:
layer_name = 'main_output'
intermediate_layer_model = Model(inputs=model.input,
                                 outputs=model.get_layer(layer_name).output)
intermediate_output = intermediate_layer_model.predict(train_x[0])
intermediate_output
In this case intermediate_output is a list of two arrays, [mu, sigma]; I just needed to give the output layer the name main_output and retrieve it later.
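Putting the pieces together with the model definition from the question, a minimal sketch (the only assumption is that test_x is some new input data):
# Name the custom layer when building the model so it can be looked up later.
mu, sigma = GaussianLayer(1, name='main_output')(x)
model = Model(inputs, mu)
model.compile(loss=custom_loss(sigma), optimizer='adam')
model.fit(train_x, train_y, epochs=150)

# After training, build a second model that exposes both outputs of that layer.
prediction_model = Model(inputs=model.input,
                         outputs=model.get_layer('main_output').output)
mu_pred, sigma_pred = prediction_model.predict(test_x)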
I’m trying to generate time-series data with an LSTM and a Mixture Density Network as described in https://arxiv.org/pdf/1308.0850.pdf
Here is a link to my implementation: https://github.com/NeoVand/MDNLSTM
The repository contains a toy dataset to train the network.
During training, the LSTM layer returns NaN for its hidden state after one iteration. A similar issue is reported here.
For your convenience, here is the code:
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
ts = torch.load('LDS_Toy_Data.pt')
def detach(states):
    return [state.detach() for state in states]

class MDNLSTM(nn.Module):
    def __init__(self, d_obs, d_lat=2, n_gaussians=2, n_layers=1):
        super(MDNLSTM, self).__init__()
        self.d_obs = d_obs
        self.d_lat = d_lat
        self.n_gaussians = n_gaussians
        self.n_layers = n_layers
        self.lstm = nn.LSTM(d_obs, d_lat, n_layers, batch_first=True)
        self.fcPi = nn.Linear(d_lat, n_gaussians*d_obs)
        self.fcMu = nn.Linear(d_lat, n_gaussians*d_obs)
        self.fcSigma = nn.Linear(d_lat, n_gaussians*d_obs)

    def get_mixture_coef(self, y):
        time_steps = y.size(1)
        pi, mu, sigma = self.fcPi(y), self.fcMu(y), self.fcSigma(y)
        pi = pi.view(-1, time_steps, self.n_gaussians, self.d_obs)
        mu = mu.view(-1, time_steps, self.n_gaussians, self.d_obs)
        sigma = sigma.view(-1, time_steps, self.n_gaussians, self.d_obs)
        pi = F.softmax(pi, 2)
        sigma = torch.exp(sigma)
        return pi, mu, sigma

    def forward(self, x, h):
        y, (h, c) = self.lstm(x, h)
        #print(h)
        pi, mu, sigma = self.get_mixture_coef(y)
        return (pi, mu, sigma), (h, c)

    def init_hidden(self, bsz):
        return (torch.zeros(self.n_layers, bsz, self.d_lat).to(device),
                torch.zeros(self.n_layers, bsz, self.d_lat).to(device))

def mdn_loss_fn(y, pi, mu, sigma):
    m = torch.distributions.Normal(loc=mu, scale=sigma)
    loss = torch.exp(m.log_prob(y))
    loss = torch.sum(loss * pi, dim=2)
    loss = -torch.log(loss)
    return loss.mean()

def criterion(y, pi, mu, sigma):
    y = y.unsqueeze(2)
    return mdn_loss_fn(y, pi, mu, sigma)
DOBS = 10
DLAT = 2
INSTS = 100
seqlen = 30
epochs = 200
mdnlstm = MDNLSTM(DOBS, DLAT).to(device)
optimizer = torch.optim.Adam(mdnlstm.parameters())
z = torch.from_numpy(ts[:INSTS,:,:]).float().to(device)
# hiddens=[]
# Train the model
for epoch in range(epochs):
    # Set initial hidden and cell states
    hidden = mdnlstm.init_hidden(INSTS)
    for i in range(0, z.size(1) - seqlen, seqlen):
        # Get mini-batch inputs and targets
        inputs = z[:, i:i+seqlen, :]
        targets = z[:, (i+1):(i+1)+seqlen, :]
        hidden = detach(hidden)
        # hiddens.append(hidden)
        (pi, mu, sigma), hidden = mdnlstm(inputs, hidden)
        loss = criterion(targets, pi, mu, sigma)
        mdnlstm.zero_grad()
        loss.backward()
        optimizer.step()
    if epoch % 100 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch, epochs, loss.item()))
I would appreciate any help on this.
The issue was caused by the log-sum-exp operation not being done in a numerically stable way: the naive loss exponentiates the component log-densities, sums them, and then takes the log, so when all the densities underflow to zero the log produces -inf and the gradients become NaN. Here is an implementation of a weighted log-sum-exp trick that I used and that fixed the problem:
def weighted_logsumexp(x, w, dim=None, keepdim=False):
    if dim is None:
        x, dim = x.view(-1), 0
    xm, _ = torch.max(x, dim, keepdim=True)
    x = torch.where(
        # to prevent nasty nan's
        (xm == float('inf')) | (xm == float('-inf')),
        xm,
        xm + torch.log(torch.sum(torch.exp(x - xm)*w, dim, keepdim=True)))
    return x if keepdim else x.squeeze(dim)
and, using that, the stable loss function:
def mdn_loss_stable(y, pi, mu, sigma):
    m = torch.distributions.Normal(loc=mu, scale=sigma)
    m_lp_y = m.log_prob(y)
    loss = -weighted_logsumexp(m_lp_y, pi, dim=2)
    return loss.mean()
This worked like a charm. In general, the problem is that torch does not report underflows.
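The stable loss then simply replaces the original one in the training loop; a minimal sketch, assuming the names from the code above:
def criterion_stable(y, pi, mu, sigma):
    # broadcast the target over the mixture dimension, as the original criterion does
    y = y.unsqueeze(2)
    return mdn_loss_stable(y, pi, mu, sigma)

# inside the training loop, replace
#     loss = criterion(targets, pi, mu, sigma)
# with
#     loss = criterion_stable(targets, pi, mu, sigma)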
The Sklearn documentation contains an example of a polynomial regression which beautifully illustrates the idea of overfitting (link).
The third plot shows a 15th order polynomial that overfits the simulated data. I replicated this model in TensorFlow, but I cannot get it to overfit.
Even when tuning the learning rate and the numbers of learning epochs, I cannot get the model to overfit. What am I missing?
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def true_fun(X):
return np.cos(1.5 * np.pi * X)
# Generate dataset
n_samples = 30
np.random.seed(0)
x_train = np.sort(np.random.rand(n_samples)) # Draw from uniform distribution
y_train = true_fun(x_train) + np.random.randn(n_samples) * 0.1
x_test = np.linspace(0, 1, 100)
y_true = true_fun(x_test)
# Helper function
def run_dir(base_dir, dirname='run'):
    "Number log directories incrementally"
    import os
    import re
    pattern = re.compile(dirname + r'_(\d+)')
    try:
        previous_runs = os.listdir(base_dir)
    except FileNotFoundError:
        previous_runs = []
    run_number = 0
    for name in previous_runs:
        match = pattern.search(name)
        if match:
            number = int(match.group(1))
            if number > run_number:
                run_number = number
    run_number += 1
    logdir = os.path.join(base_dir, dirname + '_%02d' % run_number)
    return logdir
# Define the polynomial model
def model(X, w):
    """Polynomial model.

    param X: data
    param w: coefficients of the polynomial regression
    returns: polynomial function Y(X, w)
    """
    terms = []
    for i in range(int(w.shape[0])):
        term = tf.multiply(w[i], tf.pow(X, i))
        terms.append(term)
    return tf.add_n(terms)
# Create the computation graph
order = 15
tf.reset_default_graph()
X = tf.placeholder("float")
Y = tf.placeholder("float")
w = tf.Variable([0.]*order, name="parameters")
lambda_reg = tf.placeholder('float', shape=[])
learning_rate_ph = tf.placeholder('float', shape=[])
y_model = model(X, w)
loss = tf.div(tf.reduce_mean(tf.square(Y-y_model)), 2) # Square error
loss_rg = tf.multiply(lambda_reg, tf.reduce_sum(tf.square(w))) # L2 penalty
loss_total = tf.add(loss, loss_rg)
loss_hist1 = tf.summary.scalar('loss', loss)
loss_hist2 = tf.summary.scalar('loss_rg', loss_rg)
loss_hist3 = tf.summary.scalar('loss_total', loss_total)
summary = tf.summary.merge([loss_hist1, loss_hist2, loss_hist3])
train_op = tf.train.GradientDescentOptimizer(learning_rate_ph).minimize(loss_total)
init = tf.global_variables_initializer()
def train(sess, x_train, y_train, lambda_val=0, epochs=2000, learning_rate=0.01):
    feed_dict = {X: x_train, Y: y_train, lambda_reg: lambda_val, learning_rate_ph: learning_rate}
    logdir = run_dir("logs/polynomial_regression2/")
    writer = tf.summary.FileWriter(logdir)
    sess.run(init)
    for epoch in range(epochs):
        _, summary_str = sess.run([train_op, summary], feed_dict=feed_dict)
        writer.add_summary(summary_str, global_step=epoch)
    final_cost, final_cost_rg, w_learned = sess.run([loss, loss_rg, w], feed_dict=feed_dict)
    return final_cost, final_cost_rg, w_learned
def plot_test(w_learned, x_test, x_train, y_train):
    y_learned = calculate_y(x_test, w_learned)
    plt.scatter(x_train, y_train)
    plt.plot(x_test, y_true, label="true function")
    plt.plot(x_test, y_learned, 'r', label="learned function")
    #plt.title('$\lambda = {:03.2f}$'.format(lambda_values[i]))
    plt.ylabel('y')
    plt.xlabel('x')
    plt.legend()
    plt.show()

def calculate_y(x, w):
    y = 0
    for i in range(w.shape[0]):
        y += w[i] * np.power(x, i)
    return y
sess = tf.Session()
final_cost, final_cost_rg, w_learned = train(sess, x_train, y_train, lambda_val=0,
                                             learning_rate=0.3, epochs=2000)
sess.close()
plot_test(w_learned, x_test, x_train, y_train)
I have the same problem: when I do polynomial regression, I also cannot get the model to overfit the data using gradient descent in TensorFlow.
I then compared the coefficients (weights) of the model with those obtained from sklearn's LinearRegression, and found that when the polynomial degree is high, the high-order coefficients are very small (e.g. around 1e-4) while the low-order coefficients are relatively large (e.g. around 0.1).
That means that when you use gradient descent to search for the best weight values, the high-order coefficients are extremely sensitive to changes in value while the low-order ones are not, so a single step size does not suit both.
My guess is that in the solution that overfits the data, the low-order coefficients are large and the high-order ones are tiny. With a large learning rate it is impossible to land on the right values, and with a tiny learning rate you need a huge number of iterations.
This shows up most clearly when you try to overfit a small data set with gradient descent.
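For reference, a sketch of the comparison described above, assuming the x_train and y_train arrays from the question (PolynomialFeatures plus LinearRegression give the closed-form least-squares fit):
# Closed-form degree-15 polynomial fit, to inspect the scale of the coefficients.
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

poly = PolynomialFeatures(degree=15, include_bias=True)
X_poly = poly.fit_transform(x_train.reshape(-1, 1))

lr = LinearRegression(fit_intercept=False)
lr.fit(X_poly, y_train)

# Compare the magnitudes of the low- and high-order coefficients
# (the observation described in the answer above).
print(lr.coef_)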
Just to practise with Keras, I am trying to train a network to learn a very easy function.
The input of the network is two-dimensional and the output is one-dimensional.
The function can be represented with an image, and so can the approximated function.
At the moment I am not looking for good generalization; I just want the network to be at least good at representing the training set.
Here is my code:
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
import random as rnd
import math
m = [
[1,1,1,1,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,1,1],
[1,0,0,0,1,1,0,1,0,0],
[1,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,1],
[0,0,1,0,1,1,0,0,0,1],
[1,1,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,1,1,1,1]] #A representation of the function that I would like to approximize
matrix = np.matrix(m)
evaluation = np.zeros((100,100))
x_train = np.zeros((10000,2))
y_train = np.zeros((10000,1))
for x in range(0,100):
    for y in range(0,100):
        x_train[x+100*y,0] = x/100. #I normalize the input of the function, between [0,1)
        x_train[x+100*y,1] = y/100.
        y_train[x+100*y,0] = matrix[int(x/10),int(y/10)] +0.0
#Here I show graphically what I would like to have
plt.matshow(matrix, interpolation='nearest', cmap=plt.cm.ocean, extent=(0,1,0,1))
#Here I built the model
model = Sequential()
model.add(Dense(20, input_dim=2, init='uniform'))
model.add(Activation('tanh'))
model.add(Dense(1, init='uniform'))
model.add(Activation('sigmoid'))
#Here I train it
sgd = SGD(lr=0.5)
model.compile(loss='mean_squared_error', optimizer=sgd)
model.fit(x_train, y_train,
nb_epoch=100,
batch_size=100,
show_accuracy=True)
#Here (I'm not sure), I'm using the network over the given example
x = model.predict(x_train,batch_size=1)
#Here I show the approximated function
print(x)
print(x_train)
for i in range(0, 10000):
    evaluation[int(x_train[i,0]*100),int(x_train[i,1]*100)] = x[i]
plt.matshow(evaluation, interpolation='nearest', cmap=plt.cm.ocean, extent=(0,1,0,1))
plt.colorbar()
plt.show()
As you can see, the two functions are completely different, and I can't understand why.
I think that maybe model.predict doesn't work as I expect.
Your understanding is correct; it's just a question of hyperparameter tuning.
I just tried your code, and it looks like you're not giving your training enough time:
Look at the loss: after 100 epochs it's stuck at around 0.23. But try using the 'adam' optimizer instead of SGD and increase the number of epochs to 10,000: the loss decreases to 0.09 and your picture looks much better.
If it's still not precise enough for you, you may also want to increase the number of parameters: just add a few layers; this will make overfitting much easier! :-)
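A minimal sketch of that change against the original script (keeping the old nb_epoch argument used by the Keras version in the question):
# Replace the SGD compile/fit lines with the Adam optimizer and many more epochs.
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, nb_epoch=10000, batch_size=100)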
I changed only your network structure and used a denser training dataset (a 1000x1000 grid instead of 100x100). The loss decreases to 0.01.
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
import random as rnd
import math
from keras.optimizers import Adam,SGD
m = [
[1,1,1,1,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,1,1],
[1,0,0,0,1,1,0,1,0,0],
[1,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,1],
[0,0,1,0,1,1,0,0,0,1],
[1,1,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,1,1,1,1]] #A representation of the function that I would like to approximize
matrix = np.matrix(m)
evaluation = np.zeros((1000,1000))
x_train = np.zeros((1000000,2))
y_train = np.zeros((1000000,1))
for x in range(0,1000):
    for y in range(0,1000):
        x_train[x+1000*y,0] = x/1000. #I normalize the input of the function, between [0,1)
        x_train[x+1000*y,1] = y/1000.
        y_train[x+1000*y,0] = matrix[int(x/100),int(y/100)] +0.0
#Here I show graphically what I would like to have
plt.matshow(matrix, interpolation='nearest', cmap=plt.cm.ocean, extent=(0,1,0,1))
#Here I built the model
model = Sequential()
model.add(Dense(50, input_dim=2, init='uniform')) # init='uniform' initializes the weights from a uniform distribution
model.add(Activation('tanh'))
model.add(Dense(20, init='uniform'))
model.add(Activation('tanh'))
model.add(Dense(1, init='uniform'))
model.add(Activation('sigmoid'))
#Here I train it
#sgd = SGD(lr=0.01)
adam = Adam(lr = 0.01)
model.compile(loss='mean_squared_error', optimizer=adam)
model.fit(x_train, y_train,
nb_epoch=100,
batch_size=100,
show_accuracy=True)
#Here (I'm not sure), I'm using the network over the given example
x = model.predict(x_train,batch_size=1)
#Here I show the approximated function
print (x)
print (x_train)
for i in range(0, 1000000):
    evaluation[int(x_train[i,0]*1000),int(x_train[i,1]*1000)] = x[i]
plt.matshow(evaluation, interpolation='nearest', cmap=plt.cm.ocean, extent=(0,1,0,1))
plt.colorbar()
plt.show()