I am solving a magnetostatic problem using a PINN (physics-informed neural network).
I have succeeded in solving a simple Poisson equation. However, in the analysis that takes the geometry into account, I found that the interface condition loss does not converge.
I've tried numerous things, including adding mini-batches and changing the model.
I'd appreciate it if you could let me know what's wrong with my code.
# imports inferred from the usage below
import numpy as np
import scipy.io
import scipy.optimize
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from time import time
class ironmaxwell(Model):
def __init__(self):
super(ironmaxwell, self).__init__()
initializer = tf.keras.initializers.GlorotUniform
self.id1 = tf.keras.layers.Dropout(rate=0.2)
self.ih1 = Dense(40, activation='elu', kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.id2 = tf.keras.layers.Dropout(rate=0.2)
self.ih2 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.id3 = tf.keras.layers.Dropout(rate=0.2)
self.ih3 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.id4 = tf.keras.layers.Dropout(rate=0.2)
self.ih4 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.id5 = tf.keras.layers.Dropout(rate=0.2)
self.ih5 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.id6 = tf.keras.layers.Dropout(rate=0.2)
self.ih6 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.iu1 = Dense(40, activation='linear',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.iw1 = Dense(40, activation='linear',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.iu = Dense(1, activation='linear')
def call(self, state):
ix = self.id1(state)
iy = self.iu1(state)
iz = self.iw1(state)
ix = (1-self.ih1(ix))*iy + self.ih1(ix)*iz
ix = self.id2(ix)
ix = (1-self.ih2(ix))*iy + self.ih2(ix)*iz
ix = self.id3(ix)
ix = (1-self.ih3(ix))*iy + self.ih4(ix)*iz
ix = self.id4(ix)
ix = (1-self.ih4(ix))*iy + self.ih4(ix)*iz
ix = self.id5(ix)
ix = (1-self.ih5(ix))*iy + self.ih5(ix)*iz
ix = self.id6(ix)
ix = (1-self.ih6(ix))*iy + self.ih6(ix)*iz
iout = self.iu(ix)
return iout
class coilmaxwell(Model):
def __init__(self):
super(coilmaxwell, self).__init__()
initializer = tf.keras.initializers.GlorotUniform
self.d1 = tf.keras.layers.Dropout(rate=0.2)
self.h1 = Dense(40, activation='elu', kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.d2 = tf.keras.layers.Dropout(rate=0.2)
self.h2 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.d3 = tf.keras.layers.Dropout(rate=0.2)
self.h3 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.d4 = tf.keras.layers.Dropout(rate=0.2)
self.h4 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.d5 = tf.keras.layers.Dropout(rate=0.2)
self.h5 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.d6 = tf.keras.layers.Dropout(rate=0.2)
self.h6 = Dense(40, activation='elu',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.u1 = Dense(40, activation='linear',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.w1 = Dense(40, activation='linear',kernel_regularizer=tf.keras.regularizers.L2(0.001))
self.u = Dense(1, activation='linear')
def call(self, state):
x = self.d1(state)
y = self.u1(state)
z = self.w1(state)
x = (1-self.h1(x))*y + self.h1(x)*z
x = self.d2(x)
x = (1-self.h2(x))*y + self.h2(x)*z
x = self.d3(x)
x = (1-self.h3(x))*y + self.h4(x)*z
x = self.d4(x)
x = (1-self.h4(x))*y + self.h4(x)*z
x = self.d5(x)
x = (1-self.h5(x))*y + self.h5(x)*z
x = self.d6(x)
x = (1-self.h6(x))*y + self.h6(x)*z
out = self.u(x)
return out
##############################################################################################################################
class MaxwellPinn(object):
def __init__(self):
self.lr = 0.001
#self.lr = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=.001, decay_steps=10, decay_rate=.01)
self.opt_iron = Adam(self.lr)
self.opt_coil = Adam(self.lr)
self.ironmaxwell = ironmaxwell()
self.ironmaxwell.build(input_shape=(None, 2))
self.coilmaxwell = coilmaxwell()
self.coilmaxwell.build(input_shape=(None, 2))
self.train_loss_history = []
self.iter_count = 0
self.instant_loss = 0
self.bnd_loss = 0
self.ic_loss = 0
self.lamda = 0.1
self.pde_loss = 0
self.max_value = 0.012315021035034
self.iron_loss = 0
self.coil_loss = 0
################################################################################################################################
@tf.function
def physics_net_iron(self, xt,jmu):
x_i = xt[:, 0:1]
t_i = xt[:, 1:2]
with tf.GradientTape(persistent=True) as tape:
tape.watch(t_i)
tape.watch(x_i)
xt_t_i = tf.concat([x_i,t_i], axis=1)
u_i = self.ironmaxwell(xt_t_i)
u_x_i = tape.gradient(u_i, x_i)
u_t_i = tape.gradient(u_i, t_i)
u_xx_i = tape.gradient(u_x_i, x_i)
u_tt_i = tape.gradient(u_t_i, t_i)
del tape
return (u_xx_i+u_tt_i+jmu)
@tf.function
def physics_net_coil(self, xt,jmu):
x_c = xt[:, 0:1]
t_c = xt[:, 1:2]
with tf.GradientTape(persistent=True) as tape:
tape.watch(t_c)
tape.watch(x_c)
xt_t_c = tf.concat([x_c,t_c], axis=1)
u_c = self.coilmaxwell(xt_t_c)
u_x_c = tape.gradient(u_c, x_c)
u_t_c = tape.gradient(u_c, t_c)
u_xx_c = tape.gradient(u_x_c, x_c)
u_tt_c = tape.gradient(u_t_c, t_c)
del tape
return (u_xx_c+u_tt_c+jmu)
####################################################################################################################
@tf.function
def physics_net_for_ic(self, xt,in_mu,nom,out_mu): # physics terms for the interface (boundary) condition
x = xt[:, 0:1]
t = xt[:, 1:2]
with tf.GradientTape(persistent=True) as tape:
tape.watch(t)
tape.watch(x)
xt_t = tf.concat([x,t], axis=1)
out_u = self.ironmaxwell(xt_t)
out_u_x = tape.gradient(out_u, x)
out_u_y = tape.gradient(out_u, t)
in_u = self.coilmaxwell(xt_t)
in_u_x = tape.gradient(in_u, x)
in_u_y = tape.gradient(in_u, t)
del tape
out_b_x = out_u_y
out_b_y = out_u_x
out_h_x = out_b_x/out_mu
out_h_y = out_b_y/out_mu
in_b_x = in_u_y
in_b_y = in_u_x
in_h_x = in_b_x/in_mu
in_h_y = in_b_y/in_mu
loss_b = tf.add(tf.multiply((in_b_x-out_b_x),nom),tf.multiply((in_b_y-out_b_y),(1-nom)))
loss_h = tf.add(tf.multiply((in_h_x-out_h_x),(1-nom)),tf.multiply((in_h_y-out_h_y),nom))
return loss_b, loss_h
#############################################################################################################
def save_weights(self, path):
self.ironmaxwell.save_weights(path + 'ironmaxwell.h5')
self.coilmaxwell.save_weights(path + 'coilmaxwell.h5')
#############################################################################################################
def load_weights(self, path):
self.ironmaxwell.load_weights(path + 'ironmaxwell.h5')
self.coilmaxwell.load_weights(path + 'coilmaxwell.h5')
#############################################################################################################
def compute_loss_iron(self, f, u_bnd_hat, u_bnd_sol,penalty,loss_b,loss_h):
loss_col = tf.reduce_mean(tf.square(f))
loss_bnd = tf.reduce_mean(tf.square(u_bnd_hat - u_bnd_sol))
loss_mag = tf.reduce_mean(tf.square(loss_b))
loss_field =tf.reduce_mean(tf.square(loss_h))
loss = loss_col + loss_bnd + loss_mag + loss_field
self.iron_loss = loss.numpy()
return loss
def compute_loss_coil(self, f,penalty,loss_b,loss_h):
loss_col = tf.reduce_mean(tf.square(f))
loss_mag = tf.reduce_mean(tf.square(loss_b))
loss_field =tf.reduce_mean(tf.square(loss_h))
loss = loss_col + loss_mag + loss_field
self.coil_loss = loss.numpy()
return loss
#############################################################################################################
def compute_grad(self, xt_col_iron,xt_col_coil, xt_bnd, u_bnd_sol,ic,nom):
with tf.GradientTape(persistent=True) as tape:
J_coil = 9800
J_iron = 0
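# note: 1.2566e-06 is the vacuum permeability mu_0 (about 4*pi*1e-7); iron is modeled with a relative permeability of 2000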
mu_coil = 1.2566e-06
mu_iron = mu_coil*2000
f_iron = self.physics_net_iron(xt_col_iron,J_iron*mu_iron) # PDE loss for iron
f_coil = self.physics_net_coil(xt_col_coil,J_coil*mu_coil) # PDE loss for coil
u_bnd_hat = self.ironmaxwell(xt_bnd) # iron is the outer region
loss_b, loss_h = self.physics_net_for_ic(ic,mu_coil,nom,mu_iron)
loss_iron = self.compute_loss_iron(f_iron, u_bnd_hat, u_bnd_sol,1,loss_b,loss_h)
loss_coil = self.compute_loss_coil(f_coil,1,loss_b,loss_h)
iron_grads = tape.gradient(loss_iron, self.ironmaxwell.trainable_variables)
coil_grads = tape.gradient(loss_coil, self.coilmaxwell.trainable_variables)
loss = loss_iron + loss_coil
return loss, iron_grads, coil_grads
#############################################################################################################
def callback(self, arg=None):
if self.iter_count % 10 == 0:
print('iter=', self.iter_count, ', loss=', self.instant_loss,'iron_loss=',self.iron_loss,'coil_loss=',self.coil_loss)
self.train_loss_history.append([self.iter_count, self.instant_loss])
self.iter_count += 1
#############################################################################################################
def train_with_adam(self,xt_col_iron,xt_col_coil, xt_bnd, u_bnd_sol, adam_num,ic,nom):
def learn():
loss, iron_grads, coil_grads = self.compute_grad(xt_col_iron,xt_col_coil, xt_bnd, u_bnd_sol,ic,nom)
self.opt_iron.apply_gradients(zip(iron_grads, self.ironmaxwell.trainable_variables))
self.opt_coil.apply_gradients(zip(coil_grads, self.coilmaxwell.trainable_variables))
return loss
for iter in range(int(adam_num)):
loss = learn()
self.instant_loss = loss.numpy()
self.opt_iron = Adam(self.lr/(1+0.001*iter))
self.opt_coil = Adam(self.lr/(1+0.001*iter))
self.callback()
#############################################################################################################
def train_with_lbfgs(self, xt_col, xt_bnd, u_bnd_sol, lbfgs_num,J,mu,penalty,ii,ic,ii_mu,ic_mu,nom):
def vec_weight():
# vectorize weights
weight_vec = []
# Loop over all weights
for v in self.burgers.trainable_variables:
weight_vec.extend(v.numpy().flatten())
weight_vec = tf.convert_to_tensor(weight_vec)
return weight_vec
w0 = vec_weight().numpy()
def restore_weight(weight_vec):
# restore weight vector to model weights
idx = 0
for v in self.burgers.trainable_variables:
vs = v.shape
# weight matrices
if len(vs) == 2:
sw = vs[0] * vs[1]
updated_val = tf.reshape(weight_vec[idx:idx + sw], (vs[0], vs[1]))
idx += sw
# bias vectors
elif len(vs) == 1:
updated_val = weight_vec[idx:idx + vs[0]]
idx += vs[0]
# assign variables (Casting necessary since scipy requires float64 type)
v.assign(tf.cast(updated_val, dtype=tf.float32))
def loss_grad(w):
# update weights in model
restore_weight(w)
loss, grads, loss_bnd = self.compute_grad(xt_col, xt_bnd, u_bnd_sol,J,mu,penalty,ii,ic,ii_mu,ic_mu,nom)
# vectorize gradients
grad_vec = []
for g in grads:
grad_vec.extend(g.numpy().flatten())
# gradient list to array
# scipy-routines requires 64-bit floats
loss = loss.numpy().astype(np.float64)
self.instant_loss = loss
grad_vec = np.array(grad_vec, dtype=np.float64)
return loss, grad_vec
return scipy.optimize.minimize(fun=loss_grad,
x0=w0,
jac=True,
method='L-BFGS-B',
callback=self.callback,
options={'maxiter': lbfgs_num,
'maxfun': 5000,
'maxcor': 500,
'maxls': 500,
'ftol': 1.0 * np.finfo(float).eps}) #1.0 * np.finfo(float).eps
########################################################################################################################
def predict_iron(self, xt):
u_pred = self.ironmaxwell(xt)
return u_pred
def predict_coil(self, xt):
u_pred = self.coilmaxwell(xt)
return u_pred
#############################################################################################################
def train(self, adam_num, lbfgs_num):
iron_x = scipy.io.loadmat('iron_x.mat') # iron coordinates
iron_y = scipy.io.loadmat('iron_y.mat')
coil_x = scipy.io.loadmat('coil_x.mat') # coil coordinates
coil_y = scipy.io.loadmat('coil_y.mat')
iron_J = scipy.io.loadmat('iron_J.mat')
iron_mu = scipy.io.loadmat('iron_mu.mat')
coil_J = scipy.io.loadmat('iron_J.mat')
coil_mu = scipy.io.loadmat('iron_mu.mat')
ini = scipy.io.loadmat('bnd.mat')
inter_coil_x = scipy.io.loadmat('inter_coil_x.mat')
inter_coil_y = scipy.io.loadmat('inter_coil_y.mat')
icx = inter_coil_x['coil_inter_x']
icy = inter_coil_y['coil_inter_y']
ic = np.concatenate([icx, icy], axis=1) # interface coil data
inter_coil_mu = scipy.io.loadmat('inter_coil_mu.mat')
ic_mu = np.transpose(inter_coil_mu['mu_inter_coil'])
nomvec = scipy.io.loadmat('nom_vec.mat')
nom = nomvec['nom_vec']
nom = tf.convert_to_tensor(nom, dtype=tf.float32)
x_ini = np.transpose(ini['iron_bnd_x'])
y_ini = np.transpose(ini['iron_bnd_y'])
xt_bnd_data = np.concatenate([x_ini, y_ini], axis=1)
tu_bnd_data = []
for xt in xt_bnd_data:
tu_bnd_data.append(0)
tu_bnd_data = np.transpose(tu_bnd_data)
# collocation point iron
x_col_data = (iron_x['x'])
y_col_data = (iron_y['y'])
xy_col_data_iron = np.concatenate([x_col_data, y_col_data], axis=1)
# coil
x_col_data = (coil_x['coil_x'])
y_col_data = (coil_y['coil_y'])
xy_col_data_coil = np.concatenate([x_col_data, y_col_data], axis=1)
xt_col_iron = tf.convert_to_tensor(xy_col_data_iron, dtype=tf.float32)
xt_col_coil = tf.convert_to_tensor(xy_col_data_coil, dtype=tf.float32)
xt_bnd = tf.convert_to_tensor(xt_bnd_data, dtype=tf.float32)
u_bnd_sol = tf.convert_to_tensor(tu_bnd_data, dtype=tf.float32)
ic = tf.convert_to_tensor(ic, dtype=tf.float32)
ic_mu = tf.convert_to_tensor(ic_mu, dtype=tf.float32)
# Start timer
self.load_weights("C:/Users/user/Desktop/1-Cars (2 cases)/save_weights/maxwell/new_test/")
t0 = time()
self.train_with_adam(xt_col_iron,xt_col_coil, xt_bnd, u_bnd_sol, adam_num,ic,nom)
print('\nComputation time of adam: {} seconds'.format(time() - t0))
self.save_weights("C:/Users/user/Desktop/1-Cars (2 cases)/save_weights/maxwell/new_test/")
np.savetxt('C:/Users/user/Desktop/1-Cars (2 cases)/save_weights/maxwell/new_test/loss.txt', self.train_loss_history)
train_loss_history = np.array(self.train_loss_history)
plt.plot(train_loss_history[:, 0], train_loss_history[:, 1])
plt.yscale("log")
plt.show()
#############################################################################################################
# main
def main():
adam_num = 100000
lbfgs_num = 1000
agent = MaxwellPinn()
agent.train(adam_num, lbfgs_num)
if __name__=="__main__":
main()
That's my code. I have already tried things such as mini-batching and changing the model.
I am training a REINFORCE algorithm on the CartPole environment. Due to the simple nature of the environment, I expect it to learn quickly. However, that doesn't happen.
Here is the main portion of the algorithm -
for i in range(episodes):
print("i = ", i)
state = env.reset()
done = False
transitions = []
tot_rewards = 0
while not done:
act_proba = model(torch.from_numpy(state))
action = np.random.choice(np.array([0,1]), p = act_proba.data.numpy())
next_state, reward, done, info = env.step(action)
tot_rewards += 1
transitions.append((state, action, tot_rewards))
state = next_state
if i%50==0:
print("i = ", i, ",reward = ", tot_rewards)
score.append(tot_rewards)
reward_batch = torch.Tensor([r for (s,a,r) in transitions])
disc_rewards = discount_rewards(reward_batch)
nrml_disc_rewards = normalize_rewards(disc_rewards)
state_batch = torch.Tensor([s for (s,a,r) in transitions])
action_batch = torch.Tensor([a for (s,a,r) in transitions])
pred_batch = model(state_batch)
prob_batch = pred_batch.gather(dim=1, index=action_batch.long().view(-1, 1)).squeeze()
loss = -(torch.sum(torch.log(prob_batch)*nrml_disc_rewards))
opt.zero_grad()
loss.backward()
opt.step()
Here is the entire algorithm -
#I referred to this when writing the code - https://github.com/DeepReinforcementLearning/DeepReinforcementLearningInAction/blob/master/Chapter%204/Ch4_book.ipynb
import numpy as np
import gym
import torch
from torch import nn
env = gym.make('CartPole-v0')
learning_rate = 0.0001
episodes = 10000
def discount_rewards(reward, gamma = 0.99):
return torch.pow(gamma, torch.arange(len(reward)))*reward
def normalize_rewards(disc_reward):
return disc_reward/(disc_reward.max())
class NeuralNetwork(nn.Module):
def __init__(self, state_size, action_size):
super(NeuralNetwork, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.linear_relu_stack = nn.Sequential(
nn.Linear(state_size, 300),
nn.ReLU(),
nn.Linear(300, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, action_size),
nn.Softmax()
)
def forward(self,x):
x = self.linear_relu_stack(x)
return x
model = NeuralNetwork(env.observation_space.shape[0], env.action_space.n)
opt = torch.optim.Adam(params = model.parameters(), lr = learning_rate)
score = []
for i in range(episodes):
print("i = ", i)
state = env.reset()
done = False
transitions = []
tot_rewards = 0
while not done:
act_proba = model(torch.from_numpy(state))
action = np.random.choice(np.array([0,1]), p = act_proba.data.numpy())
next_state, reward, done, info = env.step(action)
tot_rewards += 1
transitions.append((state, action, tot_rewards))
state = next_state
if i%50==0:
print("i = ", i, ",reward = ", tot_rewards)
score.append(tot_rewards)
reward_batch = torch.Tensor([r for (s,a,r) in transitions])
disc_rewards = discount_rewards(reward_batch)
nrml_disc_rewards = normalize_rewards(disc_rewards)
state_batch = torch.Tensor([s for (s,a,r) in transitions])
action_batch = torch.Tensor([a for (s,a,r) in transitions])
pred_batch = model(state_batch)
prob_batch = pred_batch.gather(dim=1, index=action_batch.long().view(-1, 1)).squeeze()
loss = -(torch.sum(torch.log(prob_batch)*nrml_disc_rewards))
opt.zero_grad()
loss.backward()
opt.step()
Your mistake is in how you compute the discounted rewards.
In REINFORCE (and many other algorithms) you need to compute the sum of future discounted rewards for every step onward.
This means that the sum of discounted rewards for the first step should be:
G_1 = r_1 + gamma * r_2 + gamma ^ 2 * r_3 + ... + gamma ^ (T-1) * r_T
G_2 = r_2 + gamma * r_3 + gamma ^ 2 * r_4 + ... + gamma ^ (T-2) * r_T
And so on...
This gives you an array containing the sum of future rewards for every step (i.e. [G_1, G_2, G_3, ..., G_T]).
However, what you currently compute only applies a discount to each step's own reward:
G_1 = r_1
G_2 = gamma * r_2
G_3 = gamma ^ 2 * r_3
And so on...
Here is the Python code that fixes your problem. We iterate over the list of rewards from back to front, so each return is computed in constant time from the one after it.
def discount_rewards(reward, gamma=0.99):
R = 0
returns = []
reward = reward.tolist()
for r in reward[::-1]:
R = r + gamma * R
returns.append(R)
returns = torch.tensor(returns[::-1])
return returns
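As a quick sanity check (a toy example of my own, not taken from your code), here is what your original computation and the corrected discount_rewards above give for three steps of reward 1 with gamma = 0.99:
import torch

rewards = torch.tensor([1.0, 1.0, 1.0])

# original version: gamma^t * r_t per step -> [1.0, 0.99, 0.9801]
old = torch.pow(0.99, torch.arange(len(rewards))) * rewards

# corrected version: G_t = r_t + gamma * G_{t+1} -> [2.9701, 1.99, 1.0]
new = discount_rewards(rewards, gamma=0.99)
print(old)
print(new)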
Here is a figure showing the progression of the algorithm's score over the first 5000 steps.
I'm using "summary" from torchsummary, but some of the layers and parameters for the deep learning model are missing in the outcome of the print.
Here is the code:
from torchvision import models
from torchsummary import summary
import torch
import torch.nn as nn
device1 = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device1)
Params = {}
Params['Basic_filters_num'] = 32
Params['Levels'] = 6
Params['Duplication_of_filters'] = 2
Params['Blocks_in_level'] = 2
Params['Skip'] = True
Params['Act'] = "ReLU"
Params['Last_layer_act'] = "ReLU"
Params['Kernel_size_encoder'] = (8,8)
Params['Kernel_size_decoder'] = (8,8)
Params['Kernel_size_deconvlayer'] = (2,2)
Params['padding_conv_layer'] = 'same'
Params['Norm_layer'] = True
Params['Norm_layer_kind'] = "Batch"
Params['Pool_kind'] = "Max"
Params['Pool_size'] = (2,2)
Params['Pool_stride'] = (2,2)
Params['Stride_size_encoder'] = (1,1)
Params['Stride_size_decoder'] = (1,1)
Params['Dropout_encoder'] = True
Params['Droput_decoder'] = True
Params['Droput'] = 0.5
Params['Basic_CH'] = 1
def BringAct(ACT):
if ACT == "ReLU":
Act = nn.ReLU()
return Act
def BringNorm(NORM_KIND):
if NORM_KIND == "Batch":
NORM = nn.BatchNorm2d
return NORM
def BringPool(POOL_KIND,POOL_SIZE,STRIDE_SIZE):
if POOL_KIND == "Max":
pool = nn.MaxPool2d(POOL_SIZE,STRIDE_SIZE)
return pool
class Concatenate(nn.Module):
def __init__(self):
super(Concatenate,self).__init__()
def forward(self,data1,data2):
return torch.cat((data1, data2),1)
class BASICCONVORDECONVLAYER(nn.Module):
def __init__(self,C_IN,C_OUT,Params,DEOREN,CONV):
super(BASICCONVORDECONVLAYER,self).__init__()
self.isconv = CONV
self.act = BringAct(Params['Act'])
self.NORM = Params['Norm_layer']
self.NORMLayer = BringNorm(Params['Norm_layer_kind'])
self.NORMLayer = self.NORMLayer(int(C_OUT))
if DEOREN:
str1 = 'encoder'
else:
str1 = 'decoder'
KERNEL_SIZE = Params['Kernel_size_'+str1]
STRIDE_SIZE = Params['Stride_size_'+str1]
self.conv = nn.Conv2d(in_channels=int(C_IN), out_channels = int(C_OUT), kernel_size = KERNEL_SIZE, padding = Params['padding_conv_layer'],stride = STRIDE_SIZE)
self.deconv = nn.ConvTranspose2d(in_channels=int(C_IN), out_channels = int(C_OUT), kernel_size = Params['Kernel_size_deconvlayer'],stride = Params['Kernel_size_deconvlayer'])
self.C_IN = C_IN
self.C_OUT = C_OUT
def forward(self,x):
if self.isconv:
out = self.conv(x)
else:
out = self.deconv(x)
out = self.act(out)
if self.NORM:
out = self.NORMLayer(out)
return out
class EncoderBlock(nn.Module):
def __init__(self,number_of_level,Params,W_POOL):
super(EncoderBlock,self).__init__()
self.convs = {}
self.NUMBER_OF_CONV = Params['Blocks_in_level']
self.num_of_lev = number_of_level
if number_of_level == -1:
C_IN = Params['Basic_filters_num']*(Params['Duplication_of_filters']**(Params['Levels']-2))
C_OUT = Params['Basic_filters_num']*(Params['Duplication_of_filters']**(Params['Levels']-1))
if number_of_level == 0:
C_IN = Params['Basic_CH']
C_OUT = Params['Basic_filters_num']
if number_of_level > 0:
C_IN = Params['Basic_filters_num']*(Params['Duplication_of_filters']**(number_of_level-1))
C_OUT = Params['Basic_filters_num']*(Params['Duplication_of_filters']**(number_of_level))
self.convs[0] = BASICCONVORDECONVLAYER(C_IN,C_OUT,Params,True,True)
for i in range(self.NUMBER_OF_CONV-1):
self.convs[i+1] = BASICCONVORDECONVLAYER(C_OUT,C_OUT,Params,True,True)
self.W_POOL = W_POOL
self.Pool = BringPool(Params['Pool_kind'],Params['Pool_size'] ,Params['Pool_stride'])
self.DROPOUT = Params['Dropout_encoder']
if self.DROPOUT:
self.drop = nn.Dropout2d(Params['Droput'])
self.C_IN = C_IN
self.C_OUT = C_OUT
def forward(self,x):
out = self.convs[0](x)
for i in range(self.NUMBER_OF_CONV-1):
out = self.convs[i+1](out)
if self.DROPOUT:
out = self.drop(out)
if self.W_POOL:
filt = torch.clone(out)
out = self.Pool(out)
return out,filt
return out
class DecoderBlock(nn.Module):
def __init__(self,number_of_level,Params):
super(DecoderBlock,self).__init__()
self.convs = {}
if number_of_level == 0:
C_IN = Params['Basic_filters_num']
C_OUT = Params['Basic_CH']
else:
C_OUT = Params['Basic_filters_num']*(Params['Duplication_of_filters']**(number_of_level-1))
C_IN = Params['Basic_filters_num']*(Params['Duplication_of_filters']**(number_of_level))
self.NUMBER_OF_CONV = Params['Blocks_in_level']
self.doconc = Params['Skip']
if self.doconc:
C_TAG = C_OUT
else:
C_TAG = C_IN
self.convs[0] = BASICCONVORDECONVLAYER(C_IN,C_OUT,Params,False,True)
for i in range(self.NUMBER_OF_CONV-1):
self.convs[i+1] = BASICCONVORDECONVLAYER(C_OUT,C_OUT,Params,False,True)
self.deconv = BASICCONVORDECONVLAYER(C_IN,C_TAG,Params,False,False)
self.concat = Concatenate()
self.DROPOUT = Params['Droput_decoder']
if self.DROPOUT:
self.drop = nn.Dropout2d(Params['Droput'])
self.C_IN = C_IN
self.C_OUT = C_OUT
self.C_TAG = C_TAG
def forward(self,x,data):
out = self.deconv(x)
if self.doconc:
out = self.concat(out,data)
for i in range(self.NUMBER_OF_CONV):
out = self.convs[i](out)
if self.DROPOUT:
out = self.drop(out)
return out
class Encoder(nn.Module):
def __init__(self,Params):
super(Encoder,self).__init__()
self.EncoderBlocks = {}
self.NUM_OF_LEVELS = Params['Levels']
self.filts = {}
for i in range(self.NUM_OF_LEVELS-1):
self.EncoderBlocks[i] = EncoderBlock(i,Params,True)
def Filts(self):
return self.filts
def forward(self,x):
out,filt = self.EncoderBlocks[0](x)
self.filts[0] = filt
for i in range(self.NUM_OF_LEVELS-2):
out,filt = self.EncoderBlocks[i+1](out)
self.filts[i+1] = filt
return out
class Decoder(nn.Module):
def __init__(self,Params):
super(Decoder,self).__init__()
self.DecoderBlocks = {}
self.NUM_OF_LEVELS = Params['Levels']
for i in range(self.NUM_OF_LEVELS-1):
self.DecoderBlocks[i] = DecoderBlock(Params['Levels']-i-1,Params)
def forward(self,x,filts):
lenfilts = len(filts)
out = self.DecoderBlocks[0](x,filts[lenfilts-1])
for i in range(self.NUM_OF_LEVELS-2):
out = self.DecoderBlocks[i+1](out,filts[lenfilts-2-i])
return out
class Bottleneck(nn.Module):
def __init__(self,Params):
super(Bottleneck,self).__init__()
self.convlayer = EncoderBlock(-1,Params,False)
def forward(self,x):
out = self.convlayer(x)
return out
class Unet(nn.Module):
def __init__(self,Params):
super(Unet,self).__init__()
self.Params = Params
self.encoder = Encoder(self.Params)
self.bottleneck = Bottleneck(self.Params)
self.decoder = Decoder(self.Params)
self.finallayer = nn.Conv2d(in_channels = Params['Basic_filters_num'], out_channels = Params['Basic_CH'], kernel_size = (1,1), padding = 'same',stride = (1,1))
self.finalact = BringAct(Params['Last_layer_act'])
def forward(self,x):
out = self.encoder(x)
out = self.bottleneck(out)
out = self.decoder(out,self.encoder.Filts())
out = self.finallayer(out)
out = self.finalact(out)
return out
MusicNet = Unet(Params).cuda()
summary(Unet(Params), ( 1, 64, 64),device = 'cpu')
The outcome is this:
Encoder-1 [-1, 512, 2, 2] 0
Dropout2d-2 [-1, 1024, 2, 2] 0
EncoderBlock-3 [-1, 1024, 2, 2] 0
Bottleneck-4 [-1, 1024, 2, 2] 0
Decoder-5 [-1, 32, 64, 64] 0
Conv2d-6 [-1, 1, 64, 64] 33
ReLU-7 [-1, 1, 64, 64] 0
Right now, it looks like the layers that are stored in the dictionaries don't print out as layers, even though they do affect the shapes. What did I do wrong?
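For reference, a minimal sketch of what is likely going on (a toy example, not the model above): modules stored in a plain Python dict are not registered as submodules, so summary cannot see them, whereas an nn.ModuleDict is tracked:
import torch.nn as nn

class WithPlainDict(nn.Module):
    def __init__(self):
        super().__init__()
        self.convs = {}                      # plain dict: contents are NOT registered
        self.convs[0] = nn.Conv2d(1, 8, kernel_size=3)

class WithModuleDict(nn.Module):
    def __init__(self):
        super().__init__()
        self.convs = nn.ModuleDict()         # registered container: contents ARE tracked
        self.convs['0'] = nn.Conv2d(1, 8, kernel_size=3)

print(len(list(WithPlainDict().parameters())))   # 0 - invisible to summary and optimizers
print(len(list(WithModuleDict().parameters())))  # 2 - the conv's weight and bias are tracked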
I have been trying to replicate the Alex Graves handwriting synthesis model with TensorFlow and Python on a 1080 Ti GPU with CUDA.
I replicated all of the features explained in the paper exactly, and even clipped the gradients to the respective values in place, but I have real difficulty training it.
I also preprocessed the data in the way explained in the paper, including normalizing the x and y offsets, but the problem is that training usually cannot push the negative log likelihood below 1000, whereas in the paper it reaches -1000, and after that I see NaN weights.
The only extra thing I did was to add 0.0000001 to the conditional probability of every stroke to prevent NaN values in the log likelihood.
Any tips or suggestions or experience with such a task?
This is the cell code I use:
class Custom_Cell(RNNCell):
def __init__(self,forget_bias,bias,one_hot_vector, hidden_layer_nums=[700,700,700], mixture_num=10, attention_num=4):
self.bias = bias
self.lstms = []
for i in hidden_layer_nums:
self.lstms.append(LSTMCell(num_units=i, initializer=tf.truncated_normal_initializer(0.075), dtype=tf.float32, forget_bias=forget_bias))
self.attention_num = attention_num
self.mixture_num = mixture_num
self.state_size = 2*sum(hidden_layer_nums) + 3*self.attention_num
self.attention_var_num = 3*self.attention_num
self.output_size = 6*self.mixture_num + 1 + 1
self.one_hot_vector = one_hot_vector
self.lstm_num = len(hidden_layer_nums)
self.hidden_layer_nums = hidden_layer_nums
temp_shape = self.one_hot_vector.shape
self.char_num = temp_shape[2]
self.i_to_h = []
self.w_to_h = []
self.h_to_h = []
self.prev_h_to_h = []
self.lstm_bias = []
self.lstm_to_attention_weights = tf.get_variable("lstms/first_to_attention_mtrx",shape=[hidden_layer_nums[0],self.attention_var_num],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True)
self.lstm_to_attention_bias = tf.get_variable("lstms/first_to_attention_bias",shape=[self.attention_var_num],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True)
self.all_to_output_mtrx = []
for i in range(self.lstm_num):
self.all_to_output_mtrx.append( tf.get_variable("lstms/to_output_mtrx_" + str(i), shape=[hidden_layer_nums[i],self.output_size-1],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
self.all_to_output_bias = tf.get_variable("lstms/output_bias",shape=[self.output_size-1],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True)
for i in range(self.lstm_num):
self.i_to_h.append(tf.get_variable("lstms/i_to_h_"+str(i),shape=[3,hidden_layer_nums[i]],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
self.w_to_h.append(tf.get_variable("lstms/w_to_h_"+str(i),shape=[self.char_num,hidden_layer_nums[i]],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
self.h_to_h.append(tf.get_variable("lstms/h_to_h_"+str(i),shape=[hidden_layer_nums[i],hidden_layer_nums[i]],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
self.lstm_bias.append(tf.get_variable("lstms/bias_" + str(i),shape=[hidden_layer_nums[i]],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
if not i == 0:
self.prev_h_to_h.append(
tf.get_variable("lstms/prev_h_to_h_" + str(i), shape=[hidden_layer_nums[i-1], hidden_layer_nums[i]],
dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.075),
trainable=True))
def __call__(self, inputs, state, scope=None):
# Extracting previous configuration and vectors
splitarray = []
for i in self.hidden_layer_nums:
splitarray.append(i)
splitarray.append(i)
splitarray.append(3*self.attention_num)
splitted = tf.split(state,splitarray,axis=1)
prev_tuples = []
for i in range(self.lstm_num):
newtuple = LSTMStateTuple(splitted[2*i],splitted[2*i + 1])
prev_tuples.append(newtuple)
prev_attention_vec = splitted[2*self.lstm_num]
new_attention_vec = 0
next_states = []
most_attended = 0
last_output = 0
for i in range(self.lstm_num):
prev_c, prev_h = prev_tuples[i]
cell = self.lstms[i]
if i == 0:
with tf.name_scope("layer_1"):
w, most_attended = self.gaussian_attention(self.one_hot_vector,prev_attention_vec)
input_vec = tf.matmul(inputs,self.i_to_h[0]) + tf.matmul(prev_h,self.h_to_h[0]) + tf.matmul(w,self.w_to_h[0]) + self.lstm_bias[0]
_, new_state = cell(input_vec, prev_tuples[0])
new_c, new_h = new_state
next_states.append(new_c)
next_states.append(new_h)
last_output = tf.matmul(new_h,self.all_to_output_mtrx[0])
with tf.name_scope("attention_layer"):
temp_attention = tf.matmul(new_h,self.lstm_to_attention_weights) + self.lstm_to_attention_bias
new_alpha, new_beta, new_kappa = tf.split(temp_attention,[self.attention_num,self.attention_num,self.attention_num],axis=1)
old_alpha, old_beta, old_kappa = tf.split(prev_attention_vec,[self.attention_num,self.attention_num,self.attention_num], axis=1)
new_alpha = tf.exp(new_alpha)
new_beta = tf.exp(new_beta)
new_kappa = tf.exp(new_kappa) + old_kappa
new_attention_vec = tf.concat([new_alpha,new_beta,new_kappa],axis=1)
else:
with tf.name_scope("layer_" + str(i)):
w, most_attended = self.gaussian_attention(self.one_hot_vector,new_attention_vec)
input_vec = tf.matmul(inputs,self.i_to_h[i]) + tf.matmul(next_states[-1],self.prev_h_to_h[i-1]) + tf.matmul(prev_h,self.h_to_h[i]) + tf.matmul(w,self.w_to_h[i]) + self.lstm_bias[i]
_,new_state = cell(input_vec,prev_tuples[i])
new_c, new_h = new_state
next_states.append(new_c)
next_states.append(new_h)
last_output = last_output + tf.matmul(new_h, self.all_to_output_mtrx[i])
with tf.name_scope("output"):
last_output = last_output + self.all_to_output_bias
next_states.append(new_attention_vec)
state_to_return = tf.concat(next_states,axis=1)
output_split_param = [1,self.mixture_num,2*self.mixture_num,2*self.mixture_num,self.mixture_num]
binomial_param, pi, mu, sigma, rho = tf.split(last_output,output_split_param,axis=1)
binomial_param = tf.divide(1.,1.+tf.exp(binomial_param))
pi = tf.nn.softmax(tf.multiply(pi,1.+self.bias),axis=1)
mu = mu
sigma = tf.exp(sigma-self.bias)
rho = tf.tanh(rho)
output_to_return = tf.concat([most_attended, binomial_param, pi, mu, sigma, rho],axis=1)
return output_to_return, state_to_return
def state_size(self):
return self.state_size
def output_size(self):
return self.output_size
def gaussian_attention(self,sequence,params):
with tf.name_scope("attention_calculation"):
alpha, beta, kappa = tf.split(params,[self.attention_num,self.attention_num,self.attention_num],axis=1)
seq_shape = sequence.shape
seq_length = seq_shape[1]
temp_vec = 20*np.asarray(range(seq_length),dtype=float)
final_result = 0
alpha = tf.split(alpha,self.attention_num,1)
beta = tf.split(beta,self.attention_num,1)
kappa = tf.split(kappa,self.attention_num,1)
for i in range(self.attention_num):
alpha_now = alpha[i]
beta_now = beta[i]
kappa_now = kappa[i]
result = kappa_now - temp_vec
result = tf.multiply(tf.square(result),tf.negative(beta_now))
result = tf.multiply(tf.exp(result),alpha_now)
final_result = final_result+result
most_attended = tf.argmax(final_result,axis=1)
most_attended = tf.reshape(tf.cast(most_attended,dtype=tf.float32),shape=[-1,1])
final_result = tf.tile(tf.reshape(final_result,[-1,seq_shape[1],1]),[1,1,seq_shape[2]])
to_return = tf.reduce_sum(tf.multiply(final_result,sequence),axis=1)
return to_return, most_attended
And this is the RNN with the loss network:
to_write_one_hot = tf.placeholder(dtype=tf.float32,shape=(None,line_length,dict_length))
sequence = tf.placeholder(dtype=tf.float32,shape=(None,None,3))
sequence_shift = tf.placeholder(dtype=tf.float32,shape=(None,None,3))
bias = tf.placeholder(shape=[1],dtype=tf.float32)
sequence_length = tf.placeholder(shape=(None),dtype=tf.int32)
forget_bias_placeholder = tf.placeholder(shape=(None),dtype=tf.float32)
graves_cell = Custom_Cell(forget_bias=1,one_hot_vector=to_write_one_hot,hidden_layer_nums=hidden_layer_nums,mixture_num=mixture_num,bias=bias,attention_num=attention_num)
output, state = tf.nn.dynamic_rnn(graves_cell,sequence,dtype=tf.float32,sequence_length=sequence_length)
with tf.name_scope("loss_layer"):
mask = tf.sign(tf.reduce_max(tf.abs(output), 2))
most_attended, binomial_param, pi, mu, sigma, rho = tf.split(output,[1,1,mixture_num,2*mixture_num,2*mixture_num,mixture_num], axis=2)
pi = tf.split(pi,mixture_num,axis=2)
mu = tf.split(mu,mixture_num,axis=2)
sigma = tf.split(sigma,mixture_num,axis=2)
rho = tf.split(rho,mixture_num,axis=2)
negative_log_likelihood = 0
probability = 0
x1, x2, e = tf.split(sequence_shift,3,axis=2)
for i in range(mixture_num):
pi_now = pi[i]
mu_now = tf.split(mu[i],2,axis=2)
mu_1 = mu_now[0]
mu_2 = mu_now[1]
sigma_now = tf.split(sigma[i],2,axis=2)
sigma_1 = sigma_now[0] + (1-tf.reshape(mask, [-1,max_len,1]))
sigma_2 = sigma_now[1] + (1-tf.reshape(mask, [-1,max_len,1]))
rho_now = rho[i]
Z = tf.divide(tf.square(x1-mu_1),tf.square(sigma_1)) + tf.divide(tf.square(x2-mu_2),tf.square(sigma_2)) - tf.divide(tf.multiply(tf.multiply(x1-mu_1,x2-mu_2),2*rho_now),tf.multiply(sigma_1,sigma_2))
prob = tf.exp(tf.div(tf.negative(Z),2*(1-tf.square(rho_now))))
Normalizing_factor = 2*np.pi*tf.multiply(sigma_1,sigma_2)
Normalizing_factor = tf.multiply(Normalizing_factor,tf.sqrt(1-tf.square(rho_now)))
prob = tf.divide(prob,Normalizing_factor)
prob = tf.multiply(pi_now,prob)
probability = probability + prob
binomial_likelihood = tf.multiply(binomial_param,e) + tf.multiply(1-binomial_param,1-e)
probability = tf.multiply(probability,binomial_likelihood)
probability = probability + (1-tf.reshape(mask,[-1,max_len,1]))
temp_tensor = tf.multiply(mask, tf.log(tf.reshape(probability,[-1,max_len]) + mask*0.00001))
negative_log_likelihood_0 = tf.negative(tf.reduce_sum(temp_tensor,axis=1))
negative_log_likelihood_1 = tf.divide(negative_log_likelihood_0,tf.reshape(tf.cast(sequence_length, dtype=tf.float32), shape=[-1,1]))
negative_log_likelihood_1 = tf.reduce_mean(negative_log_likelihood_1)
tf.summary.scalar("average_per_timestamp_log_likelihood", negative_log_likelihood_1)
negative_log_likelihood = tf.reduce_mean(negative_log_likelihood_0)
with tf.name_scope("train_op"):
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.0001,momentum=0.9, decay=0.95,epsilon=0.0001)
gvs = optimizer.compute_gradients(negative_log_likelihood)
capped_gvs = []
for grad, var in gvs:
if var.name.__contains__("rnn"):
capped_gvs.append((tf.clip_by_value(grad,-10,10),var))
else:
capped_gvs.append((tf.clip_by_value(grad,-100,100),var))
train_op = optimizer.apply_gradients(capped_gvs)
Edit 1. I discovered that I was clipping gradients in the wrong way; the correct way is to introduce a new op, as explained in https://github.com/tensorflow/tensorflow/issues/2793, to clip only the output gradients of the whole network and the LSTM cells.
@tf.custom_gradient
def clip_gradient(x, clip):
def grad(dresult):
return [tf.clip_by_norm(dresult, clip)]
return x, grad
Add the lines above to your code and use the function on any variable whose gradient you want to clip during backpropagation!
I still have to wait and see my results.
Edit 2.
The changed Model code is:
from tensorflow.contrib.rnn import RNNCell
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib.rnn import LSTMStateTuple
import tensorflow as tf
import numpy as np
@tf.custom_gradient
def clip_gradient_lstm(x):
def grad(dresult):
return [tf.clip_by_value(dresult,-10,10)]
return x, grad
@tf.custom_gradient
def clip_gradient_output(x):
def grad(dresult):
return [tf.clip_by_value(dresult,-100,100)]
return x, grad
def length_of(seq):
used = tf.sign(tf.reduce_max(tf.abs(seq),axis=2))
length = tf.reduce_sum(used,1)
length = tf.cast(length,tf.int32)
return length
class Custom_Cell(RNNCell):
def __init__(self,forget_bias,bias,one_hot_vector, hidden_layer_nums=[700,700,700], mixture_num=10, attention_num=4):
self.bias = bias
self.lstms = []
for i in hidden_layer_nums:
self.lstms.append(LSTMCell(num_units=i, initializer=tf.truncated_normal_initializer(0.075), dtype=tf.float32, forget_bias=forget_bias))
self.attention_num = attention_num
self.mixture_num = mixture_num
self.state_size = 2*sum(hidden_layer_nums) + 3*self.attention_num
self.attention_var_num = 3*self.attention_num
self.output_size = 6*self.mixture_num + 1 + 1
self.one_hot_vector = one_hot_vector
self.lstm_num = len(hidden_layer_nums)
self.hidden_layer_nums = hidden_layer_nums
temp_shape = self.one_hot_vector.shape
self.char_num = temp_shape[2]
self.i_to_h = []
self.w_to_h = []
self.h_to_h = []
self.prev_h_to_h = []
self.lstm_bias = []
self.lstm_to_attention_weights = tf.get_variable("lstms/first_to_attention_mtrx",shape=[hidden_layer_nums[0],self.attention_var_num],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True)
self.lstm_to_attention_bias = tf.get_variable("lstms/first_to_attention_bias",shape=[self.attention_var_num],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True)
self.all_to_output_mtrx = []
for i in range(self.lstm_num):
self.all_to_output_mtrx.append( tf.get_variable("lstms/to_output_mtrx_" + str(i), shape=[hidden_layer_nums[i],self.output_size-1],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
self.all_to_output_bias = tf.get_variable("lstms/output_bias",shape=[self.output_size-1],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True)
for i in range(self.lstm_num):
self.i_to_h.append(tf.get_variable("lstms/i_to_h_"+str(i),shape=[3,hidden_layer_nums[i]],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
self.w_to_h.append(tf.get_variable("lstms/w_to_h_"+str(i),shape=[self.char_num,hidden_layer_nums[i]],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
self.h_to_h.append(tf.get_variable("lstms/h_to_h_"+str(i),shape=[hidden_layer_nums[i],hidden_layer_nums[i]],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
self.lstm_bias.append(tf.get_variable("lstms/bias_" + str(i),shape=[hidden_layer_nums[i]],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.075),trainable=True))
if not i == 0:
self.prev_h_to_h.append(
tf.get_variable("lstms/prev_h_to_h_" + str(i), shape=[hidden_layer_nums[i-1], hidden_layer_nums[i]],
dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.075),
trainable=True))
def __call__(self, inputs, state, scope=None):
# Extracting previous configuration and vectors
splitarray = []
for i in self.hidden_layer_nums:
splitarray.append(i)
splitarray.append(i)
splitarray.append(3*self.attention_num)
splitted = tf.split(state,splitarray,axis=1)
prev_tuples = []
for i in range(self.lstm_num):
newtuple = LSTMStateTuple(splitted[2*i],splitted[2*i + 1])
prev_tuples.append(newtuple)
prev_attention_vec = splitted[2*self.lstm_num]
new_attention_vec = 0
next_states = []
most_attended = 0
last_output = 0
for i in range(self.lstm_num):
prev_c, prev_h = prev_tuples[i]
cell = self.lstms[i]
if i == 0:
with tf.name_scope("layer_1"):
w, most_attended = self.gaussian_attention(self.one_hot_vector,prev_attention_vec)
input_vec = tf.matmul(inputs,self.i_to_h[0]) + tf.matmul(prev_h,self.h_to_h[0]) + tf.matmul(w,self.w_to_h[0]) + self.lstm_bias[0]
_, new_state = cell(input_vec, prev_tuples[0])
new_c, new_h = new_state
new_h = clip_gradient_lstm(new_h)
next_states.append(new_c)
next_states.append(new_h)
last_output = tf.matmul(new_h,self.all_to_output_mtrx[0])
with tf.name_scope("attention_layer"):
temp_attention = tf.matmul(new_h,self.lstm_to_attention_weights) + self.lstm_to_attention_bias
new_alpha, new_beta, new_kappa = tf.split(temp_attention,[self.attention_num,self.attention_num,self.attention_num],axis=1)
old_alpha, old_beta, old_kappa = tf.split(prev_attention_vec,[self.attention_num,self.attention_num,self.attention_num], axis=1)
new_alpha = tf.exp(new_alpha)
new_beta = tf.exp(new_beta)
new_kappa = tf.exp(new_kappa) + old_kappa
new_attention_vec = tf.concat([new_alpha,new_beta,new_kappa],axis=1)
else:
with tf.name_scope("layer_" + str(i)):
w, most_attended = self.gaussian_attention(self.one_hot_vector,new_attention_vec)
input_vec = tf.matmul(inputs,self.i_to_h[i]) + tf.matmul(next_states[-1],self.prev_h_to_h[i-1]) + tf.matmul(prev_h,self.h_to_h[i]) + tf.matmul(w,self.w_to_h[i]) + self.lstm_bias[i]
_,new_state = cell(input_vec,prev_tuples[i])
new_c, new_h = new_state
new_h = clip_gradient_lstm(new_h)
next_states.append(new_c)
next_states.append(new_h)
last_output = last_output + tf.matmul(new_h, self.all_to_output_mtrx[i])
with tf.name_scope("output"):
last_output = last_output + self.all_to_output_bias
last_output = clip_gradient_output(last_output)
next_states.append(new_attention_vec)
state_to_return = tf.concat(next_states,axis=1)
output_split_param = [1,self.mixture_num,2*self.mixture_num,2*self.mixture_num,self.mixture_num]
binomial_param, pi, mu, sigma, rho = tf.split(last_output,output_split_param,axis=1)
binomial_param = tf.divide(1.,1.+tf.exp(binomial_param))
pi = tf.nn.softmax(tf.multiply(pi,1.+self.bias),axis=1)
mu = mu
sigma = tf.exp(sigma-self.bias)
rho = tf.tanh(rho)
output_to_return = tf.concat([most_attended, binomial_param, pi, mu, sigma, rho],axis=1)
return output_to_return, state_to_return
def state_size(self):
return self.state_size
def output_size(self):
return self.output_size
def gaussian_attention(self,sequence,params):
with tf.name_scope("attention_calculation"):
alpha, beta, kappa = tf.split(params,[self.attention_num,self.attention_num,self.attention_num],axis=1)
seq_shape = sequence.shape
seq_length = seq_shape[1]
temp_vec = np.asarray(range(seq_length),dtype=float)
final_result = 0
alpha = tf.split(alpha,self.attention_num,1)
beta = tf.split(beta,self.attention_num,1)
kappa = tf.split(kappa,self.attention_num,1)
for i in range(self.attention_num):
alpha_now = alpha[i]
beta_now = beta[i]
kappa_now = kappa[i]
result = kappa_now - temp_vec
result = tf.multiply(tf.square(result),tf.negative(beta_now))
result = tf.multiply(tf.exp(result),alpha_now)
final_result = final_result+result
most_attended = tf.argmax(final_result,axis=1)
most_attended = tf.reshape(tf.cast(most_attended,dtype=tf.float32),shape=[-1,1])
final_result = tf.tile(tf.reshape(final_result,[-1,seq_shape[1],1]),[1,1,seq_shape[2]])
to_return = tf.reduce_sum(tf.multiply(final_result,sequence),axis=1)
return to_return, most_attended
And the training is done by:
with tf.name_scope("train_op"):
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.0001,momentum=0.9, decay=0.95,epsilon=0.0001,centered=True)
train_op = optimizer.minimize(negative_log_likelihood)
And right now it is still training, but so far the negative log likelihood has only gotten down to about -10.
The acc and gyro arrays passed to model.fit have shape (200, 3), and the Input layer shape is (200, 3). Why do I get this error: "Error when checking input: expected acc_input to have 3 dimensions, but got array with shape (200, 3)"? This is a visualization of my model.
Here's my code:
WIDE = 20
FEATURE_DIM = 30
CHANNEL = 1
CONV_NUM = 64
CONV_LEN = 3
CONV_LEN_INTE = 3#4
CONV_LEN_LAST = 3#5
CONV_NUM2 = 64
CONV_MERGE_LEN = 8
CONV_MERGE_LEN2 = 6
CONV_MERGE_LEN3 = 4
rnn_size=128
acc_input_tensor = Input(shape=(200,3),name = 'acc_input')
gyro_input_tensor = Input(shape=(200,3),name= 'gyro_input')
Acc_input_tensor = Reshape(target_shape=(20,30,1))(acc_input_tensor)
Gyro_input_tensor = Reshape(target_shape=(20,30,1))(gyro_input_tensor)
acc_conv1 = Conv2D(CONV_NUM,(1, 1*3*CONV_LEN),strides= (1,1*3),padding='valid',activation=None)(Acc_input_tensor)
acc_conv1 = BatchNormalization(axis=1)(acc_conv1)
acc_conv1 = Activation('relu')(acc_conv1)
acc_conv1 = Dropout(0.2)(acc_conv1)
acc_conv2 = Conv2D(CONV_NUM,(1,CONV_LEN_INTE),strides= (1,1),padding='valid',activation=None)(acc_conv1)
acc_conv2 = BatchNormalization(axis=1)(acc_conv2)
acc_conv2 = Activation('relu')(acc_conv2)
acc_conv2 = Dropout(0.2)(acc_conv2)
acc_conv3 = Conv2D(CONV_NUM,(1,CONV_LEN_LAST),strides=(1,1),padding='valid',activation=None)(acc_conv2)
acc_conv3 = BatchNormalization(axis=1)(acc_conv3)
acc_conv3 = Activation('relu')(acc_conv3)
acc_conv3 = Dropout(0.2)(acc_conv3)
gyro_conv1 = Conv2D(CONV_NUM,(1, 1*3*CONV_LEN),strides=(1,1*3),padding='valid',activation=None)(Gyro_input_tensor)
gyro_conv1 = BatchNormalization(axis=1)(gyro_conv1)
gyro_conv1 = Activation('relu')(gyro_conv1)
gyro_conv1 = Dropout(0.2)(gyro_conv1)
gyro_conv2 = Conv2D(CONV_NUM,(1, CONV_LEN_INTE),strides=(1,1),padding='valid',activation=None)(gyro_conv1)
gyro_conv2 = BatchNormalization(axis=1)(gyro_conv2)
gyro_conv2 = Activation('relu')(gyro_conv2)
gyro_conv2 = Dropout(0.2)(gyro_conv2)
gyro_conv3 = Conv2D(CONV_NUM,(1, CONV_LEN_LAST),strides=(1,1),padding='valid',activation=None)(gyro_conv2)
gyro_conv3 = BatchNormalization(axis=1)(gyro_conv3)
gyro_conv3 = Activation('relu')(gyro_conv3)
gyro_conv3 = Dropout(0.2)(gyro_conv3)
sensor_conv_in = concatenate([acc_conv3, gyro_conv3], 2)
sensor_conv_in = Dropout(0.2)(sensor_conv_in)
sensor_conv1 = Conv2D(CONV_NUM2,kernel_size=(2, CONV_MERGE_LEN),padding='SAME')(sensor_conv_in)
sensor_conv1 = BatchNormalization(axis=1)(sensor_conv1)
sensor_conv1 = Activation('relu')(sensor_conv1)
sensor_conv1 = Dropout(0.2)(sensor_conv1)
sensor_conv2 = Conv2D(CONV_NUM2,kernel_size=(2, CONV_MERGE_LEN2),padding='SAME')(sensor_conv1)
sensor_conv2 = BatchNormalization(axis=1)(sensor_conv2)
sensor_conv2 = Activation('relu')(sensor_conv2)
sensor_conv2 = Dropout(0.2)(sensor_conv2)
sensor_conv3 = Conv2D(CONV_NUM2,kernel_size=(2, CONV_MERGE_LEN3),padding='SAME')(sensor_conv2)
sensor_conv3 = BatchNormalization(axis=1)(sensor_conv3)
sensor_conv3 = Activation('relu')(sensor_conv3)
conv_shape = sensor_conv3.get_shape()
print(conv_shape)
x1 = Reshape(target_shape=(int(conv_shape[1]), int(conv_shape[2]*conv_shape[3])))(sensor_conv3)
x1 = Dense(64, activation='relu')(x1)
gru_1 = GRU(rnn_size, return_sequences=True, init='he_normal', name='gru1')(x1)
gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, init='he_normal', name='gru1_b')(x1)
gru1_merged = merge([gru_1, gru_1b], mode='sum')
gru_2 = GRU(rnn_size, return_sequences=True, init='he_normal', name='gru2')(gru1_merged)
gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, init='he_normal', name='gru2_b')(gru1_merged)
x = merge([gru_2, gru_2b], mode='concat')
x = Dropout(0.25)(x)
n_class=2
x = Dense(n_class)(x)
model = Model(input=[acc_input_tensor,gyro_input_tensor], output=x)
model.compile(loss='mean_squared_error',optimizer='adam')
model.fit(inputs=[acc,gyro],outputs=labels,batch_size=1, validation_split=0.2, epochs=2,verbose=1 ,
shuffle=False)
The acc and gyro arrays passed to model.fit have shape (200, 3), and the Input layer shape is (200, 3). Why do I get this error: "Error when checking input: expected acc_input to have 3 dimensions, but got array with shape (200, 3)"?
Shape (None, 200, 3) is used in Keras for batches; None stands for the batch size, because at the time of creating or reshaping the input arrays the batch size might be unknown. So if you use batch_size = 128, your batch input matrix will have shape (128, 200, 3).
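For example (a minimal sketch with dummy data, assuming acc and gyro are single samples of shape (200, 3)), you can add the leading batch dimension before calling model.fit:
import numpy as np

acc = np.random.randn(200, 3).astype('float32')    # one sample, shape (200, 3)
gyro = np.random.randn(200, 3).astype('float32')

# add the batch axis so each input becomes (1, 200, 3)
acc_batch = np.expand_dims(acc, axis=0)
gyro_batch = np.expand_dims(gyro, axis=0)
print(acc_batch.shape)   # (1, 200, 3): matches Input(shape=(200, 3)) plus the batch axis

# with several samples, stack them instead so the shape is (num_samples, 200, 3):
# acc_batch = np.stack([acc_sample_1, acc_sample_2], axis=0)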