Batch Normalization Inverse Computation using PyTorch

I have implemented a BatchNorm class that computes both batch normalization and its inverse. When I test it with single-batch tensors it works properly, but when I test it with multi-batch tensors it doesn't work right.
Code:
import torch
import torch.nn as nn

class BatchNorm(nn.Module):
    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.zeros(1, dim), requires_grad=True)
        self.beta = nn.Parameter(torch.zeros(1, dim), requires_grad=True)
        self.batch_mean = None
        self.batch_var = None

    def forward(self, x, reverse=False):
        B, C, W, H = x.shape
        if reverse == True:
            return self.reverse(x)
        if self.training:
            if B > 1:
                m = x.mean(dim=0)
                v = x.var(dim=0) + self.eps  # torch.mean((x - m) ** 2, axis=0) + self.eps
            else:
                m = torch.zeros(C, W, H)
                v = torch.zeros(C, W, H) + self.eps
            self.batch_mean = None
        else:
            if self.batch_mean is None:
                self.set_batch_stats_func(x)
            m = self.batch_mean.clone()
            v = self.batch_var.clone()
        B, C, W, H = x.shape
        gamma = self.gamma.unsqueeze(2).unsqueeze(3)
        gamma = torch.repeat_interleave(gamma, H, dim=2)
        gamma = torch.repeat_interleave(gamma, W, dim=3)
        beta = self.beta.unsqueeze(2).unsqueeze(3)
        beta = torch.repeat_interleave(beta, H, dim=2)
        beta = torch.repeat_interleave(beta, W, dim=3)
        x_hat = (x - m) / torch.sqrt(v)
        x_hat = x_hat * torch.exp(gamma) + beta
        x_2 = (x_hat - beta) * torch.exp(-gamma) * torch.sqrt(v) + m
        # print('forward: dist:', torch.dist(x, x_2))
        # print('forward: x:', x[0, 0, :3, :3])
        # print('forward: x_2:', x_2[0, 0, :3, :3])
        # print('forward: x_hat:', x_hat[0, 0, :3, :3])
        log_det = torch.sum(gamma - 0.5 * torch.log(v))
        return x_hat, log_det

    def reverse(self, x):
        B, C, W, H = x.shape
        if self.training:
            if B > 1:
                m = x.mean(dim=0)
                v = x.var(dim=0) + self.eps  # torch.mean((x - m) ** 2, axis=0) + self.eps
            else:
                m = torch.zeros(C, W, H)
                v = torch.zeros(C, W, H) + self.eps
            self.batch_mean = None
        else:
            if self.batch_mean is None:
                self.set_batch_stats_func(x)
            m = self.batch_mean
            v = self.batch_var
        B, C, W, H = x.shape
        gamma = self.gamma.unsqueeze(2).unsqueeze(3)
        gamma = torch.repeat_interleave(gamma, H, dim=2)
        gamma = torch.repeat_interleave(gamma, W, dim=3)
        beta = self.beta.unsqueeze(2).unsqueeze(3)
        beta = torch.repeat_interleave(beta, H, dim=2)
        beta = torch.repeat_interleave(beta, W, dim=3)
        x_hat = (x - beta) * torch.exp(-gamma) * torch.sqrt(v) + m
        # print('reverse: dist:', torch.dist(x, x_hat))
        # print('reverse: x:', x[0, 0, :3, :3])
        # print('reverse: x_hat:', x_hat[0, 0, :3, :3])
        log_det = torch.sum(-gamma + 0.5 * torch.log(v))
        return x_hat, log_det

    def set_batch_stats_func(self, x):
        print("setting batch stats for validation")
        self.batch_mean = x.mean(dim=0)
        self.batch_var = x.var(dim=0) + self.eps
Test on a single-batch tensor:
x = torch.rand(1, 10, 100, 100)
Batch = BatchNorm(10)
x1, _ = Batch(x, False)
x2, _ = Batch(x1, True)
torch.dist(x, x2)
The output is about zero, which suggests that both the forward and reverse paths work properly. But for multi-batch tensors:
x = torch.rand(3, 10, 100, 100)
Batch = BatchNorm(10)
x1, _ = Batch(x, False)
x2, _ = Batch(x1, True)
torch.dist(x, x2)
In this case, the result (the distance between the input and the reconstructed input) is a large number, when it should be near zero.
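A note on what the code shows, offered as a likely explanation rather than a confirmed diagnosis: in training mode with B > 1, forward() normalizes x with statistics computed from x itself, while reverse() recomputes the mean and variance from its own input, which is the already-normalized x_hat. Those are different statistics, so the inverse does not undo the forward pass. (With B == 1 both directions use the constants m = 0 and v = eps, which is why the single-batch test reconstructs exactly.) A minimal sketch of the mismatch and of the exact inverse, using the same tensor shapes as above:

import torch

x = torch.rand(3, 10, 100, 100)
m, v = x.mean(dim=0), x.var(dim=0) + 1e-5
x_hat = (x - m) / torch.sqrt(v)

# what reverse() effectively uses: statistics of its own (normalized) input
m2, v2 = x_hat.mean(dim=0), x_hat.var(dim=0) + 1e-5
print(torch.dist(m, m2), torch.dist(v, v2))  # both far from zero

# an exact inverse must reuse the statistics cached from the forward pass
x_rec = x_hat * torch.sqrt(v) + m
print(torch.dist(x, x_rec))  # ~0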

Related

How can I use DQN and DDPG to successfully train an agent in a customized environment?

I'm new to AI and want to get into the field. I have spent some time finishing a program that trains an agent in a simple customized environment, but after training it in Colab for 10000 episodes it still does not perform well. I am not sure whether something is wrong with the customized env or with the training process.
Env: a helicopter tries to get through a continuous flow of birds (max num: 10). The birds move from right to left, and fuel appears at random. While the helicopter is alive, i.e. it has not collided with a bird and still has fuel (initialized to 1000; when it collides with a fuel icon (max num: 2), fuel_left is reset to 1000), its reward increases by 1.
The environment is shown in the figure:
After 10000 episodes of DDPG/DQN, the agent still cannot survive more than 15 seconds. Could you point out where the problem is?
Action space (1 dim): 0, 1, 2, 3, 4 -> helicopter moves up, down, left, right, or stays static.
State space (28 dim): (x, y) for 10 birds, 2 fuel icons, and 1 helicopter, plus the fuel left and the reward obtained so far.
Rewards: if the helicopter is alive, the reward increases by 1.
The env settings code is as follows (custom.py):
import numpy as np
import cv2
import matplotlib.pyplot as plt
import random
import math
import time
from gym import Env, spaces

font = cv2.FONT_HERSHEY_COMPLEX_SMALL

class ChopperScape(Env):
    def __init__(self):
        super(ChopperScape, self).__init__()
        self.maxbirdnum = 10
        self.maxfuelnum = 2
        self.observation_shape = (28,)
        self.canvas_shape = (600, 800, 3)
        self.action_space = spaces.Discrete(5,)
        self.last_action = 0
        self.obs = np.zeros(self.observation_shape)
        self.canvas = np.ones(self.canvas_shape) * 1
        self.elements = []
        self.maxfuel = 1000
        self.y_min = int(self.canvas_shape[0] * 0.1)
        self.x_min = 0
        self.y_max = int(self.canvas_shape[0] * 0.9)
        self.x_max = self.canvas_shape[1]

    def draw_elements_on_canvas(self):
        self.canvas = np.ones(self.canvas_shape) * 1
        for elem in self.elements:
            elem_shape = elem.icon.shape
            x, y = elem.x, elem.y
            self.canvas[y:y + elem_shape[1], x:x + elem_shape[0]] = elem.icon
        text = 'Fuel Left: {} | Rewards: {}'.format(self.fuel_left, self.ep_return)
        self.canvas = cv2.putText(self.canvas, text, (10, 20), font, 0.8, (0, 0, 0), 1, cv2.LINE_AA)

    def reset(self):
        self.fuel_left = self.maxfuel
        self.ep_return = 0
        self.obs = np.zeros(self.observation_shape)
        self.obs[26] = self.maxfuel
        self.bird_count = 0
        self.fuel_count = 0
        x = random.randrange(int(self.canvas_shape[0] * 0.05), int(self.canvas_shape[0] * 0.90))
        y = random.randrange(int(self.canvas_shape[1] * 0.05), int(self.canvas_shape[1] * 0.90))
        self.chopper = Chopper("chopper", self.x_max, self.x_min, self.y_max, self.y_min)
        self.chopper.set_position(x, y)
        self.obs[24] = x
        self.obs[25] = y
        self.elements = [self.chopper]
        self.canvas = np.ones(self.canvas_shape) * 1
        self.draw_elements_on_canvas()
        return self.obs

    def get_action_meanings(self):
        return {0: "Right", 1: "Left", 2: "Down", 3: "Up", 4: "Do Nothing"}

    def has_collided(self, elem1, elem2):
        x_col = False
        y_col = False
        elem1_x, elem1_y = elem1.get_position()
        elem2_x, elem2_y = elem2.get_position()
        if 2 * abs(elem1_x - elem2_x) <= (elem1.icon_w + elem2.icon_w):
            x_col = True
        if 2 * abs(elem1_y - elem2_y) <= (elem1.icon_h + elem2.icon_h):
            y_col = True
        if x_col and y_col:
            return True
        return False

    def step(self, action):
        done = False
        reward = 1
        assert self.action_space.contains(action), "invalid action"
        if action == 4:
            self.chopper.move(0, 5)
        elif action == 1:
            self.chopper.move(0, -5)
        elif action == 2:
            self.chopper.move(5, 0)
        elif action == 0:
            self.chopper.move(-5, 0)
        elif action == 3:
            self.chopper.move(0, 0)
        if random.random() < 0.1 and self.bird_count < self.maxbirdnum:
            spawned_bird = Bird("bird_{}".format(self.bird_count), self.x_max, self.x_min, self.y_max, self.y_min)
            self.bird_count += 1
            bird_y = random.randrange(self.y_min, self.y_max)
            spawned_bird.set_position(self.x_max, bird_y)
            self.elements.append(spawned_bird)
        if random.random() < 0.05 and self.fuel_count < self.maxfuelnum:
            spawned_fuel = Fuel("fuel_{}".format(self.bird_count), self.x_max, self.x_min, self.y_max, self.y_min)
            self.fuel_count += 1
            fuel_x = random.randrange(self.x_min, self.x_max)
            fuel_y = self.y_max
            spawned_fuel.set_position(fuel_x, fuel_y)
            self.elements.append(spawned_fuel)
        for elem in self.elements:
            if isinstance(elem, Bird):
                if elem.get_position()[0] <= self.x_min:
                    self.elements.remove(elem)
                    self.bird_count -= 1
                else:
                    elem.move(-5, 0)
                if self.has_collided(self.chopper, elem):
                    done = True
                    reward = -100000.0 * (1.0 / self.ep_return + 1)
            if isinstance(elem, Fuel):
                flag1 = False
                flag2 = False
                if self.has_collided(self.chopper, elem):
                    self.fuel_left = self.maxfuel
                    flag1 = True
                    reward += 2
                    # time.sleep(0.5)
                if elem.get_position()[1] <= self.y_min:
                    flag2 = True
                    self.fuel_count -= 1
                else:
                    elem.move(0, -5)
                if flag1 == True or flag2 == True:
                    self.elements.remove(elem)
        self.fuel_left -= 1
        if self.fuel_left == 0:
            done = True
        self.draw_elements_on_canvas()
        self.ep_return += 1
        birdnum = 0
        fuelnum = 0
        x_, y_ = self.chopper.get_position()
        dis = 0.0
        for elem in self.elements:
            x, y = elem.get_position()
            if isinstance(elem, Bird):
                self.obs[2 * birdnum] = x
                self.obs[2 * birdnum + 1] = y
                birdnum += 1
                dis += math.hypot(x_ - x, y_ - y)
            if isinstance(elem, Fuel):
                base = self.maxbirdnum * 2
                self.obs[base + 2 * fuelnum] = x
                self.obs[base + 2 * fuelnum + 1] = y
                fuelnum += 1
        self.obs[24] = x_
        self.obs[25] = y_
        self.obs[26] = self.fuel_left
        self.obs[27] = self.ep_return
        if x_ == self.x_min or x_ == self.x_max or y_ == self.y_max or y_ == self.y_min:
            reward -= random.random()
        for i in range(26):
            if i % 2 == 0:
                self.obs[i] /= 800.0
            else:
                self.obs[i] /= 600.0
        self.obs[26] /= 1000.0
        self.obs[27] /= 100.0
        # print('reward:', reward)
        # if done == True:
        #     time.sleep(1)
        return self.obs, reward, done, {}

    def render(self, mode="human"):
        assert mode in ["human", "rgb_array"], "Invalid mode, must be either \"human\" or \"rgb_array\""
        if mode == "human":
            cv2.imshow("Game", self.canvas)
            cv2.waitKey(10)
        elif mode == "rgb_array":
            return self.canvas

    def close(self):
        cv2.destroyAllWindows()

class Point(object):
    def __init__(self, name, x_max, x_min, y_max, y_min):
        self.x = 0
        self.y = 0
        self.x_min = x_min
        self.x_max = x_max
        self.y_min = y_min
        self.y_max = y_max
        self.name = name

    def set_position(self, x, y):
        self.x = self.clamp(x, self.x_min, self.x_max - self.icon_w)
        self.y = self.clamp(y, self.y_min, self.y_max - self.icon_h)

    def get_position(self):
        return (self.x, self.y)

    def move(self, del_x, del_y):
        self.x += del_x
        self.y += del_y
        self.x = self.clamp(self.x, self.x_min, self.x_max - self.icon_w)
        self.y = self.clamp(self.y, self.y_min, self.y_max - self.icon_h)

    def clamp(self, n, minn, maxn):
        return max(min(maxn, n), minn)

class Chopper(Point):
    def __init__(self, name, x_max, x_min, y_max, y_min):
        super(Chopper, self).__init__(name, x_max, x_min, y_max, y_min)
        self.icon = cv2.imread("chopper1.jpg") / 255.0
        self.icon_w = 64
        self.icon_h = 64
        self.icon = cv2.resize(self.icon, (self.icon_h, self.icon_w))

class Bird(Point):
    def __init__(self, name, x_max, x_min, y_max, y_min):
        super(Bird, self).__init__(name, x_max, x_min, y_max, y_min)
        self.icon = cv2.imread("bird1.jpg") / 255.0
        self.icon_w = 32
        self.icon_h = 32
        self.icon = cv2.resize(self.icon, (self.icon_h, self.icon_w))

class Fuel(Point):
    def __init__(self, name, x_max, x_min, y_max, y_min):
        super(Fuel, self).__init__(name, x_max, x_min, y_max, y_min)
        self.icon = cv2.imread("fuel1.jpg") / 255.0
        self.icon_w = 32
        self.icon_h = 32
        self.icon = cv2.resize(self.icon, (self.icon_h, self.icon_w))

if __name__ == '__main__':
    from IPython import display
    env = ChopperScape()
    obs = env.reset()
    while True:
        # random agent
        action = random.randrange(-1, 1)
        obs, reward, done, info = env.step(action)
        # Render the game
        env.render()
        if done == True:
            break
    env.close()
The DDPG algorithm to train the agent is as follows (ddpg.py):
from custom import ChopperScape
import random
import collections
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Hyperparameters
lr_mu = 0.005
lr_q = 0.01
gamma = 0.99
batch_size = 32
buffer_limit = 50000
tau = 0.005  # for target network soft update

class ReplayBuffer():
    def __init__(self):
        self.buffer = collections.deque(maxlen=buffer_limit)

    def put(self, transition):
        self.buffer.append(transition)

    def sample(self, n):
        mini_batch = random.sample(self.buffer, n)
        s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
        for transition in mini_batch:
            s, a, r, s_prime, done = transition
            s_lst.append(s)
            a_lst.append([a])
            r_lst.append(r)
            s_prime_lst.append(s_prime)
            done_mask = 0.0 if done else 1.0
            done_mask_lst.append(done_mask)
        return torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst, dtype=torch.float), \
               torch.tensor(r_lst, dtype=torch.float), torch.tensor(s_prime_lst, dtype=torch.float), \
               torch.tensor(done_mask_lst, dtype=torch.float)

    def size(self):
        return len(self.buffer)

class MuNet(nn.Module):
    def __init__(self):
        super(MuNet, self).__init__()
        self.fc1 = nn.Linear(28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc_mu = nn.Linear(64, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        mu = torch.tanh(self.fc_mu(x))
        return mu

class QNet(nn.Module):
    def __init__(self):
        super(QNet, self).__init__()
        self.fc_s = nn.Linear(28, 64)
        self.fc_a = nn.Linear(1, 64)
        self.fc_q = nn.Linear(128, 32)
        self.fc_out = nn.Linear(32, 1)

    def forward(self, x, a):
        h1 = F.relu(self.fc_s(x))
        h2 = F.relu(self.fc_a(a))
        cat = torch.cat([h1, h2], dim=1)
        q = F.relu(self.fc_q(cat))
        q = self.fc_out(q)
        return q

class OrnsteinUhlenbeckNoise:
    def __init__(self, mu):
        self.theta, self.dt, self.sigma = 0.1, 0.01, 0.1
        self.mu = mu
        self.x_prev = np.zeros_like(self.mu)

    def __call__(self):
        x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \
            self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        self.x_prev = x
        return x

def train(mu, mu_target, q, q_target, memory, q_optimizer, mu_optimizer):
    s, a, r, s_prime, done_mask = memory.sample(batch_size)
    core = q_target(s_prime, mu_target(s_prime)) * done_mask
    target = r + gamma * core
    q_loss = F.smooth_l1_loss(q(s, a), target.detach())
    q_optimizer.zero_grad()
    q_loss.backward()
    q_optimizer.step()
    mu_loss = -q(s, mu(s)).mean()  # That's all for the policy loss.
    mu_optimizer.zero_grad()
    mu_loss.backward()
    mu_optimizer.step()

def soft_update(net, net_target):
    for param_target, param in zip(net_target.parameters(), net.parameters()):
        param_target.data.copy_(param_target.data * (1.0 - tau) + param.data * tau)

def main():
    env = ChopperScape()
    memory = ReplayBuffer()
    q, q_target = QNet(), QNet()
    q_target.load_state_dict(q.state_dict())
    mu, mu_target = MuNet(), MuNet()
    mu_target.load_state_dict(mu.state_dict())
    score = 0.0
    print_interval = 20
    mu_optimizer = optim.Adam(mu.parameters(), lr=lr_mu)
    q_optimizer = optim.Adam(q.parameters(), lr=lr_q)
    ou_noise = OrnsteinUhlenbeckNoise(mu=np.zeros(1))
    for n_epi in range(10000):
        s = env.reset()
        done = False
        while not done:
            a = mu(torch.from_numpy(s).float())
            a = a.item() + ou_noise()[0]
            print('action:', a)
            s_prime, r, done, info = env.step(a)
            env.render()
            memory.put((s, a, r / 100.0, s_prime, done))
            score += r
            s = s_prime
        if memory.size() > 20000:
            for _ in range(10):
                train(mu, mu_target, q, q_target, memory, q_optimizer, mu_optimizer)
                soft_update(mu, mu_target)
                soft_update(q, q_target)
        if n_epi % print_interval == 0 and n_epi != 0:
            print("# of episode :{}, avg score : {:.1f}".format(n_epi, score / print_interval))
            score = 0.0
    env.close()

if __name__ == '__main__':
    main()
The DQN algorithm is as follows (dqn.py):
import gym
import collections
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from custom import ChopperScape

# Hyperparameters
learning_rate = 0.0005
gamma = 0.98
buffer_limit = 50000
batch_size = 32

class ReplayBuffer():
    def __init__(self):
        self.buffer = collections.deque(maxlen=buffer_limit)

    def put(self, transition):
        self.buffer.append(transition)

    def sample(self, n):
        mini_batch = random.sample(self.buffer, n)
        s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
        for transition in mini_batch:
            s, a, r, s_prime, done_mask = transition
            s_lst.append(s)
            a_lst.append([a])
            r_lst.append([r])
            s_prime_lst.append(s_prime)
            done_mask_lst.append([done_mask])
        return torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), \
               torch.tensor(r_lst), torch.tensor(s_prime_lst, dtype=torch.float), \
               torch.tensor(done_mask_lst)

    def size(self):
        return len(self.buffer)

class Qnet(nn.Module):
    def __init__(self):
        super(Qnet, self).__init__()
        self.fc1 = nn.Linear(28, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 5)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def sample_action(self, obs, epsilon):
        out = self.forward(obs)
        # coin = random.random()
        # if coin < epsilon:
        #     return random.randint(0, 1)
        # else:
        #     return out.argmax().item()
        return out.argmax().item()

def train(q, q_target, memory, optimizer):
    for _ in range(10):
        s, a, r, s_prime, done_mask = memory.sample(batch_size)
        q_out = q(s)
        q_a = q_out.gather(1, a)
        max_q_prime = q_target(s_prime).max(1)[0].unsqueeze(1)
        target = r + gamma * max_q_prime * done_mask
        loss = F.smooth_l1_loss(q_a, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def main():
    env = ChopperScape()
    q = torch.load('10000_dqn_3.pt')
    q_target = torch.load('10000_dqn_3_qtarget.pt')
    # q_target.load_state_dict(q.state_dict())
    memory = ReplayBuffer()
    print_interval = 20
    score = 0.0
    optimizer = optim.Adam(q.parameters(), lr=learning_rate)
    for n_epi in range(10000):
        epsilon = max(0.01, 0.08 - 0.01 * (n_epi / 200))  # Linear annealing from 8% to 1%
        s = env.reset()
        done = False
        while not done:
            a = q.sample_action(torch.from_numpy(s).float(), epsilon)
            s_prime, r, done, info = env.step(a)
            env.render()
            done_mask = 0.0 if done else 1.0
            memory.put((s, a, r, s_prime, done_mask))
            s = s_prime
            if done:
                break
            score += r
        if memory.size() > 20000:
            train(q, q_target, memory, optimizer)
        if n_epi % print_interval == 0 and n_epi != 0:
            q_target.load_state_dict(q.state_dict())
            print("n_episode :{}, score : {:.1f}, n_buffer : {}, eps : {:.1f}%".format(
                n_epi, score / print_interval, memory.size(), epsilon * 100))
            score = 0.0
    env.close()

def test():
    env = ChopperScape()
    q = torch.load('10000_dqn_q.pt')
    done = False
    s = env.reset()
    while not done:
        a = q.sample_action(torch.from_numpy(s).float(), 1)
        s_prime, r, done, info = env.step(a)
        env.render()
        s = s_prime
        if done:
            break

if __name__ == '__main__':
    main()
When running DQN, please comment out the action-conversion part in custom.py / class ChopperScape / step. After 10000 episodes of DDPG/DQN, the agent still cannot survive more than 15 seconds; could you point out where the problem is?
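Two things stand out in the posted code, noted here as observations rather than a full answer. First, ChopperScape.step() asserts self.action_space.contains(action) with action_space = Discrete(5), while the DDPG actor produces a continuous tanh output plus OU noise, so env.step(a) is called with a float. Second, Qnet.sample_action() has its epsilon-greedy branch commented out, so the DQN agent never explores. A hedged sketch of a discretizing adapter for the DDPG side (to_discrete is hypothetical, not from the post):

# Hypothetical adapter (not in the original post): map the DDPG actor's
# continuous output in roughly [-1, 1] to one of the 5 discrete actions.
def to_discrete(a, n_actions=5):
    idx = int((a + 1.0) / 2.0 * n_actions)   # scale [-1, 1] onto [0, n_actions)
    return min(max(idx, 0), n_actions - 1)   # clip noise that leaves the range

# usage inside the DDPG episode loop:
# a = mu(torch.from_numpy(s).float()).item() + ou_noise()[0]
# s_prime, r, done, info = env.step(to_discrete(a))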

Tensor conversion requested dtype int64 for Tensor with dtype float32 when creating CNN model

I tried to create a CNN but I got an error. Could you figure out what I did wrong?
# imports/aliases assumed from context (Keras is used through these names below):
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import layers as L
from tensorflow.keras import models as M

C1, C2 = tf.constant(70, dtype='float32'), tf.constant(1000, dtype="float32")

def score(y_true, y_pred):
    tf.dtypes.cast(y_true, tf.float32)
    tf.dtypes.cast(y_pred, tf.float32)
    sigma = y_pred[:, 2] - y_pred[:, 0]
    fvc_pred = y_pred[:, 1]
    # sigma_clip = sigma + C1
    sigma_clip = tf.maximum(sigma, C1)
    delta = tf.abs(y_true[:, 0] - fvc_pred)
    delta = tf.minimum(delta, C2)
    sq2 = tf.sqrt(tf.dtypes.cast(2, dtype=tf.float32))
    metric = (delta / sigma_clip) * sq2 + tf.math.log(sigma_clip * sq2)
    return K.mean(metric)

def mloss(_lambda):
    def loss(y_true, y_pred):
        # qloss is referenced here but not shown in the question
        return _lambda * qloss(y_true, y_pred) + (1 - _lambda) * score(y_true, y_pred)
    return loss

def make_model():
    z = L.Input((9,), name="Patient")
    x = L.Dense(100, activation="relu", name="d1")(z)
    x = L.Dense(100, activation="relu", name="d2")(x)
    p1 = L.Dense(3, activation="linear", name="p1")(x)
    p2 = L.Dense(3, activation="relu", name="p2")(x)
    preds = L.Lambda(lambda x: x[0] + tf.cumsum(x[1], axis=1), name="preds")([p1, p2])
    model = M.Model(z, preds, name="CNN")
    model.compile(loss=mloss(0.8), optimizer="adam", metrics=[score])
    return model

# z, y, tr_idx, val_idx come from code not shown in the question
net = make_model()
net.fit(z[tr_idx], y[tr_idx], batch_size=200, epochs=1000,
        validation_data=(z[val_idx], y[val_idx]), verbose=0)
Error here:
ValueError: Tensor conversion requested dtype int64 for Tensor with dtype float32: <tf.Tensor 'CNN/preds/add:0' shape=(None, 3) dtype=float32>
I tried a type cast, but it didn't solve the problem.
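One thing worth checking, offered as a likely direction rather than a confirmed fix: the two tf.dtypes.cast calls at the top of score discard their results. tf.cast returns a new tensor instead of modifying its argument, so y_true keeps its integer dtype unless the result is assigned back:

import tensorflow as tf

# tf.cast returns a new tensor; without the assignment y_true stays int64
y_true = tf.constant([[100], [200]], dtype=tf.int64)  # stand-in for integer labels
y_true = tf.cast(y_true, tf.float32)
print(y_true.dtype)  # float32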

PyTorch running out of memory when initialising CNN model

Trying to implement a SAGEConv classifier using PyTorch. Here's the model definition:
embed_dim = 128
import torch
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv, GraphConv, TopKPooling, GatedGraphConv  # SAGEConv added; it was used but not imported
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = SAGEConv(embed_dim, 128)
        self.pool1 = TopKPooling(128, ratio=0.8)
        self.conv2 = SAGEConv(128, 128)
        self.pool2 = TopKPooling(128, ratio=0.8)
        self.conv3 = SAGEConv(128, 128)
        self.pool3 = TopKPooling(128, ratio=0.8)
        # df comes from code not shown in the question
        self.item_embedding = torch.nn.Embedding(num_embeddings=df.item_id.max() + 1, embedding_dim=embed_dim)
        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, 1)
        self.bn1 = torch.nn.BatchNorm1d(128)
        self.bn2 = torch.nn.BatchNorm1d(64)
        self.act1 = torch.nn.ReLU()
        self.act2 = torch.nn.ReLU()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = self.item_embedding(x)
        x = x.squeeze(1)
        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, _, batch, _ = self.pool1(x, edge_index, None, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, _, batch, _ = self.pool2(x, edge_index, None, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, _, batch, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = x1 + x2 + x3
        x = self.lin1(x)
        x = self.act1(x)
        x = self.lin2(x)
        x = self.act2(x)
        x = F.dropout(x, p=0.5, training=self.training)
        x = torch.sigmoid(self.lin3(x)).squeeze(1)
        return x
I get the following error:
RuntimeError: [enforce fail at ..\c10\core\CPUAllocator.cpp:75] data. DefaultCPUAllocator: not enough memory: you tried to allocate 603564952576 bytes. Buy new RAM!
When initialising the model:
device = torch.device('cuda')
model = Net().to(device)
I am new to PyTorch. Any help would be appreciated. Thank you!
Turns out that df.item_id.max() was too large because I did not subset the data, so the embedding dimensionality was far too high.
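The failed allocation size is consistent with that explanation: nn.Embedding allocates a num_embeddings x embedding_dim float32 weight matrix, so 603,564,952,576 bytes at embed_dim = 128 implies roughly 1.2 billion embedding rows. A quick back-of-the-envelope check:

# each embedding row is embed_dim float32 values = 128 * 4 bytes
embed_dim = 128
bytes_requested = 603_564_952_576          # from the error message
implied_rows = bytes_requested // (embed_dim * 4)
print(implied_rows)                        # ~1.18e9, so df.item_id.max() was enormous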

CUDA out of memory error when doing matrix multiplication using Numba

I need to multiply a matrix with its transpose, and I am running out of memory on my GPU with the error message numba.cuda.cudadrv.driver.CudaAPIError: [2] Call to cuMemAlloc results in CUDA_ERROR_OUT_OF_MEMORY.
I am expecting the size of my matrix to be around 10k rows and 100k columns, so multiplying it with its transpose will give a square matrix of 10k rows and 10k columns. The matrix only contains 0 and 1.
This is the script that I am running.
from numba import cuda, uint16
import numba
import numpy
import math
import time

TPB = 16

@cuda.jit()
def matmul_shared_mem(A, B, C):
    sA = cuda.shared.array((TPB, TPB), dtype=uint16)
    sB = cuda.shared.array((TPB, TPB), dtype=uint16)
    x, y = cuda.grid(2)
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    if x >= C.shape[0] and y >= C.shape[1]:
        return
    tmp = 0.
    for i in range(int(A.shape[1] / TPB)):
        sA[tx, ty] = A[x, ty + i * TPB]
        sB[tx, ty] = B[tx + i * TPB, y]
        cuda.syncthreads()
        for j in range(TPB):
            tmp += sA[tx, j] * sB[j, ty]
        cuda.syncthreads()
    C[x, y] = tmp

A = numpy.random.randint(2, size=(TPB * 625, 50000))
B = A.transpose()
C_shared_mem = cuda.device_array((A.shape[0], B.shape[1]))
threads_per_block = (TPB, TPB)
blocks_per_grid_x = int(math.ceil(A.shape[0] / threads_per_block[0]))
blocks_per_grid_y = int(math.ceil(B.shape[1] / threads_per_block[1]))
blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)
start_gpu_shared_memory = time.time()
matmul_shared_mem[blocks_per_grid, threads_per_block](A, B, C_shared_mem)
cuda.synchronize()
end_gpu_shared_memory = time.time()
time_gpu_shared = end_gpu_shared_memory - start_gpu_shared_memory
print("GPU time(shared memory):" + str(time_gpu_shared))
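For scale, a rough device-memory estimate for this script, assuming NumPy's default int64 for the randint output and float64 for cuda.device_array: the A copy, the separately transferred transpose, and the result together already exceed a typical 8 GB GPU:

rows, cols = 16 * 625, 50000               # matches TPB * 625 above
A_bytes = rows * cols * 8                  # int64 input copied to the device
B_bytes = cols * rows * 8                  # the transpose is transferred as its own array
C_bytes = rows * rows * 8                  # float64 output of cuda.device_array
print((A_bytes + B_bytes + C_bytes) / 1024**3, 'GiB')  # about 8.2 GiB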
Update 1:
Based on your suggestions, I made certain changes, but I am still running out of memory.
import numpy as np
import numba as nb

colm = int(200000 / 8)
rows = 100000
cols = int(colm * 8)
AU = np.random.randint(2, size=(rows, cols), dtype=np.int8)
A = np.empty((rows, colm), dtype=np.uint8)

@nb.njit('void(uint8[:,:],int8[:,:])', parallel=True)
def compute(A, AU):
    for i in nb.prange(A.shape[0]):
        for j in range(A.shape[1]):
            offset = j * 8
            res = AU[i, offset] << 7
            res |= AU[i, offset + 1] << 6
            res |= AU[i, offset + 2] << 5
            res |= AU[i, offset + 3] << 4
            res |= AU[i, offset + 4] << 3
            res |= AU[i, offset + 5] << 2
            res |= AU[i, offset + 6] << 1
            res |= AU[i, offset + 7]
            A[i, j] = res

compute(A, AU)

from numba import cuda, uint8, int32
import numba
import numpy as np
import math
import time

TPB = 8
TPB1 = 9

@cuda.jit()
def bit_A_AT(A, C):
    sA = cuda.shared.array((TPB, TPB), dtype=uint8)
    sB = cuda.shared.array((TPB, TPB1), dtype=uint8)
    x, y = cuda.grid(2)
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    bx = cuda.blockIdx.x
    by = cuda.blockIdx.y
    if bx >= by:
        tmp = int32(0)
        for i in range((A.shape[1] + TPB - 1) // TPB):
            if y < A.shape[0] and (i * TPB + tx) < A.shape[1]:
                sA[ty, tx] = A[y, i * TPB + tx]
            else:
                sA[ty, tx] = 0
            if (TPB * bx + ty) < A.shape[0] and (i * TPB + tx) < A.shape[1]:
                sB[ty, tx] = A[TPB * bx + ty, i * TPB + tx]
            else:
                sB[ty, tx] = 0
            cuda.syncthreads()
            for j in range(TPB):
                tmp1 = sA[ty, j] & sB[tx, j]
                test = uint8(1)
                for k in range(8):
                    if (tmp1 & test) > 0:
                        tmp += 1
                    test <<= 1
            cuda.syncthreads()
        if y < C.shape[0] and x < C.shape[1]:
            C[y, x] = tmp

C = np.empty((A.shape[0], A.shape[0]), dtype=np.int32)
threads_per_block = (TPB, TPB)
blocks_per_grid_x = int(math.ceil(A.shape[0] / threads_per_block[0]))
blocks_per_grid_y = int(math.ceil(A.shape[0] / threads_per_block[1]))
blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)
start_gpu_shared_memory = time.time()
bit_A_AT[blocks_per_grid, threads_per_block](A, C)
cuda.synchronize()
end_gpu_shared_memory = time.time()
time_gpu_shared = end_gpu_shared_memory - start_gpu_shared_memory
print("GPU time(shared memory):" + str(time_gpu_shared))
Any idea how I can fix this?
The following method should reduce the amount of device memory required for the calculation of A x AT. We'll use the following ideas:
- since the input array (A) only takes on values of 0 and 1, we'll reduce the storage for that array down to the minimum convenient size, int8, i.e. one byte per element
- since the B array is just the transpose of the A array, there is no need to handle it explicitly. We can derive it from the A array, somewhat similar to here, although that is performing AT x A
- the matrix multiplication of A x AT involves taking the dot-product of the rows of matrix A, as indicated here
- we will provide the A transposed version in the sB array using adjusted indexing
- there are a range of other changes to your code, to address various errors and also to improve load/store efficiency, such as a general reversal of your usage of x, y indices
I've also fixed your usage of syncthreads and modified the code to allow arbitrary values for row and column dimensions.
Here is a worked example:
$ cat t62.py
from numba import cuda, int32, int8
import numba
import numpy as np
import math
import time

TPB = 32
TPB1 = TPB + 1

@cuda.jit()
def byte_A_AT(A, C):
    sA = cuda.shared.array((TPB, TPB), dtype=int8)
    sB = cuda.shared.array((TPB, TPB1), dtype=int8)
    x, y = cuda.grid(2)
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    bx = cuda.blockIdx.x
    by = cuda.blockIdx.y
    # uncomment and indent remainder of kernel to only do the "symmetric half" of calculation
    # if bx >= by:
    tmp = int32(0)
    for i in range((A.shape[1] + TPB - 1) // TPB):
        if y < A.shape[0] and (i * TPB + tx) < A.shape[1]:
            sA[ty, tx] = A[y, i * TPB + tx]
        else:
            sA[ty, tx] = 0
        if (TPB * bx + ty) < A.shape[0] and (i * TPB + tx) < A.shape[1]:
            sB[ty, tx] = A[TPB * bx + ty, i * TPB + tx]
        else:
            sB[ty, tx] = 0
        cuda.syncthreads()
        for j in range(TPB):
            tmp += int32(sA[ty, j]) * int32(sB[tx, j])
        cuda.syncthreads()
    if y < C.shape[0] and x < C.shape[1]:
        C[y, x] = tmp

rows = 1041
cols = 1043
print('host mem: ', (rows * cols * 2 + rows * rows * 4 * 2) // 1048576, 'MB device mem: ', (rows * cols + rows * rows * 4) // 1048576, 'MB')
A = np.random.randint(2, size=(rows, cols), dtype=np.int8)
AT = A.transpose()
CU = np.matmul(A, AT, dtype=np.int32)
C = np.empty((A.shape[0], A.shape[0]), dtype=np.int32)
threads_per_block = (TPB, TPB)
blocks_per_grid_x = int(math.ceil(A.shape[0] / threads_per_block[0]))
blocks_per_grid_y = int(math.ceil(A.shape[0] / threads_per_block[1]))
blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)
byte_A_AT[blocks_per_grid, threads_per_block](A, C)
cuda.synchronize()
start_gpu_shared_memory = time.time()
byte_A_AT[blocks_per_grid, threads_per_block](A, C)
cuda.synchronize()
end_gpu_shared_memory = time.time()
time_gpu_shared = end_gpu_shared_memory - start_gpu_shared_memory
print("GPU time(shared memory):" + str(time_gpu_shared))
test = np.array_equal(C, CU)
print(test)
if test == False:
    for i in range(C.shape[0]):
        for j in range(C.shape[1]):
            if C[i, j] != CU[i, j]:
                print(i, ' ', j, ' ', C[i, j], ' ', CU[i, j])
$ python t62.py
host mem: 10 MB device mem: 5 MB
GPU time(shared memory):0.019593000411987305
True
$
Notes:
- most of the runtime of the above code will be spent in Python (in the np.matmul() operation, which is really only used to verify results; it should not be necessary for an actual implementation), not in the GPU portion. As the matrices are made larger, the code will run much more slowly.
- as mentioned in the comments, the result of A x AT is a symmetric matrix. My code does not take advantage of this; however, we could crudely take advantage of it by uncommenting the if test at the beginning of the kernel and then indenting the remainder of the kernel. However, this will cause the host code np.array_equal test to fail, of course.
- the device memory consumption for this is calculated in the code. For your largest value in the comments (rows = 30k, cols = 200k) this would amount to about 10GB, so it will still not run in your 8GB GPU.
- I have created a version of this code which packs 8 elements per byte for the A matrix, which would further reduce memory demand; however, writing this code to handle the case of arbitrary column dimensions (vs. multiples of 8) proves to be rather messy. However, that code could get the total device memory consumption down to about 5GB for the 30k rows, 200k columns case.
Here is the one-bit-per-element version; it has a requirement that the number of columns be whole-number divisible by 8:
$ cat t61.py
from numba import cuda, uint8, int32
import numba
import numpy as np
import math
import time

TPB = 32
TPB1 = 33

@cuda.jit()
def bit_A_AT(A, C):
    sA = cuda.shared.array((TPB, TPB), dtype=uint8)
    sB = cuda.shared.array((TPB, TPB1), dtype=uint8)
    x, y = cuda.grid(2)
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    bx = cuda.blockIdx.x
    by = cuda.blockIdx.y
    tmp = int32(0)
    for i in range((A.shape[1] + TPB - 1) // TPB):
        if y < A.shape[0] and (i * TPB + tx) < A.shape[1]:
            sA[ty, tx] = A[y, i * TPB + tx]
        else:
            sA[ty, tx] = 0
        if (TPB * bx + ty) < A.shape[0] and (i * TPB + tx) < A.shape[1]:
            sB[ty, tx] = A[TPB * bx + ty, i * TPB + tx]
        else:
            sB[ty, tx] = 0
        cuda.syncthreads()
        for j in range(TPB):
            tmp1 = sA[ty, j] & sB[tx, j]
            test = uint8(1)
            for k in range(8):
                if (tmp1 & test) > 0:
                    tmp += 1
                test <<= 1
        cuda.syncthreads()
    if y < C.shape[0] and x < C.shape[1]:
        C[y, x] = tmp

colm = 131
rows = 1041
cols = int(colm * 8)
print('host mem: ', (rows * cols * 2 + rows * rows * 4 * 2) // 1048576, 'MB device mem: ', (((rows * cols) // 8) + rows * rows * 4) // 1048576, 'MB')
AU = np.random.randint(2, size=(rows, cols), dtype=np.int8)
AUT = AU.transpose()
CU = np.matmul(AU, AUT, dtype=np.int32)
A = np.empty((rows, colm), dtype=np.uint8)
for i in range(A.shape[0]):
    for j in range(A.shape[1]):
        A[i, j] = 0
        for k in range(8):
            if AU[i, (j * 8) + k] == 1:
                A[i, j] = A[i, j] | (1 << (7 - k))
C = np.empty((A.shape[0], A.shape[0]), dtype=np.int32)
threads_per_block = (TPB, TPB)
blocks_per_grid_x = int(math.ceil(A.shape[0] / threads_per_block[0]))
blocks_per_grid_y = int(math.ceil(A.shape[0] / threads_per_block[1]))
blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)
bit_A_AT[blocks_per_grid, threads_per_block](A, C)
cuda.synchronize()
start_gpu_shared_memory = time.time()
bit_A_AT[blocks_per_grid, threads_per_block](A, C)
cuda.synchronize()
end_gpu_shared_memory = time.time()
time_gpu_shared = end_gpu_shared_memory - start_gpu_shared_memory
print("GPU time(shared memory):" + str(time_gpu_shared))
test = np.array_equal(C, CU)
print(test)
if test == False:
    for i in range(C.shape[0]):
        for j in range(C.shape[1]):
            if C[i, j] != CU[i, j]:
                print(i, ' ', j, ' ', C[i, j], ' ', CU[i, j])
                break
$ python t61.py
host mem: 10 MB device mem: 4 MB
GPU time(shared memory):0.009343624114990234
True
$
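As an aside, the divisible-by-8 requirement could presumably be relaxed by zero-padding the column count before bit-packing; padded zero bits contribute nothing to the AND/popcount dot products, so A x AT is unchanged. A small sketch of that idea (not part of the original answer):

import numpy as np

rows, cols = 1041, 1043                     # a column count not divisible by 8
AU = np.random.randint(2, size=(rows, cols), dtype=np.int8)
pad = (-cols) % 8                           # 5 extra zero columns here
AU_padded = np.pad(AU, ((0, 0), (0, pad)))  # zero padding, shape (1041, 1048)
lhs = AU.astype(np.int32) @ AU.astype(np.int32).T
rhs = AU_padded.astype(np.int32) @ AU_padded.astype(np.int32).T
print(np.array_equal(lhs, rhs))             # True: padding does not change A x AT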
EDIT: Responding to some questions in the comments and updates, and now taking into account that the A matrix may have significantly more than 30k rows, this will cause the C matrix to increase as well. If the A matrix can fit in GPU memory, we can reduce the memory demand of the C matrix by computing it in pieces. These pieces will be a group of rows computed together, which I refer to as a row_slice of the C matrix. The following code demonstrates that this can be achieved with relatively minor changes to the code above:
$ cat t63.py
from numba import cuda, uint8, int32
import numba as nb
import numpy as np
import math
import time

TPB = 32
TPB1 = 33

@cuda.jit()
def bit_slice_A_AT(A, C, row_offset):
    sA = cuda.shared.array((TPB, TPB), dtype=uint8)
    sB = cuda.shared.array((TPB, TPB1), dtype=uint8)
    x, y = cuda.grid(2)
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    bx = cuda.blockIdx.x
    by = cuda.blockIdx.y
    tmp = int32(0)
    for i in range((A.shape[1] + TPB - 1) // TPB):
        if y + row_offset < A.shape[0] and (i * TPB + tx) < A.shape[1]:
            sA[ty, tx] = A[y + row_offset, i * TPB + tx]
        else:
            sA[ty, tx] = 0
        if (TPB * bx + ty) < A.shape[0] and (i * TPB + tx) < A.shape[1]:
            sB[ty, tx] = A[TPB * bx + ty, i * TPB + tx]
        else:
            sB[ty, tx] = 0
        cuda.syncthreads()
        for j in range(TPB):
            tmp1 = sA[ty, j] & sB[tx, j]
            test = uint8(1)
            for k in range(8):
                if (tmp1 & test) > 0:
                    tmp += 1
                test <<= 1
        cuda.syncthreads()
    if y < C.shape[0] and x < C.shape[1]:
        C[y, x] = tmp

@nb.njit('void(uint8[:,:],int8[:,:])', parallel=True)
def bitpack(A, AU):
    for i in nb.prange(A.shape[0]):
        for j in range(A.shape[1]):
            offset = j * 8
            res = AU[i, offset] << 7
            res |= AU[i, offset + 1] << 6
            res |= AU[i, offset + 2] << 5
            res |= AU[i, offset + 3] << 4
            res |= AU[i, offset + 4] << 3
            res |= AU[i, offset + 5] << 2
            res |= AU[i, offset + 6] << 1
            res |= AU[i, offset + 7]
            A[i, j] = res

colm = 131
rows = 1535
cols = int(colm * 8)
row_slice = 512
print('host mem: ', (rows * cols * 2 + rows * rows * 4 * 2) // 1048576, 'MB device mem: ', (((rows * cols) // 8) + row_slice * rows * 4) // 1048576, 'MB')
AU = np.random.randint(2, size=(rows, cols), dtype=np.int8)
CU = np.matmul(AU, AU.T, dtype=np.int32)
A = np.empty((rows, colm), dtype=np.uint8)
bitpack(A, AU)
A_dev = cuda.to_device(A)
threads_per_block = (TPB, TPB)
C = np.empty((row_slice, A.shape[0]), dtype=np.int32)
blocks_per_grid_x = int(math.ceil(A.shape[0] / threads_per_block[0]))
blocks_per_grid_y = int(row_slice / threads_per_block[1])
blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)
for i in range((A.shape[0] + row_slice - 1) // row_slice):
    bit_slice_A_AT[blocks_per_grid, threads_per_block](A_dev, C, i * row_slice)
    lower = i * row_slice
    upper = min(lower + row_slice, CU.shape[0])
    width = upper - lower
    test = np.array_equal(C[:width, :], CU[i * row_slice:i * row_slice + width, :])
    print(test)
cuda.synchronize()
C_dev = cuda.device_array_like(C)
start_gpu_shared_memory = time.time()
for i in range((A.shape[0] + row_slice - 1) // row_slice):
    bit_slice_A_AT[blocks_per_grid, threads_per_block](A_dev, C_dev, i * row_slice)
cuda.synchronize()
end_gpu_shared_memory = time.time()
time_gpu_shared = end_gpu_shared_memory - start_gpu_shared_memory
print("GPU time(shared memory):" + str(time_gpu_shared))
$ python t63.py
host mem: 21 MB device mem: 3 MB
True
True
True
GPU time(shared memory):0.010116815567016602
$
This means, as suggested, for the case of rows = 100k and columns = 200k as given in the latest update to the question, we should be able to divide the C matrix into chunks of say 5k rows. The memory usage for the A matrix would be 2.5GB, but for the C matrix, since we are only computing a 5k row slice at a time, the device memory storage required would be 100k*5k*4 bytes, so 2GB for this example.
After some further study, we can speed up the host code matmul operation by switching from the int8 datatype to float32. That makes this op quite a bit faster, but the GPU code still seems to be about 4x faster than it:
$ cat t64.py
from numba import cuda, uint8, int32
import numba as nb
import numpy as np
import math
import time

TPB = 32
TPB1 = 33

@cuda.jit()
def bit_slice_A_AT(A, C, row_offset):
    sA = cuda.shared.array((TPB, TPB), dtype=uint8)
    sB = cuda.shared.array((TPB, TPB1), dtype=uint8)
    x, y = cuda.grid(2)
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    bx = cuda.blockIdx.x
    by = cuda.blockIdx.y
    tmp = int32(0)
    for i in range((A.shape[1] + TPB - 1) // TPB):
        if y + row_offset < A.shape[0] and (i * TPB + tx) < A.shape[1]:
            sA[ty, tx] = A[y + row_offset, i * TPB + tx]
        else:
            sA[ty, tx] = 0
        if (TPB * bx + ty) < A.shape[0] and (i * TPB + tx) < A.shape[1]:
            sB[ty, tx] = A[TPB * bx + ty, i * TPB + tx]
        else:
            sB[ty, tx] = 0
        cuda.syncthreads()
        for j in range(TPB):
            tmp1 = sA[ty, j] & sB[tx, j]
            test = uint8(1)
            for k in range(8):
                if (tmp1 & test) > 0:
                    tmp += 1
                test <<= 1
        cuda.syncthreads()
    if y < C.shape[0] and x < C.shape[1]:
        C[y, x] = tmp

@nb.njit('void(uint8[:,:],float32[:,:])', parallel=True)
def bitpack(A, AU):
    for i in nb.prange(A.shape[0]):
        for j in range(A.shape[1]):
            offset = j * 8
            res = int(AU[i, offset]) << 7
            res |= int(AU[i, offset + 1]) << 6
            res |= int(AU[i, offset + 2]) << 5
            res |= int(AU[i, offset + 3]) << 4
            res |= int(AU[i, offset + 4]) << 3
            res |= int(AU[i, offset + 5]) << 2
            res |= int(AU[i, offset + 6]) << 1
            res |= int(AU[i, offset + 7])
            A[i, j] = res

colm = 1000
rows = 6000
cols = int(colm * 8)
row_slice = 512
print('host mem: ', (rows * cols * 4 + rows * colm + rows * rows * 4 + rows * row_slice * 4) // 1048576, 'MB device mem: ', (((rows * cols) // 8) + row_slice * rows * 4) // 1048576, 'MB')
t1 = time.time()
AU = np.random.randint(2, size=(rows, cols), dtype=np.int8)
AU = AU.astype(np.float32)
print("randint:" + str(time.time() - t1))
t1 = time.time()
# CU = np.empty((rows, rows), dtype=np.int32)
CU = np.matmul(AU, AU.T, dtype=np.float32)
print("matmul:" + str(time.time() - t1))
t1 = time.time()
A = np.empty((rows, colm), dtype=np.uint8)
print("np.empty:" + str(time.time() - t1))
t1 = time.time()
bitpack(A, AU)
print("bitpack:" + str(time.time() - t1))
t1 = time.time()
A_dev = cuda.to_device(A)
threads_per_block = (TPB, TPB)
C = np.empty((row_slice, A.shape[0]), dtype=np.int32)
blocks_per_grid_x = int(math.ceil(A.shape[0] / threads_per_block[0]))
blocks_per_grid_y = int(row_slice / threads_per_block[1])
blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)
for i in range((A.shape[0] + row_slice - 1) // row_slice):
    bit_slice_A_AT[blocks_per_grid, threads_per_block](A_dev, C, i * row_slice)
    lower = i * row_slice
    upper = min(lower + row_slice, CU.shape[0])
    width = upper - lower
    test = np.array_equal(C[:width, :], CU[i * row_slice:i * row_slice + width, :])
    print(test)
cuda.synchronize()
C_dev = cuda.device_array_like(C)
start_gpu_shared_memory = time.time()
for i in range((A.shape[0] + row_slice - 1) // row_slice):
    bit_slice_A_AT[blocks_per_grid, threads_per_block](A_dev, C_dev, i * row_slice)
cuda.synchronize()
end_gpu_shared_memory = time.time()
time_gpu_shared = end_gpu_shared_memory - start_gpu_shared_memory
print("GPU time(shared memory):" + str(time_gpu_shared))
$ python t64.py
host mem: 337 MB device mem: 17 MB
randint:0.1817936897277832
matmul:3.498671531677246
np.empty:7.62939453125e-05
bitpack:0.03707313537597656
True
True
True
True
True
True
True
True
True
True
True
True
GPU time(shared memory):0.8318064212799072
$
I haven't thoroughly tested these codes. Bugs may exist. Use at your own risk.
For attribution, the numba bit-packing code seems to have come from here.

Training Accuracy is Very Low in a Simple CNN using Theano

I'm trying to implement a CNN using Theano and tried to test my code with a small sample set of my bigger dataset. I'm trying to categorize a set of 8280 pictures (of size 250x250) into 115 classes, and my sample set is a set of 32 pictures of the first two classes (16 pictures from each). The problem I'm experiencing is that from the first epoch, the training loss is NaN, and it does not change in the following epochs.
from __future__ import print_function
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import re
import cv2
from lasagne.layers import Conv2DLayer, MaxPool2DLayer, DropoutLayer
from lasagne.layers import InputLayer, DenseLayer, batch_norm

def split_list(a_list):
    half = len(a_list) / 2
    return a_list[:half], a_list[half:]

def load_dataset(path=''):
    cat_list = []
    filelist = sorted(os.listdir(path))
    trainlist = []
    testlist = []
    tmptrain = []
    tmptest = []
    max_id = 0
    for f in filelist:
        match = re.match(r'C(\d+)([F|G])(\d+)\.PNG', f)
        id = int(match.group(1)) - 1
        max_id = max(max_id, id)
        fg_class = match.group(2)
        fg_id = int(match.group(3))
        if id not in [p[0] for p in cat_list]:
            cat_list.append([id, [], []])
        if fg_class == 'G':
            cat_list[-1][1].append(f)
        else:
            cat_list[-1][2].append(f)
    for f in cat_list:
        id = f[0]
        trainG, testG = split_list(f[1])
        trainF, testF = split_list(f[2])
        tmptrain = tmptrain + [(id, 1, F) for F in trainF] + [(id, 0, G) for G in trainG]  # (Class_id, Forgery, Img)
        tmptest = tmptest + [(id, 1, F) for F in testF] + [(id, 0, F) for F in testG]
    X_train = np.array([cv2.imread(path + f[2], 0) for f in tmptrain]).astype(np.int32)
    y_train = np.array([f[0] for f in tmptrain]).astype(np.int32)
    X_test = np.array([cv2.imread(path + f[2], 0) for f in tmptest]).astype(np.int32)
    y_test = np.array([f[0] for f in tmptest]).astype(np.int32)
    fg_train = np.array([f[1] for f in tmptrain]).astype(np.int32)
    fg_test = np.array([f[1] for f in tmptest]).astype(np.int32)
    X_train = np.expand_dims(X_train, axis=1).astype(np.int32)
    X_test = np.expand_dims(X_test, axis=1).astype(np.int32)
    return X_train, y_train, X_test, y_test, fg_train, fg_test

def ExplicitNegativeCorrelation(net, layer='fc2', lr=0.00001):
    for param in lasagne.layers.get_all_params(net[layer]):
        if param.name.startswith('W'):
            W = param
            mean = T.mean(W, 0) * lr
            W = W - mean  # T.mean(T.mean(W,0))

def ImplicitNegativeCorrelation(MSE, Cross, Hinge):
    mean = T.mean((MSE + Cross + Hinge), axis=0)
    return ((MSE - mean) ** 2 + (Cross - mean) ** 2 + (Hinge - mean) ** 2) / 3

def build_cnn(inputvar, input_shape, trained_weights=None):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=inputvar)
    net['drop_input'] = DropoutLayer(net['input'], p=0.2)
    net['conv1'] = batch_norm(Conv2DLayer(net['input'], num_filters=96, filter_size=11, stride=4, flip_filters=False))  # , W=lasagne.init.HeNormal()))
    net['pool1'] = MaxPool2DLayer(net['conv1'], pool_size=3, stride=2)
    net['conv2'] = batch_norm(Conv2DLayer(net['pool1'], num_filters=256, filter_size=5, pad=2, flip_filters=False))  # , W=lasagne.init.HeNormal()))
    net['pool2'] = MaxPool2DLayer(net['conv2'], pool_size=3, stride=2)
    net['conv3'] = batch_norm(Conv2DLayer(net['pool2'], num_filters=384, filter_size=3, pad=1, flip_filters=False))  # , W=lasagne.init.HeNormal()))
    net['conv4'] = batch_norm(Conv2DLayer(net['conv3'], num_filters=384, filter_size=3, pad=1, flip_filters=False))  # , W=lasagne.init.HeNormal()))
    net['conv5'] = batch_norm(Conv2DLayer(net['conv4'], num_filters=256, filter_size=3, pad=1, flip_filters=False))  # , W=lasagne.init.HeNormal()))
    net['pool5'] = MaxPool2DLayer(net['conv5'], pool_size=3, stride=2)
    net['fc1'] = batch_norm(DenseLayer(net['pool5'], num_units=2048))
    net['drop_fc1'] = DropoutLayer(net['fc1'])
    net['fc2'] = batch_norm(DenseLayer(net['drop_fc1'], num_units=2048))
    net['fc_class'] = batch_norm(DenseLayer(net['fc2'], num_units=115))
    return net

def iterate_minibatches(inputs, targets_class, targets_verif, batchsize, shuffle=False):
    assert len(inputs) == len(targets_class)
    assert len(inputs) == len(targets_verif)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets_class[excerpt], targets_verif[excerpt]

def main(num_epochs=500):
    print("Loading data...")
    X_train, y_train, X_test, y_test, fg_train, fg_test = load_dataset('./signatures/tmp4/')
    X_val, y_val, fg_val = X_train, y_train, fg_train
    print(y_train.shape)
    input_var = T.tensor4('inputs')
    target_var_class = T.ivector('targets')
    network = build_cnn(input_var, (None, 1, 250, 250))
    class_prediction = lasagne.layers.get_output(network['fc_class'])  # , inputs={network['input']: input_var})
    loss_class = lasagne.objectives.categorical_crossentropy(class_prediction, target_var_class)
    loss = loss_class.mean()
    params = lasagne.layers.get_all_params([network['fc_class']], trainable=True)
    lr = 0.01
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=lr, momentum=0.9)
    test_prediction_class = lasagne.layers.get_output(network['fc_class'], deterministic=True)
    test_loss_class = lasagne.objectives.categorical_crossentropy(test_prediction_class,
                                                                  target_var_class)
    test_loss_class = test_loss_class.mean()
    test_acc_class = T.mean(T.eq(T.argmax(test_prediction_class, axis=1), target_var_class),
                            dtype=theano.config.floatX)
    predict_class = theano.function([input_var], T.argmax(test_prediction_class, axis=1))
    train_fn = theano.function([input_var, target_var_class], loss, updates=updates)
    val_fn_class = theano.function([input_var, target_var_class], [test_loss_class, test_acc_class])
    print("Starting training...")
    BatchSize = 2
    for epoch in range(num_epochs):
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, fg_train, BatchSize, shuffle=True):
            inputs, targets_class, targets_verif = batch
            train_err += train_fn(inputs, targets_class)
            # ExplicitNegativeCorrelation(network, layer='fc2', lr=lr/10)
            print(targets_class, predict_class(inputs))
            train_batches += 1
        val_err_class = 0
        val_acc_class = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, fg_val, BatchSize, shuffle=False):
            inputs, targets_class, targets_verif = batch
            err_class, acc_class = val_fn_class(inputs, targets_class)
            val_err_class += err_class
            val_acc_class += acc_class
            val_batches += 1
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
        print(" Classification loss:\t\t{:.6f}".format(val_err_class / val_batches))
        print(" Classification accuracy:\t\t{:.2f} %".format(
            val_acc_class / val_batches * 100))
    test_err_class = 0
    test_acc_class = 0
    test_err_verif = 0
    test_acc_verif = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, fg_test, BatchSize, shuffle=False):
        inputs, targets_class, targets_verif = batch
        err_class, acc_class = val_fn_class(inputs, targets_class)
        test_err_class += err_class
        test_acc_class += acc_class
        test_batches += 1
    print("Final results:")
    print(" test loss (Classification):\t\t\t{:.6f}".format(test_err_class / test_batches))
    print(" test accuracy (Classification):\t\t{:.2f} %".format(
        test_acc_class / test_batches * 100))

if __name__ == '__main__':
    main()
I've tried putting lasagne.nonlinearities.softmax in the DenseLayers; it does fix the NaN issue, but then the accuracy of the training model is not any good; it fluctuates between 0 and 25% (after 50 epochs!).
I have implemented a load_dataset function which I think works correctly (I've tested the function multiple times), and I'm giving the class id of each picture as the target in the loss function. So my inputs and targets look like this:
Input shape: (BatchSize, 1, 250, 250)
Target shape: (BatchSize, 1): vector of class ids
I've uploaded my sample set here in this link.
It looks like we have 4 classes, according to the data, so I changed the loading code to reflect it:
y_train = np.array([f[0] * 2 + f[1] for f in tmptrain]).astype(np.int32)
y_test = np.array([f[0] * 2 + f[1] for f in tmptest]).astype(np.int32)
The number of units in the output layer should be equal to the number of classes, so I added an output layer with softmax:
net['fo_class'] = DenseLayer(net['fc_class'], num_units=4,
                             nonlinearity=lasagne.nonlinearities.softmax)
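The softmax matters here because categorical crossentropy takes the log of the predicted class probabilities; with a linear or rectify output, the "predictions" are raw activations that can be zero or negative, which is a plausible source of the NaN loss the question describes. A small numeric illustration:

import numpy as np

preds = np.array([0.0, 2.7, -1.3, 0.4])  # raw activations, not softmaxed
print(-np.log(preds[1]))                  # finite for positive values
print(-np.log(preds[0]))                  # inf
print(-np.log(preds[2]))                  # nan (log of a negative number)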
I suggest removing the dropout layer just after the inputs; you can compare outcomes with and without it to make sure.
Batch size = 2 is too small and the learning rate is too high.
Here is an example of the code with those changes:
from __future__ import print_function
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import re
import cv2
from lasagne.layers import Conv2DLayer, MaxPool2DLayer, DropoutLayer
from lasagne.layers import InputLayer, DenseLayer

def split_list(a_list):
    half = len(a_list) / 2
    return a_list[:half], a_list[half:]

def load_dataset(path=''):
    cat_list = []
    filelist = sorted(os.listdir(path))
    tmptrain = []
    tmptest = []
    max_id = 0
    for f in filelist:
        match = re.match(r'C(\d+)([F|G])(\d+)\.PNG', f)
        id = int(match.group(1)) - 1
        max_id = max(max_id, id)
        fg_class = match.group(2)
        if id not in [p[0] for p in cat_list]:
            cat_list.append([id, [], []])
        if fg_class == 'G':
            cat_list[-1][1].append(f)
        else:
            cat_list[-1][2].append(f)
    for f in cat_list:
        id = f[0]
        trainG, testG = split_list(f[1])
        trainF, testF = split_list(f[2])
        tmptrain = tmptrain + [(id, 1, F) for F in trainF] + [(id, 0, G) for G in trainG]
        tmptest = tmptest + [(id, 1, F) for F in testF] + [(id, 0, F) for F in testG]
    X_train = np.array([cv2.imread(path + f[2], 0) for f in tmptrain]).astype(np.float32)
    y_train = np.array([f[0] * 2 + f[1] for f in tmptrain]).astype(np.int32)
    X_test = np.array([cv2.imread(path + f[2], 0) for f in tmptest]).astype(np.float32)
    y_test = np.array([f[0] * 2 + f[1] for f in tmptest]).astype(np.int32)
    fg_train = np.array([f[1] for f in tmptrain]).astype(np.float32)
    fg_test = np.array([f[1] for f in tmptest]).astype(np.float32)
    X_train = np.expand_dims(X_train, axis=1).astype(np.float32)
    X_test = np.expand_dims(X_test, axis=1).astype(np.float32)
    return X_train, y_train, X_test, y_test, fg_train, fg_test

def ExplicitNegativeCorrelation(net, layer='fc2', lr=0.00001):
    for param in lasagne.layers.get_all_params(net[layer]):
        if param.name.startswith('W'):
            W = param
            mean = T.mean(W, 0) * lr
            W = W - mean

def ImplicitNegativeCorrelation(MSE, Cross, Hinge):
    mean = T.mean((MSE + Cross + Hinge), axis=0)
    return ((MSE - mean) ** 2 + (Cross - mean) ** 2 + (Hinge - mean) ** 2) / 3

def build_cnn(inputvar, input_shape, trained_weights=None):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=inputvar)
    net['conv1'] = Conv2DLayer(net['input'], num_filters=96, filter_size=11, stride=4)
    net['pool1'] = MaxPool2DLayer(net['conv1'], pool_size=3, stride=2)
    net['conv2'] = Conv2DLayer(net['pool1'], num_filters=256, filter_size=5, pad=2)
    net['pool2'] = MaxPool2DLayer(net['conv2'], pool_size=3, stride=2)
    net['conv3'] = Conv2DLayer(net['pool2'], num_filters=384, filter_size=3, pad=1)
    net['conv4'] = Conv2DLayer(net['conv3'], num_filters=384, filter_size=3, pad=1)
    net['conv5'] = Conv2DLayer(net['conv4'], num_filters=256, filter_size=3, pad=1)
    net['pool5'] = MaxPool2DLayer(net['conv5'], pool_size=3, stride=2)
    net['fc1'] = DenseLayer(net['pool5'], num_units=2048)
    net['drop_fc1'] = DropoutLayer(net['fc1'])
    net['fc2'] = DenseLayer(net['drop_fc1'], num_units=2048)
    net['fc_class'] = DenseLayer(net['fc2'], num_units=115)
    net['fo_class'] = DenseLayer(net['fc_class'], num_units=4,
                                 nonlinearity=lasagne.nonlinearities.softmax)
    return net

def iterate_minibatches(inputs, targets_class, targets_verif, batchsize, shuffle=False):
    assert len(inputs) == len(targets_class)
    assert len(inputs) == len(targets_verif)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets_class[excerpt], targets_verif[excerpt]

def main(num_epochs=500):
    print("Loading data...")
    X_train, y_train, X_test, y_test, fg_train, fg_test = load_dataset('./signatures/tmp4/')
    X_train /= 255
    X_val, y_val, fg_val = X_train, y_train, fg_train
    print(y_train.shape)
    check = X_train[0][0]
    print(check)
    input_var = T.tensor4('inputs')
    target_var_class = T.ivector('targets')
    network = build_cnn(input_var, (None, 1, 250, 250))
    class_prediction = lasagne.layers.get_output(network['fo_class'])
    loss_class = lasagne.objectives.categorical_crossentropy(class_prediction, target_var_class)
    loss = loss_class.mean()
    params = lasagne.layers.get_all_params([network['fo_class']], trainable=True)
    lr = 0.0007
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=lr, momentum=0.9)
    test_prediction_class = lasagne.layers.get_output(network['fo_class'], deterministic=True)
    test_loss_class = lasagne.objectives.categorical_crossentropy(test_prediction_class,
                                                                  target_var_class)
    test_loss_class = test_loss_class.mean()
    test_acc_class = T.mean(T.eq(T.argmax(test_prediction_class, axis=1), target_var_class),
                            dtype=theano.config.floatX)
    predict_class = theano.function([input_var], T.argmax(test_prediction_class, axis=1))
    train_fn = theano.function([input_var, target_var_class], loss, updates=updates)
    val_fn_class = theano.function([input_var, target_var_class], [test_loss_class, test_acc_class])
    print("Starting training...")
    BatchSize = 16
    for epoch in range(num_epochs):
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, fg_train, BatchSize, shuffle=True):
            inputs, targets_class, targets_verif = batch
            train_err += train_fn(inputs, targets_class)
            print(targets_class, predict_class(inputs))
            train_batches += 1
        val_err_class = 0
        val_acc_class = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, fg_val, BatchSize, shuffle=False):
            inputs, targets_class, targets_verif = batch
            err_class, acc_class = val_fn_class(inputs, targets_class)
            val_err_class += err_class
            val_acc_class += acc_class
            val_batches += 1
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
        print(" Classification loss:\t\t{:.6f}".format(val_err_class / val_batches))
        print(" Classification accuracy:\t\t{:.2f} %".format(
            val_acc_class / val_batches * 100))
    test_err_class = 0
    test_acc_class = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, fg_test, BatchSize, shuffle=False):
        inputs, targets_class, targets_verif = batch
        err_class, acc_class = val_fn_class(inputs, targets_class)
        test_err_class += err_class
        test_acc_class += acc_class
        test_batches += 1
    print("Final results:")
    print(" test loss (Classification):\t\t\t{:.6f}".format(test_err_class / test_batches))
    print(" test accuracy (Classification):\t\t{:.2f} %".format(
        test_acc_class / test_batches * 100))

if __name__ == '__main__':
    main()