Issues with Q-learning and neural networks - reinforcement-learning

I'm just starting out with Q-learning, and I've had decent results using the tabular method. One game I found quite fun to apply Q-learning to was Blackjack, which seemed like a perfect MDP-type problem.
I've been wanting to extend this to using a neural network as a function approximator, but I'm not having any luck at all. The approach is to predict the expected value of every action in a given state and then pick the best one, with a small chance of picking something random (epsilon-greedy). Nothing converges, it learns silly Q-values, and it can't even figure out how to play when the only card in the deck is a 5.
I am genuinely stuck after spending hours on this, tuning hyperparameters and everything else I can think of. I feel like I must have made a fundamental error with Q-learning that I can't see. My code is below:
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import random
import pandas as pd
import sklearn
import math
import itertools
import tensorflow as tf
from matplotlib import pyplot as plt
############################ START BLACKJACK CLASS ############################
class Blackjack(gym.Env):
    """Simple Blackjack environment"""

    def __init__(self, natural=False):
        self.action_space = spaces.Discrete(2)
        self._seed()
        # Start the first game
        self.prevState = self.reset()

    def _seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return seed

    # Returns a tuple of the form (str, int) where str is "H" or "S" depending on whether it's a
    # soft or hard hand, and int is the sum total of the cards in hand
    # Example output: ("H", 15)
    def getTotal(cards):
        running_total = 0
        softs = 0
        for c in cards:
            running_total += c
            if c == 11:
                softs += 1
            if running_total > 21 and softs > 0:
                softs -= 1
                running_total -= 10
        return "H" if softs == 0 else "S", running_total

    def drawCard():
        # Draw a random card from the deck with replacement. 11 is ACE
        # I've set it to always draw a 5. In theory this should be very easy to learn, and
        # the only possible states, and their correct Q-values, should be:
        # Q[10_5, stand] = -1    Q[10_5, hit] =  0
        # Q[15_5, stand] = -1    Q[15_5, hit] =  0
        # Q[20_5, stand] =  0    Q[20_5, hit] = -1
        # The network can't even learn this!
        return 5
        return random.choice([5,6])
        return random.choice([2,3,4,5,6,7,8,9,10,10,10,10,11])

    def isBlackjack(cards):
        return sum(cards) == 21 and len(cards) == 2

    def getState(self):
        # Defines the state of the current game
        pstate, ptotal = Blackjack.getTotal(self.player)
        dstate, dtotal = Blackjack.getTotal(self.dealer)
        return "{}_{}".format("BJ" if Blackjack.isBlackjack(self.player) else pstate + str(ptotal), dtotal)

    def reset(self):
        # Resets the game - Dealer is dealt 1 card, player is dealt 2 cards
        # The player and dealer are represented by an array of numbers, which are the cards they were
        # dealt in order
        self.soft = "H"
        self.dealer = [Blackjack.drawCard()]
        self.player = [Blackjack.drawCard() for _ in range(2)]
        pstate, ptotal = Blackjack.getTotal(self.player)
        dstate, dtotal = Blackjack.getTotal(self.dealer)
        # Returns the current state of the game
        return self.getState()

    def step(self, action):
        assert self.action_space.contains(action)
        # Action should be 0 or 1.
        # If standing, the dealer will draw all cards until they are >= 17. This will end the episode
        # If hitting, a new card will be added to the player, if over 21, reward is -1 and episode ends

        # Stand
        if action == 0:
            pstate, ptotal = Blackjack.getTotal(self.player)
            dstate, dtotal = Blackjack.getTotal(self.dealer)
            while dtotal < 17:
                self.dealer.append(Blackjack.drawCard())
                dstate, dtotal = Blackjack.getTotal(self.dealer)
            # if player won with blackjack
            if Blackjack.isBlackjack(self.player) and not Blackjack.isBlackjack(self.dealer):
                rw = 1.5
            # if dealer bust or if the player has a higher number than dealer
            elif dtotal > 21 or (dtotal <= 21 and ptotal > dtotal and ptotal <= 21):
                rw = 1
            # if there's a draw
            elif dtotal == ptotal:
                rw = 0
            # player loses in all other situations
            else:
                rw = -1
            state = self.getState()
            # Returns (current_state, reward, boolean_true_if_episode_ended, empty_dict)
            return state, rw, True, {}
        # Hit
        else:
            # Player draws another card
            self.player.append(Blackjack.drawCard())
            # Calc new total for player
            pstate, ptotal = Blackjack.getTotal(self.player)
            state = self.getState()
            # Player went bust and episode is over
            if ptotal > 21:
                return state, -1, True, {}
            # Player is still in the game, but no observed reward yet
            else:
                return state, 0, False, {}
# Converts a player's or dealer's hand into an array of 10 counts that keep track
# of how many of each card are held. The card is identified by its index:
# Index: 0 1 2 3 4 5 6 7 8 9
# Card:  2 3 4 5 6 7 8 9 T A
def cardsToX(cards):
    ans = [0] * 12
    for c in cards:
        ans[c] += 1
    ans = ans[2:12]
    return ans
# Easy way to convert Q values into weighted decision probabilities via softmax.
# This is useful if we probablistically choose actions based on their values rather
# than always choosing the max.
# eg Q[s,0] = -1
# Q[s,1] = -2
# softmax([-1,-2]) = [0.731, 0.269] --> 73% chance of standing, 27% chance of hitting
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()
plt.ion()
# Define number of Neurons per layer
K = 20 # Layer 1
L = 10 # Layer 2
M = 5 # Layer 3
N_IN = 20 # 10 unique cards for player, and 10 for dealer = 20 total inputs
N_OUT = 2
SDEV = 0.000001
# Input / Output place holders
X = tf.placeholder(tf.float32, [None, N_IN])
X = tf.reshape(X, [-1, N_IN])
# This will be the observed reward + decay_factor * max(Q[s+1, 0], Q[s+1, 1]).
# This should be an estimate of the 'correct' Q-value, with the only caveat being that
# the Q-value of the next state is a biased estimate of the true value.
Q_TARGET = tf.placeholder(tf.float32, [None, N_OUT])
# LAYER 1
W1 = tf.Variable(tf.random_normal([N_IN, K], stddev = SDEV))
B1 = tf.Variable(tf.random_normal([K], stddev = SDEV))
# LAYER 2
W2 = tf.Variable(tf.random_normal([K, L], stddev = SDEV))
B2 = tf.Variable(tf.random_normal([L], stddev = SDEV))
# LAYER 3
W3 = tf.Variable(tf.random_normal([L, M], stddev = SDEV))
B3 = tf.Variable(tf.random_normal([M], stddev = SDEV))
# LAYER 4
W4 = tf.Variable(tf.random_normal([M, N_OUT], stddev = SDEV))
B4 = tf.Variable(tf.random_normal([N_OUT], stddev = SDEV))
H1 = tf.nn.relu(tf.matmul(X, W1) + B1)
H2 = tf.nn.relu(tf.matmul(H1, W2) + B2)
H3 = tf.nn.relu(tf.matmul(H2, W3) + B3)
# The predicted Q value, as determined by our network (function approximator)
# outputs expected reward for standing and hitting in the form [stand, hit] given the
# current game state
Q_PREDICT = (tf.matmul(H3, W4) + B4)
# Is this correct? The Q_TARGET should be a combination of the real reward and the discounted
# future rewards of the future state as predicted by the network. Q_TARGET - Q_PREDICT should be
# the error in prediction, which we want to minimise. Does this loss function work to help the network
# converge to the true Q values with sufficient training?
loss_func = tf.reduce_sum(tf.square(Q_TARGET - Q_PREDICT))
# These are placeholder values to enable a manually decayed learning rate. For now, use
# the same learning rate all the time.
LR_START = 0.001
#LR_END = 0.000002
#LR_DECAY = 0.999
# Optimizer
LEARNING_RATE = tf.Variable(LR_START, trainable=False)
optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)#(LEARNING_RATE)
train_step = optimizer.minimize(loss_func)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Initialise the game environment
game = Blackjack()
# Number of episodes (games) to play
num_eps = 10000000
# probability of picking a random action. This decays over time
epsilon = 0.1
# discount factor. For blackjack, future rewards are equally important as immediate rewards.
discount = 1.0
all_rewards = [] # Holds all observed rewards. The rolling mean of rewards should improve as the network learns
all_Qs = [] # Holds all predicted Q values. Useful as a sanity check once the network is trained
all_losses = [] # Holds all the (Q_TARGET - Q_PREDICTED) values. The rolling mean of this should decrease
hands = [] # Holds a summary of all hands played. (game_state, Q[stand], Q[hit], action_taken)
# boolean switch to use the highest action value instead of a stochastic decision via softmax on Q-values
use_argmax = True
# Begin generating episodes
for ep in range(num_eps):
    game.reset()
    # Keep looping until the episode is over
    while True:
        # x is the array of 20 numbers: the player cards and the dealer cards
        x = cardsToX(game.player) + cardsToX(game.dealer)
        # Q1 refers to the predicted Q-values before any action was taken
        Q1 = sess.run(Q_PREDICT, feed_dict = {X : np.reshape( np.array(x), (-1, N_IN) )})
        all_Qs.append(Q1)
        if use_argmax:
            # action is selected to be the one with the highest Q-value
            act = np.argmax(Q1)
        else:
            # action is a weighted selection based on predicted Q-values
            act = np.random.choice(range(N_OUT), p = softmax(Q1)[0])
        if random.random() < epsilon:
            # action is selected randomly
            act = random.randint(0, N_OUT-1)
        # Get game state before action is taken
        game_state = game.getState()
        # Take action! Observe new state, reward, and whether the game is over
        game_state_new, reward, done, _ = game.step(act)
        hands.append( (game_state, Q1[0][0], Q1[0][1], act, reward) )
        # Store the new state vector to feed into our network.
        # x2 corresponds to the x vector observed in state s+1
        x2 = cardsToX(game.player) + cardsToX(game.dealer)
        # Q2 refers to the predicted Q-values in the new state s+1. These are used for the Q-learning target.
        Q2 = sess.run(Q_PREDICT, feed_dict = {X : np.reshape( np.array(x2), (-1, N_IN) )})
        # Store the maximum Q-value in this new state. This should be the expected future reward from this new state
        maxQ2 = np.max(Q2)
        # targetQ is the same as our predicted one initially. The index of the action we took will be
        # updated to be [observed reward] + [discount_factor] * max(Q[s+1])
        targetQ = np.copy(Q1)
        # If the game is done, then there is no future state
        if done:
            targetQ[0, act] = reward
            all_rewards.append(reward)
        else:
            targetQ[0, act] = reward + discount * maxQ2
        # Perform one gradient descent update, filling the placeholder value for Q_TARGET with targetQ.
        # The returned loss is the difference between the predicted Q-values and the targetQ we just calculated
        loss, _, _ = sess.run([loss_func, Q_PREDICT, train_step],
                              feed_dict = {X : np.reshape( np.array(x), (-1, N_IN) ),
                                           Q_TARGET : targetQ})
        all_losses.append(loss)
        # Every 1000 episodes, show how the Q-values moved after the gradient descent update
        if ep % 1000 == 0 and ep > 0:
            Q_NEW = sess.run(Q_PREDICT, feed_dict = {X : np.reshape( np.array(x), (-1, N_IN) ),
                                                     Q_TARGET : targetQ})
            #print(game_state, targetQ[0], Q1[0], (Q_NEW-Q1)[0], loss, ep, epsilon, act)
            rolling_window = 1000
            rolling_mean = np.mean( all_rewards[-rolling_window:] )
            rolling_loss = np.mean( all_losses[-rolling_window:] )
            print("Rolling mean reward: {:<10.4f}, Rolling loss: {:<10.4f}".format(rolling_mean, rolling_loss))
        if done:
            # Reduce chance of random action as we train the model.
            epsilon = 2/((ep/500) + 10)
            epsilon = max(0.02, epsilon)
            # rolling mean of rewards should increase over time!
            if ep % 1000 == 0 and ep > 0:
                pass # Show the rolling mean of all losses. This should decrease over time!
                #plt.plot(pd.rolling_mean(pd.Series(all_losses), 5000))
                #plt.pause(0.02)
                #plt.show()
            break

print(cardsToX(game.player))
print(game.dealer)
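For reference, the Q-values claimed in the drawCard comments can be double-checked with a quick tabular recursion that is independent of the network. This is only a sketch: `true_q` is an illustrative helper, and it relies on the fact that with the fixed-5 deck the dealer always finishes on exactly 20 (5 → 10 → 15 → 20).

```python
# Hypothetical sanity check: with drawCard() fixed to 5, the dealer always ends on 20,
# so the exact Q-values can be computed directly and compared with what the network learns.
def true_q(total, discount=1.0):
    stand = 0 if total == 20 else -1              # 10 and 15 lose to the dealer's 20, 20 ties
    if total + 5 > 21:
        hit = -1                                  # hitting on 20 busts
    else:
        hit = 0 + discount * max(true_q(total + 5, discount))
    return stand, hit

for t in (10, 15, 20):
    print(t, true_q(t))   # -> (-1, 0), (-1, 0), (0, -1), matching the comments in drawCard
```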
Any ideas? I'm stuck :(

Related

Sentence similarity

Can anyone explain how this line works?
X_set = {w for w in X_list if not w in sw}
I need to know why the variable w is used 3 times, and what each w refers to (see the equivalent loop sketch after the code).
I've also posted my code below for further reference
# Program to measure the similarity between
# two sentences using cosine similarity.
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# X = input("Enter first string: ").lower()
# Y = input("Enter second string: ").lower()
X = " Ravi went to the market and buy 4 oranges and 2 apples in total how many fruits did Ravi buy"
Y = " Ram went to the shopping mall and buy 1pant and 5 shirts. how many clothes does Ram buy"

# tokenization
X_list = word_tokenize(X)
Y_list = word_tokenize(Y)

# sw contains the list of stopwords
sw = stopwords.words('english')
l1 = []
l2 = []

# remove stop words from the string
X_set = {w for w in X_list if not w in sw}
Y_set = {w for w in Y_list if not w in sw}
print(X_set)
print(Y_set)

# form a set containing keywords of both strings
rvector = X_set.union(Y_set)
for w in rvector:
    # print(w)
    if w in X_set:
        l1.append(1)  # create a vector
    else:
        l1.append(0)
    if w in Y_set:
        l2.append(1)
    else:
        l2.append(0)

c = 0
# cosine formula
for i in range(len(rvector)):
    c += l1[i] * l2[i]
cosine = c / float((sum(l1) * sum(l2)) ** 0.5)
print("similarity: ", cosine)

Why does Deep Adaptive Input Normalization (DAIN) normalize time series data across rows?

The DAIN paper describes how a network learns to normalize time series data by itself; here is how the authors implemented it. The code leads me to think that normalization is happening across rows, not columns. Can anyone explain why it is implemented that way? I always thought that one normalizes a time series only across columns, to keep each feature's true information.
Here is the piece that does normalization:
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class DAIN_Layer(nn.Module):
    def __init__(self, mode='adaptive_avg', mean_lr=0.00001, gate_lr=0.001, scale_lr=0.00001, input_dim=144):
        super(DAIN_Layer, self).__init__()
        print("Mode = ", mode)
        self.mode = mode
        self.mean_lr = mean_lr
        self.gate_lr = gate_lr
        self.scale_lr = scale_lr
        # Parameters for adaptive average
        self.mean_layer = nn.Linear(input_dim, input_dim, bias=False)
        self.mean_layer.weight.data = torch.FloatTensor(data=np.eye(input_dim, input_dim))
        # Parameters for adaptive std
        self.scaling_layer = nn.Linear(input_dim, input_dim, bias=False)
        self.scaling_layer.weight.data = torch.FloatTensor(data=np.eye(input_dim, input_dim))
        # Parameters for adaptive scaling
        self.gating_layer = nn.Linear(input_dim, input_dim)
        self.eps = 1e-8

    def forward(self, x):
        # Expecting (n_samples, dim, n_feature_vectors)
        # Nothing to normalize
        if self.mode == None:
            pass
        # Do simple average normalization
        elif self.mode == 'avg':
            avg = torch.mean(x, 2)
            avg = avg.resize(avg.size(0), avg.size(1), 1)
            x = x - avg
        # Perform only the first step (adaptive averaging)
        elif self.mode == 'adaptive_avg':
            avg = torch.mean(x, 2)
            adaptive_avg = self.mean_layer(avg)
            adaptive_avg = adaptive_avg.resize(adaptive_avg.size(0), adaptive_avg.size(1), 1)
            x = x - adaptive_avg
        # Perform the first + second step (adaptive averaging + adaptive scaling)
        elif self.mode == 'adaptive_scale':
            # Step 1:
            avg = torch.mean(x, 2)
            adaptive_avg = self.mean_layer(avg)
            adaptive_avg = adaptive_avg.resize(adaptive_avg.size(0), adaptive_avg.size(1), 1)
            x = x - adaptive_avg
            # Step 2:
            std = torch.mean(x ** 2, 2)
            std = torch.sqrt(std + self.eps)
            adaptive_std = self.scaling_layer(std)
            adaptive_std[adaptive_std <= self.eps] = 1
            adaptive_std = adaptive_std.resize(adaptive_std.size(0), adaptive_std.size(1), 1)
            x = x / (adaptive_std)
        elif self.mode == 'full':
            # Step 1:
            avg = torch.mean(x, 2)
            adaptive_avg = self.mean_layer(avg)
            adaptive_avg = adaptive_avg.resize(adaptive_avg.size(0), adaptive_avg.size(1), 1)
            x = x - adaptive_avg
            # Step 2:
            std = torch.mean(x ** 2, 2)
            std = torch.sqrt(std + self.eps)
            adaptive_std = self.scaling_layer(std)
            adaptive_std[adaptive_std <= self.eps] = 1
            adaptive_std = adaptive_std.resize(adaptive_std.size(0), adaptive_std.size(1), 1)
            x = x / adaptive_std
            # Step 3:
            avg = torch.mean(x, 2)
            gate = F.sigmoid(self.gating_layer(avg))
            gate = gate.resize(gate.size(0), gate.size(1), 1)
            x = x * gate
        else:
            assert False
        return x
```
I am not sure either, but they do transpose in the forward function of the MLP class: x = x.transpose(1, 2). Thus it seems to me that they normalise over time for each feature.
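To make the axis question concrete, here is a small sketch. The shapes are my assumption, based on the comment `# Expecting (n_samples, dim, n_feature_vectors)` and the transpose mentioned above (I use `unsqueeze` instead of the repo's `resize` purely for brevity):

```python
import torch

# Hypothetical shapes: batch of 8 windows, 144 features, 15 time steps each
x = torch.randn(8, 144, 15)          # what the MLP feeds into DAIN after x.transpose(1, 2)

avg = torch.mean(x, 2)               # shape (8, 144): one mean per feature,
                                     # averaged over the 15 time steps
x_centered = x - avg.unsqueeze(2)    # broadcast the per-feature mean back over the time axis
print(avg.shape, x_centered.shape)   # torch.Size([8, 144]) torch.Size([8, 144, 15])
```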

Why does my agent always take the same action in DQN - Reinforcement Learning

I have trained an RL agent using the DQN algorithm. After 20000 episodes my rewards converged. Now when I test this agent, it always takes the same action, irrespective of the state. I find this very weird. Can someone help me with this? Is there any reason anyone can think of why the agent is behaving this way?
Reward plot
When I test the agent
state = env.reset()
print('State: ', state)
state_encod = np.reshape(state, [1, state_size])
q_values = model.predict(state_encod)
action_key = np.argmax(q_values)
print(action_key)
print(index_to_action_mapping[action_key])
print(q_values[0][0])
print(q_values[0][action_key])
q_values_plotting = []
for i in range(0, action_size):
    q_values_plotting.append(q_values[0][i])
plt.plot(np.arange(0, action_size), q_values_plotting)
Every time it gives the same q_values plot, even though the initial state is different every time. Below is the q_values plot.
Testing code:
test_rewards = []
for episode in range(1000):
    terminal_state = False
    state = env.reset()
    episode_reward = 0
    while terminal_state == False:
        print('State: ', state)
        state_encod = np.reshape(state, [1, state_size])
        q_values = model.predict(state_encod)
        action_key = np.argmax(q_values)
        action = index_to_action_mapping[action_key]
        print('Action: ', action)
        next_state, reward, terminal_state = env.step(state, action)
        print('Next_state: ', next_state)
        print('Reward: ', reward)
        print('Terminal_state: ', terminal_state, '\n')
        print('----------------------------')
        episode_reward += reward
        state = deepcopy(next_state)
    print('Episode Reward' + str(episode_reward))
    test_rewards.append(episode_reward)
plt.plot(test_rewards)
Thanks.
Adding environment
import gym
import rom_vav_150mm_polyreg as rom
import numpy as np
import random


class VAVenv(gym.Env):
    def __init__(self):
        # Zone temperature set point and limits
        self.temp_sp = 24
        self.temp_sp_max = 24.5
        self.temp_sp_min = 23.7
        # no. of hours in an episode and time interval for each step
        self.MAXSTEPS = 11
        self.time_interval = 5./60.  # in hrs
        # constants
        self.zone_volume = 775

    def step(self, state, action):
        # state  -> Time, Volume, Load, SAT, RAT
        # action -> CFM
        action_cfm = action[0]
        # damper_opening = state[2]
        load = state[2]
        sat = state[3]
        current_temp = state[4]
        # input
        inputs_rat = np.array([load, action_cfm, self.zone_volume, current_temp, sat])
        '''
        AFTER 5 MINUTES
        '''
        # output
        output = [self.KStep + self.time_interval, self.zone_volume, rom.load(self.KStep + self.time_interval),
                  sat, rom.rat(inputs_rat)]
        # reward calculation
        thermal_coefficient = -0.1
        zone_temperature = output[4]
        if zone_temperature < self.temp_sp_min:
            temp_penalty = self.temp_sp_min - zone_temperature
        elif zone_temperature > self.temp_sp_max:
            temp_penalty = zone_temperature - self.temp_sp_max
        else:
            temp_penalty = -10
        reward = thermal_coefficient * temp_penalty
        # create next step
        next_state = np.array(output)
        # increment simulation step count
        self.KStep += self.time_interval
        # done - end of one episode, when KStep reaches the maximum steps in an episode
        done = False
        if self.KStep > self.MAXSTEPS:
            done = True
        return next_state, reward, done

    def reset(self):
        self.KStep = 0
        # initialize all the values of a state
        initial_rat = random.uniform(23, 27)
        initial_sat = random.uniform(12, 14)
        # return a state
        return np.array([self.KStep, self.zone_volume,
                         rom.load(self.KStep), initial_sat, initial_rat])

Reinforcement learning cost function

Newb question
I am writing an OpenAI Gym Pong player with TensorFlow, and thus far have been able to create the network based on a random initialization so that it randomly returns whether to move the player paddle up or down.
After the epoch is over (21 games played where the computer won), I collected a set of observations, moves and scores. The final observation of a game receives a score, and each preceding observation can be scored based on the Bellman equation.
Now my questions, which I do not understand yet:
How do I calculate the cost function so that it can be propagated as a starting gradient for backpropagation? I totally get it with supervised learning, but here we do not have any labels to score against.
How would I start optimizing the network?
Maybe a pointer to existing code or some literature would help.
Here's where I compute the rewards:
def compute_observation_rewards(self, gamma, up_score_probabilities):
    """
    Applies Bellman equation and determines reward for each stored observation
    :param gamma: Learning decay
    :param up_score_probabilities: Probabilities for up score
    :returns: List of scores for each move
    """
    score_sum = 0
    discounted_rewards = []
    # go backwards through all observations
    for i, p in enumerate(reversed(self._states_score_action)):
        o = p[0]
        s = p[1]
        if s != 0:
            score_sum = 0
        score_sum = score_sum * gamma + s
        discounted_rewards.append(score_sum)
    # normalize scores
    discounted_rewards = np.array(discounted_rewards)
    discounted_rewards -= np.mean(discounted_rewards)
    discounted_rewards /= np.std(discounted_rewards)
    return discounted_rewards
Below is my network:
with tf.variable_scope('NN_Model', reuse=tf.AUTO_REUSE):
    layer1 = tf.layers.conv2d(inputs,
                              3,
                              3,
                              strides=(1, 1),
                              padding='valid',
                              data_format='channels_last',
                              dilation_rate=(1, 1),
                              activation=tf.nn.relu,
                              use_bias=True,
                              bias_initializer=tf.zeros_initializer(),
                              trainable=True,
                              name='layer1')
    # (N - F + 1) x (N - F + 1)
    # => layer1 should be
    # (80 - 3 + 1) * (80 - 3 + 1) = 78 x 78
    pool1 = tf.layers.max_pooling2d(layer1,
                                    pool_size=5,
                                    strides=2,
                                    name='pool1')
    # int((N - f) / s + 1)
    # (78 - 5) / 2 + 1 = 73/2 + 1 = 37
    layer2 = tf.layers.conv2d(pool1,
                              5,
                              5,
                              strides=(2, 2),
                              padding='valid',
                              data_format='channels_last',
                              dilation_rate=(1, 1),
                              activation=tf.nn.relu,
                              use_bias=True,
                              kernel_initializer=tf.random_normal_initializer(),
                              bias_initializer=tf.zeros_initializer(),
                              trainable=True,
                              name='layer2',
                              reuse=None)
    # ((N + 2 x padding - F) / stride + 1) x ((N + 2 x padding - F) / stride + 1)
    # => layer2 should be
    # int((37 + 0 - 5) / 2) + 1
    # 16 + 1 = 17
    pool2 = tf.layers.max_pooling2d(layer2,
                                    pool_size=3,
                                    strides=2,
                                    name='pool2')
    # int((N - f) / s + 1)
    # (17 - 3) / 2 + 1 = 7 + 1 = 8
    flat1 = tf.layers.flatten(pool2, 'flat1')
    # Kx64
    full1 = tf.contrib.layers.fully_connected(flat1,
                                              num_outputs=1,
                                              activation_fn=tf.nn.sigmoid,
                                              weights_initializer=tf.contrib.layers.xavier_initializer(),
                                              biases_initializer=tf.zeros_initializer(),
                                              trainable=True,
                                              scope=None)
The algorithm you're looking for is called REINFORCE.
I would suggest reading chapter 13 of Sutton and Barto's RL book.
Here's pseudocode from the book.
Here, theta is the set of weights of your neural net. If you're unfamiliar with some of the rest of the notation, I'd suggest reading Chapter 3 of the above-mentioned book. It covers the basic problem formulation.
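Since the book's pseudocode box doesn't reproduce well here, below is a rough TensorFlow sketch of the core REINFORCE update for the up/down Pong setup described in the question. Treat it as an illustration under my assumptions: `up_probability` would be the sigmoid output of the network above (e.g. `full1`), `actions_taken` is 1 where the agent moved up, and `discounted_rewards` comes from `compute_observation_rewards`; the names are illustrative, not part of the original code.

```python
import tensorflow as tf

# Placeholders for one batch of collected experience (names are illustrative)
actions_taken = tf.placeholder(tf.float32, [None, 1])       # 1 if the paddle moved up, else 0
discounted_rewards = tf.placeholder(tf.float32, [None, 1])  # output of compute_observation_rewards
up_probability = full1                                       # sigmoid output of the network defined above

# REINFORCE: maximize log pi(a|s) * return, i.e. minimize its negative.
# For a Bernoulli (up/down) policy the log-probability is the usual cross-entropy term.
log_prob = (actions_taken * tf.log(up_probability + 1e-10)
            + (1 - actions_taken) * tf.log(1 - up_probability + 1e-10))
loss = -tf.reduce_mean(log_prob * discounted_rewards)

train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
```

After each epoch you would feed the stored observations, the actions that were actually taken, and the discounted returns into `train_op`; well-scored moves get their probability pushed up, badly-scored ones get pushed down.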

Loss is not decreasing for convolutional autoencoder

I'm trying to train a convolutional autoencoder to encode and decode a piano-roll representation of monophonic MIDI clips. I reduced the note range to 3 octaves, divided the songs into 100-time-step pieces (where 1 time step = 1/100th of a second), and train the net in batches of 3 pieces.
I'm using Adagrad as my optimizer and MSE as my loss function. The loss is huge, and I see no decrease in average loss even after hundreds of training examples have been fed in.
Here's my code:
"""
Most absolutely simple assumptions:
- not changing the key of any of the files
- not changing the tempo of any of the files
- take blocks of 36 by 100
- divide up all songs by this amount, cutting off any excess from the
end, train
"""
from __future__ import print_function
import cPickle as pickle
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from reverse_pianoroll import piano_roll_to_pretty_midi as pr2pm
N = 1000
# load a NxMxC dataset
# N: Number of clips
# M: Piano roll size, the number of midi notes that could possibly be 'on'
# C: Clip length, in 100ths of a second
dataset = pickle.load(open('mh-midi-data.pickle', 'rb'))
######## take a subset of the data for training ######
# based on the mean and standard deviation of non zero entries in the data, I've
# found that the most populous, and thus best range of notes to take is from
# 48 to 84 (C2 - C5); this is 3 octaves, which is much less than the original
# 10 and a half. Additionally, we're going to take a subsample of 1000 because
# i'm training on my macbook and the network is pretty simple
######################################################
dataset = dataset[:, :, 48:84, :]
dataset = dataset[:N]
######################################################
midi_dim, clip_len = dataset.shape[2:]
class Autoencoder(nn.Module):
def __init__(self, **kwargs):
super(Autoencoder, self).__init__(**kwargs)
# input is 3 x 1 x 36 x 100
self.conv1 = nn.Conv2d(in_channels=1, out_channels=14, kernel_size=(midi_dim, 2))
# now transformed to 3 x 14 x 1 x 99
self.conv2 = nn.Conv2d(in_channels=14, out_channels=77, kernel_size=(1, 4))
# now transformed to 3 x 77 x 1 x 96
input_size = 3*77*1*96
self.fc1 = nn.Linear(input_size, input_size/2)
self.fc2 = nn.Linear(input_size/2, input_size/4)
self.fc3 = nn.Linear(input_size/4, input_size/2)
self.fc4 = nn.Linear(input_size/2, input_size)
self.tconv2 = nn.ConvTranspose2d(in_channels=77, out_channels=14, kernel_size=(1, 4))
self.tconv1 = nn.ConvTranspose2d(in_channels=14, out_channels=1, kernel_size=(midi_dim, 2))
self.sigmoid = nn.Sigmoid()
return
def forward(self, x):
# print("1: {}".format(x.size()))
x = F.relu(self.conv1(x))
# print("2: {}".format(x.size()))
x = F.relu(self.conv2(x))
# print("3: {}".format(x.size()))
x = x.view(-1, np.prod(x.size()[:]))
# print("4: {}".format(x.size()))
x = F.relu(self.fc1(x))
# print("5: {}".format(x.size()))
h = F.relu(self.fc2(x))
# print("6: {}".format(h.size()))
d = F.relu(self.fc3(h))
# print("7: {}".format(d.size()))
d = F.relu(self.fc4(d))
# print("8: {}".format(d.size()))
d = d.view(3, 77, 1, 96)
# print("9: {}".format(d.size()))
d = F.relu(self.tconv2(d))
# print("10: {}".format(d.size()))
d = self.tconv1(d)
d = self.sigmoid(d)
# print("11: {}".format(d.size()))
return d
net = Autoencoder()
loss_fn = nn.MSELoss()
# optimizer = optim.SGD(net.parameters(), lr=1e-3, momentum=0.9)
optimizer = optim.Adagrad(net.parameters(), lr=1e-3)
batch_count = 0
avg_loss = 0.0
print_every = 3
print("Beginning Training")
for epoch in xrange(2):
# for i, clip in enumerate(dataset):
for i in xrange(len(dataset)/3):
batch = dataset[(3*i):(3*i + 3), :, :]
# get the input, wrap it in a Variable
inpt = Variable(torch.from_numpy(batch).type(torch.FloatTensor))
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outpt = net(inpt)
loss = loss_fn(outpt, inpt)
loss.backward()
optimizer.step()
# print stats out
avg_loss += loss.data[0]
if batch_count % print_every == print_every - 1:
print('epoch: %d, batch_count: %d, loss: %.3f'%(
epoch + 1, batch_count + 1, avg_loss / print_every))
avg_loss = 0.0
batch_count += 1
print('Finished Training')
I'm really a beginner with this stuff, so any advice would be greatly appreciated.
Double check that you normalize your inpt to be in the range of 0 to 1. For instance, if you are working with images you could just divide the inpt variable by 255.
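A minimal sketch of what that looks like in the training loop above (the 255 figure is for 8-bit images; if your piano-roll entries are MIDI velocities they typically range 0-127, so check your data's actual maximum first):

```python
# scale the batch into [0, 1] before wrapping it in a Variable
batch = batch.astype('float32') / 255.0   # use 127.0 instead if the entries are MIDI velocities (an assumption)
inpt = Variable(torch.from_numpy(batch).type(torch.FloatTensor))
```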