Calling Class Function Without Period

New to Python, and it seems like a simple question, but I haven't been able to find an answer. I found this code on GitHub, and I can't understand why it doesn't say model.forward(x).
import torch
from torch import nn

class NeuralNet(nn.Module):
    def __init__(self):
        super(NeuralNet, self).__init__()
        self.flatten = nn.Flatten()

    def forward(self, x):
        tensor = self.flatten(x)
        return tensor

x = torch.rand(2, 2, 2, 2, 2, 2)
print(x)

model = NeuralNet()
output = model(x)
print(output)

Refer to Why there are different output between model.forward(input) and model(input), and also read about the __call__ method.
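Roughly speaking, nn.Module defines __call__ so that calling the instance runs forward() (plus any registered hooks). A minimal sketch of the idea, not PyTorch's actual implementation:

class Module:
    def __call__(self, *args, **kwargs):
        # (PyTorch also runs registered hooks around this call; omitted in this sketch)
        return self.forward(*args, **kwargs)

class NeuralNet(Module):
    def forward(self, x):
        return x * 2

model = NeuralNet()
print(model(3))  # 6 -- calling the instance dispatches to forward() via __call__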


Why does a model with one GRU layer return zero gradients?

I'm trying to compare two models in order to learn about the behaviour of the gradients.
import torch
import torch.nn as nn
import torchinfo

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.Identity = nn.Identity()
        self.GRU = nn.GRU(input_size=3, hidden_size=32, num_layers=2, batch_first=True)
        self.fc = nn.Linear(32, 5)

    def forward(self, input_series):
        self.Identity(input_series)
        output, h = self.GRU(input_series)
        output = output[:, -1, :]       # get last state
        output = self.fc(output)
        output = output.view(-1, 5, 1)  # reorganize output
        return output

class SecondModel(nn.Module):
    def __init__(self):
        super(SecondModel, self).__init__()
        self.GRU = nn.GRU(input_size=3, hidden_size=32, num_layers=2, batch_first=True)

    def forward(self, input_series):
        output, h = self.GRU(input_series)
        return output
Checking the gradient of the first model gives True (zero gradients):
model = MyModel()
x = torch.rand([2, 10, 3])
y = model(x)
y.retain_grad()
y[:, -1].sum().backward()
print(torch.allclose(y.grad[:, :-1], torch.tensor(0.))) # gradients w.r.t previous outputs are zeroes
Checking the gradient of the second model also gives True (zero gradients):
model = SecondModel()
x = torch.rand([2, 10, 3])
y = model(x)
y.retain_grad()
y[:, -1].sum().backward()
print(torch.allclose(y.grad[:, :-1], torch.tensor(0.))) # gradients w.r.t previous outputs are zeroes
According to the answer here:
Do linear layer after GRU saved the sequence output order?
the second model (with just a GRU layer) should give non-zero gradients.
What am I missing?
When do we get zero or non-zero gradients?
The values of y.grad[:, :-1] theoretically shouldn't be zero, but here they are because y[:, :-1] doesn't seem to refer to the same tensor objects used to compute y[:, -1] in the GRU implementation. As an illustration, a simple 1-layer GRU implementation looks like this:
import torch
import torch.nn as nn

class GRU(nn.Module):
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.lin_r = nn.Linear(input_size + hidden_size, hidden_size)
        self.lin_z = nn.Linear(input_size + hidden_size, hidden_size)
        self.lin_in = nn.Linear(input_size, hidden_size)
        self.lin_hn = nn.Linear(hidden_size, hidden_size)
        self.hidden_size = hidden_size

    def forward(self, x):
        bsz, len_, in_ = x.shape
        h = torch.zeros([bsz, self.hidden_size])
        hs = []
        for i in range(len_):
            r = self.lin_r(torch.cat([x[:, i], h], dim=-1)).sigmoid()
            z = self.lin_z(torch.cat([x[:, i], h], dim=-1)).sigmoid()
            n = (self.lin_in(x[:, i]) + r * self.lin_hn(h)).tanh()
            h = (1. - z) * n + z * h
            hs.append(h)
        # Return the output both as a single tensor and as a list of
        # tensors actually used in computing the hidden vectors
        return torch.stack(hs, dim=1), hs
Then, we have
model = GRU(input_size=3, hidden_size=32)
x = torch.rand([2, 10, 3])
y, hs = model(x)
y.retain_grad()
for h in hs:
    h.retain_grad()
y[:, -1].sum().backward()
print(torch.allclose(y.grad[:, -1], torch.tensor(0.))) # False, as expected (sanity check)
print(torch.allclose(y.grad[:, :-1], torch.tensor(0.))) # True, unexpected
print(any(torch.allclose(h.grad, torch.tensor(0.)) for h in hs)) # False, as expected
It appears PyTorch computes the gradients w.r.t. all tensors in hs as expected, but not those w.r.t. y.
So, to answer your question:
I don't think you are missing anything. The linked answer is just not quite right, as it incorrectly assumes PyTorch would compute y.grad as expected.
The theory given as a comment in the linked answer is still right, but not quite complete: the gradient is always zero iff the input doesn't matter.
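As a quick cross-check (a sketch reusing SecondModel from the question), the gradients w.r.t. the inputs of the earlier time steps are non-zero, which confirms that the earlier steps do influence the last output even though y.grad[:, :-1] comes out as zeros:

model = SecondModel()
x = torch.rand([2, 10, 3], requires_grad=True)
y = model(x)
y[:, -1].sum().backward()
# Typically False: earlier inputs do affect the last output
print(torch.allclose(x.grad[:, :-1], torch.tensor(0.)))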

NotImplementedError when using torch.onnx.export

I am trying to convert a pre-saved PyTorch model into a TensorFlow one via ONNX. For now, the following code exports the model into the .onnx format. The neural network has 2 inputs, one hidden layer with 5 neurons and a scalar output.
Here's the code I'm working with:
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np

class Model(nn.Module):
    def __init__(self, n_h_layers, n_h_neurons, dim_in, dim_out, in_bound, out_bound):
        super(Model, self).__init__()
        self.n_h_layers = n_h_layers
        self.n_h_neurons = n_h_neurons
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.in_bound = in_bound
        self.out_bound = out_bound

        layer_input = [nn.Linear(dim_in, n_h_neurons, bias=True)]
        layer_output = [nn.ReLU(), nn.Linear(n_h_neurons, dim_out, bias=True), nn.Hardtanh(in_bound, out_bound)]

        # hidden layers
        module_hidden = [[nn.ReLU(), nn.Linear(n_h_neurons, n_h_neurons, bias=True)] for _ in range(n_h_layers - 1)]
        layer_hidden = list(np.array(module_hidden).flatten())

        # nn model
        layers = layer_input + layer_hidden + layer_output
        self.model = nn.Sequential(*layers)
        print(self.model)

trained_nn = torch.load('path')

trained_model = Model(1, 5, 2, 1, -1, 1)
trained_model.load_state_dict(trained_nn, strict=False)

dummy_input = Variable(torch.randn(1, 2))
torch.onnx.export(trained_model, dummy_input, 'file.onnx', verbose=True)
I have two problems:
Running this snippet raises a "NotImplementedError" in _forward_unimplemented in module.py, as follows:
File ".../anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 201, in _forward_unimplemented
raise NotImplementedError
NotImplementedError
I am not familiar with exception handling in Python, and I do not know what I must change in order to fix the error.
When I print trained_nn, this is what it gives me:
OrderedDict([('0.weight',
tensor([[ 0.2035, -0.7679],
[ 1.6368, -0.4135],
[-0.0908, -0.2335],
[ 1.3731, -0.3135],
[ 0.6361, 0.2521]])),
('0.bias', tensor([-1.6907, 0.7262, 1.4032, 1.2551, 0.8013])),
('2.weight',
tensor([[-0.4603, -0.0719, 0.4082, -1.0235, -0.0538]])),
('2.bias', tensor([-1.1568]))])
However, printing trained_model.state_dict() gives me a completely different set of weights and biases, although I believe it should give me exactly the same model as before, since that is what I need to export to the ONNX file:
OrderedDict([('model.0.weight',
tensor([[ 0.4817, 0.0928],
[-0.4313, 0.1253],
[ 0.6681, -0.4029],
[ 0.6474, 0.0029],
[-0.4663, 0.5029]])),
('model.0.bias',
tensor([-0.2292, 0.6674, -0.3755, 0.0778, 0.0527])),
('model.2.weight',
tensor([[-0.2097, -0.3029, 0.2792, 0.2596, 0.1362]])),
('model.2.bias', tensor([-0.1835]))])
Not sure what mistakes I'm making. Any help is appreciated.
When you subclass nn.Module, you need to implement the forward method. In your case you need to add:
class Model(nn.Module):
    def __init__(self, n_h_layers, n_h_neurons, dim_in, dim_out, in_bound, out_bound):
        super(Model, self).__init__()
        ...

    def forward(self, x):
        return self.model(x)
The names of the parameters do not match:
model.0.weight != 0.weight
model.0.bias != 0.bias
The model. prefix is missing, so when you call load_state_dict() with strict=False, the mismatched parameters are simply skipped.
You can rename the checkpoint keys to match the model:
trained_nn = torch.load('path')
trained_nn = {f'model.{name}': w for name, w in trained_nn.items()}
trained_model.load_state_dict(trained_nn, strict=True)
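Alternatively (a sketch, assuming the saved keys really correspond to the inner nn.Sequential stored as self.model), you can load the checkpoint directly into that submodule and avoid renaming:

trained_nn = torch.load('path')
trained_model.model.load_state_dict(trained_nn, strict=True)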

How can I share a module in PyTorch?

I want to share an embedding between my encoder and decoder, so I do something like this:
from torch import nn

class FooDecoder(nn.Module):
    def __init__(self, embedding):
        super().__init__()
        self.embedding = embedding

class FooEncoder(nn.Module):
    def __init__(self, embedding):
        super().__init__()
        self.embedding = embedding

class Foo(nn.Module):
    def __init__(self):
        super().__init__()
        self.embedding = nn.Embedding(10, 10)
        self.encoder = FooEncoder(self.embedding)
        self.decoder = FooDecoder(self.embedding)

model = Foo()
But I find that the state_dict contains what look like duplicated parameters:
state_dict = model.state_dict()
print(state_dict.keys()) # odict_keys(['embedding.weight', 'encoder.embedding.weight', 'decoder.embedding.weight'])
It seems PyTorch copies the embedding. I want to know:
Will embedding.weight, encoder.embedding.weight and decoder.embedding.weight always stay the same during forward and backward propagation?
Does this mean the number of parameters increases if I do this (because I have three embedding weights)?
Is there a better way to do this?
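For what it's worth, here is a quick check you can run (a sketch reusing the Foo class above; the count assumes the nn.Embedding(10, 10) from the question):

model = Foo()

# All three attributes refer to the very same Parameter object.
print(model.embedding.weight is model.encoder.embedding.weight)  # True
print(model.embedding.weight is model.decoder.embedding.weight)  # True

# parameters() de-duplicates shared parameters, so the count does not triple.
print(sum(p.numel() for p in model.parameters()))  # 100, i.e. one 10x10 embedding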

'Net' object has no attribute 'parameters'

I am fairly new to machine learning. I learned to write this code from YouTube tutorials, but I keep getting this error:
Traceback (most recent call last):
  File "<input>", line 1, in <module>
  File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_bundle/pydev_umd.py", line 197, in runfile
    pydev_imports.execfile(filename, global_vars, local_vars)  # execute the script
  File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "/Users/aniket/Desktop/DeepLearning/PythonLearningPyCharm/CatVsDogs.py", line 109, in <module>
    optimizer = optim.Adam(net.parameters(), lr=0.001) # tweaks the weights from what I understand
AttributeError: 'Net' object has no attribute 'parameters'
This is the Net class:
class Net():
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.conv2 = nn.Conv2d(32, 64, 5)
        self.conv3 = nn.Conv2d(64, 128, 5)
        self.to_linear = None
        x = torch.randn(50, 50).view(-1, 1, 50, 50)
        self.Conv2d_Linear_Link(x)
        self.fc1 = nn.Linear(self.to_linear, 512)
        self.fc2 = nn.Linear(512, 2)

    def Conv2d_Linear_Link(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
        if self.to_linear is None:
            self.to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
        return x

    def forward(self, x):
        x = self.Conv2d_Linear_Link(x)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.softmax(x, dim=1)
and this is the train function:
def train():
    for epoch in range(epochs):
        for i in tqdm(range(0, len(X_train), batch)):
            batch_x = train_X[i:i + batch].view(-1, 1, 50, 50)
            batch_y = train_y[i:i + batch]
            net.zero_grad()  # i don't understand why we do this, but we don't want the probabilities adding up
            output = net(batch_x)
            loss = loss_function(output, batch_y)
            loss.backward()
            optimizer.step()
        print(loss)
and these are the optimizer and loss function:
optimizer = optim.Adam(net.parameters(), lr=0.001) # tweaks the weights from what I understand
loss_function = nn.MSELoss() # gives the loss
You're not subclassing nn.Module. It should look like this:
class Net(nn.Module):
    def __init__(self):
        super().__init__()
This allows your network to inherit all the properties of the nn.Module class, such as the parameters attribute.
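With that one change, something along these lines (a sketch; it assumes the torch, nn, F and optim imports from your script) should no longer raise the AttributeError:

net = Net()  # Net now subclasses nn.Module
optimizer = optim.Adam(net.parameters(), lr=0.001)  # parameters() is inherited from nn.Module
print(sum(p.numel() for p in net.parameters()))     # total number of trainable weights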
You may also have a spelling problem; check your Net class to see which parameters it actually has.
You need to import optim from torch
from torch import optim

Keras Functional API and loss function with multiple inputs

I am trying to use a custom Keras loss function that apart from the usual signature (y_true, y_pred) takes another parameter sigma (which is also produced by the last layer of the network).
The training works fine, but then I am not sure how to perform forward propagation and return sigma (while mu is the output of the model.predict method).
This is the code I am using, which features a custom layer GaussianLayer that returns the list [mu, sigma].
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Dense, Layer, Dropout
from keras.models import Model
from keras.initializers import glorot_normal
import numpy as np

def custom_loss(sigma):
    def gaussian_loss(y_true, y_pred):
        return tf.reduce_mean(0.5*tf.log(sigma) + 0.5*tf.div(tf.square(y_true - y_pred), sigma)) + 10
    return gaussian_loss

class GaussianLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(GaussianLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel_1 = self.add_weight(name='kernel_1',
                                        shape=(30, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.kernel_2 = self.add_weight(name='kernel_2',
                                        shape=(30, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.bias_1 = self.add_weight(name='bias_1',
                                      shape=(self.output_dim, ),
                                      initializer=glorot_normal(),
                                      trainable=True)
        self.bias_2 = self.add_weight(name='bias_2',
                                      shape=(self.output_dim, ),
                                      initializer=glorot_normal(),
                                      trainable=True)
        super(GaussianLayer, self).build(input_shape)

    def call(self, x):
        output_mu = K.dot(x, self.kernel_1) + self.bias_1
        output_sig = K.dot(x, self.kernel_2) + self.bias_2
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        return [output_mu, output_sig_pos]

    def compute_output_shape(self, input_shape):
        return [(input_shape[0], self.output_dim), (input_shape[0], self.output_dim)]

# This returns a tensor
inputs = Input(shape=(1,))
x = Dense(30, activation='relu')(inputs)
x = Dropout(0.3)(x)
x = Dense(30, activation='relu')(x)
x = Dense(40, activation='relu')(x)
x = Dropout(0.3)(x)
x = Dense(30, activation='relu')(x)

mu, sigma = GaussianLayer(1)(x)
model = Model(inputs, mu)
model.compile(loss=custom_loss(sigma), optimizer='adam')
model.fit(train_x, train_y, epochs=150)
Since your model returns two tensors as output, you also need to pass a list of two arrays as targets when calling the fit() method. That's essentially what the error is trying to convey:
Error when checking model target:
So the error is in the targets (i.e. labels). What is wrong?
the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 2 array(s), but instead got the following list of 1 arrays:
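For instance (a sketch only; the duplicated train_y is just a placeholder target for the sigma output), building the model with both tensors as outputs and passing two target arrays would satisfy that expectation:

two_output_model = Model(inputs, [mu, sigma])
two_output_model.compile(loss=custom_loss(sigma), optimizer='adam')
two_output_model.fit(train_x, [train_y, train_y], epochs=150)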
I may have found the answer among the Keras FAQs.
It turns out that it is possible to retrieve the output of intermediate steps using the code snippet below:
layer_name = 'main_output'
intermediate_layer_model = Model(inputs=model.input,
                                 outputs=model.get_layer(layer_name).output)
intermediate_output = intermediate_layer_model.predict(train_x[0])
intermediate_output
In this case intermediate_output is a list of two values, [mu, sigma] (I just needed to name the output layer main_output and retrieve it later).
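Another option along the same lines (a sketch that simply reuses the already-built mu and sigma tensors, so the trained layers are shared) is to create a second Model with both tensors as outputs and call predict on it:

pred_model = Model(inputs, [mu, sigma])
mu_pred, sigma_pred = pred_model.predict(train_x)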