How can I share a module in PyTorch? - deep-learning

I want to share my embedding between an Encoder and a Decoder, so I do something like this:
from torch import nn

class FooDecoder(nn.Module):
    def __init__(self, embedding):
        super().__init__()
        self.embedding = embedding

class FooEncoder(nn.Module):
    def __init__(self, embedding):
        super().__init__()
        self.embedding = embedding

class Foo(nn.Module):
    def __init__(self):
        super().__init__()
        self.embedding = nn.Embedding(10, 10)
        self.encoder = FooEncoder(self.embedding)
        self.decoder = FooDecoder(self.embedding)

model = Foo()
But I find that the same parameter appears under multiple keys in state_dict:
state_dict = model.state_dict()
print(state_dict.keys()) # odict_keys(['embedding.weight', 'encoder.embedding.weight', 'decoder.embedding.weight'])
It seems PyTorch copies the embedding. I want to know:
Will embedding.weight, encoder.embedding.weight and decoder.embedding.weight always stay identical during forward and backward propagation?
Does this mean the parameter count increases if I do this? (Because I have three embedding weights.)
Is there a better way to do this?
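A quick check (added here for illustration, not part of the original question) suggests the three keys alias a single parameter rather than three copies: the submodules hold the very same Parameter object, and the state_dict entries share storage.

model = Foo()

# All three modules reference the same Parameter object, so gradients
# from encoder and decoder accumulate into a single weight during backprop.
print(model.embedding.weight is model.encoder.embedding.weight)  # True
print(model.embedding.weight is model.decoder.embedding.weight)  # True

# The duplicated state_dict keys point at the same underlying storage.
sd = model.state_dict()
print(sd['embedding.weight'].data_ptr() == sd['encoder.embedding.weight'].data_ptr())  # True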

Related

NotImplementedError when using torch.onnx.export

I am trying to convert a pre-saved PyTorch model into a TensorFlow one via ONNX. For now, the following code exports the model into the .onnx format. The neural network has 2 inputs, one hidden layer with 5 neurons, and a scalar output.
Here's the code I'm working with:
import torch
import torch.nn as nn
from torch.autograd import Variable  # deprecated; plain tensors work in modern PyTorch
import numpy as np

class Model(nn.Module):
    def __init__(self, n_h_layers, n_h_neurons, dim_in, dim_out, in_bound, out_bound):
        super(Model, self).__init__()
        self.n_h_layers = n_h_layers
        self.n_h_neurons = n_h_neurons
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.in_bound = in_bound
        self.out_bound = out_bound
        layer_input = [nn.Linear(dim_in, n_h_neurons, bias=True)]
        layer_output = [nn.ReLU(), nn.Linear(n_h_neurons, dim_out, bias=True), nn.Hardtanh(in_bound, out_bound)]
        # hidden layers
        module_hidden = [[nn.ReLU(), nn.Linear(n_h_neurons, n_h_neurons, bias=True)] for _ in range(n_h_layers - 1)]
        layer_hidden = list(np.array(module_hidden).flatten())
        # nn model
        layers = layer_input + layer_hidden + layer_output
        self.model = nn.Sequential(*layers)
        print(self.model)

trained_nn = torch.load('path')
trained_model = Model(1, 5, 2, 1, -1, 1)
trained_model.load_state_dict(trained_nn, strict=False)
dummy_input = Variable(torch.randn(1, 2))
torch.onnx.export(trained_model, dummy_input, 'file.onnx', verbose=True)
I have two problems:
Running this snippet raises NotImplementedError in _forward_unimplemented in module.py, as follows:
File ".../anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 201, in _forward_unimplemented
raise NotImplementedError
NotImplementedError
I am not familiar with exception handling in Python, and I do not know what I must change in order to fix the error.
When I print trained_nn, this is what it gives me:
OrderedDict([('0.weight',
              tensor([[ 0.2035, -0.7679],
                      [ 1.6368, -0.4135],
                      [-0.0908, -0.2335],
                      [ 1.3731, -0.3135],
                      [ 0.6361,  0.2521]])),
             ('0.bias', tensor([-1.6907,  0.7262,  1.4032,  1.2551,  0.8013])),
             ('2.weight',
              tensor([[-0.4603, -0.0719,  0.4082, -1.0235, -0.0538]])),
             ('2.bias', tensor([-1.1568]))])
However, printing trained_model.state_dict() gives me a network with a completely different set of weights and biases, although I believe it should give me exactly the same model as before, since that is what I need to save as an .onnx file:
OrderedDict([('model.0.weight',
              tensor([[ 0.4817,  0.0928],
                      [-0.4313,  0.1253],
                      [ 0.6681, -0.4029],
                      [ 0.6474,  0.0029],
                      [-0.4663,  0.5029]])),
             ('model.0.bias',
              tensor([-0.2292,  0.6674, -0.3755,  0.0778,  0.0527])),
             ('model.2.weight',
              tensor([[-0.2097, -0.3029,  0.2792,  0.2596,  0.1362]])),
             ('model.2.bias', tensor([-0.1835]))])
Not sure what mistakes I'm making. Any help is appreciated.
When you make a subclass of nn.Module, you need to implement the forward method. In your case you need to add:
class Model(nn.Module):
    def __init__(self, n_h_layers, n_h_neurons, dim_in, dim_out, in_bound, out_bound):
        super(Model, self).__init__()
        ...

    def forward(self, x):
        return self.model(x)
The names of the parameters do not match:
model.0.weight != 0.weight
model.0.bias != 0.bias
The model. prefix is missing. So when you call load_state_dict() with strict=False, those parameters are silently skipped.
You can rename the parameters to match the model:
trained_nn = torch.load('path')
# add the missing 'model.' prefix to every key
trained_nn = {f'model.{name}': w for name, w in trained_nn.items()}
trained_model.load_state_dict(trained_nn, strict=True)
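As an optional sanity check (an addition, not part of the original answer), load_state_dict returns the keys it could not match, which makes the missing model. prefix easy to spot before exporting:

result = trained_model.load_state_dict(trained_nn, strict=False)
print(result.missing_keys)     # non-empty before the rename, e.g. ['model.0.weight', ...]
print(result.unexpected_keys)  # the un-prefixed keys, e.g. ['0.weight', ...]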

How to combine two trained models using PyTorch?

I'm currently working on two models that use different types of data but are connected. I'd like to create a combined model that takes in one instance of each data type, runs each through its own pre-trained model independently, and then processes the combined output of the two models through a few feed-forward layers at the top.
So far, I've learned that I can change forward to accept both inputs, so I've cloned the structures of my individual models into the combined one, processed each input individually through its own layers in forward, and then merged the outputs as specified. What I'm having trouble with is figuring out how to implement this.
As I understand from your question, you can create the two models, then you need a third model that combines both networks in its forward; in __main__ you can then load each state_dict.
For example, the first model:
import torch
import torch.nn as nn
import torch.nn.functional as F

class FirstM(nn.Module):
    def __init__(self):
        super(FirstM, self).__init__()
        self.fc1 = nn.Linear(20, 2)

    def forward(self, x):
        x = self.fc1(x)
        return x
The second model:
class SecondM(nn.Module):
    def __init__(self):
        super(SecondM, self).__init__()
        self.fc1 = nn.Linear(20, 2)

    def forward(self, x):
        x = self.fc1(x)
        return x
Here you create a model that merges the two models, as follows:
class Combined_model(nn.Module):
    def __init__(self, modelA, modelB):
        super(Combined_model, self).__init__()
        self.modelA = modelA
        self.modelB = modelB
        self.classifier = nn.Linear(4, 2)

    def forward(self, x1, x2):
        x1 = self.modelA(x1)
        x2 = self.modelB(x2)
        x = torch.cat((x1, x2), dim=1)
        x = self.classifier(F.relu(x))
        return x
Then, outside the classes, in the main you can do the following:
# Create models and load state_dicts
modelA = FirstM()
modelB = SecondM()

# Load state dicts
modelA.load_state_dict(torch.load(PATH))
modelB.load_state_dict(torch.load(PATH))

model = Combined_model(modelA, modelB)
x1, x2 = torch.randn(1, 20), torch.randn(1, 20)  # both models expect 20 input features
output = model(x1, x2)
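If the two pre-trained parts should stay fixed while only the new head is trained (a common setup, though the answer does not specify it), a minimal sketch would be:

# Freeze the pre-trained submodels (assumption: only the head is trained).
for p in model.modelA.parameters():
    p.requires_grad = False
for p in model.modelB.parameters():
    p.requires_grad = False

# Optimize only the classifier head.
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=1e-3)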

Calling Class Function Without Period

New to Python, and it seems like a simple question, but I haven't been able to find an answer. I found this code on GitHub. I can't understand why it doesn't say model.forward(x).
import torch
from torch import nn

class NeuralNet(nn.Module):
    def __init__(self):
        super(NeuralNet, self).__init__()
        self.flatten = nn.Flatten()

    def forward(self, x):
        tensor = self.flatten(x)
        return tensor

x = torch.rand(2, 2, 2, 2, 2, 2)
print(x)
model = NeuralNet()
output = model(x)
print(output)
Refer to Why there are different outputs between model.forward(input) and model(input), and also read about the __call__ method.
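A minimal sketch of the mechanism (added for illustration, not from the original answer): defining __call__ makes an instance callable like a function, and nn.Module uses exactly this to run its hooks and then dispatch to forward.

class Greeter:
    def __call__(self, name):
        # runs when the instance itself is invoked: g("world")
        return f"hello {name}"

g = Greeter()
print(g("world"))  # equivalent to g.__call__("world")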

CNN + RNN architecture for video recognition

I am trying to replicate the ConvNet + LSTM approach presented in this paper using PyTorch, but I am struggling to find the correct way to combine the CNN and the LSTM in my model. Here is my attempt:
class VideoRNN(nn.Module):
    def __init__(self, hidden_size, n_classes):
        super(VideoRNN, self).__init__()
        self.hidden_size = hidden_size
        vgg = models.vgg16(pretrained=True)
        embed = nn.Sequential(*list(vgg.classifier.children())[:-1])
        vgg.classifier = embed
        for param in vgg.parameters():
            param.requires_grad = False
        self.embedding = vgg
        self.gru = nn.GRU(4096, hidden_size)

    def forward(self, input, hidden=None):
        embedded = self.embedding(input)
        output, hidden = self.gru(embedded, hidden)
        output = self.classifier(output.view(-1, 4096))
        return output, hidden
As my videos have variable length, I provide a PackedSequence as an input. It is created from a Tensor with shape (M,B,C,H,W) where M is the maximum sequence length and B the batch size. The C,H,W are the channels, height and width of each frame.
I want the pre-trained CNN to be part of the model as I may later unfreeze some layer to finetune the CNN for my task. That's why I didn't compute the embedding of the images separately.
My questions are then the following:
Is the shape of my input data correct for handling batches of videos in this context, or should I use something other than a PackedSequence?
In my forward function, how can I handle a batch of sequences of images with my VGG and my GRU unit? I cannot feed the PackedSequence directly to my VGG, so how can I proceed?
Does this approach respect the "pytorch way of doing things", or is my approach flawed?
I finally found the solution to make it work. Here is a simplified yet complete example of how I managed to create a VideoRNN able to take a PackedSequence as input:
class VideoRNN(nn.Module):
    def __init__(self, n_classes, batch_size, device):
        super(VideoRNN, self).__init__()
        self.batch = batch_size
        self.device = device

        # Loading a VGG16
        vgg = models.vgg16(pretrained=True)

        # Removing the last layer of VGG16
        embed = nn.Sequential(*list(vgg.classifier.children())[:-1])
        vgg.classifier = embed

        # Freezing the VGG parameters
        for param in vgg.parameters():
            param.requires_grad = False

        self.embedding = vgg
        self.gru = nn.LSTM(4096, 2048, bidirectional=True)

        # Classification layer (*2 because bidirectional)
        self.classifier = nn.Sequential(
            nn.Linear(2048 * 2, 256),
            nn.ReLU(),
            nn.Linear(256, n_classes),
        )

    def forward(self, input):
        # one layer, two directions -> first dimension is 2
        hidden = torch.zeros(2, self.batch, 2048).to(self.device)
        c_0 = torch.zeros(2, self.batch, 2048).to(self.device)
        embedded = self.simple_elementwise_apply(self.embedding, input)
        output, hidden = self.gru(embedded, (hidden, c_0))
        hidden = hidden[0].view(-1, 2048 * 2)
        output = self.classifier(hidden)
        return output

    def simple_elementwise_apply(self, fn, packed_sequence):
        # apply fn to the flat .data of the PackedSequence and rewrap it
        return torch.nn.utils.rnn.PackedSequence(
            fn(packed_sequence.data), packed_sequence.batch_sizes
        )
The key is the simple_elementwise_apply method, which lets you feed a PackedSequence through the CNN and retrieve a new PackedSequence made of embeddings as output.
I hope you'll find it useful.
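For completeness, here is a hypothetical usage sketch (an addition; the shapes, lengths, and class count are illustrative) showing how a padded batch of videos of shape (M, B, C, H, W) could be packed and fed to the model defined above:

import torch
from torch.nn.utils.rnn import pack_padded_sequence

videos = torch.randn(10, 4, 3, 224, 224)  # M=10 frames, B=4 videos, 3x224x224 frames
lengths = torch.tensor([10, 8, 6, 3])     # true frame count per video, sorted descending

packed = pack_padded_sequence(videos, lengths)  # time-major, matching (M, B, C, H, W)
model = VideoRNN(n_classes=5, batch_size=4, device='cpu')
logits = model(packed)  # shape (4, 5): one score vector per video
print(logits.shape)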

Python 3 Accessing variable result from another Class

I have a little problem with a variable update.
My variable is declared in my first function as self.TestVar = 0; then, if a certain count == 2, it becomes self.TestVar = 2.
In a second function (in the same class), but called from within another class, I want to return self.TestVar. No way:
AttributeError: 'ThndClass' object has no attribute 'TestVar'
I am most certainly not doing this the right way. All I want is to access self.TestVar = 2 from my other class, but I can't find a proper way to do so in Python.
It looks like my issue is that I set self.TestVar = 2 inside an if statement, which makes it live in another scope (or I might be wrong).
import sys
from PIL import Image
from PyQt4 import QtCore, QtGui

class MainWindow(QtGui.QWidget):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.initUI()

    def initUI(self):
        self.TestVar = 0
        self.TheCount = 2
        if self.TheCount == 2:
            self.TestVar = 2
        ThndClass()

    def Getit(self):
        print("called correctly")
        print(self.TestVar)
        return self.TestVar

def main():
    app = QtGui.QApplication([])
    mw = MainWindow()
    sys.exit(app.exec_())

class ThndClass(QtGui.QWidget):
    def __init__(self):
        super(ThndClass, self).__init__()
        self.initUI2()

    def initUI2(self):
        print("Class Called")
        print(MainWindow.Getit(self))

if __name__ == '__main__':
    main()
If I remove the 2nd Class call :
import sys
from PIL import Image
from PyQt4 import QtCore, QtGui

class MainWindow(QtGui.QWidget):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.initUI()

    def initUI(self):
        self.TestVar = 0
        self.TheCount = 2
        if self.TheCount == 2:
            self.TestVar = 2
        self.Getit()

    def Getit(self):
        print("called correctly")
        print(self.TestVar)
        return self.TestVar

def main():
    app = QtGui.QApplication([])
    mw = MainWindow()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
This works correctly, but I want to be able to call Getit() from another class and get my result, or simply find a way to access self.TestVar directly from my other class.
When you call
MainWindow.Getit(self)
in ThndClass.initUI2, you are treating MainWindow and ThndClass interchangeably, when they do not have the same attributes. Here is an actual minimal example:
class Parent():
    def __init__(self):
        pass

class Child1(Parent):
    def __init__(self):
        super().__init__()
        self.foo = "foo"

    def method(self):
        print(type(self))
        print(self.foo)

class Child2(Parent):
    def __init__(self):
        super().__init__()
        self.bar = "bar"

c1 = Child1()
Child1.method(c1)  # pass Child1 instance to Child1 instance method

c2 = Child2()
Child1.method(c2)  # pass Child2 instance to Child1 instance method
and full output:
<class '__main__.Child1'> # gets a Child1 instance
foo # first call succeeds
<class '__main__.Child2'> # gets a Child2 instance (which doesn't have 'foo')
Traceback (most recent call last):
File "C:/Python34/so.py", line 25, in <module>
Child1.method(c2)
File "C:/Python34/so.py", line 11, in method
print(self.foo)
AttributeError: 'Child2' object has no attribute 'foo' # second call fails
However, as it is not clear what exactly the code is supposed to be doing, I can't suggest a fix. I don't know why you create but don't assign a ThndClass instance in MainWindow.initUI, for example.
Here is one possible fix: pass a Child1 instance to Child2.__init__, then either use it as an argument to Child2.method:
class Child2(Parent):
    def __init__(self, c1):  # provide Child1 instance as parameter
        super().__init__()
        self.bar = "bar"
        self.method(c1)  # pass instance to Child2.method

    def method(self, c1):
        c1.method()  # call Child1.method with c1 as self parameter
(Note that c1.method() is equivalent to Child1.method(c1).)
or make it an instance attribute:
class Child2(Parent):
    def __init__(self, c1):  # provide Child1 instance as parameter
        super().__init__()
        self.bar = "bar"
        self.c1 = c1  # make Child1 instance a Child2 instance attribute
        self.method()  # now no argument needed

    def method(self):
        self.c1.method()  # call Child1.method with self.c1 as the self parameter
(Note that self.c1.method() is equivalent to Child1.method(self.c1).)
In use (either way):
>>> c1 = Child1()
>>> c2 = Child2(c1)
<class '__main__.Child1'> # Child1.method gets a Child1 instance
foo # and is called successfully
Thanks to your help, jonrsharpe, here's my working code :)
import sys
from PIL import Image
from PyQt4 import QtCore, QtGui

class MainWindow(QtGui.QWidget):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.initUI()

    def initUI(self):
        self.TestVar = 0
        self.TheCount = 2
        if self.TheCount == 2:
            self.TestVar = 2
        Themain = self
        ThndClass(Themain)

    def Getit(self):
        print("called correctly")
        print(self.TestVar)
        return self.TestVar

def main():
    app = QtGui.QApplication([])
    mw = MainWindow()
    sys.exit(app.exec_())

class ThndClass(QtGui.QWidget):
    def __init__(self, Themain):
        super(ThndClass, self).__init__()
        self.Themain = Themain
        self.initUI2()

    def initUI2(self):
        print("Class Called")
        print(self.Themain.Getit())

if __name__ == '__main__':
    main()
All working well now :) Thank you very much!