import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.models as models
class AlexSal(nn.Module):
    def __init__(self):
        super(AlexSal, self).__init__()
        self.features = nn.Sequential(*list(torch.load('alexnet_places365.pth.tar').features.children())[:-2])
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.conv6 = nn.Conv2d(256, 1, kernel_size=(1, 1), stride=(1, 1))

    def forward(self, x):
        x = self.relu(self.features(x))
        x = self.sigmoid(self.conv6(x))
        x = x.squeeze(1)
        return x
model = AlexSal().cuda()
Traceback (most recent call last):
  File "main.py", line 23, in <module>
    model = AlexSal().cuda()
  File "main.py", line 13, in __init__
    self.features = nn.Sequential(*list(torch.load('alexnet_places365.pth.tar').features.children())[:-2])
AttributeError: 'dict' object has no attribute 'features'
I got this piece of code from the internet and downloaded alexnet_places365.pth.tar, but when I run it, it shows the above error.
It looks like torch.load('alexnet_places365.pth.tar') does not contain an object with a member features, but instead a state dict as described here.
I would suggest you print out the result of torch.load('alexnet_places365.pth.tar') and then look for an entry features.
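A minimal sketch of that approach, assuming the checkpoint is (or wraps) a plain state dict; the key names here are assumptions and may differ in your file:

import torch
import torchvision.models as models

# Inspect what the checkpoint actually contains.
checkpoint = torch.load('alexnet_places365.pth.tar', map_location='cpu')
print(type(checkpoint), list(checkpoint.keys())[:5])

# If it is a state dict (possibly nested under a 'state_dict' key), load it
# into a model instance first, then take .features from that instance.
state_dict = checkpoint.get('state_dict', checkpoint)
model = models.alexnet(num_classes=365)
model.load_state_dict(state_dict, strict=False)  # strict=False skips keys that don't match
features = model.features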
I am working on cardiac CT data (axial, sagittal, coronal) using the pre-trained VGG-16 model, but I got the following error. According to the error my dimensions don't match, yet as far as I can tell my code is correct. I have already tried to fix it and still get the same error. Can somebody guide me? Below are the error and the code.
Traceback (most recent call last):
  File "ct_pretrained.py", line 199, in <module>
    loss, metric = train(model, train_loader, optimizer)
  File "ct_pretrained.py", line 57, in train
    output = model(axial, sagittal, coronal, emr)
  File "/root/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/data/heart_ct/torch/models/test.py", line 38, in forward
    axial_feature = self.axial_model(axial)
  File "/root/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/root/miniconda/lib/python3.8/site-packages/torchvision/models/vgg.py", line 46, in forward
    x = self.classifier(x)
  File "/root/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/root/miniconda/lib/python3.8/site-packages/torch/nn/modules/linear.py", line 93, in forward
    return F.linear(input, self.weight, self.bias)
  File "/root/miniconda/lib/python3.8/site-packages/torch/nn/functional.py", line 1690, in linear
    ret = torch.addmm(bias, input, weight.t())
RuntimeError: mat1 dim 1 must match mat2 dim 0
The code is:
import torch
import torch.nn as nn
from torchvision import models

__all__ = ['VGG']

class VGG(nn.Module):
    def __init__(self, is_emr=False, mode='sum'):
        super().__init__()
        self.is_emr = is_emr
        self.mode = mode

        in_dim = 45
        self.axial_model = models.vgg16(pretrained=True)
        out_channels = self.axial_model.features[0].out_channels
        self.axial_model.features[0] = nn.Conv2d(1, out_channels, kernel_size=7, stride=1, padding=0, bias=False)
        self.axial_model.features[3] = nn.MaxPool2d(1)
        num_ftrs = self.axial_model.classifier.in_features  # error in this line of code
        self.axial_model.classifier = nn.Linear(num_ftrs, 15)

        self.sa_co_model = models.vgg16(pretrained=True)
        self.sa_co_model.features[0] = nn.Conv2d(1, out_channels, kernel_size=7, stride=1, padding=(3,0), bias=False)
        self.sa_co_model.features[3] = nn.MaxPool2d(1)
        self.sa_co_model.classifier = nn.Linear(num_ftrs, 15)

        if self.is_emr:
            self.emr_model = EMRModel()
            if self.mode == 'concat': in_dim = 90

        self.classifier = Classifier(in_dim)

    def forward(self, axial, sagittal, coronal, emr):
        axial = axial[:,:,:-3,:-3]
        sagittal = sagittal[:,:,:,:-3]
        coronal = coronal[:,:,:,:-3]

        axial_feature = self.axial_model(axial)
        sagittal_feature = self.sa_co_model(sagittal)
        coronal_feature = self.sa_co_model(coronal)
        out = torch.cat([axial_feature, sagittal_feature, coronal_feature], dim=1)

        if self.is_emr:
            emr_feature = self.emr_model(emr)
            if self.mode == 'concat':
                out = torch.cat([out, emr_feature], dim=1)
            elif self.mode == 'sum':
                out += emr_feature

        out = self.classifier(out)
        return out
As the error suggests, there's a mismatch between the dimension of the features computed by your backbones and the input dimension of the classification layer self.classifier.
Try to inspect (in a debugger, or using print) the shape of out and compare it with the in_dim of the classifier layer.
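One quick way to locate the mismatch is to strip the classifier and print the raw feature shape with a dummy input. A sketch; the input size here is an assumption, substitute your real one:

import torch
from torchvision import models

backbone = models.vgg16(pretrained=True)
backbone.classifier = torch.nn.Identity()  # bypass the classifier to see raw features

x = torch.randn(1, 3, 224, 224)  # hypothetical input shape
feats = backbone(x)
print(feats.shape)  # dim 1 here must equal the in_features of your Linear layer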
I am trying to apply an LSTM to the HuffPost news dataset. The data is in JSON format (https://www.kaggle.com/rmisra/news-category-dataset). I tried the code below and got errors; I don't know what's wrong with it.
from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
from keras.optimizers import RMSprop
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
import json
from sklearn.preprocessing import LabelBinarizer

with open('News_Category_Dataset_v2.json', 'r') as f:
    train = json.load(f)

Y_train = list(train.values())
lb = LabelBinarizer()
X_train = lb.fit_transform(list(train.keys()))

##
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.15)
##

max_words = 1000
max_len = 150
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X_train)
sequences = tok.texts_to_sequences(X_train)
sequences_matrix = sequence.pad_sequences(sequences,maxlen=max_len)

def RNN():
    inputs = Input(name='inputs',shape=[max_len])
    layer = Embedding(max_words,50,input_length=max_len)(inputs)
    layer = LSTM(64)(layer)
    layer = Dense(256,name='FC1')(layer)
    layer = Activation('relu')(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(1,name='out_layer')(layer)
    layer = Activation('softmax')(layer)
    model = Model(inputs=inputs,outputs=layer)
    return model

model = RNN()
model.summary()
model.compile(loss='binary_crossentropy',optimizer=RMSprop(),metrics=['accuracy'])

model.fit(sequences_matrix,Y_train,batch_size=128,epochs=10,
          validation_split=0.2,callbacks=[EarlyStopping(monitor='val_loss',min_delta=0.0001)])
Got these errors
Traceback (most recent call last):
  File ".\Hpnews.py", line 30, in <module>
    train = json.load(f)
  File "C:\Users\a\Anaconda3\lib\json\__init__.py", line 293, in load
    return loads(fp.read(),
  File "C:\Users\a\Anaconda3\lib\json\__init__.py", line 357, in loads
    return _default_decoder.decode(s)
  File "C:\Users\a\Anaconda3\lib\json\decoder.py", line 340, in decode
    raise JSONDecodeError("Extra data", s, end)
json.decoder.JSONDecodeError: Extra data: line 2 column 1 (char 366)
This is my JSON file format:
"root":{6 items
"category":string"CRIME"
"headline":string"There Were 2 Mass Shootings In Texas Last Week, But Only 1 On TV"
"authors":string"Melissa Jeltsen"
"link":string"huffingtonpost.com/entry/…" "short_description":string"She left her husband. He killed their children. Just another day in America."
"date":string"2018-05-26" }
The JSON is not a typical JSON file but ndJSON ("newline-delimited JSON"), which cannot be opened by json.load.
You should use pandas to load your data:
import pandas as pd
data = pd.read_json('News_Category_Dataset_v2.json', lines=True)
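From there you can pull out the text and label columns, e.g. (a sketch; the column names are taken from the record shown above):

X = data['headline']
Y = data['category']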
I am fairly new to machine learning. I learned to write this code from YouTube tutorials, but I keep getting this error:
Traceback (most recent call last):
  File "<input>", line 1, in <module>
  File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_bundle/pydev_umd.py", line 197, in runfile
    pydev_imports.execfile(filename, global_vars, local_vars)  # execute the script
  File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "/Users/aniket/Desktop/DeepLearning/PythonLearningPyCharm/CatVsDogs.py", line 109, in <module>
    optimizer = optim.Adam(net.parameters(), lr=0.001) # tweaks the weights from what I understand
AttributeError: 'Net' object has no attribute 'parameters'
This is the Net class:
class Net():
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1,32,5)
        self.conv2 = nn.Conv2d(32,64,5)
        self.conv3 = nn.Conv2d(64,128,5)
        self.to_linear = None
        x = torch.randn(50,50).view(-1,1,50,50)
        self.Conv2d_Linear_Link(x)
        self.fc1 = nn.Linear(self.to_linear, 512)
        self.fc2 = nn.Linear(512, 2)

    def Conv2d_Linear_Link(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)),(2,2))
        x = F.max_pool2d(F.relu(self.conv2(x)),(2,2))
        x = F.max_pool2d(F.relu(self.conv3(x)),(2,2))
        if self.to_linear is None:
            self.to_linear = x[0].shape[0]*x[0].shape[1]*x[0].shape[2]
        return x

    def forward(self, x):
        x = self.Conv2d_Linear_Link(x)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.softmax(x, dim=1)
And this is the train function:
def train():
    for epoch in range(epochs):
        for i in tqdm(range(0, len(X_train), batch)):
            batch_x = train_X[i:i + batch].view(-1, 1, 50, 50)
            batch_y = train_y[i:i + batch]
            net.zero_grad()  # I don't understand why we do this, but we do; we don't want the probabilities adding up
            output = net(batch_x)
            loss = loss_function(output, batch_y)
            loss.backward()
            optimizer.step()
        print(loss)
And the optimizer, loss function, and data:
optimizer = optim.Adam(net.parameters(), lr=0.001) # tweaks the weights from what I understand
loss_function = nn.MSELoss() # gives the loss
You're not subclassing nn.Module. It should look like this:
class Net(nn.Module):
    def __init__(self):
        super().__init__()
This allows your network to inherit all the properties of the nn.Module class, such as the parameters attribute.
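A minimal check that the fix works (a sketch, with a single layer for brevity):

import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 5)

net = Net()
print(sum(p.numel() for p in net.parameters()))  # works now: parameters() comes from nn.Module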
You may also have a spelling problem; check which parameters attribute your Net class actually has.
You need to import optim from torch
from torch import optim
The gdd.forward(x) call raises an error, but why?
This code uses im2col to implement the convolution layer.
Traceback (most recent call last):
  File "E:/PycharmProjects/untitled2/kk.py", line 61, in <module>
    gdd.forward(x)
  File "E:/PycharmProjects/untitled2/kk.py", line 46, in forward
    FN,C,FH,FW=self.W.shape
ValueError: not enough values to unpack (expected 4, got 2)
import numpy as np

class Convolution:
    # convolution kernel size
    def __init__(self, W, b, stride=1, pad=0):
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad

    def forward(self, x):
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        out_h = int(1 + (H + 2*self.pad - FH) / self.stride)
        out_w = int(1 + (W + 2*self.pad - FW) / self.stride)

e = np.array([[2,0,1],[0,1,2],[1,0,2]])
x = np.array([[1,2,3,0],[0,1,2,3],[3,0,1,2],[2,3,0,1]])
gdd = Convolution(e, 3, 1, 0)
gdd.forward(x)
not enough values to unpack means that the right-hand side yields only 2 values, but you are expecting 4: your kernel e is a 2-D (3, 3) array, so self.W.shape has just two entries.
FN,C,FH,FW=self.W.shape
Just get rid of 2 of them and you are good to go :)
BTW I'm assuming you speak Chinese? I speak Chinese too, so if anything is unclear you can ask in Chinese.
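Alternatively, if you want to keep the 4-value unpacking, give the kernel and the input the 4-D (N, C, H, W) layout the code expects. A sketch under that assumption:

import numpy as np

# Reshape the 2-D toy arrays into the 4-D layout that forward() unpacks.
e = np.array([[2,0,1],[0,1,2],[1,0,2]]).reshape(1, 1, 3, 3)                   # (FN, C, FH, FW)
x = np.array([[1,2,3,0],[0,1,2,3],[3,0,1,2],[2,3,0,1]]).reshape(1, 1, 4, 4)   # (N, C, H, W)

FN, C, FH, FW = e.shape  # now unpacks cleanly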
So I'm trying to create a generator to iterate through a data set for use in training with Keras's fit_generator. Here's the definition of the generator, the model, and the call to fit_generator:
import numpy as np
from queue import Queue, deque
from keras.models import Sequential
from keras.layers import Dense

num_features = 40
len_data = 100
data = np.random.rand(len_data, num_features)

def train_generator(train_idxs):
    while True:
        i = train_idxs.get(block=False)
        training_example = data[i,:]
        training_example.shape = (1, len(training_example))
        yield (training_example, training_example)

layer0_size = num_features
layer1_size = layer0_size / 2
layer2_size = layer1_size / 2

layers = []
layers.append(
    Dense(input_dim=layer0_size, output_dim=layer1_size, activation='relu'))
layers.append(
    Dense(input_dim=layer1_size, output_dim=layer2_size, activation='relu'))
layers.append(
    Dense(input_dim=layer2_size, output_dim=layer1_size, activation='relu'))
layers.append(
    Dense(input_dim=layer1_size, output_dim=layer0_size, activation='sigmoid'))

model = Sequential()
for layer in layers:
    model.add(layer)
model.compile(optimizer='adam', loss='binary_crossentropy')

train_idxs = Queue()
train_idxs.queue = deque(range(len_data))
train_gen = train_generator(train_idxs)

max_q_size = 2
model.fit_generator(train_gen, samples_per_epoch=len(data), max_q_size=max_q_size, nb_epoch=1)
Keras will then successfully train on 98/100 training examples and throw this error:
98/100 [============================>.] - ETA: 0s - loss: 0.6930Exception in thread Thread-1:
Traceback (most recent call last):
  File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
    self.run()
  File "/usr/lib/python3.5/threading.py", line 862, in run
    self._target(*self._args, **self._kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 429, in data_generator_task
    generator_output = next(self._generator)
  File "scrap.py", line 12, in train_generator
    i = train_idxs.get(block=False)
  File "/usr/lib/python3.5/queue.py", line 161, in get
    raise Empty
queue.Empty

Traceback (most recent call last):
  File "scrap.py", line 43, in <module>
    model.fit_generator(train_gen, samples_per_epoch=len(data), max_q_size=max_q_size, nb_epoch=1)
  File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 935, in fit_generator
    initial_epoch=initial_epoch)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1528, in fit_generator
    str(generator_output))
ValueError: output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: None
It seems like what's happening is that the generator popped off all of the train_idxs, and Keras keeps calling it to fill its internal queue even though the examples already queued are enough to finish the epoch. Is there a way to get it to stop requesting more training examples from the generator?
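For reference, Keras expects fit_generator generators to yield indefinitely; it stops on its own after samples_per_epoch samples. A common way to avoid the queue running dry is to cycle through the indices instead of consuming a queue (a sketch, assuming the same data array as above, not the only possible fix):

def train_generator_cycling(len_data):
    # Cycle forever; fit_generator decides when an epoch ends.
    while True:
        for i in range(len_data):
            training_example = data[i, :].reshape(1, -1)
            yield (training_example, training_example)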