Calling the convolution class raises an error - deep-learning

The call gdd.forward(x) raises an error, but why?
This code uses im2col to implement the convolution layer.
Traceback (most recent call last):
File "E:/PycharmProjects/untitled2/kk.py", line 61, in <module>
gdd.forward(x)
File "E:/PycharmProjects/untitled2/kk.py", line 46, in forward
FN,C,FH,FW=self.W.shape
ValueError: not enough values to unpack (expected 4, got 2)
import numpy as np

class Convolution:
    # convolution kernel size
    def __init__(self, W, b, stride=1, pad=0):
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad

    def forward(self, x):
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        out_h = int(1 + (H + 2*self.pad - FH) / self.stride)
        out_w = int(1 + (W + 2*self.pad - FW) / self.stride)

e = np.array([[2, 0, 1], [0, 1, 2], [1, 0, 2]])
x = np.array([[1, 2, 3, 0], [0, 1, 2, 3], [3, 0, 1, 2], [2, 3, 0, 1]])
gdd = Convolution(e, 3, 1, 0)
gdd.forward(x)

"not enough values to unpack" means that the right-hand side only provides 2 values, but you are trying to unpack 4:
FN, C, FH, FW = self.W.shape
Just get rid of two of them and you are good to go :)
BTW, I'm assuming you speak Chinese? I speak Chinese too, so if anything is unclear you can ask in Chinese.
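For example, here is a minimal sketch of both options, using the kernel e and the input x from the question (the names e4 and x4 are just illustrative):
# Option 1: if W stays 2D, unpack only the two values it actually has
FH, FW = e.shape

# Option 2: keep the 4-value unpacking, but give the kernel and the input
# 4D shapes first: (num_filters, channels, height, width) for W and
# (batch, channels, height, width) for x.
e4 = e.reshape(1, 1, 3, 3)      # hypothetical 4D kernel
x4 = x.reshape(1, 1, 4, 4)      # hypothetical 4D input batch
gdd = Convolution(e4, 3, 1, 0)
gdd.forward(x4)                 # FN, C, FH, FW = self.W.shape now succeeds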

Related

too many values to unpack (expected 2) lda

I received the error "too many values to unpack (expected 2)" when running the code below. Can anyone help me? I have added more details.
import gensim
import gensim.corpora as corpora

dictionary = corpora.Dictionary(doc_clean)
doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]
Lda = gensim.models.ldamodel.LdaModel
ldamodel = Lda(doc_term_matrix, num_topics=3, id2word=dictionary, passes=50, per_word_topics=True, eval_every=1)
print(ldamodel.print_topics(num_topics=3, num_words=20))
for i in range(0, 46):
    for index, score in sorted(ldamodel[doc_term_matrix[i]], key=lambda tup: -1*tup[1]):
        print("subject", i)
        print("\n")
        print("Score: {}\t \nTopic: {}".format(score, ldamodel.print_topic(index, 6)))
Let's focus on the loop, since that is where the error is being raised, and take it one iteration at a time.
>>> import numpy as np # just so we can use np.shape()
>>> i = 0 # value in first loop
>>> x = sorted( ldamodel[doc_term_matrix[i]], key=lambda tup: -1*tup[1] )
>>> np.shape(x)
(3, 3, 2)
>>> for index, score in x:
... pass
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: too many values to unpack (expected 2)
Here is where your error is coming from. You are expecting each item of this returned structure to unpack into 2 elements, but it is a multi-dimensional structure with no simple, inferable way to unpack it. I don't personally have enough experience with this subject material to infer what you mean to be doing; I can only show you where your problem comes from. Hope this helps!
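If I had to guess at a workaround (this is my assumption about gensim's behaviour, not something from the original answer): with per_word_topics=True, indexing the model returns a tuple (document_topics, word_topics, phi_values) rather than a flat list of (topic, probability) pairs, so only the first element should be sorted and unpacked. A sketch:
for i in range(len(doc_term_matrix)):
    doc_topics, word_topics, phi_values = ldamodel[doc_term_matrix[i]]
    # doc_topics is a list of (topic_id, probability) pairs, which unpacks cleanly
    for index, score in sorted(doc_topics, key=lambda tup: -tup[1]):
        print("subject", i)
        print("Score: {}\t \nTopic: {}".format(score, ldamodel.print_topic(index, 6)))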

scipy.fft.ifftn of complex pyopencl.array

I'm trying to add two 3D complex arrays on the GPU using pyopencl and then take the inverse fast Fourier transform of the result. But I get an error that I do not really understand. Any advice on improving the code's performance would also be great.
import pyopencl as cl
import numpy as np
import os
from scipy.fftpack import fftn, ifftn
import pyopencl.array as cl_array
from pyopencl.elementwise import ElementwiseKernel
os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
Lx = 50
Ly = 50
Lz = 1
M1f = np.ones((2 * Lx - 1, 2 * Ly - 1, 2 * Lz - 1)).astype(np.float32)
M2f = np.ones((2 * Lx - 1, 2 * Ly - 1, 2 * Lz - 1)).astype(np.float32)
FM1 = fftn(M1f)
FM2 = fftn(M2f)
res_cpu = FM1 + FM2
print(ifftn(res_cpu))
FM1_gpu = cl_array.to_device(queue, np.reshape(FM1, (2*Lx-1)*(2*Ly-1)*(2*Lz-1)).astype(np.complex64))
FM2_gpu = cl_array.to_device(queue, np.reshape(FM2, (2*Lx-1)*(2*Ly-1)*(2*Lz-1)).astype(np.complex64))
complex_add = ElementwiseKernel(ctx,
    "float *x, "
    "float *y, "
    "float *z",
    "z[i] = x[i] + y[i]",
    "complex_add")
add_gpu = cl_array.empty_like(FM1_gpu)
complex_add(FM1_gpu, FM2_gpu, add_gpu)
res_gpu = np.zeros((2*Lx-1, 2*Ly-1, 2*Lz-1)).astype(np.complex64)
res_gpu = np.reshape(add_gpu, (2*Lx-1, 2*Ly-1, 2*Lz-1))
print(ifftn(res_gpu))
I expect to get the true value of the ifft of the two complex arrays that were added on the GPU, but instead I get this result:
Traceback (most recent call last):
  File "/home/heisenberg/Desktop/НИР/FM/math/GPU/loopsum.py", line 43, in <module>
    print(ifftn(res_gpu))
  File "/home/heisenberg/.local/lib/python3.7/site-packages/scipy/fftpack/basic.py", line 670, in ifftn
    return _raw_fftn_dispatch(x, shape, axes, overwrite_x, -1)
  File "/home/heisenberg/.local/lib/python3.7/site-packages/scipy/fftpack/basic.py", line 628, in _raw_fftn_dispatch
    tmp = _asfarray(x)
  File "/home/heisenberg/.local/lib/python3.7/site-packages/scipy/fftpack/basic.py", line 136, in _asfarray
    return numpy.asarray(x, dtype=x.dtype)
  File "/home/heisenberg/.local/lib/python3.7/site-packages/numpy/core/numeric.py", line 538, in asarray
    return array(a, dtype, copy=False, order=order)
TypeError: must be real number, not Array
Process finished with exit code 1
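No answer was posted, but the traceback itself shows the immediate problem: scipy's ifftn is being handed a pyopencl.array.Array, which lives on the device, rather than a NumPy array. A sketch of the likely fix (my assumption, untested here) is to copy the result back to the host with .get() before reshaping; the kernel arguments should also be declared as complex rather than float, e.g. via the cfloat_t helpers from pyopencl-complex.h:
# declare the buffers as complex so the addition is done on complex64 values
complex_add = ElementwiseKernel(ctx,
    "cfloat_t *x, cfloat_t *y, cfloat_t *z",
    "z[i] = cfloat_add(x[i], y[i])",
    "complex_add",
    preamble="#include <pyopencl-complex.h>")

complex_add(FM1_gpu, FM2_gpu, add_gpu)

# copy the device array back to the host before handing it to scipy
res_host = add_gpu.get()
res_host = np.reshape(res_host, (2*Lx-1, 2*Ly-1, 2*Lz-1))
print(ifftn(res_host))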

unknown resampling filter error when trying to create my own dataset with pytorch

I am trying to create a CNN with data augmentation in PyTorch to classify dogs and cats. The issue I am having is that when I try to load my dataset and enumerate through it, I keep getting this error:
Traceback (most recent call last):
File "<ipython-input-55-6337e0536bae>", line 75, in <module>
for i, (inputs, labels) in enumerate(trainloader):
File "/usr/local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 188, in __next__
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/usr/local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 188, in <listcomp>
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/usr/local/lib/python3.6/site-packages/torchvision/datasets/folder.py", line 124, in __getitem__
img = self.transform(img)
File "/usr/local/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 42, in __call__
img = t(img)
File "/usr/local/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 147, in __call__
return F.resize(img, self.size, self.interpolation)
File "/usr/local/lib/python3.6/site-packages/torchvision/transforms/functional.py", line 197, in resize
return img.resize((ow, oh), interpolation)
File "/usr/local/lib/python3.6/site-packages/PIL/Image.py", line 1724, in resize
raise ValueError("unknown resampling filter")
ValueError: unknown resampling filter
and I really don't know what's wrong with my code. I have provided the code below:
# Creating the CNN
# Importing the libraries
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torchvision
from torchvision import transforms
#Creating the CNN Model
class CNN(nn.Module):
    def __init__(self, nb_outputs):
        super(CNN, self).__init__()  # activates the inheritance and allows the use of all the tools in nn.Module
        # making the 2 convolutional layers that will be used in the convolutional neural network
        self.convolution1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5)  # kernel_size -> the dimension of the feature detector, e.g. kernel_size = 5 => feature detector of size 5x5
        self.convolution2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=2)
        # making 2 full connections: one to connect the inputs of the ANN to the hidden layer and another to connect the hidden layer to the outputs of the ANN
        self.fc1 = nn.Linear(in_features=self.count_neurons((1, 64, 64)), out_features=40)
        self.fc2 = nn.Linear(in_features=40, out_features=nb_outputs)

    def count_neurons(self, image_dim):
        x = Variable(torch.rand(1, *image_dim))  # this variable represents a fake image, used to compute the number of neurons
        # to pass the elements of the tuple image_dim into the function as separate arguments we add a * before image_dim
        # since x will go through the neural network, it is wrapped in a torch Variable
        x = F.relu(F.max_pool2d(self.convolution1(x), 3, 2))  # apply the convolution to x, then max pooling, then activate all the neurons in the pooling layer
        x = F.relu(F.max_pool2d(self.convolution2(x), 3, 2))  # the signals are now propagated through the second convolutional layer
        # now flatten x to obtain the number of neurons in the flattening layer
        return x.data.view(1, -1).size(1)  # flatten x into one long vector; its size is the number of neurons that will be fed into the ANN
        # even though x is not a real image, the size of the flattened vector only depends on the input dimensions, so x only needs the same dimensions as the real images

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.convolution1(x), 3, 2))  # apply the convolution to x, then max pooling, then activate all the neurons in the pooling layer
        x = F.relu(F.max_pool2d(self.convolution2(x), 3, 2))
        # flattening layer of the CNN
        x = x.view(x.size(0), -1)
        # x is now the input to the ANN
        x = F.relu(self.fc1(x))  # propagate the signals from the flattening layer to the fully connected layer and break the linearity with relu
        x = F.sigmoid(self.fc2(x))
        # x is now the output neurons of the ANN
        return x
train_tf = transforms.Compose([transforms.RandomHorizontalFlip(),
                               transforms.Resize(64,64),
                               transforms.RandomRotation(20),
                               transforms.RandomGrayscale(.2),
                               transforms.ToTensor()])
test_tf = transforms.Compose([transforms.Resize(64,64),
                              transforms.ToTensor()])
training_set = torchvision.datasets.ImageFolder(root='./dataset/training_set',
                                                transform=train_tf)
test_set = torchvision.datasets.ImageFolder(root='./dataset/test_set',
                                            transform=transforms.Compose([transforms.Resize(64,64),
                                                                          transforms.ToTensor()]))
trainloader = torch.utils.data.DataLoader(training_set, batch_size=32,
                                          shuffle=True, num_workers=0)
testloader = torch.utils.data.DataLoader(test_set, batch_size=32,
                                         shuffle=False, num_workers=0)
#training the model
cnn = CNN(1)
cnn.train()
loss = nn.BCELoss()
optimizer = optim.Adam(cnn.parameters(), lr = 0.001) #the optimizer => Adam optimizer
nb_epochs = 25
for epoch in range(nb_epochs):
    train_loss = 0.0
    train_acc = 0.0
    total = 0.0
    for i, (inputs, labels) in enumerate(trainloader):
        inputs, labels = Variable(inputs), Variable(labels)
        cnn.zero_grad()
        outputs = cnn(inputs)
        loss_error = loss(outputs, labels)
        optimizer.step()
        _, pred = torch.max(outputs.data, 1)
        total += labels.size(0)
        train_loss += loss_error.data[0]
        train_acc += (pred == labels).sum()
    train_loss = train_loss/len(training_loader)
    train_acc = train_acc/total
    print('Epoch: %d, loss: %.4f, accuracy: %.4f' %(epoch+1, train_loss, train_acc))
The folder arrangement for the code is /dataset/training_set, and inside the training_set folder are two more folders, one for all the cat images and the other for all the dog images. Each image is named either dog.xxxx.jpg or cat.xxxx.jpg, where xxxx represents the number, so the first cat image is cat.1.jpg and they go up to cat.4000.jpg. The test_set folder uses the same format. The number of training images is 8000 and the number of test images is 2000. If anyone can point out my error I would greatly appreciate it.
Thank you
Try setting the desired size in transforms.Resize as a tuple:
transforms.Resize((64, 64))
PIL is using the second argument (in your case 64) as the interpolation method.
Keep every transform inside the brackets of torchvision.transforms.Compose([...]); with the size given as a tuple, this will not give the error.
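For illustration, a sketch of the training transform with the size passed as a tuple (this mirrors the pipeline from the question; only the Resize call changes):
from torchvision import transforms

train_tf = transforms.Compose([transforms.RandomHorizontalFlip(),
                               transforms.Resize((64, 64)),   # tuple, so 64 is not read as an interpolation filter
                               transforms.RandomRotation(20),
                               transforms.RandomGrayscale(.2),
                               transforms.ToTensor()])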

Keras fit_generator throwing ValueError

So I'm trying to create a generator to iterate through a data set for use in training with Keras's fit_generator. Here's the definition of the generator, the model, and the call to fit_generator:
import numpy as np
from queue import Queue, deque
from keras.models import Sequential
from keras.layers import Dense

num_features = 40
len_data = 100
data = np.random.rand(len_data, num_features)

def train_generator(train_idxs):
    while True:
        i = train_idxs.get(block=False)
        training_example = data[i,:]
        training_example.shape = (1, len(training_example))
        yield (training_example, training_example)

layer0_size = num_features
layer1_size = layer0_size / 2
layer2_size = layer1_size / 2
layers = []
layers.append(
    Dense(input_dim=layer0_size, output_dim=layer1_size, activation='relu'))
layers.append(
    Dense(input_dim=layer1_size, output_dim=layer2_size, activation='relu'))
layers.append(
    Dense(input_dim=layer2_size, output_dim=layer1_size, activation='relu'))
layers.append(
    Dense(input_dim=layer1_size, output_dim=layer0_size, activation='sigmoid'))

model = Sequential()
for layer in layers:
    model.add(layer)
model.compile(optimizer='adam', loss='binary_crossentropy')

train_idxs = Queue()
train_idxs.queue = deque(range(len_data))
train_gen = train_generator(train_idxs)
max_q_size = 2
model.fit_generator(train_gen, samples_per_epoch=len(data), max_q_size=max_q_size, nb_epoch=1)
Keras will then successfully train on 98/100 training examples and throw this error:
98/100 [============================>.] - ETA: 0s - loss: 0.6930Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
self.run()
File "/usr/lib/python3.5/threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 429, in data_generator_task
generator_output = next(self._generator)
File "scrap.py", line 12, in train_generator
i = train_idxs.get(block=False)
File "/usr/lib/python3.5/queue.py", line 161, in get
raise Empty
queue.Empty
Traceback (most recent call last):
File "scrap.py", line 43, in <module>
model.fit_generator(train_gen, samples_per_epoch=len(data), max_q_size=max_q_size, nb_epoch=1)
File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 935, in fit_generator
initial_epoch=initial_epoch)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1528, in fit_generator
str(generator_output))
ValueError: output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: None
It seems like what's happening is that all of the train_idxs get popped off, and Keras keeps trying to get more examples until it exhausts the ones in its internal queue. Is there a way to get it to stop requesting more training examples from the generator?
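No answer was posted. One common workaround (my suggestion, not from the original thread) is to make the generator genuinely infinite, since Keras's background threads keep pulling from it even after samples_per_epoch examples have been queued; for example, cycling over the indices instead of draining a Queue:
from itertools import cycle

def train_generator(idxs):
    # cycle() repeats the index sequence forever, so the generator never runs dry
    for i in cycle(idxs):
        training_example = data[i, :].reshape(1, -1)
        yield (training_example, training_example)

train_gen = train_generator(range(len_data))
model.fit_generator(train_gen, samples_per_epoch=len_data, max_q_size=max_q_size, nb_epoch=1)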

Can you modify this Theano code so that FFT convolution works?

I'm searching for a way to use FFT convolution in Theano.
I wrote simple convolution code with Theano.
But this code doesn't work if I set fft_flag = 1, though the conventional convolution works with fft_flag = 0.
Please tell me what is wrong with this code.
import numpy as np
import theano.sandbox.cuda.fftconv
from theano.tensor.nnet import conv
import theano.tensor as T

xdata_test = np.random.uniform(low=-1, high=1, size=(100, 76, 76))
xdata_test = np.asarray(xdata_test, dtype='float32')
CONVfilter = np.random.uniform(low=-1, high=1, size=(10, 1, 6, 6))
CONVfilter = np.asarray(CONVfilter, dtype='float32')

x = T.tensor3('x')  # the data is presented as rasterized images
layer0_input = x.reshape((100, 1, 76, 76))

fft_flag = 1
if fft_flag == 1:
    ##### FFT-CONVOLUTION VERSION
    conv_out = theano.sandbox.cuda.fftconv.conv2d_fft(
        input=layer0_input,
        filters=CONVfilter,
        filter_shape=(10, 1, 6, 6),
        image_shape=(100, 1, 76, 76),
        border_mode='valid',
        pad_last_dim=False
    )
elif fft_flag == 0:
    ###### CONVENTIONAL CONVOLUTION VERSION
    conv_out = conv.conv2d(
        input=layer0_input,
        filters=CONVfilter,
        filter_shape=(10, 1, 6, 6),
        image_shape=(100, 1, 76, 76),
    )

test_conv = theano.function([x], conv_out)
result = test_conv(xdata_test)
The error message is shown below:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Anaconda\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 580, in runfile
execfile(filename, namespace)
File "C:/Users/user/Documents/Python Scripts/ffttest.py", line 38, in <module>
result = test_conv(xdata_test)
File "C:\Anaconda\lib\site-packages\theano\compile\function_module.py", line 606, in __call__
storage_map=self.fn.storage_map)
File "C:\Anaconda\lib\site-packages\theano\gof\link.py", line 205, in raise_with_op
'\n' + '\n'.join(hints))
TypeError: __init__() takes at least 3 arguments (2 given)