How can I modify this Theano code so that FFT convolution works? - fft

I'm searching for a way to use FFT convolution in Theano.
I wrote a simple convolution script with Theano.
The conventional convolution works when I set fft_flag = 0, but the script fails when I set fft_flag = 1.
Please tell me what is wrong with this code.
import numpy as np
import theano
import theano.sandbox.cuda.fftconv
import theano.tensor as T
from theano.tensor.nnet import conv

# random test data: 100 images of 76x76
xdata_test = np.random.uniform(low=-1, high=1, size=(100, 76, 76))
xdata_test = np.asarray(xdata_test, dtype='float32')

# 10 filters of size 6x6 on a single input channel
CONVfilter = np.random.uniform(low=-1, high=1, size=(10, 1, 6, 6))
CONVfilter = np.asarray(CONVfilter, dtype='float32')

x = T.tensor3('x')  # the data is presented as rasterized images
layer0_input = x.reshape((100, 1, 76, 76))

fft_flag = 1
if fft_flag == 1:
    ##### FFT-CONVOLUTION VERSION
    conv_out = theano.sandbox.cuda.fftconv.conv2d_fft(
        input=layer0_input,
        filters=CONVfilter,
        filter_shape=(10, 1, 6, 6),
        image_shape=(100, 1, 76, 76),
        border_mode='valid',
        pad_last_dim=False
    )
elif fft_flag == 0:
    ###### CONVENTIONAL CONVOLUTION VERSION
    conv_out = conv.conv2d(
        input=layer0_input,
        filters=CONVfilter,
        filter_shape=(10, 1, 6, 6),
        image_shape=(100, 1, 76, 76),
    )

test_conv = theano.function([x], conv_out)
result = test_conv(xdata_test)
The error message is:
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "C:\Anaconda\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 580, in runfile
    execfile(filename, namespace)
  File "C:/Users/user/Documents/Python Scripts/ffttest.py", line 38, in <module>
    result = test_conv(xdata_test)
  File "C:\Anaconda\lib\site-packages\theano\compile\function_module.py", line 606, in __call__
    storage_map=self.fn.storage_map)
  File "C:\Anaconda\lib\site-packages\theano\gof\link.py", line 205, in raise_with_op
    '\n' + '\n'.join(hints))
TypeError: __init__() takes at least 3 arguments (2 given)
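
For what it's worth, conv2d_fft lives in theano.sandbox.cuda, so it only works on a GPU-enabled install (device=gpu, float32), and it expects symbolic inputs. Below is a minimal sketch of the usual invocation, with the numpy filter bank wrapped in a shared variable rather than passed as a raw ndarray; whether this resolves the exact TypeError above is an assumption, not a confirmed fix:

import numpy as np
import theano
import theano.sandbox.cuda.fftconv
import theano.tensor as T

# filters as a shared (symbolic, GPU-resident) variable rather than a raw ndarray
CONVfilter = np.random.uniform(low=-1, high=1, size=(10, 1, 6, 6)).astype('float32')
W = theano.shared(CONVfilter, name='W')

x = T.tensor3('x')
layer0_input = x.reshape((100, 1, 76, 76))

conv_out = theano.sandbox.cuda.fftconv.conv2d_fft(
    input=layer0_input,
    filters=W,
    filter_shape=(10, 1, 6, 6),
    image_shape=(100, 1, 76, 76),
    border_mode='valid',
)
test_conv = theano.function([x], conv_out)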

Related

Do PyTorch torchvision.transforms execute in random order?

I am doing this transformation:
self.transform = transforms.Compose( {
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
} )
and then
image = Image.open(img_name)
if self.transform:
    image = self.transform(image)
This works for the first epoch, but then it crashes in the second epoch.
Why is Normalize getting a PIL image and not a torch.Tensor? Is the execution order of the Compose items random?
Traceback (most recent call last):
  File "/home/ubuntu/projects/ssl/src/train_supervised.py", line 63, in <module>
    main()
  File "/home/ubuntu/projects/ssl/src/train_supervised.py", line 60, in main
    train()
  File "/home/ubuntu/projects/ssl/src/train_supervised.py", line 45, in train
    for i, data in enumerate(tqdm_):
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/tqdm/std.py", line 1195, in __iter__
    for obj in iterable:
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 530, in __next__
    data = self._next_data()
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1224, in _next_data
    return self._process_data(data)
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1250, in _process_data
    data.reraise()
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torch/_utils.py", line 457, in reraise
    raise exception
TypeError: Caught TypeError in DataLoader worker process 0.
Original Traceback (most recent call last):
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
    data = fetcher.fetch(index)
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 49, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 49, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/home/ubuntu/projects/ssl/src/data_loader.py", line 44, in __getitem__
    image = self.transform(image)
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torchvision/transforms/transforms.py", line 95, in __call__
    img = t(img)
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torchvision/transforms/transforms.py", line 270, in forward
    return F.normalize(tensor, self.mean, self.std, self.inplace)
  File "/home/ubuntu/anaconda3/envs/pytorch-1.11.0/lib/python3.9/site-packages/torchvision/transforms/functional.py", line 341, in normalize
    raise TypeError(f"Input tensor should be a torch tensor. Got {type(tensor)}.")
TypeError: Input tensor should be a torch tensor. Got <class 'PIL.Image.Image'>.
Python set iteration order is not deterministic. Use a list instead ([] rather than {}).
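To illustrate the fix, a minimal sketch of the same pipeline built with a list, so the transforms always run in the written order and Normalize receives a tensor:

from torchvision import transforms

# [] instead of {}: a list preserves order, so ToTensor always runs
# before Normalize and Normalize never sees a PIL image
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])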

FileNotFoundError: [Errno 2] File b'Downloads/BetterLifeIndex2015.csv' does not exist: b'Downloads/BetterLifeIndex2015.csv'

Resolved
Answer: The path was in fact incorrect after all. I used an absolute path (Alt+D, then copy, from File Explorer) and put r before the path so it is treated as a raw string.
# load the data
BetterLifeIndex = pd.read_csv(r"C:\Users\brede\OneDrive\Dokumenter\Downloads\BetterLifeIndex2015.csv", thousands=',')
gdp_per_capita = pd.read_csv(r"C:\Users\brede\OneDrive\Dokumenter\Downloads\gdpcapita.csv", thousands=',',
                             delimiter='\t', encoding='latin1', na_values="n/a")
I'm new to Python and I'm running an example from a machine learning book. I can't get Python to read my CSV file.
Code:
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model

def prepare_country_stats(oecd_bli, gdp_per_capita):
    oecd_bli = oecd_bli[oecd_bli["INEQUALITY"] == "TOT"]
    oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value")
    gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
    gdp_per_capita.set_index("Country", inplace=True)
    full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,
                                  left_index=True, right_index=True)
    full_country_stats.sort_values(by="GDP per capita", inplace=True)
    remove_indices = [0, 1, 6, 8, 33, 34, 35]
    keep_indices = list(set(range(36)) - set(remove_indices))
    return full_country_stats[["GDP per capita", "Life satisfaction"]].iloc[keep_indices]

# load the data
oecd_bli = pd.read_csv("Downloads/BetterLifeIndex2015.csv", thousands=',')
gdp_per_capita = pd.read_csv("C:/Users/brede/Downloads/gdpcapita.csv", thousands=',',
                             delimiter='\t', encoding='latin1', na_values="n/a")
# prepare the data
country_stats = prepare_country_stats(oecd_bli, gdp_per_capita)
x = np.c_[country_stats["GDP per capita"]]  # column names are case-sensitive
y = np.c_[country_stats["Life satisfaction"]]
# visualize the data
country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction')
# select a linear model
model = sklearn.linear_model.LinearRegression()
# train the model
model.fit(x, y)
# make a prediction for Cyprus
X_new = [[22587]]  # Cyprus's GDP per capita
print(model.predict(X_new))  # outputs [[5.96242338]]
The output is:
runfile('C:/Users/brede/Downloads/practice_gdp.py', wdir='C:/Users/brede/Downloads')
Traceback (most recent call last):
  File "<ipython-input-59-2f130edd277c>", line 1, in <module>
    runfile('C:/Users/brede/Downloads/practice_gdp.py', wdir='C:/Users/brede/Downloads')
  File "C:\Users\brede\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
    execfile(filename, namespace)
  File "C:\Users\brede\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)
  File "C:/Users/brede/Downloads/practice_gdp.py", line 31, in <module>
    oecd_bli = pd.read_csv("Downloads/BetterLifeIndex2015.csv", thousands = ',')
  File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 685, in parser_f
    return _read(filepath_or_buffer, kwds)
  File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 457, in _read
    parser = TextFileReader(fp_or_buf, **kwds)
  File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 895, in __init__
    self._make_engine(self.engine)
  File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 1135, in _make_engine
    self._engine = CParserWrapper(self.f, **self.options)
  File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 1917, in __init__
    self._reader = parsers.TextReader(src, **kwds)
  File "pandas\_libs\parsers.pyx", line 382, in pandas._libs.parsers.TextReader.__cinit__
  File "pandas\_libs\parsers.pyx", line 689, in pandas._libs.parsers.TextReader._setup_parser_source
FileNotFoundError: [Errno 2] File b'Downloads/BetterLifeIndex2015.csv' does not exist: b'Downloads/BetterLifeIndex2015.csv'
I have triple-checked the path to the file and I can't figure this out. All help is appreciated.
This was done in Spyder; I also tried Jupyter with the same result. I've even copied the path directly.
I think you have to include the full path, with '/', in the file name. Try 'C:/Users/brede/OneDrive....'
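As a quick sanity check (a sketch; the absolute path is the one from the accepted answer above), you can resolve the path and verify it exists before handing it to read_csv, since relative paths are resolved against the current working directory, which in Spyder or Jupyter is often not where you expect:

from pathlib import Path
import pandas as pd

# relative paths like "Downloads/..." are resolved against the CWD,
# so check the absolute path explicitly first
csv_path = Path(r"C:\Users\brede\OneDrive\Dokumenter\Downloads\BetterLifeIndex2015.csv")
print(csv_path.exists())  # should print True before you attempt to read

oecd_bli = pd.read_csv(csv_path, thousands=',')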

Why does calling the Convolution class raise an error?

The call gdd.forward(x) raises an error, but why?
This code uses im2col to implement the convolution layer.
Traceback (most recent call last):
  File "E:/PycharmProjects/untitled2/kk.py", line 61, in <module>
    gdd.forward(x)
  File "E:/PycharmProjects/untitled2/kk.py", line 46, in forward
    FN,C,FH,FW=self.W.shape
ValueError: not enough values to unpack (expected 4, got 2)
import numpy as np

class Convolution:
    # convolution kernel size
    def __init__(self, W, b, stride=1, pad=0):
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad

    def forward(self, x):
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        out_h = int(1 + (H + 2*self.pad - FH) / self.stride)
        out_w = int(1 + (W + 2*self.pad - FW) / self.stride)

e = np.array([[2, 0, 1], [0, 1, 2], [1, 0, 2]])
x = np.array([[1, 2, 3, 0], [0, 1, 2, 3], [3, 0, 1, 2], [2, 3, 0, 1]])
gdd = Convolution(e, 3, 1, 0)
gdd.forward(x)
"not enough values to unpack" means that the right-hand side provides 2 values, but you are expecting 4:
FN, C, FH, FW = self.W.shape
Just get rid of 2 of them and you are good to go :)
BTW, I'm assuming you speak Chinese? (I speak Chinese; if anything is unclear, feel free to ask in Chinese.)
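Alternatively, if the layer is meant to follow the usual 4-D (N, C, H, W) convention, here is a sketch that reshapes the test arrays instead of changing the unpacking (it assumes the Convolution class from the question is in scope):

import numpy as np

# filters: (FN, C, FH, FW); input: (N, C, H, W)
e = np.array([[2, 0, 1], [0, 1, 2], [1, 0, 2]]).reshape(1, 1, 3, 3)
x = np.array([[1, 2, 3, 0], [0, 1, 2, 3], [3, 0, 1, 2], [2, 3, 0, 1]]).reshape(1, 1, 4, 4)

gdd = Convolution(e, 3, 1, 0)
gdd.forward(x)  # FN, C, FH, FW = self.W.shape now unpacks cleanly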

Keras fit_generator throwing ValueError

So I'm trying to create a generator to iterate through a data set for use in training with Keras's fit_generator. Here's the definition of the generator, the model, and the call to fit_generator:
import numpy as np
from collections import deque
from queue import Queue
from keras.models import Sequential
from keras.layers import Dense

num_features = 40
len_data = 100
data = np.random.rand(len_data, num_features)

def train_generator(train_idxs):
    while True:
        i = train_idxs.get(block=False)
        training_example = data[i, :]
        training_example.shape = (1, len(training_example))
        yield (training_example, training_example)

layer0_size = num_features
layer1_size = layer0_size / 2
layer2_size = layer1_size / 2

layers = []
layers.append(
    Dense(input_dim=layer0_size, output_dim=layer1_size, activation='relu'))
layers.append(
    Dense(input_dim=layer1_size, output_dim=layer2_size, activation='relu'))
layers.append(
    Dense(input_dim=layer2_size, output_dim=layer1_size, activation='relu'))
layers.append(
    Dense(input_dim=layer1_size, output_dim=layer0_size, activation='sigmoid'))

model = Sequential()
for layer in layers:
    model.add(layer)
model.compile(optimizer='adam', loss='binary_crossentropy')

train_idxs = Queue()
train_idxs.queue = deque(range(len_data))
train_gen = train_generator(train_idxs)

max_q_size = 2
model.fit_generator(train_gen, samples_per_epoch=len(data), max_q_size=max_q_size, nb_epoch=1)
Keras then successfully trains on 98/100 training examples and throws this error:
98/100 [============================>.] - ETA: 0s - loss: 0.6930Exception in thread Thread-1:
Traceback (most recent call last):
  File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
    self.run()
  File "/usr/lib/python3.5/threading.py", line 862, in run
    self._target(*self._args, **self._kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 429, in data_generator_task
    generator_output = next(self._generator)
  File "scrap.py", line 12, in train_generator
    i = train_idxs.get(block=False)
  File "/usr/lib/python3.5/queue.py", line 161, in get
    raise Empty
queue.Empty
Traceback (most recent call last):
  File "scrap.py", line 43, in <module>
    model.fit_generator(train_gen, samples_per_epoch=len(data), max_q_size=max_q_size, nb_epoch=1)
  File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 935, in fit_generator
    initial_epoch=initial_epoch)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1528, in fit_generator
    str(generator_output))
ValueError: output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: None
It seems that the generator popped all of the train_idxs and is still being asked for more while Keras exhausts the training examples already sitting in its internal queue. Is there a way to get it to stop trying to pull more training examples from the generator?
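One reading of the traceback: Keras's background thread keeps calling next() on the generator to fill its internal queue (up to max_q_size), so the generator is pulled from more than samples_per_epoch times per epoch and, per the fit_generator documentation, is expected to loop over its data indefinitely. A minimal sketch (assuming the data and len_data variables from the question) that cycles over the indices instead of draining a finite Queue:

from itertools import cycle

def train_generator(idxs):
    # cycle() restarts the index sequence, so the generator never
    # runs dry no matter how often Keras calls next() on it
    for i in cycle(idxs):
        example = data[i, :].reshape(1, -1)
        yield (example, example)

train_gen = train_generator(range(len_data))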

Sqlalchemy class _MatchType(sqltypes.Float, sqltypes.MatchType): AttributeError

Sometimes executing this query raises an error:
ct = db.session.query(CIType).filter(
    CIType.type_name == key).first() or \
    db.session.query(CIType).filter(CIType.type_id == key).first()
Full error info:
2016-08-11 14:27:26,177 ERROR /usr/lib/python2.6/site-packages/flask/app.py 1306 - Exception on /api/v0.1/projects/search-indexer-rafael/product [GET]
Traceback (most recent call last):
  File "/usr/lib/python2.6/site-packages/flask/app.py", line 1687, in wsgi_app
    response = self.full_dispatch_request()
  File "/usr/lib/python2.6/site-packages/flask/app.py", line 1360, in full_dispatch_request
    rv = self.handle_user_exception(e)
  File "/usr/lib/python2.6/site-packages/flask/app.py", line 1358, in full_dispatch_request
    rv = self.dispatch_request()
  File "/usr/lib/python2.6/site-packages/flask/app.py", line 1344, in dispatch_request
    return self.view_functions[rule.endpoint](**req.view_args)
  File "/data/webapps/cmdb-api/core/special.py", line 175, in get_project_product
    product = ProjectManager().get_for_product(project_name)
  File "/data/webapps/cmdb-api/lib/special/project.py", line 18, in __init__
    self.ci_type = CITypeCache.get("project")
  File "/data/webapps/cmdb-api/models/cmdb.py", line 458, in get
    ct = db.session.query(CIType).filter(
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/orm/scoping.py", line 149, in do
    def do(self, *args, **kwargs):
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/util/_collections.py", line 903, in __call__
    item = dict.get(self, key)
  File "/usr/lib/python2.6/site-packages/flask_sqlalchemy.py", line 201, in __init__
    bind=db.engine,
  File "/usr/lib/python2.6/site-packages/flask_sqlalchemy.py", line 754, in engine
    return self.get_engine(self.get_app())
  File "/usr/lib/python2.6/site-packages/flask_sqlalchemy.py", line 771, in get_engine
    return connector.get_engine()
  File "/usr/lib/python2.6/site-packages/flask_sqlalchemy.py", line 451, in get_engine
    self._engine = rv = sqlalchemy.create_engine(info, **options)
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/engine/__init__.py", line 344, in create_engine
    of 0 indicates no limit; to disable pooling, set ``poolclass`` to
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/engine/strategies.py", line 50, in create
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/engine/url.py", line 116, in get_dialect
    return self.get_dialect().driver
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/util/langhelpers.py", line 170, in load
    fn.__func__.__doc__ = doc
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/dialects/__init__.py", line 33, in _auto_fn
    try:
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/dialects/mysql/__init__.py", line 8, in <module>
    from . import base, mysqldb, oursql, \
  File "/usr/lib64/python2.6/site-packages/sqlalchemy/dialects/mysql/base.py", line 681, in <module>
    class _MatchType(sqltypes.Float, sqltypes.MatchType):
AttributeError: 'module' object has no attribute 'MatchType'
Code:
@special.route("/api/v0.1/projects/<string:project_name>/product",
               methods=["GET"])
def get_project_product(project_name):
    product = ProjectManager().get_for_product(project_name)
    return jsonify(product=product)
...
which goes to:
class ProjectManager(object):
    def __init__(self):
        self.ci_type = CITypeCache.get("project")
        ...
and then:
class CITypeCache(object):
    @classmethod
    def get(cls, key):
        if key is None:
            return
        ct = cache.get("CIType::ID::%s" % key) or \
            cache.get("CIType::Name::%s" % key)
        if ct is None:
            ct = db.session.query(CIType).filter(
                CIType.type_name == key).first() or \
                db.session.query(CIType).filter(CIType.type_id == key).first()
            if ct is not None:
                CITypeCache.set(ct)
        return ct
The SQLAlchemy version is SQLAlchemy-1.0.8 (py2.6 egg).
After many occurrences of the same error, I can no longer catch this error. What is the reason for it?
I assume CIType.type_name and CIType.type_id have different data types (perhaps string and numeric). That can lead to a situation where:
db.session.query(CIType).filter(CIType.type_name == key).first()
is a valid expression, but:
db.session.query(CIType).filter(CIType.type_id == key).first()
produces an error because of the type mismatch. You need to convert key to the type_id column's type in that expression.
The second expression is only evaluated when the first returns no result. As the Python documentation puts it:
The expression x or y first evaluates x; if x is true, its value is returned; otherwise, y is evaluated and the resulting value is returned.
For example:
>>> a = 1 or 2 + '2'
>>> print a
1
>>> a = 0 or 2 + '2'
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'int' and 'str'
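
To make the suggested conversion concrete, here is a hypothetical sketch (it assumes type_id is an integer column, and reuses the names from the question's code) that only runs the type_id comparison when key is actually numeric:

def get_ci_type(key):
    # the string type_name column accepts any key
    ct = db.session.query(CIType).filter(CIType.type_name == key).first()
    if ct is not None:
        return ct
    # only compare against the numeric type_id when key casts cleanly,
    # avoiding the type-mismatch path entirely
    try:
        numeric_key = int(key)
    except (TypeError, ValueError):
        return None
    return db.session.query(CIType).filter(CIType.type_id == numeric_key).first()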