Create LMDB for multi-label classification - deep-learning

I am trying to create 2 lmdbs: one for my images and one for my labels. I want to determine the angle of a picture; to do so, I am trying to estimate the horizontal and the vertical angle. I have classes like: 0-10 degrees horizontal, 10-20 degrees horizontal, and so on, and the same for the vertical angles. Now I do not know how to create the label db, i.e. how the labels have to be formatted in the lmdb.
I have a .txt list file with entries like /path/pic.png 1 32, where 1 means 10-20 degrees (horizontal) and 32 means 320-330 degrees (vertical).
My code looks like this:
import fileinput
import re
import numpy as np
import cv2
import caffe

images, labels = [], []
for line in fileinput.input(data):
    entries = re.split(' ', line.strip())
    images.append(entries[0])
    labels.append(entries[1:])
....
for in_idx, (image, label) in enumerate(inputs):
    im = cv2.imread(image)
    im = im[:, :, ::-1]           # reverse the channel order (BGR -> RGB)
    im = im.transpose((2, 0, 1))  # HxWxC -> CxHxW, as Caffe expects
    im_dat = caffe.io.array_to_datum(im)
    print im_dat
    images_txn.put('{:0>10d}'.format(in_idx), im_dat.SerializeToString())
    label = np.array(label).astype(int).reshape(1, 1, len(label))
    label_dat = caffe.io.array_to_datum(label)
    labels_txn.put('{:0>10d}'.format(in_idx), label_dat.SerializeToString())
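For completeness, images_txn and labels_txn are write transactions on LMDB environments opened beforehand; a minimal sketch, assuming the Python lmdb package (the paths and map sizes are placeholders):

import lmdb

# placeholder paths and sizes -- adjust to your setup
images_env = lmdb.open('train2_images_lmdb', map_size=int(1e12))
labels_env = lmdb.open('train2_labels_lmdb', map_size=int(1e9))
images_txn = images_env.begin(write=True)
labels_txn = labels_env.begin(write=True)

# ... the loop above ...

# commit once after the loop, then close the environments
images_txn.commit()
labels_txn.commit()
images_env.close()
labels_env.close()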
This, however, does not seem to be correct, since I got the following error when trying to train the network:
Check failed: outer_num_ * inner_num_ == bottom[1]->count() (10 vs. 20) Number of labels must match number of predictions; e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), label count (number of labels) must be NHW, with integer values in {0, 1, ..., C-1}.
My data layers look like this:
name: "LearnAngle"
layer {
name: "data"
type: "Data"
top: "images"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 256
mean_file: "/path/mean_train.binaryproto"
}
data_param {
source: "path/train2_images_lmdb"
batch_size: 10
backend: LMDB
}
}
layer {
name: "data_label"
type: "Data"
top: "labels"
include {
phase: TRAIN
}
data_param {
source: "path/train2_labels_lmdb"
batch_size: 10
backend: LMDB
}
}
My last layers look like this:
layer {
  name: "fc8"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc8"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 36
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc8"
  bottom: "labels"
  top: "loss"
}

The problem is: your top: "labels" in the second data layer carries two kinds of labels (the horizontal and the vertical angle), while you used only one SoftmaxWithLoss layer. With batch_size 10 that gives a label count of 20, but the loss layer expects exactly one label per prediction, i.e. 10, which is exactly what the error message says.
In fact, to train two classification tasks in one network, you can create two lmdb databases for the two tasks' labels respectively, and use two data layers to feed them into two SoftmaxWithLoss layers (Caffe sums all loss layers, each weighted by its loss_weight, 1 by default, into the training objective). Like below:
Code for creating the lmdbs for the two classification tasks:
import fileinput
import re
import numpy as np
import cv2
import caffe

images, horizontal_labels, vertical_labels = [], [], []
for line in fileinput.input(data):
    entries = re.split(' ', line.strip())
    images.append(entries[0])
    horizontal_labels.append(entries[1])
    vertical_labels.append(entries[2])
...
inputs = zip(images, horizontal_labels, vertical_labels)
for in_idx, (image, hlabel, vlabel) in enumerate(inputs):
    im = cv2.imread(image)
    im = im[:, :, ::-1]           # reverse the channel order
    im = im.transpose((2, 0, 1))  # HxWxC -> CxHxW
    im_dat = caffe.io.array_to_datum(im)
    images_txn.put('{:0>10d}'.format(in_idx), im_dat.SerializeToString())
    horizontal_label = np.array([hlabel]).astype(int).reshape(1, 1, 1)
    vertical_label = np.array([vlabel]).astype(int).reshape(1, 1, 1)
    horizontal_label_dat = caffe.io.array_to_datum(horizontal_label)
    vertical_label_dat = caffe.io.array_to_datum(vertical_label)
    horizontal_labels_txn.put('{:0>10d}'.format(in_idx), horizontal_label_dat.SerializeToString())
    vertical_labels_txn.put('{:0>10d}'.format(in_idx), vertical_label_dat.SerializeToString())
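As a quick sanity check, one entry can be read back from a label lmdb and decoded; a short sketch, assuming the key format used above and the Python lmdb package:

import lmdb
import caffe
from caffe.proto import caffe_pb2

env = lmdb.open('horizontal_labels_lmdb', readonly=True)  # placeholder path
with env.begin() as txn:
    raw = txn.get('{:0>10d}'.format(0))
    datum = caffe_pb2.Datum()
    datum.ParseFromString(raw)
    label = caffe.io.datum_to_array(datum)
    print label.shape  # expect (1, 1, 1)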
train_val.prototxt:
name: "LearnAngle"
layer {
name: "data"
type: "Data"
top: "images"
include {
phase: TRAIN
}
...
}
layer {
name: "horizontal_label"
type: "Data"
top: "horizontal_label"
include {
phase: TRAIN
}
data_param {
source: "path/horizontal_labels_lmdb" #created using above python code
...
}
}
layer {
name: "vertical_label"
type: "Data"
top: "vertical_label"
include {
phase: TRAIN
}
data_param {
source: "path/vertical_labels_lmdb" #created using above python code
...
}
}
...  # following layers
# branch for horizontal label classification
layer {
  name: "fc_horizon"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc_horizon"
  ...
  inner_product_param {
    num_output: 36
    ...
  }
}
layer {
  name: "loss_horizon"
  type: "SoftmaxWithLoss"
  bottom: "fc_horizon"
  bottom: "horizontal_label"
  top: "loss_horizon"
}
# branch for vertical label classification
layer {
  name: "fc_vertical"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc_vertical"
  ...
  inner_product_param {
    num_output: 36
    ...
  }
}
layer {
  name: "loss_vertical"
  type: "SoftmaxWithLoss"
  bottom: "fc_vertical"
  bottom: "vertical_label"
  top: "loss_vertical"
}

Related

How can I get the slider to work and filter on the rent property of features in vega-lite?

This is what my current map looks like:
const selectslider = vl.param("maxrentalcost")
  .value(400)
  .bind(vl.slider(400, 1700, 50).name("Max Rental Cost"));

const buildings = vl
  .markGeoshape()
  .data({
    values: somedata6,
    format: {
      type: "json",
      property: "features"
    }
  })
  .project(vl.projection("identity").reflectY(true))
  .encode(
    vl.fill({ value: "#ffffff" }),
    vl.stroke({ value: "black" })
  );

const apartments = vl
  .markGeoshape()
  .data({
    values: somedata6,
    format: {
      type: "json",
      property: "features"
    }
  })
  .params(selectslider)
  .transform(
    vl.filter('maxrentalcost > datum.properties.rentalcost')
  )
  .project(vl.projection("identity").reflectY(true))
  .encode(
    vl.stroke({ value: "black" }),
    vl.fill({
      field: "properties.rent",
      type: "quantitative",
      title: "Average Building Rent",
      scale: { scheme: "reds", domainMax: 1200 }
    })
  );

return vl.layer(buildings, apartments).width(850).height(850).render();
I can't get the slider to work so that it shows only the polygons (apartments) whose rent property is less than the slider value.
The geoshape has two layers. The bottom layer draws all the buildings. The top layer only draws buildings with nonnull rent property.
I would appreciate any help.
Sample data:

How to modify the batch normalization layers of DeconvNet to run with Caffe?

I wanted to run DeconvNet on my data; however, it seems it was written for another version of Caffe. Does anyone know how to change the batch_params?
The one that is in DeconvNet:
layers {
  bottom: 'conv1_1'
  top: 'conv1_1'
  name: 'bn1_1'
  type: BN
  bn_param {
    scale_filler { type: 'constant' value: 1 }
    shift_filler { type: 'constant' value: 0.001 }
    bn_mode: INFERENCE
  }
}
And this is the one that Caffe provides in the cifar10 example:
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "pool1"
  top: "bn1"
  batch_norm_param {
    use_global_stats: true
  }
  param {
    lr_mult: 0
  }
  param {
    lr_mult: 0
  }
  param {
    lr_mult: 0
  }
  include {
    phase: TEST
  }
}
When I tried to run it, it first showed me the following error:
I1029 13:46:47.156885 11601 solver.cpp:87] Creating training net from net file: train_val.prototxt
[libprotobuf ERROR google/protobuf/text_format.cc:299] Error parsing text-format caffe.NetParameter: 59:3: Unknown enumeration value of "BN" for field "type".
F1029 13:46:47.157971 11601 upgrade_proto.cpp:88] Check failed: ReadProtoFromTextFile(param_file, param)
and after changing BN into BatchNorm, it shows a new error, again about the type:
I1029 14:03:38.497725 12097 solver.cpp:87] Creating training net from net file: train_val.prototxt
[libprotobuf ERROR google/protobuf/text_format.cc:299] Error parsing text-format caffe.NetParameter: 59:3: Unknown enumeration value of "BatchNorm" for field "type".
F1029 14:03:38.503345 12097 upgrade_proto.cpp:88] Check failed: ReadProtoFromTextFile(param_file, param)
Has anyone tried to train DeconvNet? If yes, could you please guide me?
Thanks.
Could you please let me know whether it is correct to change it like this?
layer {
  name: "bn1_1"
  type: "BatchNorm"
  bottom: "conv1_1"
  top: "conv1_1"
  param {
    lr_mult: 0
  }
  param {
    lr_mult: 0
  }
  param {
    lr_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale_conv1_1"
  type: "Scale"
  bottom: "conv1_1"
  top: "conv1_1"
  scale_param {
    bias_term: true
    bias_filler {
      type: "constant"
      value: 0.001
    }
  }
}
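For what it's worth, the first error happens because the old layers { type: BN } block is the deprecated V1 syntax, in which type is an enum that includes neither BN nor BatchNorm; the string type "BatchNorm" only exists in the V2 layer { } syntax, as in the converted snippet above. The BatchNorm + Scale pair shown there is the commonly used mainline equivalent of the fork's BN layer. A NetSpec sketch that would emit a similar pair (the bottom blob and the lr_mult choices are placeholders to adapt):

import caffe
from caffe import layers as L

def bn_scale(bottom):
    # BatchNorm only normalizes; at test time it uses the stored running statistics
    bn = L.BatchNorm(bottom, use_global_stats=True, in_place=True,
                     param=[dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)])
    # Scale supplies the learned scale/shift that the fork's fillers used to initialize
    sc = L.Scale(bn, bias_term=True, in_place=True)
    return bn, sc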

Why does the loss remain constant while training FCN-8s?

I am trying to run FCN-8s. I did the following steps:
1. downloaded this repository
2. converted my data to LMDB and changed the paths in train_val.prototxt
3. downloaded the fcn8s-heavy-pascal caffemodel
4. changed num_output in train_val.prototxt and deploy.prototxt from 60 to 5 (the number of classes in my data) in the following last layers:
layer {
  name: "score59"
  type: "Convolution"
  bottom: "fc7"
  top: "score59"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 5  # 60
    kernel_size: 1
    engine: CAFFE
  }
}
layer {
  name: "upscore2"
  type: "Deconvolution"
  bottom: "score59"
  top: "upscore2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  convolution_param {
    num_output: 5  # 60
    bias_term: false
    kernel_size: 4
    stride: 2
  }
}
layer {
  name: "score-pool4"
  type: "Convolution"
  bottom: "pool4"
  top: "score-pool4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 5  # 60
    kernel_size: 1
    engine: CAFFE
  }
}
layer {
  name: "crop"
  type: "Crop"
  bottom: "score-pool4"
  bottom: "upscore2"
  top: "score-pool4c"
}
layer {
  name: "fuse"
  type: "Eltwise"
  bottom: "upscore2"
  bottom: "score-pool4c"
  top: "score-fused"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "upsample-fused-16"
  type: "Deconvolution"
  bottom: "score-fused"
  top: "score4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  convolution_param {
    num_output: 5  # 60
    bias_term: false
    kernel_size: 4
    stride: 2
  }
}
layer {
  name: "score-pool3"
  type: "Convolution"
  bottom: "pool3"
  top: "score-pool3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 5  # 60
    kernel_size: 1
    engine: CAFFE
  }
}
layer {
  name: "crop"
  type: "Crop"
  bottom: "score-pool3"
  bottom: "score4"
  top: "score-pool3c"
}
layer {
  name: "fuse"
  type: "Eltwise"
  bottom: "score4"
  bottom: "score-pool3c"
  top: "score-final"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "upsample"
  type: "Deconvolution"
  bottom: "score-final"
  top: "bigscore"
  param {
    lr_mult: 0
  }
  convolution_param {
    num_output: 5  # 60
    bias_term: false
    kernel_size: 16
    stride: 8
  }
}
layer {
  name: "crop"
  type: "Crop"
  bottom: "bigscore"
  bottom: "data"
  top: "score"
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "score"
  bottom: "label"
  top: "loss"
  loss_param {
    normalize: false
  }
}
I started the training from the weights of the model pre-trained on the PASCAL dataset, but the loss remains constant (loss = 105476) over time.
I0112 18:25:07.198588 5878 sgd_solver.cpp:106] Iteration 150, lr = 1e-14
I0112 18:26:07.614239 5878 solver.cpp:228] Iteration 200, loss = 105476
I0112 18:26:07.614459 5878 solver.cpp:244] Train net output #0: loss = 105476 (* 1 = 105476 loss)
I0112 18:26:07.614490 5878 sgd_solver.cpp:106] Iteration 200, lr = 1e-14
I0112 18:27:06.198556 5878 solver.cpp:228] Iteration 250, loss = 105476
I0112 18:27:06.198801 5878 solver.cpp:244] Train net output #0: loss = 105476 (* 1 = 105476 loss)
I0112 18:27:06.198834 5878 sgd_solver.cpp:106] Iteration 250, lr = 1e-14
I0112 18:28:05.056469 5878 solver.cpp:228] Iteration 300, loss = 105476
I0112 18:28:05.056715 5878 solver.cpp:244] Train net output #0: loss = 105476 (* 1 = 105476 loss)
I0112 18:28:05.056751 5878 sgd_solver.cpp:106] Iteration 300, lr = 1e-14
I0112 18:29:04.537042 5878 solver.cpp:228] Iteration 350, loss = 105476
I0112 18:29:04.537261 5878 solver.cpp:244] Train net output #0: loss = 105476 (* 1 = 105476 loss)
I0112 18:29:04.537293 5878 sgd_solver.cpp:106] Iteration 350, lr = 1e-14
I0112 18:30:05.320504 5878 solver.cpp:228] Iteration 400, loss = 105476
I0112 18:30:05.320751 5878 solver.cpp:244] Train net output #0: loss = 105476 (* 1 = 105476 loss)
I0112 18:30:05.320796 5878 sgd_solver.cpp:106] Iteration 400, lr = 1e-14
I0112 18:31:06.690937 5878 solver.cpp:228] Iteration 450, loss = 105476
I0112 18:31:06.691177 5878 solver.cpp:244] Train net output #0: loss = 105476 (* 1 = 105476 loss)
I0112 18:31:06.691207 5878 sgd_solver.cpp:106] Iteration 450, lr = 1e-14
I0112 18:32:06.593940 5878 solver.cpp:228] Iteration 500, loss = 105476
I0112 18:32:06.596643 5878 solver.cpp:244] Train net output #0: loss = 105476 (* 1 = 105476 loss)
I0112 18:32:06.596701 5878 sgd_solver.cpp:106] Iteration 500, lr = 1e-14
I do not know what I am doing wrong. I would really appreciate your help in solving this issue.
Did you use the surgery.transplant() function in solve.py to transplant the weights of the original net into your current net?
Did you add a weight_filler and bias_filler with initial values to the Deconvolution layers in net.py?
After doing these two steps, did you run net.py to generate the updated prototxt?
Check these steps and see what happens.
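If it helps, here is a minimal sketch of the usual solve.py flow, assuming the surgery.py helpers from the reference FCN repository; the file names and the 'up' layer-name convention are placeholders to adapt:

import caffe
import surgery  # surgery.py from the reference FCN repository

weights = 'fcn8s-heavy-pascal.caffemodel'  # placeholder path

solver = caffe.SGDSolver('solver.prototxt')  # placeholder path

# copy the compatible weights of the pre-trained model into the new net
base_net = caffe.Net('base_train_val.prototxt', weights, caffe.TEST)
surgery.transplant(solver.net, base_net)

# initialize the Deconvolution (upsampling) layers with bilinear kernels
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

solver.step(4000)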

How to define BN layer in pycaffe?

I used this code:
conv_bn = L.BN(conv, bn_param=[dict(type='constant', scale_filler=1), dict(type='constant', shift_filler=0.001)], in_place=True)
Unfortunately, it raises the following error:
AttributeError: 'BNParameter' object has no attribute 'add'
What is the correct way to define a batch normalization layer?
This is the prototxt that I want to produce using NetSpec in pycaffe:
layer {
  bottom: "conv1_2"
  top: "conv1_2"
  name: "conv1_2_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
  }
}
Thanks.
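The 'add' error usually means a list was passed for a field that is not repeated: in NetSpec, a singular message field such as bn_param takes nested dicts, while lists are reserved for repeated fields like param. A sketch under that assumption (it still requires a Caffe build whose fork registers the BN layer):

import caffe
from caffe import layers as L

# 'conv' is the preceding layer from your net definition;
# nested dicts for the singular bn_param message,
# a list only for the repeated 'param' field
conv_bn = L.BN(conv,
               bn_param=dict(scale_filler=dict(type='constant', value=1),
                             shift_filler=dict(type='constant', value=0.001)),
               param=[dict(lr_mult=1, decay_mult=1),
                      dict(lr_mult=1, decay_mult=0)],
               in_place=True)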

How to change x-axis label in Stacked Area Chart NVD3.js? [duplicate]

I have a simple line graph with data in the format:
[
  {
    label: "lebel1",
    x: 0,
    y: 128
  },
  {
    label: "lebel1",
    x: 1,
    y: 128
  },
  ....
  {
    label: "lebel2",
    x: 25,
    y: 128
  },
  ....
  {
    label: "lebel8",
    x: 285,
    y: 128
  },
  ....
]
and I pass this into my nvd3 object:
nv.addGraph(function() {
  var chart = nv.models.lineChart();
  chart.xAxis
    .axisLabel("My X-Axis")
    .ticks(36)
    .tickFormat(function(d) { return d; });
  chart.yAxis
    .axisLabel('Voltage (v)')
    .tickFormat(d3.format('.02f'));
  d3.select('div svg')
    .datum(myData)
    .transition().duration(500)
    .call(chart);
  nv.utils.windowResize(function() { d3.select(gridSvgId).call(chart) });
  return chart;
});
How can I make my x-axis ticks show eight labels (label1 through label8), rather than having the grid broken up into a variable number of lines?
Try something like this:
chart.xAxis.tickValues(['Label 1','Label 2','Label 3','Label 4','Label 5','Label 6','Label 7','Label 8']);
Or, if you want to build it from the dataset, you could try something like this:
chart.xAxis.tickValues(function(d) {
  // do all your stuff and return an array
  var dataset = ['Build Array from data'];
  return dataset;
});
Hope it helps