Converting a VGG functional model into a Sequential model in Keras - deep-learning

I am actually trying to get a Sequential model version of VGG16 with Keras. The functional version can be obtained with:
from __future__ import division, print_function
import os, json
from glob import glob
import numpy as np
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.preprocessing import image
import keras
import keras.applications.vgg16
from keras.layers import Input
input_tensor = Input(shape=(224,224,3))
VGG_model = keras.applications.vgg16.VGG16(weights='imagenet', include_top=True, input_tensor=input_tensor)
Its summary goes like this:
VGG_model.summary()
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 224, 224, 3) 0
____________________________________________________________________________________________________
block1_conv1 (Convolution2D) (None, 224, 224, 64) 1792 input_1[0][0]
____________________________________________________________________________________________________
block1_conv2 (Convolution2D) (None, 224, 224, 64) 36928 block1_conv1[0][0]
____________________________________________________________________________________________________
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 block1_conv2[0][0]
____________________________________________________________________________________________________
block2_conv1 (Convolution2D) (None, 112, 112, 128) 73856 block1_pool[0][0]
____________________________________________________________________________________________________
block2_conv2 (Convolution2D) (None, 112, 112, 128) 147584 block2_conv1[0][0]
____________________________________________________________________________________________________
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 block2_conv2[0][0]
____________________________________________________________________________________________________
block3_conv1 (Convolution2D) (None, 56, 56, 256) 295168 block2_pool[0][0]
____________________________________________________________________________________________________
block3_conv2 (Convolution2D) (None, 56, 56, 256) 590080 block3_conv1[0][0]
____________________________________________________________________________________________________
block3_conv3 (Convolution2D) (None, 56, 56, 256) 590080 block3_conv2[0][0]
____________________________________________________________________________________________________
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 block3_conv3[0][0]
____________________________________________________________________________________________________
block4_conv1 (Convolution2D) (None, 28, 28, 512) 1180160 block3_pool[0][0]
____________________________________________________________________________________________________
block4_conv2 (Convolution2D) (None, 28, 28, 512) 2359808 block4_conv1[0][0]
____________________________________________________________________________________________________
block4_conv3 (Convolution2D) (None, 28, 28, 512) 2359808 block4_conv2[0][0]
____________________________________________________________________________________________________
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 block4_conv3[0][0]
____________________________________________________________________________________________________
block5_conv1 (Convolution2D) (None, 14, 14, 512) 2359808 block4_pool[0][0]
____________________________________________________________________________________________________
block5_conv2 (Convolution2D) (None, 14, 14, 512) 2359808 block5_conv1[0][0]
____________________________________________________________________________________________________
block5_conv3 (Convolution2D) (None, 14, 14, 512) 2359808 block5_conv2[0][0]
____________________________________________________________________________________________________
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 block5_conv3[0][0]
____________________________________________________________________________________________________
flatten (Flatten) (None, 25088) 0 block5_pool[0][0]
____________________________________________________________________________________________________
fc1 (Dense) (None, 4096) 102764544 flatten[0][0]
____________________________________________________________________________________________________
fc2 (Dense) (None, 4096) 16781312 fc1[0][0]
____________________________________________________________________________________________________
predictions (Dense) (None, 1000) 4097000 fc2[0][0]
====================================================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
____________________________________________________________________________________________________
According to this GitHub issue https://github.com/fchollet/keras/issues/3190,
Sequential(layers=functional_model.layers)
should convert a functional model into a Sequential model. However, if I do:
model = Sequential(layers=VGG_model.layers)
model.summary()
It leads to
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 224, 224, 3) 0
____________________________________________________________________________________________________
block1_conv1 (Convolution2D) (None, 224, 224, 64) 1792 input_1[0][0]
input_1[0][0]
input_1[0][0]
____________________________________________________________________________________________________
block1_conv2 (Convolution2D) (None, 224, 224, 64) 36928 block1_conv1[0][0]
block1_conv1[1][0]
block1_conv1[2][0]
____________________________________________________________________________________________________
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 block1_conv2[0][0]
block1_conv2[1][0]
block1_conv2[2][0]
____________________________________________________________________________________________________
block2_conv1 (Convolution2D) (None, 112, 112, 128) 73856 block1_pool[0][0]
block1_pool[1][0]
block1_pool[2][0]
____________________________________________________________________________________________________
block2_conv2 (Convolution2D) (None, 112, 112, 128) 147584 block2_conv1[0][0]
block2_conv1[1][0]
block2_conv1[2][0]
____________________________________________________________________________________________________
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 block2_conv2[0][0]
block2_conv2[1][0]
block2_conv2[2][0]
____________________________________________________________________________________________________
block3_conv1 (Convolution2D) (None, 56, 56, 256) 295168 block2_pool[0][0]
block2_pool[1][0]
block2_pool[2][0]
____________________________________________________________________________________________________
block3_conv2 (Convolution2D) (None, 56, 56, 256) 590080 block3_conv1[0][0]
block3_conv1[1][0]
block3_conv1[2][0]
____________________________________________________________________________________________________
block3_conv3 (Convolution2D) (None, 56, 56, 256) 590080 block3_conv2[0][0]
block3_conv2[1][0]
block3_conv2[2][0]
____________________________________________________________________________________________________
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 block3_conv3[0][0]
block3_conv3[1][0]
block3_conv3[2][0]
____________________________________________________________________________________________________
block4_conv1 (Convolution2D) (None, 28, 28, 512) 1180160 block3_pool[0][0]
block3_pool[1][0]
block3_pool[2][0]
____________________________________________________________________________________________________
block4_conv2 (Convolution2D) (None, 28, 28, 512) 2359808 block4_conv1[0][0]
block4_conv1[1][0]
block4_conv1[2][0]
____________________________________________________________________________________________________
block4_conv3 (Convolution2D) (None, 28, 28, 512) 2359808 block4_conv2[0][0]
block4_conv2[1][0]
block4_conv2[2][0]
____________________________________________________________________________________________________
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 block4_conv3[0][0]
block4_conv3[1][0]
block4_conv3[2][0]
____________________________________________________________________________________________________
block5_conv1 (Convolution2D) (None, 14, 14, 512) 2359808 block4_pool[0][0]
block4_pool[1][0]
block4_pool[2][0]
____________________________________________________________________________________________________
block5_conv2 (Convolution2D) (None, 14, 14, 512) 2359808 block5_conv1[0][0]
block5_conv1[1][0]
block5_conv1[2][0]
____________________________________________________________________________________________________
block5_conv3 (Convolution2D) (None, 14, 14, 512) 2359808 block5_conv2[0][0]
block5_conv2[1][0]
block5_conv2[2][0]
____________________________________________________________________________________________________
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 block5_conv3[0][0]
block5_conv3[1][0]
block5_conv3[2][0]
____________________________________________________________________________________________________
flatten (Flatten) (None, 25088) 0 block5_pool[0][0]
block5_pool[1][0]
block5_pool[2][0]
____________________________________________________________________________________________________
fc1 (Dense) (None, 4096) 102764544 flatten[0][0]
flatten[1][0]
flatten[2][0]
____________________________________________________________________________________________________
fc2 (Dense) (None, 4096) 16781312 fc1[0][0]
fc1[1][0]
fc1[2][0]
____________________________________________________________________________________________________
predictions (Dense) (None, 1000) 4097000 fc2[0][0]
fc2[1][0]
fc2[2][0]
====================================================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
____________________________________________________________________________________________________
This is different from the original functional model, since every layer now appears connected to the previous layer three times. People say the functional API is more powerful, but all I want to do is pop the final prediction layer, and a functional Model cannot do that with pop()...

I have also been struggling with this and the previous poster was almost there, but left out a particular detail that was stumping me before.
Indeed you can do the "pop" even with a Model created with the Functional API, but it is a little more work.
Here's my model (just plain vanilla VGG16):
model.summary()
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_6 (InputLayer) (None, 224, 224, 3) 0
____________________________________________________________________________________________________
block1_conv1 (Convolution2D) (None, 224, 224, 64) 1792 input_6[0][0]
____________________________________________________________________________________________________
block1_conv2 (Convolution2D) (None, 224, 224, 64) 36928 block1_conv1[0][0]
____________________________________________________________________________________________________
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 block1_conv2[0][0]
____________________________________________________________________________________________________
block2_conv1 (Convolution2D) (None, 112, 112, 128) 73856 block1_pool[0][0]
____________________________________________________________________________________________________
block2_conv2 (Convolution2D) (None, 112, 112, 128) 147584 block2_conv1[0][0]
____________________________________________________________________________________________________
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 block2_conv2[0][0]
____________________________________________________________________________________________________
block3_conv1 (Convolution2D) (None, 56, 56, 256) 295168 block2_pool[0][0]
____________________________________________________________________________________________________
block3_conv2 (Convolution2D) (None, 56, 56, 256) 590080 block3_conv1[0][0]
____________________________________________________________________________________________________
block3_conv3 (Convolution2D) (None, 56, 56, 256) 590080 block3_conv2[0][0]
____________________________________________________________________________________________________
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 block3_conv3[0][0]
____________________________________________________________________________________________________
block4_conv1 (Convolution2D) (None, 28, 28, 512) 1180160 block3_pool[0][0]
____________________________________________________________________________________________________
block4_conv2 (Convolution2D) (None, 28, 28, 512) 2359808 block4_conv1[0][0]
____________________________________________________________________________________________________
block4_conv3 (Convolution2D) (None, 28, 28, 512) 2359808 block4_conv2[0][0]
____________________________________________________________________________________________________
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 block4_conv3[0][0]
____________________________________________________________________________________________________
block5_conv1 (Convolution2D) (None, 14, 14, 512) 2359808 block4_pool[0][0]
____________________________________________________________________________________________________
block5_conv2 (Convolution2D) (None, 14, 14, 512) 2359808 block5_conv1[0][0]
____________________________________________________________________________________________________
block5_conv3 (Convolution2D) (None, 14, 14, 512) 2359808 block5_conv2[0][0]
____________________________________________________________________________________________________
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 block5_conv3[0][0]
____________________________________________________________________________________________________
flatten (Flatten) (None, 25088) 0 block5_pool[0][0]
____________________________________________________________________________________________________
fc1 (Dense) (None, 4096) 102764544 flatten[0][0]
____________________________________________________________________________________________________
fc2 (Dense) (None, 4096) 16781312 fc1[0][0]
____________________________________________________________________________________________________
predictions (Dense) (None, 1000) 4097000 fc2[0][0]
====================================================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
____________________________________________________________________________________________________
Then I "popped" the last layer but not using pop, just with the Functional API
from keras.models import Model
from keras.layers import Dense
from keras.optimizers import Adam

# Get the last-but-one layer/tensor from the old model
last_layer = model.layers[-2].output
# Define the new output layer/tensor for the new model
new_output = Dense(2, activation='softmax', name='Binary_predictions')(last_layer)
# Create the new model, with the old model's input and the new tensor as the output
new_model = Model(model.input, new_output, name='Finetuned_VGG16')
# Set all layers except the last one to not trainable
for layer in new_model.layers[:-1]:
    layer.trainable = False
# Compile the new model
new_model.compile(optimizer=Adam(lr=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
# Now train with the new outputs (cats and dogs!)
This creates a new model (new_model) with the last layer replaced and the old layers fixed (made non-trainable).
new_model.summary()
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_6 (InputLayer) (None, 224, 224, 3) 0
____________________________________________________________________________________________________
block1_conv1 (Convolution2D) (None, 224, 224, 64) 1792 input_6[0][0]
____________________________________________________________________________________________________
block1_conv2 (Convolution2D) (None, 224, 224, 64) 36928 block1_conv1[0][0]
____________________________________________________________________________________________________
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 block1_conv2[0][0]
____________________________________________________________________________________________________
block2_conv1 (Convolution2D) (None, 112, 112, 128) 73856 block1_pool[0][0]
____________________________________________________________________________________________________
block2_conv2 (Convolution2D) (None, 112, 112, 128) 147584 block2_conv1[0][0]
____________________________________________________________________________________________________
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 block2_conv2[0][0]
____________________________________________________________________________________________________
block3_conv1 (Convolution2D) (None, 56, 56, 256) 295168 block2_pool[0][0]
____________________________________________________________________________________________________
block3_conv2 (Convolution2D) (None, 56, 56, 256) 590080 block3_conv1[0][0]
____________________________________________________________________________________________________
block3_conv3 (Convolution2D) (None, 56, 56, 256) 590080 block3_conv2[0][0]
____________________________________________________________________________________________________
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 block3_conv3[0][0]
____________________________________________________________________________________________________
block4_conv1 (Convolution2D) (None, 28, 28, 512) 1180160 block3_pool[0][0]
____________________________________________________________________________________________________
block4_conv2 (Convolution2D) (None, 28, 28, 512) 2359808 block4_conv1[0][0]
____________________________________________________________________________________________________
block4_conv3 (Convolution2D) (None, 28, 28, 512) 2359808 block4_conv2[0][0]
____________________________________________________________________________________________________
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 block4_conv3[0][0]
____________________________________________________________________________________________________
block5_conv1 (Convolution2D) (None, 14, 14, 512) 2359808 block4_pool[0][0]
____________________________________________________________________________________________________
block5_conv2 (Convolution2D) (None, 14, 14, 512) 2359808 block5_conv1[0][0]
____________________________________________________________________________________________________
block5_conv3 (Convolution2D) (None, 14, 14, 512) 2359808 block5_conv2[0][0]
____________________________________________________________________________________________________
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 block5_conv3[0][0]
____________________________________________________________________________________________________
flatten (Flatten) (None, 25088) 0 block5_pool[0][0]
____________________________________________________________________________________________________
fc1 (Dense) (None, 4096) 102764544 flatten[0][0]
____________________________________________________________________________________________________
fc2 (Dense) (None, 4096) 16781312 fc1[0][0]
____________________________________________________________________________________________________
Binary_predictions (Dense) (None, 2) 8194 fc2[0][0]
====================================================================================================
Total params: 134,268,738
Trainable params: 8,194
Non-trainable params: 134,260,544
The tricky part was getting the .output of the last-but-one layer, since that gives you a Tensor. You then use that Tensor as the input for the new Dense layer and make THAT the final output in the new Model...
Hope that helps...
Thon

You can "pop" the final layer by just definining another Model taking the previous layer as output:
poppedModel = Model(VGG_model.input,VGG_model.layers[-2].output)
This model will share exactly the same weights as the original model, and training will affect both models.
You can add your own layers (or even models) after poppedModel, no problem:
popOut = poppedModel(input_tensor)
newLayOut = SomeKerasLayer(blablabla)(popOut)
anotherModel = Model(input_tensor, newLayOut)
#anotherModel will also share weights with poppedModel and VGG_model in the layers they have in common.
It's important, though, if you intend to train the new layers in anotherModel without affecting the VGG weights, to set poppedModel.trainable = False and also each layer in it with poppedModel.layers[i].trainable = False before compiling anotherModel.
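A minimal sketch of that freezing step, assuming poppedModel and anotherModel were built as above (the 'adam' optimizer and the loss are just example choices):
# Freeze the shared VGG base so only the new layers train (sketch only)
poppedModel.trainable = False
for layer in poppedModel.layers:
    layer.trainable = False

anotherModel.compile(optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])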

Related

Hyperparameter Tuning with Wandb Sweep for custom parameters

I'm trying to tune hyperparameters, including the network architecture, for a Stable-Baselines3 model with a wandb sweep.
My configuration file is:
program: main.py
method: bayes
name: sweep
metric:
  goal: minimize
  name: train/loss
parameters:
  batch_size:
    values: [16, 32, 64, 128, 256, 512, 1024]
  epochs:
    values: [20, 50, 100, 200, 250, 300]
  lr:
    max: 0.1
    min: 0.000001
But if I try to add to the parameters:
  policy_kwargs:
    net_arch:
      pi:
        values: [[128, 128], [256, 256], [512, 512]]
      vf:
        values: [[128, 128], [256, 256], [512, 512]]
I got the following error:
wandb.errors.CommError: Invalid sweep config: invalid hyperparameter configuration: policy_kwargs
Is it possible to use wandb sweep with Stable-Baseline-3 for the network architecture?
You are trying to create a nested config. Please refer to the wandb documentation on nested sweep parameters.
Your configuration should be:
program: main.py
method: bayes
name: sweep
metric:
  goal: minimize
  name: train/loss
parameters:
  batch_size:
    values: [16, 32, 64, 128, 256, 512, 1024]
  epochs:
    values: [20, 50, 100, 200, 250, 300]
  lr:
    max: 0.1
    min: 0.000001
  policy_kwargs:
    parameters:
      net_arch:
        parameters:
          pi:
            values: [[128, 128], [256, 256], [512, 512]]
          vf:
            values: [[128, 128], [256, 256], [512, 512]]
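For what it's worth, here is a rough sketch of how main.py might read the nested sweep values and hand them to a Stable-Baselines3 model. The PPO/CartPole choice is only a placeholder, the nested-dict access of wandb.config is an assumption based on the config above, and depending on the SB3 version net_arch may need to be wrapped in a list:
import wandb
from stable_baselines3 import PPO

run = wandb.init()  # the sweep agent injects the sampled hyperparameters here
cfg = wandb.config

# Nested sweep parameters arrive as nested dicts (assumption)
net_arch = dict(pi=list(cfg["policy_kwargs"]["net_arch"]["pi"]),
                vf=list(cfg["policy_kwargs"]["net_arch"]["vf"]))

model = PPO("MlpPolicy", "CartPole-v1",        # placeholder environment
            learning_rate=cfg["lr"],
            batch_size=cfg["batch_size"],
            policy_kwargs=dict(net_arch=net_arch))
model.learn(total_timesteps=10_000)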

Semantic segmentation labeling

I'm trying to write semantic segmentation code from scratch using a U-Net, on the Cityscapes dataset. I want to build a Python dictionary whose keys are the classes (car, train, human, etc.) and whose values are the RGB info. How can I match the dictionary with my ground_truth data?
An example of the labeling dictionary is below:
color_map = {
    '0': [0, 0, 0],         # unlabelled
    '1': [128, 64, 128],    # road
    '2': [244, 35, 232],    # sidewalk
    '3': [70, 70, 70],      # building
    '4': [102, 102, 156],   # wall
    '5': [190, 153, 153],   # fence
    '6': [153, 153, 153],   # pole
    '7': [250, 170, 30],    # traffic_light
    '8': [220, 220, 0],     # traffic_sign
    '9': [107, 142, 35],    # vegetation
    '10': [152, 251, 152],  # terrain
    '11': [0, 130, 180],    # sky
    '12': [220, 20, 60],    # person
    '13': [255, 0, 0],      # rider
    '14': [0, 0, 142],      # car
    '15': [0, 0, 70],       # truck
    '16': [0, 60, 100],     # bus
    '17': [0, 80, 100],     # train
    '18': [0, 0, 230],      # motorcycle
    '19': [119, 11, 32]     # bicycle
}
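One way to match it, as a sketch: assuming the ground truth is loaded as an (H, W, 3) RGB numpy array that uses exactly these colors, compare each pixel against every color and record the matching class id (the function name is mine):
import numpy as np

def rgb_to_class_mask(gt_rgb, color_map):
    # Convert an (H, W, 3) RGB ground-truth image into an (H, W) array of class ids.
    mask = np.zeros(gt_rgb.shape[:2], dtype=np.uint8)
    for class_id, rgb in color_map.items():
        matches = np.all(gt_rgb == np.array(rgb, dtype=gt_rgb.dtype), axis=-1)
        mask[matches] = int(class_id)
    return mask
The reverse mapping (class ids back to colors for visualization) can be built the same way by indexing a (num_classes, 3) lookup array with the mask.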

Getting loss (binary_crossentropy) stagnated around 0.601 for this autoencoder architecture

I am working on an unsupervised image classification problem; the dataset consists of around 4700 photos of carnivores. I thought of achieving this by constructing an autoencoder, getting the image embeddings, and then applying cosine similarity. I am not getting much improvement: the loss stagnates around 0.601. This is my autoencoder architecture:
Model: "functional_75"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_19 (InputLayer) [(None, 128, 128, 3)] 0
_________________________________________________________________
conv2d_126 (Conv2D) (None, 128, 128, 64) 1792
_________________________________________________________________
max_pooling2d_54 (MaxPooling (None, 64, 64, 64) 0
_________________________________________________________________
conv2d_127 (Conv2D) (None, 64, 64, 32) 18464
_________________________________________________________________
max_pooling2d_55 (MaxPooling (None, 32, 32, 32) 0
_________________________________________________________________
conv2d_128 (Conv2D) (None, 32, 32, 16) 4624
_________________________________________________________________
max_pooling2d_56 (MaxPooling (None, 16, 16, 16) 0
_________________________________________________________________
conv2d_129 (Conv2D) (None, 16, 16, 16) 2320
_________________________________________________________________
up_sampling2d_54 (UpSampling (None, 32, 32, 16) 0
_________________________________________________________________
conv2d_130 (Conv2D) (None, 32, 32, 32) 4640
_________________________________________________________________
up_sampling2d_55 (UpSampling (None, 64, 64, 32) 0
_________________________________________________________________
conv2d_131 (Conv2D) (None, 64, 64, 64) 18496
_________________________________________________________________
conv2d_132 (Conv2D) (None, 64, 64, 3) 1731
_________________________________________________________________
up_sampling2d_56 (UpSampling (None, 128, 128, 3) 0
=================================================================
Total params: 52,067
Trainable params: 52,067
Non-trainable params: 0
_________________________________________________________________
Please suggest some tips for improvement.
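For context, the embedding/cosine-similarity step described above would look roughly like this (a sketch only; autoencoder, images, and the bottleneck layer name conv2d_129 are assumptions taken from the summary, not code from the question):
from tensorflow.keras.models import Model
from sklearn.metrics.pairwise import cosine_similarity

# Cut the autoencoder at its assumed bottleneck and use it as an embedder.
encoder = Model(autoencoder.input, autoencoder.get_layer('conv2d_129').output)

embeddings = encoder.predict(images)                  # shape (N, 16, 16, 16)
embeddings = embeddings.reshape(len(embeddings), -1)  # one flat vector per image
similarity = cosine_similarity(embeddings)            # (N, N) pairwise matrix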

Google Apps Script equivalent for JS typed arrays (Uint8Array and Uint16Array)

When I try to run the following code in Google Apps Script:
var numArray = [31, -117, 8, 8, -102, -124, 75, 88, 2, 0, 106, 117, 108, 121, 46, 116, 120, 116, 0, 1, 4, 0, -5, -1, 106, 117, 108, 121, -13, -113, 116, -57, 4, 0, 0, 0];
var typedArray = new Uint8Array(numArray);
...I get:
ReferenceError: "Uint8Array" is not defined.
At the same time
var numArray = [31, -117, 8, 8, -102, -124, 75, 88, 2, 0, 106, 117, 108, 121, 46, 116, 120, 116, 0, 1, 4, 0, -5, -1, 106, 117, 108, 121, -13, -113, 116, -57, 4, 0, 0, 0];
var typedArray = new Array(numArray);
...works just fine. Is there a clever workaround to implement a Uint8Array in Google Apps Script?
OK, so thanks to the comment from #Xepoch, here is the answer to my original question.
The equivalent to
var numArray = [31, -117, 8, 8, -102, -124, 75, 88, 2, 0, 106, 117, 108, 121, 46, 116, 120, 116, 0, 1, 4, 0, -5, -1, 106, 117, 108, 121, -13, -113, 116, -57, 4, 0, 0, 0];
var typedArray = new Uint8Array(numArray);
is (in the absence of Uint8Array):
var numArray = [31, -117, 8, 8, -102, -124, 75, 88, 2, 0, 106, 117, 108, 121, 46, 116, 120, 116, 0, 1, 4, 0, -5, -1, 106, 117, 108, 121, -13, -113, 116, -57, 4, 0, 0, 0];
var typedArray = [];
for (var i = 0; i < numArray.length; i++) {
  typedArray.push(numArray[i] < 0 ? numArray[i] + 256 : numArray[i]);
}

Reading numeric keys from JSON file

Let's say I store a dictionary's values in a JSON file. Here is the simplified code:
import json

test = {}
for i in range(10):
    for j in range(15):
        test['{},{}'.format(i, j)] = i * j

with open('file1.json', 'w') as f:
    json.dump(test, f)
I have a hard time reading back from this file. How can I read it back into a dictionary with keys like (i, j) and values i*j?
I simply use:
with open('file1.json', 'r') as f:
    data2 = json.load(f)
Do you mean something like this?
with open('file1.json', 'r') as f:
    data2 = {tuple(int(x) for x in k.split(',')): v
             for (k, v) in json.load(f).items()}
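With that, data2[(2, 3)] gives 6, for example.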
Your code returns a dictionary containing unicode keys and the loaded values. If you want to print the dictionary with its integer values, you can use json.dumps after loading the file:
import json

test = {}
for i in range(10):
    for j in range(15):
        test['{},{}'.format(i, j)] = i * j

with open('file1.json', 'w') as f:
    json.dump(test, f)

with open('file1.json', 'r') as f:
    data2 = json.load(f)

print(json.dumps(data2))
Result:
{"1,8": 8, "1,9": 9, "1,6": 6, "1,7": 7, "1,4": 4, "1,5": 5, "1,2": 2, "1,3": 3, "1,0": 0, "1,1": 1, "7,6": 42, "7,7": 49, "5,8": 40, "5,9": 45, "3,8": 24, "3,9": 27, "5,2": 10, "5,3": 15, "5,0": 0, "3,7": 21, "3,0": 0, "5,7": 35, "3,2": 6, "3,3": 9, "3,14": 42, "3,12": 36, "3,13": 39, "3,10": 30, "3,11": 33, "2,8": 16, "5,14": 70, "5,10": 50, "5,11": 55, "5,12": 60, "5,13": 65, "0,8": 0, "4,8": 32, "0,13": 0, "0,12": 0, "0,11": 0, "0,10": 0, "0,14": 0, "6,9": 54, "6,8": 48, "6,1": 6, "6,0": 0, "6,3": 18, "6,2": 12, "6,5": 30, "6,4": 24, "6,7": 42, "6,6": 36, "6,14": 84, "6,11": 66, "6,10": 60, "6,13": 78, "6,12": 72, "8,9": 72, "8,8": 64, "8,7": 56, "8,6": 48, "8,5": 40, "8,4": 32, "8,3": 24, "8,2": 16, "8,1": 8, "8,0": 0, "5,1": 5, "2,13": 26, "3,4": 12, "3,5": 15, "3,6": 18, "0,7": 0, "0,6": 0, "0,5": 0, "0,4": 0, "0,3": 0, "0,2": 0, "0,1": 0, "0,0": 0, "5,6": 30, "0,9": 0, "3,1": 3, "1,10": 10, "1,11": 11, "1,12": 12, "1,13": 13, "2,9": 18, "5,4": 20, "2,5": 10, "2,4": 8, "2,7": 14, "2,6": 12, "2,1": 2, "2,0": 0, "2,3": 6, "2,2": 4, "4,3": 12, "4,2": 8, "4,1": 4, "4,0": 0, "4,7": 28, "4,6": 24, "4,5": 20, "4,4": 16, "2,11": 22, "2,10": 20, "4,9": 36, "2,12": 24, "2,14": 28, "1,14": 14, "5,5": 25, "8,13": 104, "8,12": 96, "8,11": 88, "8,10": 80, "8,14": 112, "7,12": 84, "7,13": 91, "7,10": 70, "7,11": 77, "7,14": 98, "4,14": 56, "4,13": 52, "4,12": 48, "4,11": 44, "4,10": 40, "7,8": 56, "7,9": 63, "9,4": 36, "9,5": 45, "9,2": 18, "9,3": 27, "9,0": 0, "9,1": 9, "7,0": 0, "7,1": 7, "7,2": 14, "7,3": 21, "7,4": 28, "7,5": 35, "9,8": 72, "9,9": 81, "9,6": 54, "9,7": 63, "9,10": 90, "9,11": 99, "9,12": 108, "9,13": 117, "9,14": 126}