Custom loss function with Keras to penalise over-prediction more than under-prediction

I understand that mse treats actual - predicted and predicted - actual the same way. I want to write a custom loss function such that
the penalty when predicted > actual is larger than when actual > predicted.
Say I want 2x the penalty when predicted > actual. How would I implement such a function?
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
import keras.backend as K

def create_model():
    # define the sizes
    input_size = 6
    hidden_size = 15
    # define the model
    model = Sequential()
    model.add(Dense(input_size, input_dim=input_size, kernel_initializer='normal', activation='relu'))
    model.add(Dense(hidden_size, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # mse is used as the loss so the optimiser converges quickly
    # mae is a metric whose magnitude you can quantify
    model.compile(optimizer='adam', loss='mse', metrics=['mae'])
    return model

model = create_model()
early_stop = EarlyStopping(monitor='val_loss', patience=20)
history = model.fit(train_features, train_label, epochs=200, validation_split=0.2,
                    verbose=0, shuffle=True, callbacks=[early_stop])
predvalue = model.predict(test_features).flatten() * 100
How do I implement such a loss function?

def customLoss(true, pred):
    diff = pred - true
    greater = K.greater(diff, 0)
    greater = K.cast(greater, K.floatx())  # 0 where pred < true, 1 where pred > true
    greater = greater + 1                  # 1 for under-prediction, 2 for over-prediction
    # use any elementwise loss here, such as mse or mae, or pick one from keras
    # using mse:
    return K.mean(greater * K.square(diff))

model.compile(optimizer='adam', loss=customLoss)
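As a quick sanity check (a minimal sketch with made-up values), an over-prediction of 0.5 should cost exactly twice as much as an under-prediction of 0.5:

import numpy as np
import keras.backend as K

true = K.constant(np.array([1.0, 1.0]))
pred = K.constant(np.array([0.5, 1.5]))  # one under-prediction, one over-prediction
# under: 1 * 0.5**2 = 0.25 ; over: 2 * 0.5**2 = 0.5 ; mean = 0.375
print(K.eval(customLoss(true, pred)))  # ~0.375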


Building a neural network using k-fold cross-validation

I am new to deep learning and am trying to implement a neural network with 4-fold cross-validation for training, testing, and validation. The task is to classify vehicles using an existing dataset.
The accuracy I get is 0.7.
[Image: training accuracy plot]
[Image: example output for the epochs]
I also don't know whether the code is correct, or what to do to increase the accuracy.
Here is the code:
!pip install category_encoders
import tensorflow as tf
from sklearn.model_selection import KFold
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
import category_encoders as ce
from category_encoders import OrdinalEncoder

car_data = pd.read_csv('car_data.csv')
car_data.columns = ['Purchasing', 'Maintenance', 'No_Doors', 'Capacity', 'BootSize', 'Safety', 'Evaluation']
# Extract the features and labels from the dataset
X = car_data.drop(['Evaluation'], axis=1)
Y = car_data['Evaluation']
encoder = ce.OrdinalEncoder(cols=['Purchasing', 'Maintenance', 'No_Doors', 'Capacity', 'BootSize', 'Safety'])
X = encoder.fit_transform(X)
X = X.to_numpy()
Y_df = pd.DataFrame(Y, columns=['Evaluation'])
encoder = OrdinalEncoder(cols=['Evaluation'])
Y_encoded = encoder.fit_transform(Y_df)
Y = Y_encoded.to_numpy()
input_layer = tf.keras.layers.Input(shape=(X.shape[1],))
# Define the hidden layers
hidden_layer_1 = tf.keras.layers.Dense(units=64, activation='relu', kernel_initializer='glorot_uniform')(input_layer)
hidden_layer_2 = tf.keras.layers.Dense(units=32, activation='relu', kernel_initializer='glorot_uniform')(hidden_layer_1)
# Define the output layer
output_layer = tf.keras.layers.Dense(units=1, activation='sigmoid', kernel_initializer='glorot_uniform')(hidden_layer_2)
# Create the model
model = tf.keras.Model(inputs=input_layer, outputs=output_layer)
# Initialize the 4-fold cross-validation
kfold = KFold(n_splits=4, shuffle=True, random_state=42)
# Initialize a list to store the scores
scores = []
quality_weights = []
# Compile the model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'],
              sample_weight_mode='temporal')
for train_index, test_index in kfold.split(X, Y):
    # Split the data into train and test sets
    X_train, X_test = X[train_index], X[test_index]
    Y_train, Y_test = Y[train_index], Y[test_index]
    # Fit the model on the training data
    history = model.fit(X_train, Y_train, epochs=300, batch_size=64, sample_weight=quality_weights)
    # Evaluate the model on the test data
    score = model.evaluate(X_test, Y_test)
    # Append the score to the scores list
    scores.append(score[1])
    plt.plot(history.history['accuracy'])
    plt.title('Model Training Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train'], loc='upper left')
    plt.show()
# Print the mean and standard deviation of the scores
print(f'Mean accuracy: {np.mean(scores):.3f} +/- {np.std(scores):.3f}')
The first thing that caught my attention was here:
history = model.fit(X_train, Y_train, epochs=300, batch_size=64, sample_weight=quality_weights)
Your quality_weights should be a NumPy array with one entry per input sample.
Refer here: https://keras.io/api/models/model_training_apis/#fit-method
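For example, a minimal sketch of a properly sized weight array (the all-ones weights are a placeholder, equivalent to no weighting; note that sample_weight_mode='temporal' would also need to be dropped from compile(), since plain per-sample weights use the default mode):

import numpy as np

# one weight per training sample; replace the ones with your real quality scores
quality_weights = np.ones(len(X_train))
model.fit(X_train, Y_train, epochs=300, batch_size=64, sample_weight=quality_weights)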
If changing that doesn't seem to help, then maybe your network isn't learning from the data. A few possible reasons could be:
The network is a bit too shallow. Try adding just one more hidden layer to see if that improves anything.
From the code I can't see the size of your input data. Does it have enough data points for 4-fold cross-validation? Can you somehow augment the data?

How to increase emotion detection validation accuracy on a VGG16 model? [Transfer Learning]

import pandas as pd
import numpy as np
import keras
import tensorflow
from keras.models import Model
from keras.layers import Dense
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="path",target_size=(224,224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="path", target_size=(224,224))
from keras.applications.vgg16 import VGG16
vggmodel = VGG16(weights='imagenet', include_top=True)
vggmodel.summary()
for layers in vggmodel.layers[:19]:
    print(layers)
    layers.trainable = False
#flatten_out = tensorflow.keras.layers.Flatten()(vggmodel.output)
#fc1 = tensorflow.keras.layers.Dense(units=4096,activation="relu")(flatten_out)
#fc2 = tensorflow.keras.layers.Dense(units=4096,activation="relu")(fc1)
#fc3 = tensorflow.keras.layers.Dense(units=256,activation="relu")(fc2)
#predictions = tensorflow.keras.layers.Dense(units=3, activation="softmax")(fc3)
X= vggmodel.layers[-2].output
predictions = Dense(units=3, activation="softmax")(X)
model_final = Model(vggmodel.input, predictions)
model_final.compile(loss = "categorical_crossentropy", optimizer = optimizers.SGD(lr=0.001, momentum=0.9), metrics=["accuracy"])
model_final.summary()
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=40, verbose=1, mode='auto')
model_final.fit_generator(generator= traindata, steps_per_epoch= 95, epochs= 100, validation_data= testdata, validation_steps=7, callbacks=[checkpoint,early])
I am classifying emotion into positive, negative, and neutral.
I am using a VGG16 transfer-learning model,
though I am still not getting better validation accuracy.
Things I've tried:
increasing the amount of training data
layers.trainable = False/True
learning rate: 0.0001, 0.001, 0.01
activation function: relu/softmax
batch size: 64
optimizers: adam/sgd
loss function: categorical_crossentropy / sparse_categorical_crossentropy
momentum: 0.09/0.9
I also tried converting my dataset to grayscale, and somehow it gave better accuracy than the colour images, but it is still not satisfactory.
I also changed my code and added dropout layers, but still no progress.
I tried the FER2013 dataset and it gave me pretty decent accuracy.
These are the results on the FER dataset:
accuracy: 0.9997 - val_accuracy: 0.7105
But on my own dataset (which is pretty good) the validation accuracy does not increase beyond 66%.
What else can I do to increase val_accuracy?
I think your model is more complex than necessary. I would remove the fc1 and fc2 layers, include regularization in the fc3 layer, and add a dropout layer after fc3. In your early-stopping callback, change patience to 4. I also recommend the Keras ReduceLROnPlateau callback. Full recommendations are in the code below:
flatten_out = tensorflow.keras.layers.Flatten()(vggmodel.output)
fc3 = tensorflow.keras.layers.Dense(units=256, kernel_regularizer=regularizers.l2(l=0.016),
                                    activity_regularizer=regularizers.l1(0.006),
                                    bias_regularizer=regularizers.l1(0.006),
                                    activation='relu')(flatten_out)
x = Dropout(rate=.4, seed=123)(fc3)
predictions = Dense(units=3, activation="softmax")(x)
model_final = Model(vggmodel.input, predictions)
rlronp = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                              factor=0.4, patience=2,
                                              verbose=0, mode='auto')
callbacks = [rlronp, checkpoint, early]
model_final.fit_generator(generator=traindata, steps_per_epoch=95, epochs=100,
                          validation_data=testdata, validation_steps=7, callbacks=callbacks)
I do not like VGG; it is a very large model, a bit old, and slow. I think you will get better and faster results using EfficientNet models; EfficientNetB3 should work fine.
If you want to try that, get rid of all the VGG code and use:
import tensorflow as tf
from tensorflow.keras import regularizers
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adamax

lr = .001
img_size = (256, 256)
img_shape = img_size + (3,)  # add the channels dimension
base_model = tf.keras.applications.efficientnet.EfficientNetB3(include_top=False,
                                                               weights="imagenet",
                                                               input_shape=img_shape,
                                                               pooling='max')
base_model.trainable = True
x = base_model.output
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
x = Dense(256, kernel_regularizer=regularizers.l2(l=0.016),
          activity_regularizer=regularizers.l1(0.006),
          bias_regularizer=regularizers.l1(0.006), activation='relu')(x)
x = Dropout(rate=.4, seed=123)(x)
output = Dense(class_count, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=output)
model.compile(Adamax(learning_rate=lr), loss='categorical_crossentropy',
              metrics=['accuracy'])
NOTE: EfficientNet models expect pixels in the range 0 to 255, so do not scale the pixels. Also note that I make the base model trainable. The docs tell you NOT to do that, but in many experiments I have found that training the base model from the outset leads to faster convergence and a lower final validation loss.
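As a minimal sketch of that note (the directory path is a placeholder), the data generators would then pass the raw 0-255 pixels straight through, with no rescale argument, and class_count can be read off the generator:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# no rescale=1./255 here: EfficientNet expects raw 0-255 pixel values
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="path/to/train",  # placeholder path
                                       target_size=(256, 256),
                                       class_mode='categorical')
class_count = traindata.num_classes  # feeds the Dense output layer above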

Importing data to tensorflow autoencoders through ImageDataGenerator

When I train the autoencoder by importing the images as NumPy arrays, training proceeds quickly, with the training loss already very low in the first epoch itself, and the results are also decent.
But when I import the same data through ImageDataGenerator, the starting loss is around 32000; it decreases very slowly as training proceeds and saturates at around 31000 after 50 epochs.
I used MSE as the loss function with the Adam optimiser. I tried different loss functions, but the problem persists: a very high value at the start that quickly saturates at a significantly high value.
Any suggestions are welcome. Thanks.
The following is my code.
from convautoencoder import ConvAutoencoder
from tensorflow.keras.optimizers import Adam
import numpy as np
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.config import experimental
from tensorflow.python.client import device_lib
devices = experimental.list_physical_devices('GPU')
experimental.set_memory_growth(devices[0], True)
EPOCHS = 5000
BS = 4
trainAug = ImageDataGenerator()
valAug = ImageDataGenerator()
# initialize the training generator
trainGen = trainAug.flow_from_directory(
    config.TRAIN_PATH,
    class_mode="input",
    classes=None,
    target_size=(64, 64),
    color_mode="grayscale",
    shuffle=True,
    batch_size=BS)
# initialize the validation generator
valGen = valAug.flow_from_directory(
    config.TRAIN_PATH,
    class_mode="input",
    classes=None,
    target_size=(64, 64),
    color_mode="grayscale",
    shuffle=False,
    batch_size=BS)
# initialize the testing generator
testGen = valAug.flow_from_directory(
    config.TRAIN_PATH,
    class_mode="input",
    classes=None,
    target_size=(64, 64),
    color_mode="grayscale",
    shuffle=False,
    batch_size=BS)
mc = ModelCheckpoint('best_model_1.h5', monitor='val_loss', mode='min', save_best_only=True)
print("[INFO] building autoencoder...")
(encoder, decoder, autoencoder) = ConvAutoencoder.build(64, 64, 1)
opt = Adam(learning_rate= 0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-04, amsgrad=False)
autoencoder.compile(loss="hinge", optimizer=opt)
H = autoencoder.fit( trainGen, validation_data=valGen, epochs=EPOCHS, batch_size=BS ,callbacks=[ mc])
OK, this was a silly mistake.
Adding the rescale factor rescale=1./255 to the ImageDataGenerator solved the problem.
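For reference, a minimal sketch of that fix applied to the two generators defined above:

# rescale pixels from [0, 255] to [0, 1] so the reconstruction targets
# match the autoencoder's output range
trainAug = ImageDataGenerator(rescale=1. / 255)
valAug = ImageDataGenerator(rescale=1. / 255)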

How to use keras to fine-tune inception v3 to do multi-class classification?

I want to use Keras to do a two-class image classification using the Cat vs. Dog dataset from Kaggle.com,
but I have a problem with the "class_mode" parameter, as in the code below.
If I use "binary" mode, accuracy is about 95%, but if I use "categorical", accuracy is abnormally low, only just above 50%.
binary mode means one output in the last layer, with sigmoid activation for classification; each sample's label is a single integer.
categorical means two outputs in the last layer, with softmax activation for classification; each sample's label is in one-hot format, e.g. (1,0), (0,1).
I think these two setups should give similar results. Does anyone know the reason for the difference? Thanks very much!
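To make the comparison concrete, here is a minimal sketch (not from the original post) of the two output heads being described:

from keras.layers import Dense

# "binary": one unit + sigmoid, integer labels 0/1, used with loss='binary_crossentropy'
binary_head = Dense(1, activation='sigmoid')
# "categorical": one unit per class + softmax, one-hot labels (1,0)/(0,1),
# used with loss='categorical_crossentropy'
categorical_head = Dense(2, activation='softmax')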
import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt
from keras import __version__
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
# set some params here
IM_WIDTH, IM_HEIGHT = 299, 299 #fixed size for InceptionV3
NB_EPOCHS = 1
BAT_SIZE = 32
FC_SIZE = 1024
NB_IV3_LAYERS_TO_FREEZE = 172
loss_mode = "binary_crossentropy"
def get_nb_files(directory):
    """Get number of files by searching directory recursively"""
    if not os.path.exists(directory):
        return 0
    cnt = 0
    for r, dirs, files in os.walk(directory):
        for dr in dirs:
            cnt += len(glob.glob(os.path.join(r, dr + "/*")))
    return cnt
# transfer_learn: keep the weights in Inception V3
def setup_to_transfer_learn(model, base_model):
    """Freeze all layers and compile the model"""
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='rmsprop', loss=loss_mode, metrics=['accuracy'])
# Add a last layer to do the two-class classification.
def add_new_last_layer(base_model, nb_classes):
    """Add a last layer to the convnet
    Args:
        base_model: keras model excluding top
        nb_classes: # of classes
    Returns:
        new keras model with last layer
    """
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(FC_SIZE, activation='relu')(x)  # new FC layer, random init
    if args.class_mode == "binary":
        predictions = Dense(1, activation='sigmoid')(x)  # new sigmoid layer
    else:
        predictions = Dense(nb_classes, activation='softmax')(x)  # new softmax layer
    model = Model(inputs=base_model.input, outputs=predictions)
    return model
# Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers,
# and fine-tune the weights.
def setup_to_finetune(model):
    """Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.
    note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the InceptionV3 arch
    Args:
        model: keras model
    """
    for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
        layer.trainable = False
    for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
        layer.trainable = True
    model.compile(optimizer="rmsprop", loss=loss_mode, metrics=['accuracy'])
    #model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
def train(args):
    """Use transfer learning and fine-tuning to train a network on a new dataset"""
    nb_train_samples = get_nb_files(args.train_dir)
    nb_classes = len(glob.glob(args.train_dir + "/*"))
    nb_val_samples = get_nb_files(args.val_dir)
    nb_epoch = int(args.nb_epoch)
    batch_size = int(args.batch_size)
    print("nb_classes:{}".format(nb_classes))
    # data prepare
    train_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True
    )
    test_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True
    )
    train_generator = train_datagen.flow_from_directory(
        args.train_dir,
        target_size=(IM_WIDTH, IM_HEIGHT),
        batch_size=batch_size,
        #class_mode='binary'
        class_mode=args.class_mode
    )
    validation_generator = test_datagen.flow_from_directory(
        args.val_dir,
        target_size=(IM_WIDTH, IM_HEIGHT),
        batch_size=batch_size,
        #class_mode='binary'
        class_mode=args.class_mode
    )
    # setup model
    base_model = InceptionV3(weights='imagenet', include_top=False)  # include_top=False excludes final FC layer
    model = add_new_last_layer(base_model, nb_classes)
    # transfer learning
    setup_to_transfer_learn(model, base_model)
    #model.summary()
    history_tl = model.fit_generator(
        train_generator,
        epochs=nb_epoch,
        steps_per_epoch=nb_train_samples//BAT_SIZE,
        validation_data=validation_generator,
        validation_steps=nb_val_samples//BAT_SIZE)
    # fine-tuning
    setup_to_finetune(model)
    history_ft = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples//BAT_SIZE,
        epochs=nb_epoch,
        validation_data=validation_generator,
        validation_steps=nb_val_samples//BAT_SIZE)
    model.save(args.output_model_file)
    if args.plot:
        plot_training(history_ft)
def plot_training(history):
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    plt.plot(epochs, acc, 'r.')
    plt.plot(epochs, val_acc, 'r')
    plt.title('Training and validation accuracy')
    plt.figure()
    plt.plot(epochs, loss, 'r.')
    plt.plot(epochs, val_loss, 'r-')
    plt.title('Training and validation loss')
    plt.show()
# main func
if __name__ == "__main__":
    a = argparse.ArgumentParser()
    a.add_argument("--train_dir", default="train2")
    a.add_argument("--val_dir", default="test2")
    a.add_argument("--nb_epoch", default=NB_EPOCHS)
    a.add_argument("--batch_size", default=BAT_SIZE)
    a.add_argument("--output_model_file", default="inceptionv3-ft.model")
    a.add_argument("--plot", action="store_true")
    a.add_argument("--class_mode", default="binary")
    args = a.parse_args()
    if args.train_dir is None or args.val_dir is None:
        a.print_help()
        sys.exit(1)
    if args.class_mode != "binary" and args.class_mode != "categorical":
        print("set class_mode as 'binary' or 'categorical'")
    if args.class_mode == "categorical":
        loss_mode = "categorical_crossentropy"
    # set class_mode
    print("class_mode:{}, loss_mode:{}".format(args.class_mode, loss_mode))
    if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)):
        print("directories do not exist")
        sys.exit(1)
    train(args)
I had this problem on several tasks when the learning rate was too high. Try something like 0.0001 or even less.
According to the Keras documentation, the default rate is 0.001:
keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
See https://keras.io/optimizers/#rmsprop
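A minimal sketch of that change applied to the compile calls above (the exact value is something to tune):

from keras.optimizers import RMSprop

# explicit optimizer instance with a lower learning rate than the 0.001 default
model.compile(optimizer=RMSprop(lr=0.0001), loss=loss_mode, metrics=['accuracy'])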
I found that if I use the SGD or Adam optimizer, the accuracy goes up normally. So is there something wrong with using the RMSprop optimizer at the default learning rate of 0.001?

Simple regression with Keras does not seem to work properly

I am trying, just for practising with Keras, to train a network to learn a very easy function.
The input of the network is two-dimensional; the output is one-dimensional.
The function can indeed be represented with an image, and so can the approximated function.
At the moment I'm not looking for good generalization; I just want the network to be at least good at representing the training set.
Here I place my code:
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
import random as rnd
import math
m = [
[1,1,1,1,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,1,1],
[1,0,0,0,1,1,0,1,0,0],
[1,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,1],
[0,0,1,0,1,1,0,0,0,1],
[1,1,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,1,1,1,1]] #A representation of the function that I would like to approximate
matrix = np.matrix(m)
evaluation = np.zeros((100,100))
x_train = np.zeros((10000,2))
y_train = np.zeros((10000,1))
for x in range(0,100):
    for y in range(0,100):
        x_train[x+100*y,0] = x/100.  # I normalize the input of the function to [0,1)
        x_train[x+100*y,1] = y/100.
        y_train[x+100*y,0] = matrix[int(x/10),int(y/10)] + 0.0
#Here I show graphically what I would like to have
plt.matshow(matrix, interpolation='nearest', cmap=plt.cm.ocean, extent=(0,1,0,1))
#Here I built the model
model = Sequential()
model.add(Dense(20, input_dim=2, init='uniform'))
model.add(Activation('tanh'))
model.add(Dense(1, init='uniform'))
model.add(Activation('sigmoid'))
#Here I train it
sgd = SGD(lr=0.5)
model.compile(loss='mean_squared_error', optimizer=sgd)
model.fit(x_train, y_train,
          nb_epoch=100,
          batch_size=100,
          show_accuracy=True)
#Here (I'm not sure), I'm using the network over the given example
x = model.predict(x_train,batch_size=1)
#Here I show the approximated function
print(x)
print(x_train)
for i in range(0, 10000):
    evaluation[int(x_train[i,0]*100),int(x_train[i,1]*100)] = x[i]
plt.matshow(evaluation, interpolation='nearest', cmap=plt.cm.ocean, extent=(0,1,0,1))
plt.colorbar()
plt.show()
As you can see, the two functions are completely different, and I can't understand why.
I think that maybe model.predict doesn't work as I expect.
Your understanding is correct; it's just a question of hyperparameter tuning.
I just tried your code, and it looks like you're not giving your training enough time:
Look at the loss: with 100 epochs, it's stuck at around 0.23. But try using the 'adam' optimizer instead of SGD and increase the number of epochs to 10,000: the loss then decreases to 0.09, and your picture looks much better.
If it's still not precise enough for you, you may also want to try increasing the number of parameters: just add a few layers (this will also make overfitting much easier!).
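A minimal sketch of that suggestion, keeping the question's old-style Keras arguments:

model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train,
          nb_epoch=10000,  # far longer than the original 100 epochs
          batch_size=100)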
I have changed just your network structure and enlarged the training dataset. The loss decreases to 0.01.
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 16 15:26:52 2017
#author: Administrator
"""
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
import random as rnd
import math
from keras.optimizers import Adam,SGD
m = [
[1,1,1,1,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,1,1],
[1,0,0,0,1,1,0,1,0,0],
[1,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,1],
[0,0,1,0,1,1,0,0,0,1],
[1,1,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,1,1,1,1]] #A representation of the function that I would like to approximate
matrix = np.matrix(m)
evaluation = np.zeros((1000,1000))
x_train = np.zeros((1000000,2))
y_train = np.zeros((1000000,1))
for x in range(0,1000):
    for y in range(0,1000):
        x_train[x+1000*y,0] = x/1000.  # I normalize the input of the function to [0,1)
        x_train[x+1000*y,1] = y/1000.
        y_train[x+1000*y,0] = matrix[int(x/100),int(y/100)] + 0.0
#Here I show graphically what I would like to have
plt.matshow(matrix, interpolation='nearest', cmap=plt.cm.ocean, extent=(0,1,0,1))
#Here I built the model
model = Sequential()
model.add(Dense(50, input_dim=2, init='uniform'))  # init keyword: 'uniform' initializes the weights from a uniform distribution
model.add(Activation('tanh'))
model.add(Dense(20, init='uniform'))
model.add(Activation('tanh'))
model.add(Dense(1, init='uniform'))
model.add(Activation('sigmoid'))
#Here I train it
#sgd = SGD(lr=0.01)
adam = Adam(lr = 0.01)
model.compile(loss='mean_squared_error', optimizer=adam)
model.fit(x_train, y_train,
          nb_epoch=100,
          batch_size=100,
          show_accuracy=True)
#Here (I'm not sure), I'm using the network over the given example
x = model.predict(x_train,batch_size=1)
#Here I show the approximated function
print (x)
print (x_train)
for i in range(0, 1000000):
    evaluation[int(x_train[i,0]*1000),int(x_train[i,1]*1000)] = x[i]
plt.matshow(evaluation, interpolation='nearest', cmap=plt.cm.ocean, extent=(0,1,0,1))
plt.colorbar()
plt.show()