# -*- coding: utf-8 -*-
"""
Created on Fri Oct 19 16:52:59 2018
@author: Lucimara Bragagnolo
"""
def ann_module(input_data,output_data,reckon,num_hidden,coef,nr_epochs,val_samples,test_samples,directory,columnst,col,row,flag_train):
import numpy as np
import math
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import grass.script as gscript
os.mkdir(directory) #Create directory
#Scaling the data
max_in = np.zeros((1,input_data.shape[1]))
min_in = np.zeros((1,input_data.shape[1]))
max_out = 1
min_out = 0
alldata = np.concatenate((input_data,reckon))
for i in range(0,input_data.shape[1]):
max_in[0,i] = np.nanmax(alldata[:,i])
min_in[0,i] = np.nanmin(alldata[:,i])
os.chdir(directory)
np.savetxt('max_in.txt', (max_in), delimiter=',')
np.savetxt('min_in.txt', (min_in), delimiter=',')
os.chdir('../')
for j in range(0,input_data.shape[1]):
if max_in[0,j] != 0:
input_data[:,j] = (input_data[:,j] - min_in[0,j])/(max_in[0,j]-min_in[0,j])
for j in range(0,reckon.shape[1]):
if max_in[0,j] != 0:
reckon[:,j] = (reckon[:,j] - min_in[0,j])/(max_in[0,j]-min_in[0,j])
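    # A worked example of the min-max rule above: a column [2.0, 4.0, 6.0]
    # becomes (col - 2.0)/(6.0 - 2.0) = [0.0, 0.5, 1.0], so every input (and
    # the prediction grid) shares the same [0, 1] range as the sigmoid output.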
#Resample data
m = 1
n = input_data.shape[0]
    n_test = int(math.ceil(test_samples*n)) #TODO: make these split percentages generic
n_val = int(math.ceil(val_samples*n))
n_train = int(n - n_test - n_val)
reg_set = np.arange(0,n)
T = np.zeros((n_train,1))
V = np.zeros((n_val,1))
flag_test = 0
B_ind = ((n-m)*np.random.rand(2*n_test,1)+m)
B_ind = [math.floor(x) for x in B_ind]
while flag_test == 0:
if len(np.unique(B_ind)) == n_test:
flag_test = 1
else:
del B_ind[0]
B_ind = np.unique(B_ind)
B_ind = B_ind.astype(int)
B = reg_set[B_ind]-1 #Test
reg_rest = np.setdiff1d(reg_set,B) #Dataset for TRAINING and VALIDATIONS
flag_val = 0
V_buff = ((n-n_test-m)*np.random.rand(2*n_val,1)+m)
V_buff = [math.floor(x) for x in V_buff]
while flag_val == 0:
if len(np.unique(V_buff)) == n_val:
flag_val = 1
elif len(np.unique(V_buff)) > n_val:
del V_buff[0]
else:
V_buff = ((n-n_test-m)*np.random.rand(2*n_val,1)+m)
V_buff = [math.floor(x) for x in V_buff]
V_buff = [int(x) for x in V_buff] #Transform to integer
V[:,0] = (reg_rest[np.unique(V_buff)])
T[:,0] = np.setdiff1d(reg_rest,V[:,0])
T = T.astype(np.int64) #Training
V = V.astype(np.int64) #Validations
B = B.astype(np.int64) #Test
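    # The split works by rejection sampling: 2*n_test (then 2*n_val) random
    # indices are drawn and trimmed until exactly n_test (n_val) unique ones
    # remain; the records left after removing B and V form the training set.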
p = input_data.shape[1]
input_test = np.zeros((B.shape[0],p))
output_test = np.zeros((B.shape[0],1))
input_train = np.zeros((T.shape[0],p))
output_train = np.zeros((T.shape[0],1))
    input_val = np.zeros((V.shape[0],p)) #sized by the validation set, not the test set
    output_val = np.zeros((V.shape[0],1))
n1 = T.shape[0]
n2 = B.shape[0]
n3 = V.shape[0]
    for i in range(0,n1): #range(0,n1-1) would skip the last record of each split
        input_train[i,:] = input_data[T[i],:]
        output_train[i,:] = output_data[T[i]]
    for i in range(0,n2):
        input_test[i,:] = input_data[B[i],:]
        output_test[i,:] = output_data[B[i]]
    for i in range(0,n3):
        input_val[i,:] = input_data[V[i],:]
        output_val[i,:] = output_data[V[i]]
output_data = output_data.reshape((output_data.size,1))
#Validations
def ann_validate(input_data,output_data,weights,peights,biasH,biasO,max_in,max_out,min_in,min_out):
#Collect dimensions
num_input = input_data.shape[1] #Number of parameters
reg_size = input_data.shape[0] #Number of records
num_output = output_data.shape[1]
        num_hidden = peights.shape[0] - 1 #Rows of the output-layer weight matrix, minus the bias row
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
erro = np.zeros((1,reg_size))
def activation(x): #Sigmoid as activation function
fx = 1/(1+math.exp(-x))
return fx
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
erro[0,k] = output[k,:] - output_data[k,:]
return output,erro
#Test
def ann_test(input_data,output_data,weights,peights,biasH,biasO):
#Collect dimensions
num_input = input_data.shape[1]
reg_size = input_data.shape[0]
num_output = output_data.shape[1]
num_hidden = peights.shape[0] - 1
def activation(x): #activation function
fx = 1/(1+math.exp(-x))
return fx
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
erro_dec = np.zeros((1,reg_size))
erro_round = np.zeros((1,reg_size))
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
erro_dec[0,k] = output[k,:] - output_data[k,:]
erro_round[0,k] = np.around(erro_dec[0,k])
return output,erro_dec,erro_round
#Reckon
def ann_reckon(input_data,weights,peights,biasH,biasO):
#Collect dimensions
num_input = input_data.shape[1]
reg_size = input_data.shape[0]
num_output = 1
num_hidden = peights.shape[0] - 1
def activation(x):
fx = 1/(1+math.exp(-x))
return fx
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
output_reckon = output
return output_reckon
#Training
num_input = input_train.shape[1] #Number of parameters
reg_size = input_train.shape[0] #Number of examples
num_output = output_train.shape[1] #Number of outputs (in this case, just 1 - susceptibility)
weights = np.random.rand(num_input+1,num_hidden)
peights = np.random.rand(num_hidden+1,num_output)
biasH = np.ones((num_hidden,1))
biasO = np.ones((num_output,1))
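    # Weight layout: weights[j,i] connects input j to hidden neuron i, and the
    # extra last row (index num_input) holds the bias weights that multiply
    # biasH in the forward pass; peights mirrors this for the output layer.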
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
erro = np.zeros((1,nr_epochs))
erro_train = np.zeros((1,nr_epochs))
erro_validate = np.zeros((1,nr_epochs))
def activation(x):
fx = 1/(1+math.exp(-x))
return fx
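    # The sigmoid derivative is f'(x) = f(x)*(1 - f(x)), which is why the
    # backpropagation updates below carry the factors output*(1-output)
    # and H*(1-H).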
for epoch in range(0,nr_epochs):
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_train[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
            #Backpropagation
            #Update the weights in the output layer (row num_hidden is the bias weight)
            for i in range(0,num_hidden+1):
                for j in range(0,num_output):
                    if i < num_hidden:
                        peights[i,j] = peights[i,j] + coef*(output_train[k,j] - output[k,j])*output[k,j]*(1-output[k,j])*H[i,0]
                    elif i == num_hidden:
                        peights[i,j] = peights[i,j] + coef*(output_train[k,j] - output[k,j])*output[k,j]*(1-output[k,j])*biasO[j,0]
            #Update the weights in the hidden layer (row num_input is the bias weight)
            buff = 0
            for j in range(0,num_hidden):
                for i in range(0,num_input+1):
                    for k1 in range(0,num_output):
                        buff = buff + (output_train[k,k1] - output[k,k1])*output[k,k1]*(1-output[k,k1])*peights[j,k1]
                    if i < num_input:
                        weights[i,j] = weights[i,j] + coef*buff*H[j,0]*(1-H[j,0])*input_train[k,i]
                    elif i == num_input:
                        weights[i,j] = weights[i,j] + coef*buff*H[j,0]*(1-H[j,0])*biasH[j,0]
                    buff = 0 #Zeroes the buffer variable
erro_train[0,epoch] = np.linalg.norm((output - output_train)/reg_size)
        val_out,erro_val = ann_validate(input_val,output_val,weights,peights,biasH,biasO,max_in,max_out,min_in,min_out) #do not overwrite the validation targets
        erro_validate[0,epoch] = np.linalg.norm(erro_val)
        #Collect the weights at the minimum validation error (copies, so later in-place updates cannot overwrite them)
        if epoch == 0:
            W = weights.copy()
            P = peights.copy()
            epoch_min = epoch
            erro_min = 1000
        elif erro_min > np.linalg.norm(erro_validate[0,epoch]):
            W = weights.copy()
            P = peights.copy()
            epoch_min = epoch
            erro_min = np.linalg.norm(erro_validate[0,epoch])
[output,erro_test,erro_round] = ann_test(input_test,output_test,weights,peights,biasH,biasO)
x = np.arange(1,nr_epochs+1).reshape((nr_epochs,1))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=None)
plt.subplot(1,2,1)
plt.plot(x,erro_train.T,x,erro_validate.T,'g-', epoch_min, erro_min,'g*')
plt.xlabel('Epochs')
plt.ylabel('Root mean square output error')
plt.legend(('Training','Validation','Early stop'))
plt.subplot(1,2,2)
plt.bar(np.arange(1,(erro_test.shape[1])+1),erro_test.reshape(erro_test.shape[1]))
plt.xlabel('Instances')
plt.ylabel('Error (ANN output - real output)')
os.chdir(directory)
plt.savefig('ANN_train_val', dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches='tight', pad_inches=0.2,
frameon=None)
os.chdir('../')
rounded = np.asarray(erro_round).reshape((B.shape[0],1))
error = sum(abs(erro_round.T))
gscript.message(_("Test set error: "))
gscript.message(error)
#Assign the minimum weights to their original names
weights = W
peights = P
os.chdir(directory)
np.savetxt('weights.txt', (weights), delimiter=',')
np.savetxt('peights.txt', (peights), delimiter=',')
np.savetxt('biasH.txt', (biasH), delimiter=',')
np.savetxt('biasO.txt', (biasO), delimiter=',')
os.mkdir('Inputs and outputs')
os.chdir('Inputs and outputs')
np.savetxt('Input_test.txt', (input_test), delimiter=',')
np.savetxt('Output_test.txt', (output_test), delimiter=',')
np.savetxt('Input_val.txt', (input_val), delimiter=',')
np.savetxt('Output_val.txt', (output_val), delimiter=',')
np.savetxt('Input_train.txt', (input_train), delimiter=',')
np.savetxt('Output_train.txt', (output_train), delimiter=',')
np.savetxt('Error_train.txt', (erro_train), delimiter=',')
np.savetxt('Error_val.txt', (erro_validate), delimiter=',')
np.savetxt('Epoch_and_error_min.txt', (epoch_min,erro_min), delimiter=',')
np.savetxt('Test_set_TOTAL_error.txt', (error), delimiter=',')
np.savetxt('Error_test.txt', (erro_test), delimiter=',')
param = open('ANN_Parameters.txt','w')
param.write('Hidden neurons: '+str(num_hidden))
param.write('\n Learning rate: '+str(coef))
param.write('\n Epochs: '+str(nr_epochs))
param.close()
os.chdir('../')
os.chdir('../')
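    # At this point <directory> holds the scaling bounds (max_in/min_in), the
    # trained weights and biases, and an 'Inputs and outputs' subfolder with
    # every data split and error trace, so the network can be reloaded later.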
#Sensitivity analysis
def sensitivity(input_data,output_size,weights,peights,biasH,biasO):
input_size = input_data.shape[1] #Number of columns (parameters)
npts = 200 #Number of samples in the sensitivity evaluation set
sens_set = np.random.random_sample((npts,1)) #Return random floats in the half-open interval [0.0, 1.0)
fixed_par_value = np.empty([input_size,1])
        ones_sens = np.ones((npts,1))
#Calculation of the normalized mean value of each entry
for k in range(0,input_size):
fixed_par_value[k] = (np.mean(input_data[:,k]))
#Pre-allocation
input_sens = [[([1] * npts) for j in range(input_size)] for i in range(input_size)]
output_sens = [[([0] * npts) for j in range(input_size)] for i in range(input_size)]
input_sens = np.asarray(input_sens,dtype=float)
for k1 in range(0,input_size):
for k2 in range(0,input_size):
if k1 == k2:
input_sens[k1,k2,:] = sens_set.reshape(sens_set.shape[0]).T
else:
input_sens[k1,k2,:] = (ones_sens*fixed_par_value[k2]).reshape(ones_sens.shape[0])
for k1 in range(0,input_size):
input_sens2 = np.asarray(input_sens[k1]).T
output = ann_reckon(input_sens2,weights,peights,biasH,biasO)
output_sens[k1] = output
return sens_set,fixed_par_value,input_sens,output_sens
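    # One-at-a-time sensitivity: each input in turn is swept across [0, 1)
    # while the others stay fixed at their normalized means, and the network
    # response to the sweep is plotted below.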
[sens_set,fixed_par_value,input_sens,output_sens] = sensitivity(input_data,output_data.shape[1],weights,peights,biasH,biasO)
for k in range(0,input_data.shape[1]):
plt.figure()
plt.plot(sens_set,output_sens[k],'.')
plt.title('Sensitivity analysis. Parameter: '+columnst[k])
plt.ylabel('Output response')
plt.xlabel('Parameter: '+columnst[k])
os.chdir(directory)
plt.savefig('SensitivityAnalysisVar_'+columnst[k], dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches='tight', pad_inches=0.2,
frameon=None)
os.chdir('../')
#Reckon
if not flag_train:
output_reckon = ann_reckon(reckon,weights,peights,biasH,biasO)
a = np.reshape(output_reckon,(col,row),order='F')
a = np.transpose(a)
else:
a = 0
return a
#Reckon
def ann_reckon(input_data,weights,peights,biasH,biasO):
import numpy as np
import math
#Collect dimensions
num_input = input_data.shape[1]
reg_size = input_data.shape[0]
num_output = 1
num_hidden = peights.shape[0] - 1
def activation(x):
fx = 1/(1+math.exp(-x))
return fx
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
output_reckon = output
return output_reckon
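
# For reference, a vectorized sketch that is equivalent to the loop-based
# forward pass above (not called anywhere in this module; it assumes the same
# weight layout, with the bias weights stored in the last row of each matrix):
def ann_reckon_vectorized(input_data, weights, peights, biasH, biasO):
    import numpy as np
    def sigmoid(z):
        return 1.0/(1.0 + np.exp(-z))
    #hidden layer: data columns times weight rows, plus the bias row
    S = input_data.dot(weights[:-1, :]) + biasH[:, 0]*weights[-1, :]
    H = sigmoid(S)
    #output layer
    R = H.dot(peights[:-1, :]) + biasO[:, 0]*peights[-1, :]
    return sigmoid(R)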
def ANN_batch(input_data,output_data,reckon,hidden,trials,coef,nr_epochs,val_samples,test_samples,directory,columnst,col,row,flag_train):
    #hidden is a list of candidate hidden-layer sizes
    #trials is the number of random initial conditions per size
    #Train a set of neural networks and select the best one:
    #different numbers of hidden neurons,
    #different initial conditions
import numpy as np
import math
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import grass.script as gscript
    os.mkdir(directory) #Create directory -> TODO: review this
#Scaling the data
max_in = np.zeros((1,input_data.shape[1]))
min_in = np.zeros((1,input_data.shape[1]))
max_out = 1
min_out = 0
alldata = np.concatenate((input_data,reckon))
for i in range(0,input_data.shape[1]):
max_in[0,i] = np.nanmax(alldata[:,i])
min_in[0,i] = np.nanmin(alldata[:,i])
os.chdir(directory)
np.savetxt('max_in.txt', (max_in), delimiter=',')
np.savetxt('min_in.txt', (min_in), delimiter=',')
os.chdir('../')
for j in range(0,input_data.shape[1]):
if max_in[0,j] != 0:
input_data[:,j] = (input_data[:,j] - min_in[0,j])/(max_in[0,j]-min_in[0,j])
for j in range(0,reckon.shape[1]):
if max_in[0,j] != 0:
reckon[:,j] = (reckon[:,j] - min_in[0,j])/(max_in[0,j]-min_in[0,j])
#Resample data
m = 1
n = input_data.shape[0]
    n_test = int(math.ceil(test_samples*n)) #TODO: make these split percentages generic
n_val = int(math.ceil(val_samples*n))
n_train = int(n - n_test - n_val)
reg_set = np.arange(0,n)
T = np.zeros((n_train,1))
V = np.zeros((n_val,1))
flag_test = 0
B_ind = ((n-m)*np.random.rand(2*n_test,1)+m)
B_ind = [math.floor(x) for x in B_ind]
while flag_test == 0:
if len(np.unique(B_ind)) == n_test:
flag_test = 1
else:
del B_ind[0]
B_ind = np.unique(B_ind)
B_ind = B_ind.astype(int)
B = reg_set[B_ind]-1 #Test
reg_rest = np.setdiff1d(reg_set,B) #Dataset for TRAINING and VALIDATIONS
flag_val = 0
V_buff = ((n-n_test-m)*np.random.rand(2*n_val,1)+m)
V_buff = [math.floor(x) for x in V_buff]
while flag_val == 0:
if len(np.unique(V_buff)) == n_val:
flag_val = 1
elif len(np.unique(V_buff)) > n_val:
del V_buff[0]
else:
V_buff = ((n-n_test-m)*np.random.rand(2*n_val,1)+m)
V_buff = [math.floor(x) for x in V_buff]
V_buff = [int(x) for x in V_buff] #Transform to integer
V[:,0] = (reg_rest[np.unique(V_buff)])
T[:,0] = np.setdiff1d(reg_rest,V[:,0])
T = T.astype(np.int64) #Training
V = V.astype(np.int64) #Validations
B = B.astype(np.int64) #Test
p = input_data.shape[1]
input_test = np.zeros((B.shape[0],p))
output_test = np.zeros((B.shape[0],1))
input_train = np.zeros((T.shape[0],p))
output_train = np.zeros((T.shape[0],1))
    input_val = np.zeros((V.shape[0],p)) #sized by the validation set, not the test set
    output_val = np.zeros((V.shape[0],1))
n1 = T.shape[0]
n2 = B.shape[0]
n3 = V.shape[0]
    for i in range(0,n1): #range(0,n1-1) would skip the last record of each split
        input_train[i,:] = input_data[T[i],:]
        output_train[i,:] = output_data[T[i]]
    for i in range(0,n2):
        input_test[i,:] = input_data[B[i],:]
        output_test[i,:] = output_data[B[i]]
    for i in range(0,n3):
        input_val[i,:] = input_data[V[i],:]
        output_val[i,:] = output_data[V[i]]
output_data = output_data.reshape((output_data.size,1))
def ann_train(input_train,output_train,input_val,output_val,input_test,output_test,reckon,hidden,trials,coef,nr_epochs,val_samples,test_samples,directory,columnst,col,row,flag_train):
#Training
num_input = input_train.shape[1] #Number of parameters
reg_size = input_train.shape[0] #Number of examples
num_output = output_train.shape[1] #Number of outputs (in this case, just 1 - susceptibility)
weights = np.random.rand(num_input+1,num_hidden)
peights = np.random.rand(num_hidden+1,num_output)
biasH = np.ones((num_hidden,1))
biasO = np.ones((num_output,1))
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
erro = np.zeros((1,nr_epochs))
erro_train = np.zeros((1,nr_epochs))
erro_validate = np.zeros((1,nr_epochs))
def activation(x):
fx = 1/(1+math.exp(-x))
return fx
for epoch in range(0,nr_epochs):
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_train[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
                #Backpropagation
                #Update the weights in the output layer (row num_hidden is the bias weight)
                for i in range(0,num_hidden+1):
                    for j in range(0,num_output):
                        if i < num_hidden:
                            peights[i,j] = peights[i,j] + coef*(output_train[k,j] - output[k,j])*output[k,j]*(1-output[k,j])*H[i,0]
                        elif i == num_hidden:
                            peights[i,j] = peights[i,j] + coef*(output_train[k,j] - output[k,j])*output[k,j]*(1-output[k,j])*biasO[j,0]
                #Update the weights in the hidden layer (row num_input is the bias weight)
                buff = 0
                for j in range(0,num_hidden):
                    for i in range(0,num_input+1):
                        for k1 in range(0,num_output):
                            buff = buff + (output_train[k,k1] - output[k,k1])*output[k,k1]*(1-output[k,k1])*peights[j,k1]
                        if i < num_input:
                            weights[i,j] = weights[i,j] + coef*buff*H[j,0]*(1-H[j,0])*input_train[k,i]
                        elif i == num_input:
                            weights[i,j] = weights[i,j] + coef*buff*H[j,0]*(1-H[j,0])*biasH[j,0]
                        buff = 0 #Zeroes the buffer variable
erro_train[0,epoch] = np.linalg.norm((output - output_train)/reg_size)
            val_out,erro_val = ann_validate(input_val,output_val,weights,peights,biasH,biasO,max_in,max_out,min_in,min_out) #do not overwrite the validation targets
            erro_validate[0,epoch] = np.linalg.norm(erro_val)
            #Collect the weights at the minimum validation error (copies, so later in-place updates cannot overwrite them)
            if epoch == 0:
                W = weights.copy()
                P = peights.copy()
                epoch_min = epoch
                erro_min = 1000
            elif erro_min > np.linalg.norm(erro_validate[0,epoch]):
                W = weights.copy()
                P = peights.copy()
                epoch_min = epoch
                erro_min = np.linalg.norm(erro_validate[0,epoch])
[output,erro_test,erro_round] = ann_test(input_test,output_test,weights,peights,biasH,biasO)
error = np.linalg.norm(erro_test)
rounded = np.asarray(erro_round).reshape((B.shape[0],1))
error_total = sum(abs(erro_round.T))
#Assign the minimum weights to their original names
weights = W
peights = P
return output,weights,peights,biasH,biasO,erro_train,erro_validate,epoch_min,erro_min,error,erro_test,error_total
#Validations
def ann_validate(input_data,output_data,weights,peights,biasH,biasO,max_in,max_out,min_in,min_out):
#Collect dimensions
num_input = input_data.shape[1] #Number of parameters
reg_size = input_data.shape[0] #Number of records
num_output = output_data.shape[1]
        num_hidden = peights.shape[0] - 1 #Rows of the output-layer weight matrix, minus the bias row
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
erro = np.zeros((1,reg_size))
def activation(x): #Sigmoid as activation function
fx = 1/(1+math.exp(-x))
return fx
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
erro[0,k] = output[k,:] - output_data[k,:]
return output,erro
#Test
def ann_test(input_data,output_data,weights,peights,biasH,biasO):
#Collect dimensions
num_input = input_data.shape[1]
reg_size = input_data.shape[0]
num_output = output_data.shape[1]
num_hidden = peights.shape[0] - 1
def activation(x): #activation function
fx = 1/(1+math.exp(-x))
return fx
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
erro_dec = np.zeros((1,reg_size))
erro_round = np.zeros((1,reg_size))
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
erro_dec[0,k] = output[k,:] - output_data[k,:]
erro_round[0,k] = np.around(erro_dec[0,k])
return output,erro_dec,erro_round
#Reckon
def ann_reckon(input_data,weights,peights,biasH,biasO):
#Collect dimensions
num_input = input_data.shape[1]
reg_size = input_data.shape[0]
num_output = 1
num_hidden = peights.shape[0] - 1
def activation(x):
fx = 1/(1+math.exp(-x))
return fx
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
output_reckon = output
return output_reckon
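    # Model selection: every candidate hidden-layer size is trained from
    # several random initial weight draws, and the network with the smallest
    # test-set error norm is kept.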
    nets_tested = len(hidden)
    erro_buff = 9999999
    for k1 in range(0,nets_tested): #hidden-layer sizes tested
        num_hidden = hidden[k1] #size of this candidate network; read by ann_train from the enclosing scope
        for k2 in range(0,trials): #initial conditions
            [output,Weights,Peights,BiasH,BiasO,erro_train,erro_validate,epoch_min,erro_min,error,erro_test,error_total] = ann_train(input_train,output_train,input_val,output_val,input_test,output_test,reckon,hidden,trials,coef,nr_epochs,val_samples,test_samples,directory,columnst,col,row,flag_train)
gscript.message(_("Hidden neuron: "))
neuron_tested = hidden[k1]
gscript.message(neuron_tested)
gscript.message(_("Initial condition: "))
gscript.message(k2+1)
gscript.message(_("---------------------------------"))
if error < erro_buff:
erro_buff = error
#then save the data in variables
weights = Weights
peights = Peights
biasH = BiasH
biasO = BiasO
Erro_train = erro_train
Erro_val = erro_validate
Early_stop = np.array([epoch_min,erro_min])
Erro_test = erro_test
Erro_test_norm = erro_buff
neurons = hidden[k1]
Epoch_min = epoch_min
Erro_min = erro_min
Total_erro = error_total
gscript.message(_("Test set error from the best ANN: "))
gscript.message(Total_erro)
gscript.message(_("Best ANN hidden neurons: "))
gscript.message(neurons)
x = np.arange(1,nr_epochs+1).reshape((nr_epochs,1))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=None)
plt.subplot(1,2,1)
plt.plot(x,Erro_train.T,x,Erro_val.T,'g-', Epoch_min, Erro_min,'g*')
plt.xlabel('Epochs')
plt.ylabel('Root mean square output error')
plt.legend(('Training','Validation','Early stop'))
plt.subplot(1,2,2)
plt.bar(np.arange(1,(Erro_test.shape[1])+1),Erro_test.reshape(Erro_test.shape[1]))
plt.xlabel('Instances')
plt.ylabel('Error (ANN output - real output)')
os.chdir(directory)
plt.savefig('ANN_train_val', dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches='tight', pad_inches=0.2,
frameon=None)
os.chdir('../')
#Sensitivity analysis
def sensitivity(input_data,output_size,weights,peights,biasH,biasO):
input_size = input_data.shape[1] #Number of columns (parameters)
npts = 200 #Number of samples in the sensitivity evaluation set
sens_set = np.random.random_sample((npts,1)) #Return random floats in the half-open interval [0.0, 1.0)
fixed_par_value = np.empty([input_size,1])
        ones_sens = np.ones((npts,1))
#Calculation of the normalized mean value of each entry
for k in range(0,input_size):
fixed_par_value[k] = (np.mean(input_data[:,k]))
#Pre-allocation
input_sens = [[([1] * npts) for j in range(input_size)] for i in range(input_size)]
output_sens = [[([0] * npts) for j in range(input_size)] for i in range(input_size)]
input_sens = np.asarray(input_sens,dtype=float)
for k1 in range(0,input_size):
for k2 in range(0,input_size):
if k1 == k2:
input_sens[k1,k2,:] = sens_set.reshape(sens_set.shape[0]).T
else:
input_sens[k1,k2,:] = (ones_sens*fixed_par_value[k2]).reshape(ones_sens.shape[0])
for k1 in range(0,input_size):
input_sens2 = np.asarray(input_sens[k1]).T
output = ann_reckon(input_sens2,weights,peights,biasH,biasO)
output_sens[k1] = output
return sens_set,fixed_par_value,input_sens,output_sens
[sens_set,fixed_par_value,input_sens,output_sens] = sensitivity(input_data,output_data.shape[1],weights,peights,biasH,biasO)
for k in range(0,input_data.shape[1]):
plt.figure()
plt.plot(sens_set,output_sens[k],'.')
plt.title('Sensitivity analysis. Parameter: '+columnst[k])
plt.ylabel('Output response')
plt.xlabel('Parameter: '+columnst[k])
os.chdir(directory)
plt.savefig('SensitivityAnalysisVar_'+columnst[k], dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches='tight', pad_inches=0.2,
frameon=None)
os.chdir('../')
os.chdir(directory)
np.savetxt('weights.txt', (weights), delimiter=',')
np.savetxt('peights.txt', (peights), delimiter=',')
np.savetxt('biasH.txt', (biasH), delimiter=',')
np.savetxt('biasO.txt', (biasO), delimiter=',')
os.mkdir('Inputs and outputs')
os.chdir('Inputs and outputs')
np.savetxt('Input_test.txt', (input_test), delimiter=',')
np.savetxt('Output_test.txt', (output_test), delimiter=',')
np.savetxt('Input_val.txt', (input_val), delimiter=',')
np.savetxt('Output_val.txt', (output_val), delimiter=',')
np.savetxt('Input_train.txt', (input_train), delimiter=',')
np.savetxt('Output_train.txt', (output_train), delimiter=',')
np.savetxt('Error_train.txt', (Erro_train), delimiter=',')
np.savetxt('Error_val.txt', (Erro_val), delimiter=',')
np.savetxt('Epoch_and_error_min.txt', (Early_stop), delimiter=',')
np.savetxt('Test_set_TOTAL_error.txt', (Total_erro), delimiter=',')
np.savetxt('Error_test.txt', (Erro_test), delimiter=',')
param = open('ANN_Parameters.txt','w')
param.write('Hidden neurons of best ANN: '+str(neurons))
param.write('\n Learning rate: '+str(coef))
param.write('\n Epochs: '+str(nr_epochs))
param.write('\n Hidden neurons tested: '+str(hidden))
param.write('\n Number of initial conditions tested: '+str(trials))
param.close()
os.chdir('../')
os.chdir('../')
#Reckon
if not flag_train:
output_reckon = ann_reckon(reckon,weights,peights,biasH,biasO)
a = np.reshape(output_reckon,(col,row),order='F')
a = np.transpose(a)
else:
a = 0
return a
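
# A minimal usage sketch with hypothetical data (in r.landslide these arrays
# come from GRASS rasters; the names and values below are illustrative only).
# Shapes: input_data (n, p), output_data (n,), reckon (col*row, p).
#
#   import numpy as np
#   X = np.random.rand(100, 5)              #p = 5 conditioning factors
#   y = np.random.randint(0, 2, 100)        #1 = landslide, 0 = stable
#   grid = np.random.rand(200, 5)           #cells to classify (col*row = 200)
#   factors = ['f1', 'f2', 'f3', 'f4', 'f5']
#   a = ann_module(X, y, grid, num_hidden=8, coef=0.1, nr_epochs=50,
#                  val_samples=0.15, test_samples=0.15, directory='ann_out',
#                  columnst=factors, col=20, row=10, flag_train=False)
#   b = ANN_batch(X, y, grid, hidden=[4, 8, 12], trials=3, coef=0.1,
#                 nr_epochs=50, val_samples=0.15, test_samples=0.15,
#                 directory='ann_out2', columnst=factors, col=20, row=10,
#                 flag_train=False)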