//
// neuralnetwork.cpp
// neuralnetwork
//
// Created by tianshuai on 7/13/15.
//
#include "neuralnetwork.h"
#include <cmath>   /* std::sqrt */
#include <cstdlib> /* rand, srand */
#include <ctime>   /* time */
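/*
 * Note: neuralnetwork.h is not shown on this page. Judging from how it is
 * used below, it presumably declares something like the following (a sketch
 * of assumed declarations, not the actual header):
 *
 *   typedef std::vector<int>    Topology;  // node count per level
 *   typedef std::vector<double> Value;     // input/output vectors
 *   struct Link { double weight; double DerivWeight; };
 *   class Node {  // static float alpha, eta; std::vector<Link> OutWeights;
 *                 // int index; double OutValue, gradient; ...
 *   };
 *   typedef std::vector<Node> Level;
 *
 * TransFunc/TransFuncDer are the activation and its derivative. Since
 * TransFuncDer is applied to a node's *output* value, a tanh-style
 * activation is a likely choice: TransFunc(x) = tanh(x),
 * TransFuncDer(y) = 1 - y*y. rand0to1() presumably returns
 * rand() / double(RAND_MAX).
 */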
float Node::alpha = 0.40f; /* momentum: fraction of the previous weight change kept */
float Node::eta = 0.10f;   /* learning rate */
Node::Node(int OutCount, int idx)
{
    /* One outgoing link per node in the next level, randomly initialized */
    for(int i = 0; i < OutCount; ++i)
    {
        Link con;
        con.DerivWeight = 0;
        con.weight = rand0to1();
        OutWeights.push_back(con);
    }
    index = idx;
}
void Node::FeedFwd(const std::vector<Node>& prevLevel)
{
    /* Weighted sum of the previous level's outputs, then the activation */
    double v = 0.0;
    for(size_t n = 0; n < prevLevel.size(); ++n)
    {
        v += prevLevel[n].getOutValue() * prevLevel[n].OutWeights[index].weight;
    }
    setOut(TransFunc(v));
}
void Node::calcOutGradients(double GoalValue)
{
    /* Output-node gradient: error times the derivative of the activation */
    double delta = GoalValue - OutValue;
    gradient = delta * TransFuncDer(OutValue);
}
double Node::sumDerivWeights(const std::vector<Node>& nextLevel) const
{
    /* Sum of the next level's gradients, weighted by the links into them;
       size() - 1 skips the bias node, which receives no input */
    double DerivWeights = 0;
    for(size_t i = 0; i < nextLevel.size() - 1; ++i)
    {
        DerivWeights += OutWeights[i].weight * nextLevel[i].gradient;
    }
    return DerivWeights;
}
void Node::calcHiddenGradients(const std::vector<Node>& nextLevel)
{
    double DerivWeights = sumDerivWeights(nextLevel);
    gradient = DerivWeights * TransFuncDer(OutValue);
}
void Node::updateInWeights(std::vector<Node>& prevLevel)
{
    /* Gradient-descent step with momentum:
       delta_w = eta * out * gradient + alpha * previous delta_w */
    for(size_t i = 0; i < prevLevel.size(); ++i)
    {
        Node& prevNode = prevLevel[i];
        double oldDerivWeight = prevNode.OutWeights[index].DerivWeight;
        double newDerivWeight = eta * prevNode.OutValue * gradient + alpha * oldDerivWeight;
        prevNode.OutWeights[index].DerivWeight = newDerivWeight;
        prevNode.OutWeights[index].weight += newDerivWeight;
    }
}
NeuralNetwork::NeuralNetwork(const Topology& Topol)
{
    /* Seed before the nodes draw their random initial weights */
    srand(time(NULL));
    int levelCount = Topol.size();
    for(int levelNum = 0; levelNum < levelCount; ++levelNum)
    {
        levels.push_back(Level());
        /* The output level has no outgoing links */
        int OutCount = levelNum == levelCount - 1 ? 0 : Topol[levelNum + 1];
        Level& currentLevel = levels.back();
        /* <= adds one extra node per level: the bias node */
        for(int n = 0; n <= Topol[levelNum]; ++n)
        {
            currentLevel.push_back(Node(OutCount, n));
        }
        /* The bias node's output is fixed at 1.0 */
        currentLevel.back().setOut(1.0);
    }
}
void NeuralNetwork::FeedForward(const Value& InVals)
{
    /* Load the inputs into the first level */
    for(size_t i = 0; i < InVals.size(); ++i)
    {
        levels[0][i].setOut(InVals[i]);
    }
    /* Propagate level by level; size() - 1 skips each level's bias node */
    for(size_t levelNum = 1; levelNum < levels.size(); ++levelNum)
    {
        Level& level = levels[levelNum];
        const Level& lastLevel = levels[levelNum - 1];
        for(size_t n = 0; n < level.size() - 1; ++n)
        {
            level[n].FeedFwd(lastLevel);
        }
    }
}
void NeuralNetwork::BackPropagation(const Value& Goal)
{
    /* RMS error over the output level */
    Level& OutLevel = levels.back();
    error = 0.0;
    for(size_t i = 0; i < Goal.size(); ++i)
    {
        double delta = Goal[i] - OutLevel[i].getOutValue();
        error += delta * delta;
    }
    error = std::sqrt(error / Goal.size());
    /* Running average of the error, kept for display */
    DisplayError = (DisplayError * DisplaySmoothingFactor + error) / (DisplaySmoothingFactor + 1.0);
    /* Output gradients (skip the bias node) */
    for(size_t i = 0; i < OutLevel.size() - 1; ++i)
    {
        OutLevel[i].calcOutGradients(Goal[i]);
    }
    /* Hidden gradients, from the last hidden level back to the first */
    for(int i = (int)levels.size() - 2; i > 0; --i)
    {
        Level& level = levels[i];
        Level& nextLevel = levels[i + 1];
        for(size_t j = 0; j < level.size(); ++j)
        {
            level[j].calcHiddenGradients(nextLevel);
        }
    }
    /* Update weights, from the output level back to the first hidden level */
    for(int i = (int)levels.size() - 1; i > 0; --i)
    {
        Level& level = levels[i];
        Level& prevLevel = levels[i - 1];
        for(size_t j = 0; j < level.size() - 1; ++j)
        {
            level[j].updateInWeights(prevLevel);
        }
    }
}
void NeuralNetwork::getOutput(Value& results) const
{
    results.clear();
    /* Collect the output level's values, skipping the bias node */
    const Level& OutLevel = levels.back();
    for(size_t i = 0; i < OutLevel.size() - 1; ++i)
    {
        results.push_back(OutLevel[i].getOutValue());
    }
}
void NeuralNetwork::train(Value&& In, Value&& Goal)
{
    FeedForward(In);
    BackPropagation(Goal);
}
Value NeuralNetwork::run(Value&& In)
{
    FeedForward(In);
    Value r;
    getOutput(r);
    return r;
}
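For reference, here is a minimal sketch of how the class might be driven, assuming the typedefs noted above (Topology as std::vector<int>, Value as std::vector<double>) and a tanh-style activation. The XOR data, topology, and epoch count are illustrative choices, not part of the repository:

// train_xor.cpp -- hypothetical driver, not part of this repository
#include "neuralnetwork.h"
#include <cstdio>

int main()
{
    /* 2 inputs, one hidden level of 4 nodes, 1 output
       (the constructor adds a bias node to each level itself) */
    NeuralNetwork net(Topology{2, 4, 1});

    /* XOR truth table */
    const double in[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    const double out[4]   = {0, 1, 1, 0};

    /* Repeatedly present all four patterns */
    for(int epoch = 0; epoch < 2000; ++epoch)
    {
        for(int k = 0; k < 4; ++k)
        {
            net.train(Value{in[k][0], in[k][1]}, Value{out[k]});
        }
    }

    /* Inspect what the trained network produces */
    for(int k = 0; k < 4; ++k)
    {
        Value r = net.run(Value{in[k][0], in[k][1]});
        std::printf("%g xor %g -> %g\n", in[k][0], in[k][1], r[0]);
    }
    return 0;
}

Since train and run take Value&& arguments, the temporaries built with Value{...} bind directly; with a tanh activation the outputs fall in (-1, 1), so values near 0 and 1 would indicate the XOR mapping was learned.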