/**
 * Verifies a single forward pass through a two-layer network against the
 * hand-computed values of example 11.14 of "Neural Network Design" by
 * Hagan, Demuth and Beale. Layer 1 is a 2x1 log-sigmoid layer; layer 2 is
 * a 1x2 pure-linear layer fed from layer 1's activations.
 */
public void testFeedForward()
{
	// example 11.14 of Neural Network Design by Hagan, Demuth and Beale
	// lots of tedious tests necessary to ensure nn is fundamentally correct
	final double delta = 0.001; // tolerance matching the book's 3-decimal figures

	// layer 1: weights (2x1), bias (2), log-sigmoid activation
	Matrix weightMatrix1 = new Matrix(2, 1);
	weightMatrix1.set(0, 0, -0.27);
	weightMatrix1.set(1, 0, -0.41);
	Vector biasVector1 = new Vector(2);
	biasVector1.setValue(0, -0.48);
	biasVector1.setValue(1, -0.13);
	Layer layer1 = new Layer(weightMatrix1, biasVector1,
			new LogSigActivationFunction());

	// single scalar input p = 1
	Vector inputVector1 = new Vector(1);
	inputVector1.setValue(0, 1);

	// expected layer-1 activations from the worked example
	Vector expected = new Vector(2);
	expected.setValue(0, 0.321);
	expected.setValue(1, 0.368);
	Vector result1 = layer1.feedForward(inputVector1);
	Assert.assertEquals(expected.getValue(0), result1.getValue(0), delta);
	Assert.assertEquals(expected.getValue(1), result1.getValue(1), delta);

	// layer 2: weights (1x2), bias (1), pure-linear activation
	Matrix weightMatrix2 = new Matrix(1, 2);
	weightMatrix2.set(0, 0, 0.09);
	weightMatrix2.set(0, 1, -0.17);
	Vector biasVector2 = new Vector(1);
	biasVector2.setValue(0, 0.48);
	Layer layer2 = new Layer(weightMatrix2, biasVector2,
			new PureLinearActivationFunction());

	// layer 2 consumes layer 1's cached activations; final output a2 = 0.446
	Vector inputVector2 = layer1.getLastActivationValues();
	Vector result2 = layer2.feedForward(inputVector2);
	Assert.assertEquals(0.446, result2.getValue(0), delta);
}