public void PredictTest2()
{
    // Arrange: a single training sequence that strictly alternates between
    // the values 1 and 2, suggesting a two-state model (one state emitting
    // values near 1, the other emitting values near 2).
    double[][] trainingSequences =
    {
        new double[] { 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2 }
    };

    // A univariate Normal distribution serves as the initial emission density
    // for every state of the model.
    var emissionDensity = new NormalDistribution();

    // Two-state ergodic (fully-connected) hidden Markov model whose emissions
    // follow the Normal density declared above.
    var hmm = new HiddenMarkovModel<NormalDistribution>(new Ergodic(2), emissionDensity);

    // Baum-Welch learning, iterating until the average log-likelihood changes
    // by less than 0.0001 (no hard iteration cap). Because every observation
    // in a state is identical, each state's variance would collapse to zero,
    // so a tiny regularization constant keeps the densities well-defined.
    var learner = new BaumWelchLearning<NormalDistribution>(hmm)
    {
        Tolerance = 0.0001,
        Iterations = 0,
        FittingOptions = new NormalOptions() { Regularization = double.Epsilon }
    };

    // Act: train the model on the alternating sequence.
    double likelihood = learner.Run(trainingSequences);

    // After seeing ...1, the model should predict 2 next; after ...2, it
    // should predict 1 next.
    double nextAfterOne = hmm.Predict(new double[] { 1, 2, 1 });
    double nextAfterTwo = hmm.Predict(new double[] { 1, 2, 1, 2 });

    // Assert: plain predictions follow the alternation and are well-defined.
    Assert.AreEqual(2, nextAfterOne, 1e-10);
    Assert.AreEqual(1, nextAfterTwo, 1e-10);
    Assert.IsFalse(Double.IsNaN(nextAfterOne));
    Assert.IsFalse(Double.IsNaN(nextAfterTwo));

    // The overload returning the prediction probability and the mixture of
    // per-state densities must agree with the plain predictions.
    double probAfterOne, probAfterTwo;
    Mixture<NormalDistribution> mixAfterOne, mixAfterTwo;
    double predictedAfterOne = hmm.Predict(new double[] { 1, 2, 1 }, out probAfterOne, out mixAfterOne);
    double predictedAfterTwo = hmm.Predict(new double[] { 1, 2, 1, 2 }, out probAfterTwo, out mixAfterTwo);

    Assert.AreEqual(2, predictedAfterOne, 1e-10);
    Assert.AreEqual(1, predictedAfterTwo, 1e-10);
    Assert.IsFalse(Double.IsNaN(predictedAfterOne));
    Assert.IsFalse(Double.IsNaN(predictedAfterTwo));

    // The mixture weights should put all mass on the state matching the
    // predicted value: state 1 after "...1", state 0 after "...2".
    Assert.AreEqual(0, mixAfterOne.Coefficients[0]);
    Assert.AreEqual(1, mixAfterOne.Coefficients[1]);
    Assert.AreEqual(1, mixAfterTwo.Coefficients[0]);
    Assert.AreEqual(0, mixAfterTwo.Coefficients[1]);
}