// TEST: multiple segments flushed, then "log resumed" (walk .ROOT range map).
// After each RESUME, all previously-written data must be visible through the
// global query interface, while the working segment must be empty (proving
// that log resume performed CHECKPOINT_DROP instead of replaying duplicates).
public void T04_SingleSegmentRootMetadataLogRecovery()
{
    // perform the previous test to establish the on-disk state we recover from
    T03_SegmentLayerGetRecordApplicationOrder();

    // ... and then perform a resume
    LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

    string[] keys = { "test-1", "test-2", "test-3" };
    string[] values = { "a-second", "b-second", "c-second" };

    // verify that it has the same data as before the RESUME
    _assertMissingInWorkingSegmentButPresentGlobally(db, keys, values);

    // now generate a BUNCH of new segments...
    string[] secondkeys = { "second-test-1", "second-test-2", "second-test-3" };
    string[] secondvalues = { "a-second", "b-second", "c-second" };

    // put each new record in its OWN segment by flushing after every commit
    for (int i = 0; i < secondkeys.Length; i++)
    {
        LayerWriteGroup txn = db.newWriteGroup();
        txn.setValueParsed(secondkeys[i], secondvalues[i]);
        txn.finish();
        db.flushWorkingSegment();
    }

    // BUGFIX: dump diagnostic state BEFORE disposing. The original code called
    // debugDump() on the already-disposed LayerManager, which accesses released
    // resources (use-after-dispose).
    db.debugDump();
    db.Dispose();

    // RESUME
    db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

    // first test records should still be visible
    _assertMissingInWorkingSegmentButPresentGlobally(db, keys, values);

    db.debugDump();

    // verify that the secondkeys/values are still in there
    _assertMissingInWorkingSegmentButPresentGlobally(db, secondkeys, secondvalues);

    db.Dispose();
}

// Helper: for each (key, value) pair, asserts that the record is MISSING when
// queried directly against the working segment (i.e. log resume performed
// CHECKPOINT_DROP and did not duplicate the value into the working segment),
// yet PRESENT with the expected value through LayerManager.getRecord().
private void _assertMissingInWorkingSegmentButPresentGlobally(
        LayerManager db, string[] keys, string[] values)
{
    for (int i = 0; i < keys.Length; i++)
    {
        RecordKey key = new RecordKey();
        key.appendKeyPart(keys[i]);

        // look directly in the working segment: the record must be MISSING.
        // This is testing the checkpoint as well. If log resume didn't
        // CHECKPOINT_DROP, the values would be duplicated in the working segment.
        {
            RecordUpdate update;
            GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
            Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING {0}", key);
        }

        // assure the global query interface finds the expected value
        {
            RecordData data;
            GetStatus status = db.getRecord(key, out data);
            Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see value for {0}", key);
            Assert.AreEqual(values[i], data.ReadDataAsString(),
                "LayerManager.getRecord() should see expected value for {0}", key);
        }
    }
}