/// <summary>
/// Merges the FieldInfos of every reader into a single set for the new
/// segment and, when doc stores are being merged, copies the stored
/// fields as well. Returns the merged document count.
/// </summary>
private int MergeFields()
{
if (!mergeDocStores)
{
// When we are not merging doc stores, the field
// name -> number mappings are identical across the
// segments being merged. So we start with the
// fieldInfos of the last segment to keep that numbering.
SegmentReader sr = (SegmentReader) readers[readers.Count - 1];
fieldInfos = (FieldInfos) sr.core.fieldInfos.Clone();
}
else
{
fieldInfos = new FieldInfos(); // merge field names
}
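// Pass 1: union the field definitions of every reader into fieldInfos.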
foreach(IndexReader reader in readers)
{
if (reader is SegmentReader)
{
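// Fast path: a SegmentReader exposes its FieldInfos directly,
// so each entry's flags can be copied over verbatim.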
SegmentReader segmentReader = (SegmentReader) reader;
FieldInfos readerFieldInfos = segmentReader.FieldInfos();
int numReaderFieldInfos = readerFieldInfos.Size();
for (int j = 0; j < numReaderFieldInfos; j++)
{
FieldInfo fi = readerFieldInfos.FieldInfo(j);
// Pass omitNorms=true when this reader has no norms for the field.
fieldInfos.Add(fi.name, fi.isIndexed, fi.storeTermVector,
    fi.storePositionWithTermVector, fi.storeOffsetWithTermVector,
    !reader.HasNorms(fi.name), fi.storePayloads,
    fi.omitTermFreqAndPositions);
}
}
else
{
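// Generic path: other IndexReader implementations do not expose
// FieldInfos, so rebuild each field's flags from the reader's
// field-name sets, from the most specific term-vector option down
// to plain INDEXED and UNINDEXED. AddIndexed (a private helper in
// this class) adds each name in the given set as an indexed field
// with the supplied flags.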
AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true, false, false);
AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION), true, true, false, false, false);
AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true, false, false);
AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR), true, false, false, false, false);
AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.OMIT_TERM_FREQ_AND_POSITIONS), false, false, false, false, true);
AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.STORES_PAYLOADS), false, false, false, true, false);
AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.INDEXED), false, false, false, false, false);
fieldInfos.Add(reader.GetFieldNames(FieldOption.UNINDEXED), false);
}
}
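// Write the merged field infos to the segment's .fnm file.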
fieldInfos.Write(directory, segment + ".fnm");
int docCount = 0;
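// Identify readers whose field numbering matches the merged
// fieldInfos; their stored fields can be bulk-copied below.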
SetMatchingSegmentReaders();
if (mergeDocStores)
{
// Merge the stored field values into the new segment's .fdt/.fdx files.
FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);
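// try/finally guarantees the writer is disposed even if copying fails.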
try
{
int idx = 0;
foreach(IndexReader reader in readers)
{
SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
FieldsReader matchingFieldsReader = null;
if (matchingSegmentReader != null)
{
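// Bulk copy is only safe when the matching reader can return
// raw (unparsed) document bytes.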
FieldsReader fieldsReader = matchingSegmentReader.GetFieldsReader();
if (fieldsReader != null && fieldsReader.CanReadRawDocs())
{
matchingFieldsReader = fieldsReader;
}
}
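// Segments with deletions take the slower path that skips
// deleted documents; others can be copied without per-document checks.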
if (reader.HasDeletions)
{
docCount += CopyFieldsWithDeletions(fieldsWriter, reader, matchingFieldsReader);
}
else
{
docCount += CopyFieldsNoDeletions(fieldsWriter, reader, matchingFieldsReader);
}
}
}
finally
{
fieldsWriter.Dispose();
}
System.String fileName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
long fdxFileLength = directory.FileLength(fileName);
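// Sanity check: the .fdx file is a 4-byte header followed by one
// 8-byte pointer per document, so its length must be 4 + docCount*8.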
if (4 + ((long) docCount) * 8 != fdxFileLength)
{
// This is most likely a bug in Sun JRE 1.6.0_04/_05 (this check
// was carried over from the Java version of this code); we detect
// that the bug has struck here and throw an exception to prevent
// the corruption from entering the index. See LUCENE-1282 for
// details.
throw new System.SystemException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.FileExists(fileName) + "; now aborting this merge to prevent index corruption");
}
}
// If we are skipping the doc stores, none of the segments
// being merged has deletions, so we just sum NumDocs() of
// each reader to get the total docCount.
else
{
foreach(IndexReader reader in readers)
{
docCount += reader.NumDocs();
}
}
return docCount;
}