/// <summary>
/// Merges the provided indexes into this index: all leaves of the given
/// readers are merged in a single <c>SegmentMerger</c> pass into one new
/// segment, which is then registered with this writer.
/// <para/>
/// The provided <see cref="IndexReader"/>s are not closed.
/// <para/>
/// See the <c>AddIndexes(Directory[])</c> overload for details on transactional
/// semantics, temporary free space required in the Directory, and non-CFS
/// segments on an Exception.
/// <para/>
/// <b>NOTE</b>: if this method hits an <see cref="System.OutOfMemoryException"/>
/// you should immediately close the writer. See the out-of-memory notes on
/// this class for details.
/// <para/>
/// <b>NOTE:</b> empty segments are dropped by this method and not added to this
/// index (see the <c>ShouldMerge()</c> check below).
/// <para/>
/// <b>NOTE:</b> this method merges all given <see cref="IndexReader"/>s in one
/// merge. If you intend to merge a large number of readers, it may be better
/// to call this method multiple times, each time with a small set of readers.
/// In principle, if you use a merge policy with a <c>mergeFactor</c> or
/// <c>maxMergeAtOnce</c> parameter, you should pass that many readers in one
/// call. Also, if the given readers are <see cref="DirectoryReader"/>s, they can be
/// opened with <c>termIndexInterval=-1</c> to save RAM, since during merge
/// the in-memory structure is not used. See
/// <c>DirectoryReader.Open(Directory, int)</c>.
/// <para/>
/// <b>NOTE</b>: if you call <c>Close(false)</c>, which
/// aborts all running merges, then any thread still running this method might
/// hit a <see cref="MergePolicy.MergeAbortedException"/>.
/// </summary>
/// <exception cref="CorruptIndexException">
/// if the index is corrupt </exception>
/// <exception cref="IOException">
/// if there is a low-level IO error </exception>
public virtual void AddIndexes(params IndexReader[] readers)
{
EnsureOpen();
// Running total of live docs across all readers; used to size the merge IOContext.
int numDocs = 0;
try
{
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "flush at addIndexes(IndexReader...)");
}
// Flush pending docs/deletes so the new segment is merged against a stable view.
Flush(false, true);
string mergedName = NewSegmentName();
// Collect every atomic leaf of every incoming reader; they are all merged
// into the single segment "mergedName" in one pass.
IList<AtomicReader> mergeReaders = new List<AtomicReader>();
foreach (IndexReader indexReader in readers)
{
numDocs += indexReader.NumDocs();
foreach (AtomicReaderContext ctx in indexReader.Leaves())
{
mergeReaders.Add(ctx.AtomicReader);
}
}
IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1));
// TODO: somehow we should fix this merge so it's
// abortable so that IW.close(false) is able to stop it
// Tracking wrapper records every file the merge creates so we can attach
// them to the SegmentInfo (and clean them up on failure).
TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, -1, false, Codec, null);
// NOTE: CheckAbort.NONE makes this merge non-abortable (see TODO above).
SegmentMerger merger = new SegmentMerger(mergeReaders, info, infoStream, trackingDir, Config_Renamed.TermIndexInterval, MergeState.CheckAbort.NONE, GlobalFieldNumberMap, context, Config_Renamed.CheckIntegrityAtMerge);
// Nothing to merge (e.g. all readers empty): drop the would-be empty segment.
if (!merger.ShouldMerge())
{
return;
}
MergeState mergeState;
bool success = false;
try
{
mergeState = merger.Merge(); // merge 'em
success = true;
}
finally
{
if (!success)
{
// Merge failed: have the deleter remove any partially-written files
// for this segment name.
lock (this)
{
Deleter.Refresh(info.Name);
}
}
}
SegmentCommitInfo infoPerCommit = new SegmentCommitInfo(info, 0, -1L, -1L);
// Record the files the merge actually produced, then reset the tracker so
// later steps (CFS / .si writes) are tracked separately.
info.Files = new HashSet<string>(trackingDir.CreatedFiles);
trackingDir.CreatedFiles.Clear();
SetDiagnostics(info, SOURCE_ADDINDEXES_READERS);
bool useCompoundFile;
lock (this) // Guard segmentInfos
{
if (StopMerges)
{
// Writer is closing: discard the new segment's files and bail out.
Deleter.DeleteNewFiles(infoPerCommit.Files());
return;
}
EnsureOpen();
useCompoundFile = mergePolicy.UseCompoundFile(segmentInfos, infoPerCommit);
}
// Now create the compound file if needed
if (useCompoundFile)
{
// Snapshot the non-CFS file names before CFS creation so we can delete
// them afterwards (they are replaced by the compound file).
ICollection<string> filesToDelete = infoPerCommit.Files();
try
{
CreateCompoundFile(infoStream, directory, MergeState.CheckAbort.NONE, info, context);
}
finally
{
// delete new non cfs files directly: they were never
// registered with IFD
lock (this)
{
Deleter.DeleteNewFiles(filesToDelete);
}
}
info.UseCompoundFile = true;
}
// Have codec write SegmentInfo. Must do this after
// creating CFS so that 1) .si isn't slurped into CFS,
// and 2) .si reflects useCompoundFile=true change
// above:
success = false;
try
{
Codec.SegmentInfoFormat().SegmentInfoWriter.Write(trackingDir, info, mergeState.FieldInfos, context);
success = true;
}
finally
{
if (!success)
{
// .si write failed: clean up this segment's files via the deleter.
lock (this)
{
Deleter.Refresh(info.Name);
}
}
}
// Add the files created since the tracker was cleared (CFS and/or .si).
info.AddFiles(trackingDir.CreatedFiles);
// Register the new segment
lock (this)
{
if (StopMerges)
{
// Writer began closing while we were writing: discard everything.
Deleter.DeleteNewFiles(info.Files);
return;
}
EnsureOpen();
segmentInfos.Add(infoPerCommit);
// Checkpoint so the deleter takes ownership of the new segment's files.
Checkpoint();
}
}
catch (System.OutOfMemoryException oom)
{
// Ported Java OOME handling: mark the writer as hit-OOM so subsequent
// commits are refused; the caller is expected to close the writer.
HandleOOM(oom, "addIndexes(IndexReader...)");
}
}