public virtual void AddIndexes(params IndexReader[] readers)
{
    EnsureOpen();

    // Do not allow adding docs or deletes while we are running:
    docWriter.PauseAllThreads();

    // We must pre-acquire a read lock here (and upgrade to
    // write lock in StartTransaction below) so that no
    // other AddIndexes is allowed to start up after we have
    // flushed & optimized but before we then start our
    // transaction.  This is because the merging below
    // requires that only one segment is present in the
    // index:
    AcquireRead();

    try
    {
        SegmentInfo info = null;
        System.String mergedName = null;
        SegmentMerger merger = null;

        bool success = false;

        try
        {
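            // Flush pending buffered docs and deletes first; the
            // booleans below are assumed to map to Lucene 2.9's
            // flush(triggerMerge, flushDocStores, flushDeletes):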
            Flush(true, false, true);
            Optimize(); // start with zero or 1 seg
            success = true;
        }
        finally
        {
            // Take care to release the read lock if we hit an
            // exception before starting the transaction
            if (!success)
                ReleaseRead();
        }

        // true means we already have a read lock; if this
        // call hits an exception it will release the write
        // lock:
        StartTransaction(true);
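
        // From here on the merge runs inside the transaction; on
        // failure, RollbackTransaction (in the finally below) restores
        // the segmentInfos snapshot taken by StartTransaction: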
        try
        {
            mergedName = NewSegmentName();
            merger = new SegmentMerger(this, mergedName, null);

            SegmentReader sReader = null;

            lock (this)
            {
                if (segmentInfos.Count == 1)
                {
                    // add existing index, if any
                    sReader = readerPool.Get(segmentInfos.Info(0), true, BufferedIndexInput.BUFFER_SIZE, -1);
                }
            }
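
            // Run the actual merge outside the writer lock; only the
            // segmentInfos swap below needs to be synchronized: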
            success = false;

            try
            {
                if (sReader != null)
                    merger.Add(sReader);

                // add new indexes
                for (int i = 0; i < readers.Length; i++)
                    merger.Add(readers[i]);

                int docCount = merger.Merge(); // merge 'em

                lock (this)
                {
                    segmentInfos.Clear(); // pop old infos & add new
                    info = new SegmentInfo(mergedName, docCount, directory, false, true, -1, null, false, merger.HasProx());
                    SetDiagnostics(info, "addIndexes(params IndexReader[])");
                    segmentInfos.Add(info);
                }

                // Notify DocumentsWriter that the flushed count just increased
                docWriter.UpdateFlushedDocCount(docCount);

                success = true;
            }
            finally
            {
                if (sReader != null)
                {
                    readerPool.Release(sReader);
                }
            }
        }
        finally
        {
            if (!success)
            {
                if (infoStream != null)
                    Message("hit exception in addIndexes during merge");
                RollbackTransaction();
            }
            else
            {
                CommitTransaction();
            }
        }
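
        // If the merge policy builds compound files, repackage the new
        // segment's files into a single .cfs under a second transaction: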
        if (mergePolicy is LogMergePolicy && UseCompoundFile)
        {
            IList<string> files = null;

            lock (this)
            {
                // Must incRef our files so that if another thread
                // is running merge/optimize, it doesn't delete our
                // segment's files before we have a chance to
                // finish making the compound file.
                if (segmentInfos.Contains(info))
                {
                    files = info.Files();
                    deleter.IncRef(files);
                }
            }
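
            // files == null means a concurrent merge/optimize already
            // replaced our segment, so there is nothing to repackage: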
            if (files != null)
            {
                success = false;

                StartTransaction(false);

                try
                {
                    merger.CreateCompoundFile(mergedName + ".cfs");
                    lock (this)
                    {
                        info.SetUseCompoundFile(true);
                    }

                    success = true;
                }
                finally
                {
                    lock (this)
                    {
                        deleter.DecRef(files);
                    }

                    if (!success)
                    {
                        if (infoStream != null)
                            Message("hit exception building compound file in addIndexes during merge");
                        RollbackTransaction();
                    }
                    else
                    {
                        CommitTransaction();
                    }
                }
            }
        }
    }
    catch (System.OutOfMemoryException oom)
    {
        HandleOOM(oom, "addIndexes(params IndexReader[])");
    }
    finally
    {
        if (docWriter != null)
        {
            docWriter.ResumeAllThreads();
        }
    }
}
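
// A minimal usage sketch (not part of the original source): merging two
// source indexes into a target writer. The Directory instances and the
// analyzer below are hypothetical stand-ins.
/*
IndexWriter writer = new IndexWriter(targetDir, analyzer, true,
    IndexWriter.MaxFieldLength.UNLIMITED);
IndexReader r1 = IndexReader.Open(srcDir1, true); // readOnly
IndexReader r2 = IndexReader.Open(srcDir2, true);
try
{
    writer.AddIndexes(r1, r2); // pauses adds/deletes while it merges
}
finally
{
    r1.Close();
    r2.Close();
    writer.Close();
}
*/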