Lucene.Net.Index.IndexWriter._MergeInit C# (CSharp) Method

_MergeInit() private method

private _MergeInit ( MergePolicy.OneMerge merge ) : void
merge MergePolicy.OneMerge
return void
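_MergeInit performs the setup for a single merge while holding the writer lock: it applies buffered deletes, decides whether the stored fields and term vectors (the doc stores) can be shared by the merged segment or must be merged as well, binds a deterministic name for the new segment, and records merge diagnostics. In the Lucene.Net 2.9 sources this private method is reached through a MergeInit wrapper that unregisters the merge if setup fails; the snippet below is only a rough sketch of that calling pattern (the exact wrapper body may differ).

		// Rough sketch of the typical caller, assuming the internal
		// MergeFinish helper; see the actual Lucene.Net sources for the
		// exact wrapper.
		internal void MergeInit(MergePolicy.OneMerge merge)
		{
			lock (this)
			{
				bool success = false;
				try
				{
					_MergeInit(merge);
					success = true;
				}
				finally
				{
					// If setup failed, unregister the merge so its segments
					// become eligible for other merges again.
					if (!success)
						MergeFinish(merge);
				}
			}
		}

The full method body follows.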
		private void  _MergeInit(MergePolicy.OneMerge merge)
		{
			lock (this)
			{
				
				System.Diagnostics.Debug.Assert(TestPoint("startMergeInit"));
				
				System.Diagnostics.Debug.Assert(merge.registerDone);
				System.Diagnostics.Debug.Assert(!merge.optimize || merge.maxNumSegmentsOptimize > 0);
				
				if (hitOOM)
				{
					throw new System.SystemException("this writer hit an OutOfMemoryError; cannot merge");
				}
				
				if (merge.info != null)
					// mergeInit already done
					return;
				
				if (merge.IsAborted())
					return;
				
				ApplyDeletes();
				
				SegmentInfos sourceSegments = merge.segments;
				int end = sourceSegments.Count;
				
				// Check whether this merge will allow us to skip
				// merging the doc stores (stored field & vectors).
				// This is a very substantial optimization (saves tons
				// of IO).
				
				Directory lastDir = directory;
				System.String lastDocStoreSegment = null;
				int next = -1;
				
				bool mergeDocStores = false;
				bool doFlushDocStore = false;
				System.String currentDocStoreSegment = docWriter.DocStoreSegment;
				
				// Test each segment to be merged: check if we need to
				// flush/merge doc stores
				for (int i = 0; i < end; i++)
				{
					SegmentInfo si = sourceSegments.Info(i);
					
					// If it has deletions we must merge the doc stores
					if (si.HasDeletions())
						mergeDocStores = true;
					
					// If it has its own (private) doc stores we must
					// merge the doc stores
					if (-1 == si.DocStoreOffset)
						mergeDocStores = true;
					
					// If it has a different doc store segment than
					// previous segments, we must merge the doc stores
					System.String docStoreSegment = si.DocStoreSegment;
					if (docStoreSegment == null)
						mergeDocStores = true;
					else if (lastDocStoreSegment == null)
						lastDocStoreSegment = docStoreSegment;
					else if (!lastDocStoreSegment.Equals(docStoreSegment))
						mergeDocStores = true;
					
					// Segments' doc store offsets must be in-order,
					// contiguous.  For the default merge policy now
					// this will always be the case but for an arbitrary
					// merge policy this may not be the case
					if (-1 == next)
						next = si.DocStoreOffset + si.docCount;
					else if (next != si.DocStoreOffset)
						mergeDocStores = true;
					else
						next = si.DocStoreOffset + si.docCount;
					
					// If the segment comes from a different directory
					// we must merge
					if (lastDir != si.dir)
						mergeDocStores = true;
					
					// If the segment is referencing the current "live"
					// doc store outputs then we must merge
					if (si.DocStoreOffset != -1 && currentDocStoreSegment != null && si.DocStoreSegment.Equals(currentDocStoreSegment))
					{
						doFlushDocStore = true;
					}
				}

				// If a mergedSegmentWarmer is installed, we must merge
				// the doc stores because we will open a full
				// SegmentReader on the merged segment:
				if (!mergeDocStores && mergedSegmentWarmer != null && currentDocStoreSegment != null && lastDocStoreSegment != null && lastDocStoreSegment.Equals(currentDocStoreSegment))
				{
					mergeDocStores = true;
				}

				int docStoreOffset;
				System.String docStoreSegment2;
				bool docStoreIsCompoundFile;
				
				if (mergeDocStores)
				{
					docStoreOffset = -1;
					docStoreSegment2 = null;
					docStoreIsCompoundFile = false;
				}
				else
				{
					SegmentInfo si = sourceSegments.Info(0);
					docStoreOffset = si.DocStoreOffset;
					docStoreSegment2 = si.DocStoreSegment;
					docStoreIsCompoundFile = si.DocStoreIsCompoundFile;
				}
				
				if (mergeDocStores && doFlushDocStore)
				{
					// SegmentMerger intends to merge the doc stores
					// (stored fields, vectors), and at least one of the
					// segments to be merged refers to the currently
					// live doc stores.
					
					// TODO: if we know we are about to merge away these
					// newly flushed doc store files then we should not
					// make compound file out of them...
					if (infoStream != null)
						Message("now flush at merge");
					DoFlush(true, false);
				}
				
				merge.mergeDocStores = mergeDocStores;
				
				// Bind a new segment name here so even with
				// ConcurrentMergePolicy we keep deterministic segment
				// names.
				merge.info = new SegmentInfo(NewSegmentName(), 0, directory, false, true, docStoreOffset, docStoreSegment2, docStoreIsCompoundFile, false);
				
				IDictionary<string, string> details = new Dictionary<string, string>();
				details["optimize"] = merge.optimize + "";
				details["mergeFactor"] = end + "";
				details["mergeDocStores"] = mergeDocStores + "";
				SetDiagnostics(merge.info, "merge", details);
				
				// Also enroll the merged segment into mergingSegments;
				// this prevents it from getting selected for a merge
				// after our merge is done but while we are building the
				// CFS:
				mergingSegments.Add(merge.info);
			}
		}
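The per-segment loop above is the heart of the method: it decides whether the merge can simply point the new segment at the existing shared doc stores or must rewrite stored fields and term vectors too. As a reading aid, the sketch below restates those checks in isolation. The class and method names are hypothetical, the segment fields are reduced to a plain record, and the doFlushDocStore and mergedSegmentWarmer handling is deliberately omitted.

		// Hypothetical illustration only; not part of IndexWriter.
		internal static class DocStoreSharingSketch
		{
			// Plain record of the SegmentInfo fields the decision depends on.
			internal sealed class SegmentDocStoreInfo
			{
				public bool HasDeletions;
				public int DocStoreOffset;      // -1 means the segment has private doc stores
				public int DocCount;
				public string DocStoreSegment;  // null means no shared doc store
				public bool SameDirectoryAsWriter;
			}
			
			// Returns true when the merge cannot reuse shared doc stores and
			// must merge stored fields and term vectors as well.
			internal static bool MustMergeDocStores(System.Collections.Generic.IList<SegmentDocStoreInfo> segments)
			{
				string lastDocStoreSegment = null;
				int next = -1;
				foreach (SegmentDocStoreInfo si in segments)
				{
					// Deletions, private doc stores, or no shared doc store
					// segment all force a doc-store merge.
					if (si.HasDeletions || si.DocStoreOffset == -1 || si.DocStoreSegment == null)
						return true;
					
					// Every segment must reference the same doc store segment...
					if (lastDocStoreSegment == null)
						lastDocStoreSegment = si.DocStoreSegment;
					else if (!lastDocStoreSegment.Equals(si.DocStoreSegment))
						return true;
					
					// ...with contiguous, in-order offsets...
					if (next != -1 && next != si.DocStoreOffset)
						return true;
					next = si.DocStoreOffset + si.DocCount;
					
					// ...and must come from the writer's own directory.
					if (!si.SameDirectoryAsWriter)
						return true;
				}
				return false;
			}
		}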
		
IndexWriter