Lucene.Net.Index.IndexWriter.DoFlushInternal C# (CSharp) Method

DoFlushInternal() private method

private DoFlushInternal ( bool flushDocStores, bool flushDeletes ) : bool
flushDocStores bool	whether the shared doc stores (stored fields and term vectors) should also be flushed with this segment
flushDeletes bool	whether buffered deletes should be applied as part of this flush
return bool	true if any buffered documents were flushed into a new segment
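
DoFlushInternal writes the documents currently buffered in RAM into a new segment and, depending on its arguments, also flushes the shared doc stores and applies buffered deletes; it returns true only if buffered documents were actually flushed. Because the method is private, application code reaches it indirectly through the public commit/flush path. The following is a minimal usage sketch, assuming the Lucene.Net 2.9/3.0-era API this source belongs to; the RAMDirectory, analyzer version, and field name are illustrative and not taken from the class itself.

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Version = Lucene.Net.Util.Version;

// Hypothetical usage sketch; directory, analyzer version and field name are illustrative.
public static class FlushExample
{
	public static void Main()
	{
		Directory dir = new RAMDirectory();
		IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_29),
			true, IndexWriter.MaxFieldLength.UNLIMITED);

		// Buffer one document in RAM; it stays in the DocumentsWriter until a flush.
		Document doc = new Document();
		doc.Add(new Field("title", "hello world", Field.Store.YES, Field.Index.ANALYZED));
		writer.AddDocument(doc);

		// Commit() flushes buffered docs and deletes; internally the flush path
		// runs through DoFlushInternal before the new segment is committed.
		writer.Commit();
		writer.Close();
	}
}

The method source is shown below.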
		private bool DoFlushInternal(bool flushDocStores, bool flushDeletes)
		{
			lock (this)
			{
				if (hitOOM)
				{
					throw new System.SystemException("this writer hit an OutOfMemoryError; cannot flush");
				}
				
				EnsureOpen(false);
				
				System.Diagnostics.Debug.Assert(TestPoint("startDoFlush"));

				DoBeforeFlush();
				
				flushCount++;
				
				// If we are flushing because too many deletes
				// accumulated, then we should apply the deletes to free
				// RAM:
				flushDeletes |= docWriter.DoApplyDeletes();
				
				// Make sure no threads are actively adding a document.
				// Returns true if docWriter is currently aborting, in
				// which case we skip flushing this segment
				if (infoStream != null)
				{
					Message("flush: now pause all indexing threads");
				}
				if (docWriter.PauseAllThreads())
				{
					docWriter.ResumeAllThreads();
					return false;
				}
				
				try
				{
					
					SegmentInfo newSegment = null;
					
					int numDocs = docWriter.NumDocsInRAM;
					
					// Always flush docs if there are any
					bool flushDocs = numDocs > 0;
					
					System.String docStoreSegment = docWriter.DocStoreSegment;

					System.Diagnostics.Debug.Assert(docStoreSegment != null || numDocs == 0, "dss=" + docStoreSegment + " numDocs=" + numDocs);
					
					if (docStoreSegment == null)
						flushDocStores = false;
					
					int docStoreOffset = docWriter.DocStoreOffset;
					
					bool docStoreIsCompoundFile = false;
					
					if (infoStream != null)
					{
						Message("  flush: segment=" + docWriter.Segment + " docStoreSegment=" + docWriter.DocStoreSegment + " docStoreOffset=" + docStoreOffset + " flushDocs=" + flushDocs + " flushDeletes=" + flushDeletes + " flushDocStores=" + flushDocStores + " numDocs=" + numDocs + " numBufDelTerms=" + docWriter.GetNumBufferedDeleteTerms());
						Message("  index before flush " + SegString());
					}
					
					// Check if the doc stores must be separately flushed
					// because other segments, besides the one we are about
					// to flush, reference it
					if (flushDocStores && (!flushDocs || !docWriter.Segment.Equals(docWriter.DocStoreSegment)))
					{
						// We must separately flush the doc store
						if (infoStream != null)
							Message("  flush shared docStore segment " + docStoreSegment);
						
						docStoreIsCompoundFile = FlushDocStores();
						flushDocStores = false;
					}
					
					System.String segment = docWriter.Segment;
					
					// If we are flushing docs, segment must not be null:
					System.Diagnostics.Debug.Assert(segment != null || !flushDocs);
					
					if (flushDocs)
					{
						
						bool success = false;
						int flushedDocCount;
						
						try
						{
							flushedDocCount = docWriter.Flush(flushDocStores);
							if (infoStream != null)
							{
								Message("flushedFiles=" + docWriter.GetFlushedFiles());
							}
							success = true;
						}
						finally
						{
							if (!success)
							{
								if (infoStream != null)
									Message("hit exception flushing segment " + segment);
								deleter.Refresh(segment);
							}
						}
						
						if (0 == docStoreOffset && flushDocStores)
						{
							// This means we are flushing private doc stores
							// with this segment, so it will not be shared
							// with other segments
							System.Diagnostics.Debug.Assert(docStoreSegment != null);
							System.Diagnostics.Debug.Assert(docStoreSegment.Equals(segment));
							docStoreOffset = - 1;
							docStoreIsCompoundFile = false;
							docStoreSegment = null;
						}
						
						// Create new SegmentInfo, but do not add to our
						// segmentInfos until deletes are flushed
						// successfully.
						newSegment = new SegmentInfo(segment, flushedDocCount, directory, false, true, docStoreOffset, docStoreSegment, docStoreIsCompoundFile, docWriter.HasProx());
						SetDiagnostics(newSegment, "flush");
					}
					
					docWriter.PushDeletes();
					
					if (flushDocs)
					{
						segmentInfos.Add(newSegment);
						Checkpoint();
					}
					
					if (flushDocs && mergePolicy.UseCompoundFile(segmentInfos, newSegment))
					{
						// Now build compound file
						bool success = false;
						try
						{
							docWriter.CreateCompoundFile(segment);
							success = true;
						}
						finally
						{
							if (!success)
							{
								if (infoStream != null)
									Message("hit exception creating compound file for newly flushed segment " + segment);
								deleter.DeleteFile(segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
							}
						}
						
						newSegment.SetUseCompoundFile(true);
						Checkpoint();
					}
					
					if (flushDeletes)
					{
						ApplyDeletes();
					}
					
					if (flushDocs)
						Checkpoint();
					
					DoAfterFlush();
					
					return flushDocs;
				}
				catch (System.OutOfMemoryException oom)
				{
					HandleOOM(oom, "doFlush");
					// never hit
					return false;
				}
				finally
				{
					docWriter.ResumeAllThreads();
				}
			}
		}
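
Note the hitOOM guard at the top: once the writer has hit an OutOfMemoryException (recorded by HandleOOM in the catch block above), any later flush or commit throws the SystemException shown at the start of the method instead of flushing. Below is a hedged caller-side sketch of reacting to that, reusing the hypothetical writer from the earlier example; it is not part of IndexWriter itself.

try
{
	// Any operation that reaches the flush path can throw once hitOOM is set.
	writer.Commit();
}
catch (System.SystemException)
{
	// Rollback() discards uncommitted buffered changes and closes the writer.
	writer.Rollback();
}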
		