19 using System.Collections.Generic;
 
   21 using Lucene.Net.Support;
 
   23 using Document = Lucene.Net.Documents.Document;
 
   28 using Lock = Lucene.Net.Store.Lock;
 
   31 using Query = Lucene.Net.Search.Query;
 
   34 namespace Lucene.Net.Index
 
        // Constructor helper: creates the ReaderPool tied to this writer
        // (assigned to the readerPool field declared below).
        // NOTE(review): extraction gap — original lines 163-165 are missing
        // here; verify braces/ordering against the full source.
  162         private void  InitBlock()
 
  166             readerPool = 
new ReaderPool(
this);
 
        // Default timeout handed to Lock.Obtain when acquiring the directory
        // write lock (see Init). NOTE(review): public *mutable* static — any
        // caller can change the default for every subsequently created writer.
  172         public static long WRITE_LOCK_TIMEOUT = 1000;
 
        // Per-instance write-lock timeout, seeded from the static default
        // (see the WriteLockTimeout property).
  174         private long writeLockTimeout = WRITE_LOCK_TIMEOUT;
 
        // Name of the lock file created via directory.MakeLock / ClearLock in Init.
  177         public const System.String WRITE_LOCK_NAME = 
"write.lock";
 
        // Sentinel (-1) meaning "this flush trigger is disabled"; compared
        // against in SetMaxBufferedDocs / SetRAMBufferSizeMB /
        // SetMaxBufferedDeleteTerms.
  180         public const int DISABLE_AUTO_FLUSH = - 1;
 
        // Doc-count-based flushing is disabled by default.
  185         public static readonly 
int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH;
 
        // Default RAM buffer (MB) that triggers a flush when exceeded.
  190         public const double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0;
 
        // Delete-term-count-based flushing is disabled by default.
  195         public static readonly 
int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
 
        // Default cap on tokens indexed per field (see SetMaxFieldLength).
  198         public const int DEFAULT_MAX_FIELD_LENGTH = 10000;
 
        // Default interval between indexed terms (see the TermIndexInterval
        // property and the termIndexInterval field below).
  201         public const int DEFAULT_TERM_INDEX_INTERVAL = 128;
 
        // Buffer size presumably used when reading segments during merges —
        // TODO confirm; all usages fall outside this extract.
  216         private const int MERGE_READ_BUFFER_SIZE = 4096;
 
        // Guards the process-wide MESSAGE_ID counter (see SetMessageID).
  219         private static System.Object MESSAGE_ID_LOCK = 
new System.Object();
 
        // Process-wide counter handing out unique writer ids for log output.
  220         private static int MESSAGE_ID = 0;
 
        // This writer's id used in Message() output; -1 until SetMessageID runs.
  221         private int messageID = - 1;
 
        // Set once an OutOfMemoryException has been handled (see HandleOOM call
        // sites); presumably checked before commit-like operations — the
        // "cannot complete optimize" error path suggests so. TODO confirm.
  222         private volatile bool hitOOM;
 
        // Change counters: changeCount presumably bumps on every index change
        // and is compared against the two commit-time snapshots below to decide
        // whether a commit is needed — increments fall outside this extract.
  229         private volatile uint changeCount; 
 
  230         private long lastCommitChangeCount; 
 
        // SegmentInfo -> position map over the rollback snapshot; built by
        // SetRollbackSegmentInfos.
  233         private HashMap<SegmentInfo, int?> rollbackSegments;
 
  236         internal volatile uint pendingCommitChangeCount;
 
  239         private int localFlushedDocCount; 
 
        // Target segment count requested by Optimize(maxNumSegments).
  242         private int optimizeMaxNumSegments;
 
        // Segments selected for optimization; rebuilt at the start of Optimize
        // and consulted when constructing optimize merges.
  247         private ISet<SegmentInfo> segmentsToOptimize = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<
SegmentInfo>(); 
 
        // Directory write lock held for this writer's lifetime; obtained in
        // Init and released in CloseInternal (and on failed Init).
  249         private Lock writeLock;
 
  251         private int termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL;
 
        // True while Dispose/Close is in progress (see IsOpen).
  254         private bool closing;
 
        // Segments currently participating in a merge.
  258         private HashSet<SegmentInfo> mergingSegments = 
new HashSet<SegmentInfo>();
 
        // Merges handed to the merge scheduler and not yet finished; consulted
        // by Optimize/ExpungeDeletes when waiting for completion.
  263         private ISet<
MergePolicy.OneMerge> runningMerges = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<
MergePolicy.OneMerge>();
 
  265         private long mergeGen;
 
        // When true, merge activity must stop — presumably set during
        // rollback/close; writers of this flag fall outside this extract.
  266         private bool stopMerges;
 
  268         private int flushCount;
 
  269         private int flushDeletesCount;
 
        // Number of threads currently holding the "read" side of the writer's
        // internal read/write coordination (see AcquireRead/AcquireWrite).
  273         private int readCount; 
 
        // Pool caching SegmentReaders shared by near-real-time readers and
        // merging; created in InitBlock.
  275         internal ReaderPool readerPool;
 
        // Readers waiting in UpgradeReadToWrite.
  276         private int upgradeCount;
 
        // Terms-index divisor applied to readers opened via GetReader and
        // ReaderPool.Get; validated (>= 1) by the ReaderTermsIndexDivisor property.
  278         private int readerTermsIndexDivisor = 
IndexReader.DEFAULT_TERMS_INDEX_DIVISOR;
 
        // When true, ReaderPool.Release keeps SegmentReaders cached instead of
        // dropping them once their refcount allows.
  289         private volatile bool poolReaders;
 
  353             return GetReader(readerTermsIndexDivisor);
 
  375             if (infoStream != null)
 
  377                 Message(
"flush at getReader");
 
  391                 Flush(
false, 
true, 
true);
 
  406         internal class ReaderPool : IDisposable
 
  410                 InitBlock(enclosingInstance);
 
  412             private void  InitBlock(
IndexWriter enclosingInstance)
 
  414                 this.enclosingInstance = enclosingInstance;
 
  421                     return enclosingInstance;
 
  426             private IDictionary<SegmentInfo, SegmentReader> readerMap = 
new HashMap<SegmentInfo, SegmentReader>();
 
  431             internal virtual void  Clear(SegmentInfos infos)
 
  437                         foreach(KeyValuePair<SegmentInfo, SegmentReader> ent 
in readerMap)
 
  439                             ent.Value.hasChanges = 
false;
 
  444                         foreach(SegmentInfo info 
in infos)
 
  446                             if (readerMap.ContainsKey(info))
 
  448                                 readerMap[info].hasChanges = 
false;
 
  456             public virtual bool InfoIsLive(SegmentInfo info)
 
  460                     int idx = Enclosing_Instance.segmentInfos.IndexOf(info);
 
  461                     System.Diagnostics.Debug.Assert(idx != -1);
 
  462                     System.Diagnostics.Debug.Assert(Enclosing_Instance.segmentInfos[idx] == info);
 
  467             public virtual SegmentInfo MapToLive(SegmentInfo info)
 
  471                     int idx = Enclosing_Instance.segmentInfos.IndexOf(info);
 
  474                         info = Enclosing_Instance.segmentInfos[idx];
 
  486             public virtual void  Release(SegmentReader sr)
 
  501             public virtual void  Release(SegmentReader sr, 
bool drop)
 
  506                     bool pooled = readerMap.ContainsKey(sr.SegmentInfo);
 
  508                     System.Diagnostics.Debug.Assert(!pooled || readerMap[sr.SegmentInfo] == sr);
 
  514                     if (pooled && (drop || (!Enclosing_Instance.poolReaders && sr.RefCount == 1)))
 
  526                         sr.hasChanges &= !drop;
 
  528                         bool hasChanges = sr.hasChanges;
 
  536                         readerMap.Remove(sr.SegmentInfo);
 
  543                             enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, 
false);
 
  552             public void Dispose()
 
  557             protected void Dispose(
bool disposing)
 
  568                         foreach (var ent 
in readerMap)
 
  570                             SegmentReader sr = ent.Value;
 
  573                                 System.Diagnostics.Debug.Assert(InfoIsLive(sr.SegmentInfo));
 
  578                                 enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, 
false);
 
  599             internal virtual void  Commit()
 
  606                     foreach(KeyValuePair<SegmentInfo,SegmentReader> ent 
in readerMap)
 
  608                         SegmentReader sr = ent.Value;
 
  611                             System.Diagnostics.Debug.Assert(InfoIsLive(sr.SegmentInfo));
 
  616                             enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, 
false);
 
  626             public virtual SegmentReader GetReadOnlyClone(SegmentInfo info, 
bool doOpenStores, 
int termInfosIndexDivisor)
 
  630                     SegmentReader sr = Get(info, doOpenStores, 
BufferedIndexInput.BUFFER_SIZE, termInfosIndexDivisor);
 
  633                         return (SegmentReader) sr.Clone(
true);
 
  652             public virtual SegmentReader Get(SegmentInfo info, 
bool doOpenStores)
 
  656                     return Get(info, doOpenStores, 
BufferedIndexInput.BUFFER_SIZE, enclosingInstance.readerTermsIndexDivisor);
 
  674             public virtual SegmentReader Get(SegmentInfo info, 
bool doOpenStores, 
int readBufferSize, 
int termsIndexDivisor)
 
  678                     if (Enclosing_Instance.poolReaders)
 
  683                     SegmentReader sr = readerMap[info];
 
  689                         sr = SegmentReader.Get(
false, info.dir, info, readBufferSize, doOpenStores, termsIndexDivisor);
 
  690                         if (info.dir == enclosingInstance.directory)
 
  702                         if (termsIndexDivisor != - 1 && !sr.TermsIndexLoaded())
 
  710                             sr.LoadTermsIndex(termsIndexDivisor);
 
  715                     if (info.dir == enclosingInstance.directory)
 
  725             public virtual SegmentReader GetIfExists(SegmentInfo info)
 
  729                     SegmentReader sr = readerMap[info];
 
  761                     readerPool.Release(reader);
 
  766         internal virtual void  AcquireWrite()
 
  771                 while (writeThread != null || readCount > 0)
 
  781         internal virtual void  ReleaseWrite()
 
  787                 System.Threading.Monitor.PulseAll(
this);
 
  791         internal virtual void  AcquireRead()
 
  796                 while (writeThread != null && writeThread != current)
 
  806         internal virtual void  UpgradeReadToWrite()
 
  810                 System.Diagnostics.Debug.Assert(readCount > 0);
 
  812                 while (readCount > upgradeCount || writeThread != null)
 
  823         internal virtual void  ReleaseRead()
 
  828                 System.Diagnostics.Debug.Assert(readCount >= 0);
 
  829                 System.Threading.Monitor.PulseAll(
this);
 
        // True while the writer is usable: not closed, and — when
        // includePendingClose is set — not in the middle of closing either.
  833         internal bool IsOpen(
bool includePendingClose)
 
  837                 return !(closed || (includePendingClose && closing));
 
        // Guard used by public entry points; throws when the writer is closed
        // (or closing, when includePendingClose). The throw itself falls in
        // lines missing from this extract — TODO confirm exception type.
  846         protected internal void  EnsureOpen(
bool includePendingClose)
 
  850                 if (!IsOpen(includePendingClose))
 
  857         protected internal void  EnsureOpen()
 
        // Emits one diagnostic line: "IW <id> [<time>; <thread name>]: <message>".
        // No-op when infoStream is null. NOTE(review): DateTime.Now.ToString()
        // is local-time and current-culture dependent, so timestamps vary by
        // machine settings — acceptable for debug logging only.
  869         public virtual void  Message(System.String message)
 
  871             if (infoStream != null)
 
  872                 infoStream.WriteLine(
"IW " + messageID + 
" [" + DateTime.Now.ToString() + 
"; " + 
ThreadClass.
Current().
Name + 
"]: " + message);
 
        // Assigns this writer a unique message id exactly once (guarded by
        // MESSAGE_ID_LOCK) when a non-null stream is first supplied, then
        // records the stream. A null stream or already-assigned id leaves
        // messageID untouched.
  875         private void  SetMessageID(System.IO.StreamWriter infoStream)
 
  879                 if (infoStream != null && messageID == - 1)
 
  881                     lock (MESSAGE_ID_LOCK)
 
  883                         messageID = MESSAGE_ID++;
 
  886                 this.infoStream = infoStream;
 
  893         private LogMergePolicy LogMergePolicy
 
  897                 if (mergePolicy is LogMergePolicy)
 
  898                     return (LogMergePolicy) mergePolicy;
 
  900                 throw new System.ArgumentException(
 
  901                     "this method can only be called when the merge policy is the default LogMergePolicy");
 
  918         public virtual bool UseCompoundFile
 
  920             get { 
return LogMergePolicy.GetUseCompoundFile(); }
 
  923                 LogMergePolicy.SetUseCompoundFile(value);
 
  924                 LogMergePolicy.SetUseCompoundDocStore(value);
 
  933             this.similarity = similarity;
 
  934             docWriter.SetSimilarity(similarity);
 
  946                 return this.similarity;
 
  973         public virtual int TermIndexInterval
 
  979                 return termIndexInterval;
 
  984                 this.termIndexInterval = value;
 
 1018             Init(d, a, create, null, mfl.
Limit, null, null);
 
 1045             Init(d, a, null, mfl.
Limit, null, null);
 
 1074             Init(d, a, deletionPolicy, mfl.
Limit, null, null);
 
 1110             Init(d, a, create, deletionPolicy, mfl.
Limit, null, null);
 
 1152             Init(d, a, create, deletionPolicy, mfl.Limit, indexingChain, commit);
 
 1196             Init(d, a, 
false, deletionPolicy, mfl.
Limit, null, commit);
 
 1203                 Init(d, a, 
false, deletionPolicy, maxFieldLength, indexingChain, commit);
 
 1207                 Init(d, a, 
true, deletionPolicy, maxFieldLength, indexingChain, commit);
 
 1211         private void  Init(
Directory d, 
Analyzer a, 
bool create, IndexDeletionPolicy deletionPolicy, 
int maxFieldLength, 
IndexingChain indexingChain, IndexCommit commit)
 
 1215             SetMessageID(defaultInfoStream);
 
 1216             this.maxFieldLength = maxFieldLength;
 
 1218             if (indexingChain == null)
 
 1219                 indexingChain = DocumentsWriter.DefaultIndexingChain;
 
 1224                 directory.ClearLock(WRITE_LOCK_NAME);
 
 1227             Lock writeLock = directory.MakeLock(WRITE_LOCK_NAME);
 
 1228             if (!writeLock.Obtain(writeLockTimeout))
 
 1233             this.writeLock = writeLock; 
 
 1235             bool success = 
false;
 
 1247                         segmentInfos.Read(directory);
 
 1248                         segmentInfos.Clear();
 
 1251                     catch (System.IO.IOException)
 
 1261                         segmentInfos.Commit(directory);
 
 1262                         synced.UnionWith(segmentInfos.Files(directory, 
true));
 
 1273                     segmentInfos.Read(directory);
 
 1282                         if (commit.Directory != directory)
 
 1283                             throw new System.ArgumentException(
"IndexCommit's directory doesn't match my directory");
 
 1284                         SegmentInfos oldInfos = 
new SegmentInfos();
 
 1285                         oldInfos.Read(directory, commit.SegmentsFileName);
 
 1286                         segmentInfos.Replace(oldInfos);
 
 1288                         if (infoStream != null)
 
 1289                             Message(
"init: loaded commit \"" + commit.SegmentsFileName + 
"\"");
 
 1294                     synced.UnionWith(segmentInfos.Files(directory, 
true));
 
 1297                 SetRollbackSegmentInfos(segmentInfos);
 
 1299                 docWriter = 
new DocumentsWriter(directory, 
this, indexingChain);
 
 1300                 docWriter.SetInfoStream(infoStream);
 
 1301                 docWriter.SetMaxFieldLength(maxFieldLength);
 
 1305                 deleter = 
new IndexFileDeleter(directory, deletionPolicy == null?
new KeepOnlyLastCommitDeletionPolicy():deletionPolicy, segmentInfos, infoStream, docWriter, synced);
 
 1307                 if (deleter.startingCommitDeleted)
 
 1314                 PushMaxBufferedDocs();
 
 1316                 if (infoStream != null)
 
 1318                     Message(
"init: create=" + create);
 
 1328                     if (infoStream != null)
 
 1330                         Message(
"init: hit exception on init; releasing write lock");
 
 1334                         writeLock.Release();
 
        // Snapshots the given SegmentInfos (via Clone) as the rollback point
        // and rebuilds rollbackSegments, a SegmentInfo -> position map over the
        // cloned snapshot. Note the Debug.Assert is compiled out of release
        // builds, so HasExternalSegments is only checked in debug builds.
 1345         private void  SetRollbackSegmentInfos(SegmentInfos infos)
 
 1349                 rollbackSegmentInfos = (SegmentInfos) infos.Clone();
 
 1350                 System.Diagnostics.Debug.Assert(!rollbackSegmentInfos.HasExternalSegments(directory));
 
 1351                 rollbackSegments = 
new HashMap<SegmentInfo, int?>();
 
 1352                 int size = rollbackSegmentInfos.Count;
 
 1353                 for (
int i = 0; i < size; i++)
 
 1354                     rollbackSegments[rollbackSegmentInfos.Info(i)] = i;
 
 1363                 throw new System.NullReferenceException(
"MergePolicy must be non-null");
 
 1365             if (mergePolicy != mp)
 
 1366                 mergePolicy.Close();
 
 1368             PushMaxBufferedDocs();
 
 1369             if (infoStream != null)
 
 1371                 Message(
"setMergePolicy " + mp);
 
 1393                 if (mergeScheduler == null)
 
 1394                     throw new System.NullReferenceException(
"MergeScheduler must be non-null");
 
 1396                 if (this.mergeScheduler != mergeScheduler)
 
 1399                     this.mergeScheduler.
Close();
 
 1401                 this.mergeScheduler = mergeScheduler;
 
 1402                 if (infoStream != null)
 
 1404                     Message(
"setMergeScheduler " + mergeScheduler);
 
 1419                 return mergeScheduler;
 
 1446         public virtual int MaxMergeDocs
 
 1448             get { 
return LogMergePolicy.MaxMergeDocs; }
 
 1449             set { LogMergePolicy.MaxMergeDocs = value; }
 
 1465         public virtual void  SetMaxFieldLength(
int maxFieldLength)
 
 1468             this.maxFieldLength = maxFieldLength;
 
 1469             docWriter.SetMaxFieldLength(maxFieldLength);
 
 1470             if (infoStream != null)
 
 1471                 Message(
"setMaxFieldLength " + maxFieldLength);
 
 1479         [System.Diagnostics.CodeAnalysis.SuppressMessage(
"Microsoft.Design", 
"CA1024:UsePropertiesWhereAppropriate")]
 
 1480         public virtual int GetMaxFieldLength()
 
 1483             return maxFieldLength;
 
 1491         public int ReaderTermsIndexDivisor
 
 1496                 return readerTermsIndexDivisor;
 
 1503                     throw new ArgumentException(
"divisor must be >= 1 (got " + value + 
")");
 
 1505                 readerTermsIndexDivisor = value;
 
 1506                 if (infoStream != null)
 
 1508                     Message(
"setReaderTermsIndexDivisor " + readerTermsIndexDivisor);
 
        // Sets the doc-count flush trigger on the DocumentsWriter. Validates
        // that the value is >= 2 when enabled, and that at least one of the
        // doc-count / RAM-size triggers stays enabled.
        // NOTE(review): the second check casts GetRAMBufferSizeMB() (a double)
        // to int before comparing with the -1 sentinel, so only the exact
        // sentinel value counts as "disabled" — looks intentional, but verify
        // against the full source.
 1534         public virtual void  SetMaxBufferedDocs(
int maxBufferedDocs)
 
 1537             if (maxBufferedDocs != DISABLE_AUTO_FLUSH && maxBufferedDocs < 2)
 
 1538                 throw new ArgumentException(
"maxBufferedDocs must at least be 2 when enabled");
 
 1540             if (maxBufferedDocs == DISABLE_AUTO_FLUSH && (
int)GetRAMBufferSizeMB() == DISABLE_AUTO_FLUSH)
 
 1541                 throw new ArgumentException(
"at least one of ramBufferSize and maxBufferedDocs must be enabled");
 
 1543             docWriter.MaxBufferedDocs = maxBufferedDocs;
 
 1544             PushMaxBufferedDocs();
 
 1545             if (infoStream != null)
 
 1546                 Message(
"setMaxBufferedDocs " + maxBufferedDocs);
 
        // Propagates the doc-count trigger into the active LogDocMergePolicy
        // (note the cast below) so merge sizing stays consistent with the flush
        // trigger; the actual assignment falls in lines missing from this
        // extract — TODO confirm which policy property is set.
 1553         private void  PushMaxBufferedDocs()
 
 1555             if (docWriter.MaxBufferedDocs != DISABLE_AUTO_FLUSH)
 
 1560                     LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
 
 1561                     int maxBufferedDocs = docWriter.MaxBufferedDocs;
 
 1564                         if (infoStream != null)
 
 1565                             Message(
"now push maxBufferedDocs " + maxBufferedDocs + 
" to LogDocMergePolicy");
 
        // Returns the current doc-count flush trigger (may be DISABLE_AUTO_FLUSH).
 1577         [System.Diagnostics.CodeAnalysis.SuppressMessage(
"Microsoft.Design", 
"CA1024:UsePropertiesWhereAppropriate")]
 
 1578         public virtual int GetMaxBufferedDocs()
 
 1581             return docWriter.MaxBufferedDocs;
 
 1623         public virtual void  SetRAMBufferSizeMB(
double mb)
 
 1627                 throw new System.ArgumentException(
"ramBufferSize " + mb + 
" is too large; should be comfortably less than 2048");
 
 1629             if (mb != DISABLE_AUTO_FLUSH && mb <= 0.0)
 
 1630                 throw new System.ArgumentException(
"ramBufferSize should be > 0.0 MB when enabled");
 
 1631             if (mb == DISABLE_AUTO_FLUSH && GetMaxBufferedDocs() == DISABLE_AUTO_FLUSH)
 
 1632                 throw new System.ArgumentException(
"at least one of ramBufferSize and maxBufferedDocs must be enabled");
 
 1633             docWriter.SetRAMBufferSizeMB(mb);
 
 1634             if (infoStream != null)
 
 1635                 Message(
"setRAMBufferSizeMB " + mb);
 
 1639         [System.Diagnostics.CodeAnalysis.SuppressMessage(
"Microsoft.Design", 
"CA1024:UsePropertiesWhereAppropriate")]
 
 1640         public virtual double GetRAMBufferSizeMB()
 
 1642             return docWriter.GetRAMBufferSizeMB();
 
 1657         public virtual void  SetMaxBufferedDeleteTerms(
int maxBufferedDeleteTerms)
 
 1660             if (maxBufferedDeleteTerms != DISABLE_AUTO_FLUSH && maxBufferedDeleteTerms < 1)
 
 1661                 throw new System.ArgumentException(
"maxBufferedDeleteTerms must at least be 1 when enabled");
 
 1662             docWriter.MaxBufferedDeleteTerms = maxBufferedDeleteTerms;
 
 1663             if (infoStream != null)
 
 1664                 Message(
"setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
 
 1672         [System.Diagnostics.CodeAnalysis.SuppressMessage(
"Microsoft.Design", 
"CA1024:UsePropertiesWhereAppropriate")]
 
 1673         public virtual int GetMaxBufferedDeleteTerms()
 
 1676             return docWriter.MaxBufferedDeleteTerms;
 
 1697         public virtual int MergeFactor
 
 1699             set { LogMergePolicy.MergeFactor = value; }
 
 1700             get { 
return LogMergePolicy.MergeFactor; }
 
 1709         public static StreamWriter DefaultInfoStream
 
 1719         public virtual void  SetInfoStream(System.IO.StreamWriter infoStream)
 
 1722             SetMessageID(infoStream);
 
 1723             docWriter.SetInfoStream(infoStream);
 
 1724             deleter.SetInfoStream(infoStream);
 
 1725             if (infoStream != null)
 
 1729         private void  MessageState()
 
 1731             Message(
"setInfoStream: dir=" + directory + 
 
 1732                     " mergePolicy=" + mergePolicy + 
 
 1733                     " mergeScheduler=" + mergeScheduler +
 
 1734                     " ramBufferSizeMB=" + docWriter.GetRAMBufferSizeMB() + 
 
 1735                     " maxBufferedDocs=" +  docWriter.MaxBufferedDocs +
 
 1736                     " maxBuffereDeleteTerms=" + docWriter.MaxBufferedDeleteTerms +
 
 1737                     " maxFieldLength=" + maxFieldLength + 
 
 1738                     " index=" + SegString());
 
 1744         public virtual StreamWriter InfoStream
 
 1754         public virtual bool Verbose
 
 1756             get { 
return infoStream != null; }
 
 1760         public virtual long WriteLockTimeout
 
 1765                 return writeLockTimeout;
 
 1770                 this.writeLockTimeout = value;
 
 1777         public static long DefaultWriteLockTimeout
 
 1824         [Obsolete(
"Use Dispose() instead")]
 
 1871         public virtual void Dispose()
 
 1899         public virtual void Dispose(
bool waitForMerges)
 
 1901             Dispose(
true, waitForMerges);
 
 1904         protected virtual void Dispose(
bool disposing, 
bool waitForMerges)
 
 1917                         CloseInternal(waitForMerges);
 
 1945         [Obsolete(
"Use Dispose(bool) instead")]
 
 1946         public virtual void Close(
bool waitForMerges)
 
 1948             Dispose(waitForMerges);
 
 1954         private bool ShouldClose()
 
 1981         private void CloseInternal(
bool waitForMerges)
 
 1984             docWriter.PauseAllThreads();
 
 1988                 if (infoStream != null)
 
 1989                     Message(
"now flush at close");
 
 1991                 docWriter.Dispose();
 
 1997                     Flush(waitForMerges, 
true, 
true);
 
 2003                     mergeScheduler.Merge(
this);
 
 2005                 mergePolicy.Close();
 
 2007                 FinishMerges(waitForMerges);
 
 2010                 mergeScheduler.Close();
 
 2012                 if (infoStream != null)
 
 2013                     Message(
"now call final commit()");
 
 2020                 if (infoStream != null)
 
 2021                     Message(
"at close: " + SegString());
 
 2025                     readerPool.Dispose();
 
 2030                 if (writeLock != null)
 
 2032                     writeLock.Release(); 
 
 2040             catch (System.OutOfMemoryException oom)
 
 2042                 HandleOOM(oom, 
"closeInternal");
 
 2049                     System.Threading.Monitor.PulseAll(
this);
 
 2052                         if (docWriter != null)
 
 2053                             docWriter.ResumeAllThreads();
 
 2054                         if (infoStream != null)
 
 2055                             Message(
"hit exception while closing");
 
 2065         private bool FlushDocStores()
 
 2069                 if (infoStream != null)
 
 2071                     Message(
"flushDocStores segment=" + docWriter.DocStoreSegment);
 
 2074                 bool useCompoundDocStore = 
false;
 
 2075                 if (infoStream != null)
 
 2077                     Message(
"closeDocStores segment=" + docWriter.DocStoreSegment);
 
 2080                 System.String docStoreSegment;
 
 2082                 bool success = 
false;
 
 2085                     docStoreSegment = docWriter.CloseDocStore();
 
 2090                     if (!success && infoStream != null)
 
 2092                         Message(
"hit exception closing doc store segment");
 
 2096                 if (infoStream != null)
 
 2098                     Message(
"flushDocStores files=" + docWriter.ClosedFiles());
 
 2101                 useCompoundDocStore = mergePolicy.UseCompoundDocStore(segmentInfos);
 
 2103                 if (useCompoundDocStore && docStoreSegment != null && docWriter.ClosedFiles().Count != 0)
 
 2107                     if (infoStream != null)
 
 2109                         Message(
"create compound file " + docStoreSegment + 
"." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
 
 2114                     int numSegments = segmentInfos.Count;
 
 2115                     System.String compoundFileName = docStoreSegment + 
"." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION;
 
 2119                         CompoundFileWriter cfsWriter = 
new CompoundFileWriter(directory, compoundFileName);
 
 2120                         foreach(
string file 
in docWriter.closedFiles)
 
 2122                             cfsWriter.AddFile(file);
 
 2133                             if (infoStream != null)
 
 2134                                 Message(
"hit exception building compound file doc store for segment " + docStoreSegment);
 
 2135                             deleter.DeleteFile(compoundFileName);
 
 2140                     for (
int i = 0; i < numSegments; i++)
 
 2142                         SegmentInfo si = segmentInfos.Info(i);
 
 2143                         if (si.DocStoreOffset != - 1 && si.DocStoreSegment.Equals(docStoreSegment))
 
 2144                             si.DocStoreIsCompoundFile = 
true;
 
 2151                     deleter.DeleteNewFiles(docWriter.ClosedFiles());
 
 2154                 return useCompoundDocStore;
 
        // Total document count: docs buffered in RAM plus each committed
        // segment's docCount. Nothing visible here subtracts deletions, so
        // deleted-but-not-expunged docs appear to be included — consistent
        // with Lucene's maxDoc semantics, but verify against the full source.
 2185         public virtual int MaxDoc()
 
 2190                 if (docWriter != null)
 
 2191                     count = docWriter.NumDocsInRAM;
 
 2195                 for (
int i = 0; i < segmentInfos.Count; i++)
 
 2196                     count += segmentInfos.Info(i).docCount;
 
        // Like MaxDoc, but presumably subtracts per-segment deletion counts;
        // the loop body (original line ~2220) is missing from this extract —
        // TODO confirm.
 2209         public virtual int NumDocs()
 
 2214                 if (docWriter != null)
 
 2215                     count = docWriter.NumDocsInRAM;
 
 2219                 for (
int i = 0; i < segmentInfos.Count; i++)
 
        // True when buffered deletes exist or any segment reports deletions.
 2228         public virtual bool HasDeletions()
 
 2233                 if (docWriter.HasDeletes())
 
 2235                 for (
int i = 0; i < segmentInfos.Count; i++)
 
 2236                     if (segmentInfos.Info(i).HasDeletions())
 
        // Current cap on tokens indexed per field (see SetMaxFieldLength /
        // GetMaxFieldLength above; initialized in Init).
 2256         private int maxFieldLength;
 
 2303             AddDocument(doc, analyzer);
 
 2325             bool doFlush = 
false;
 
 2326             bool success = 
false;
 
 2331                     doFlush = docWriter.AddDocument(doc, analyzer);
 
 2339                         if (infoStream != null)
 
 2340                             Message(
"hit exception adding document");
 
 2346                             if (docWriter != null)
 
 2348                                 ICollection<string> files = docWriter.AbortedFiles();
 
 2350                                     deleter.DeleteNewFiles(files);
 
 2356                     Flush(
true, 
false, 
false);
 
 2358             catch (System.OutOfMemoryException oom)
 
 2360                 HandleOOM(oom, 
"addDocument");
 
 2375         public virtual void  DeleteDocuments(
Term term)
 
 2380                 bool doFlush = docWriter.BufferDeleteTerm(term);
 
 2382                     Flush(
true, 
false, 
false);
 
 2384             catch (System.OutOfMemoryException oom)
 
 2386                 HandleOOM(oom, 
"deleteDocuments(Term)");
 
 2403         public virtual void  DeleteDocuments(params 
Term[] terms)
 
 2408                 bool doFlush = docWriter.BufferDeleteTerms(terms);
 
 2410                     Flush(
true, 
false, 
false);
 
 2412             catch (System.OutOfMemoryException oom)
 
 2414                 HandleOOM(oom, 
"deleteDocuments(params Term[])");
 
 2429         public virtual void  DeleteDocuments(
Query query)
 
 2432             bool doFlush = docWriter.BufferDeleteQuery(query);
 
 2434                 Flush(
true, 
false, 
false);
 
 2450         public virtual void  DeleteDocuments(params 
Query[] queries)
 
 2453             bool doFlush = docWriter.BufferDeleteQueries(queries);
 
 2455                 Flush(
true, 
false, 
false);
 
 2479             UpdateDocument(term, doc, 
Analyzer);
 
 2507                 bool doFlush = 
false;
 
 2508                 bool success = 
false;
 
 2511                     doFlush = docWriter.UpdateDocument(term, doc, analyzer);
 
 2519                         if (infoStream != null)
 
 2520                             Message(
"hit exception updating document");
 
 2526                             ICollection<string> files = docWriter.AbortedFiles();
 
 2528                                 deleter.DeleteNewFiles(files);
 
 2533                     Flush(
true, 
false, 
false);
 
 2535             catch (System.OutOfMemoryException oom)
 
 2537                 HandleOOM(oom, 
"updateDocument");
 
 2542         internal int GetSegmentCount()
 
 2546                 return segmentInfos.Count;
 
 2551         internal int GetNumBufferedDocuments()
 
 2555                 return docWriter.NumDocsInRAM;
 
 2560         public  int GetDocCount(
int i)
 
 2564                 if (i >= 0 && i < segmentInfos.Count)
 
 2566                     return segmentInfos.Info(i).docCount;
 
 2576         internal int GetFlushCount()
 
 2585         internal int GetFlushDeletesCount()
 
 2589                 return flushDeletesCount;
 
 2593         internal System.String NewSegmentName()
 
 2610         private System.IO.StreamWriter infoStream = null;
 
 2611         private static System.IO.StreamWriter defaultInfoStream = null;
 
 2671         public virtual void  Optimize()
 
 2688         public virtual void  Optimize(
int maxNumSegments)
 
 2690             Optimize(maxNumSegments, 
true);
 
 2703         public virtual void  Optimize(
bool doWait)
 
 2705             Optimize(1, doWait);
 
 2718         public virtual void  Optimize(
int maxNumSegments, 
bool doWait)
 
 2722             if (maxNumSegments < 1)
 
 2723                 throw new System.ArgumentException(
"maxNumSegments must be >= 1; got " + maxNumSegments);
 
 2725             if (infoStream != null)
 
 2726                 Message(
"optimize: index now " + SegString());
 
 2728             Flush(
true, 
false, 
true);
 
 2732                 ResetMergeExceptions();
 
 2733                 segmentsToOptimize = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<
SegmentInfo>();
 
 2734                 optimizeMaxNumSegments = maxNumSegments;
 
 2735                 int numSegments = segmentInfos.Count;
 
 2736                 for (
int i = 0; i < numSegments; i++)
 
 2737                     segmentsToOptimize.Add(segmentInfos.Info(i));
 
 2741                 foreach(
MergePolicy.OneMerge merge in pendingMerges)
 
 2743                     merge.optimize = 
true;
 
 2744                     merge.maxNumSegmentsOptimize = maxNumSegments;
 
 2747                 foreach(
MergePolicy.OneMerge merge in runningMerges)
 
 2749                     merge.optimize = 
true;
 
 2750                     merge.maxNumSegmentsOptimize = maxNumSegments;
 
 2754             MaybeMerge(maxNumSegments, 
true);
 
 2765                             throw new System.SystemException(
"this writer hit an OutOfMemoryError; cannot complete optimize");
 
 2768                         if (mergeExceptions.Count > 0)
 
 2772                             int size = mergeExceptions.Count;
 
 2773                             for (
int i = 0; i < size; i++)
 
 2778                                     System.IO.IOException err;
 
 2779                                     System.Exception t = merge.GetException();
 
 2781                                         err = 
new System.IO.IOException(
"background merge hit exception: " + merge.SegString(directory), t);
 
 2783                                         err = 
new System.IO.IOException(
"background merge hit exception: " + merge.SegString(directory));
 
 2789                         if (OptimizeMergesPending())
 
        // True when any pending or running merge carries the optimize flag set
        // by Optimize(); used to decide whether an Optimize(doWait) caller must
        // keep waiting. The "return false" tail falls in lines missing from
        // this extract.
 2811         private bool OptimizeMergesPending()
 
 2815                 foreach (
MergePolicy.OneMerge merge in pendingMerges)
 
 2817                     if (merge.optimize) 
return true;
 
 2820                 foreach(MergePolicy.OneMerge merge in runningMerges)
 
 2822                     if (merge.optimize) 
return true;
 
 2839         public virtual void  ExpungeDeletes(
bool doWait)
 
 2843             if (infoStream != null)
 
 2844                 Message(
"expungeDeletes: index now " + SegString());
 
 2853                     int numMerges = spec.
merges.Count;
 
 2854                     for (
int i = 0; i < numMerges; i++)
 
 2855                         RegisterMerge(spec.merges[i]);
 
 2859             mergeScheduler.Merge(
this);
 
 2861             if (spec != null && doWait)
 
 2863                 int numMerges = spec.merges.Count;
 
 2866                     bool running = 
true;
 
 2872                             throw new System.SystemException(
"this writer hit an OutOfMemoryError; cannot complete expungeDeletes");
 
 2879                         for (
int i = 0; i < numMerges; i++)
 
 2882                             if (pendingMerges.Contains(merge) || runningMerges.Contains(merge))
 
 2884                             System.Exception t = merge.GetException();
 
 2887                                 System.IO.IOException ioe = 
new System.IO.IOException(
"background merge hit exception: " + merge.SegString(directory), t);
 
 2925         public virtual void  ExpungeDeletes()
 
 2927             ExpungeDeletes(
true);
 
 2943         public void  MaybeMerge()
 
 2948         private void  MaybeMerge(
bool optimize)
 
 2950             MaybeMerge(1, optimize);
 
 2953         private void  MaybeMerge(
int maxNumSegmentsOptimize, 
bool optimize)
 
 2955             UpdatePendingMerges(maxNumSegmentsOptimize, optimize);
 
 2956             mergeScheduler.Merge(
this);
 
 2959         private void  UpdatePendingMerges(
int maxNumSegmentsOptimize, 
bool optimize)
 
 2963                 System.Diagnostics.Debug.Assert(!optimize || maxNumSegmentsOptimize > 0);
 
 2976                 MergePolicy.MergeSpecification spec;
 
 2979                     spec = mergePolicy.FindMergesForOptimize(segmentInfos, maxNumSegmentsOptimize, segmentsToOptimize);
 
 2983                         int numMerges = spec.merges.Count;
 
 2984                         for (
int i = 0; i < numMerges; i++)
 
 2986                             MergePolicy.OneMerge merge = spec.merges[i];
 
 2987                             merge.optimize = 
true;
 
 2988                             merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
 
 2994                     spec = mergePolicy.FindMerges(segmentInfos);
 
 2999                     int numMerges = spec.merges.Count;
 
 3000                     for (
int i = 0; i < numMerges; i++)
 
 3001                         RegisterMerge(spec.merges[i]);
 
 3010         internal virtual MergePolicy.OneMerge GetNextMerge()
 
 3014                 if (pendingMerges.Count == 0)
 
 3019                     MergePolicy.OneMerge merge = pendingMerges.First.Value;
 
 3020                     pendingMerges.RemoveFirst();
 
 3021                     runningMerges.Add(merge);
 
 3030         private MergePolicy.OneMerge GetNextExternalMerge()
 
 3034                 if (pendingMerges.Count == 0)
 
 3038                     var it = pendingMerges.GetEnumerator();
 
 3039                     while (it.MoveNext())
 
 3041                         MergePolicy.OneMerge merge = it.Current;
 
 3042                         if (merge.isExternal)
 
 3045                             pendingMerges.Remove(merge);  
 
 3046                             runningMerges.Add(merge);
 
 3070         private void  StartTransaction(
bool haveReadLock)
 
 3075                 bool success = 
false;
 
 3078                     if (infoStream != null)
 
 3079                         Message(
"now start transaction");
 
 3081                     System.Diagnostics.Debug.Assert(docWriter.GetNumBufferedDeleteTerms() == 0 , 
 
 3082                         "calling startTransaction with buffered delete terms not supported: numBufferedDeleteTerms=" + docWriter.GetNumBufferedDeleteTerms());
 
 3083                     System.Diagnostics.Debug.Assert(docWriter.NumDocsInRAM == 0 , 
 
 3084                         "calling startTransaction with buffered documents not supported: numDocsInRAM=" + docWriter.NumDocsInRAM);
 
 3102                     if (!success && haveReadLock)
 
 3108                     UpgradeReadToWrite();
 
 3118                     localRollbackSegmentInfos = (SegmentInfos) segmentInfos.Clone();
 
 3120                     System.Diagnostics.Debug.Assert(!HasExternalSegments());
 
 3122                     localFlushedDocCount = docWriter.GetFlushedDocCount();
 
 3125                     deleter.IncRef(segmentInfos, 
false);
 
 3141         private void  RollbackTransaction()
 
 3146                 if (infoStream != null)
 
 3147                     Message(
"now rollback transaction");
 
 3149                 if (docWriter != null)
 
 3151                     docWriter.SetFlushedDocCount(localFlushedDocCount);
 
 3158                 FinishMerges(
false);
 
 3164                 segmentInfos.Clear();
 
 3165                 segmentInfos.AddRange(localRollbackSegmentInfos);
 
 3166                 localRollbackSegmentInfos = null;
 
 3175                 deleter.Checkpoint(segmentInfos, 
false);
 
 3178                 deleter.DecRef(segmentInfos);
 
 3187                 System.Threading.Monitor.PulseAll(
this);
 
 3189                 System.Diagnostics.Debug.Assert(!HasExternalSegments());
 
 3198         private void  CommitTransaction()
 
 3203                 if (infoStream != null)
 
 3204                     Message(
"now commit transaction");
 
 3210                 deleter.DecRef(localRollbackSegmentInfos);
 
 3212                 localRollbackSegmentInfos = null;
 
 3214                 System.Diagnostics.Debug.Assert(!HasExternalSegments());
 
 3230         public virtual void  Rollback()
 
 3239         private void  RollbackInternal()
 
 3242             bool success = 
false;
 
 3244             if (infoStream != null)
 
 3246                 Message(
"rollback");
 
 3249             docWriter.PauseAllThreads();
 
 3253                 FinishMerges(
false);
 
 3258                 mergePolicy.Close();
 
 3259                 mergeScheduler.Close();
 
 3264                     if (pendingCommit != null)
 
 3266                         pendingCommit.RollbackCommit(directory);
 
 3267                         deleter.DecRef(pendingCommit);
 
 3268                         pendingCommit = null;
 
 3269                         System.Threading.Monitor.PulseAll(
this);
 
 3277                     segmentInfos.Clear();
 
 3278                     segmentInfos.AddRange(rollbackSegmentInfos);
 
 3280                     System.Diagnostics.Debug.Assert(!HasExternalSegments());
 
 3284                     System.Diagnostics.Debug.Assert(TestPoint(
"rollback before checkpoint"));
 
 3288                     deleter.Checkpoint(segmentInfos, 
false);
 
 3293                 readerPool.Clear(null);
 
 3295                 lastCommitChangeCount = changeCount;
 
 3299             catch (System.OutOfMemoryException oom)
 
 3301                 HandleOOM(oom, 
"rollbackInternal");
 
 3309                         docWriter.ResumeAllThreads();
 
 3311                         System.Threading.Monitor.PulseAll(
this);
 
 3312                         if (infoStream != null)
 
 3313                             Message(
"hit exception during rollback");
 
 3318             CloseInternal(
false);
 
 3335         public virtual void  DeleteAll()
 
 3339                 docWriter.PauseAllThreads();
 
 3344                     FinishMerges(
false);
 
 3348                     docWriter.SetFlushedDocCount(0);
 
 3351                     segmentInfos.Clear();
 
 3354                     deleter.Checkpoint(segmentInfos, 
false);
 
 3358                     readerPool.Clear(null);
 
 3363                 catch (System.OutOfMemoryException oom)
 
 3365                     HandleOOM(oom, 
"deleteAll");
 
 3369                     docWriter.ResumeAllThreads();
 
 3370                     if (infoStream != null)
 
 3372                         Message(
"hit exception during deleteAll");
 
 3378         private void  FinishMerges(
bool waitForMerges)
 
 3388                     foreach(
MergePolicy.OneMerge merge in pendingMerges)
 
 3390                         if (infoStream != null)
 
 3391                             Message(
"now abort pending merge " + merge.SegString(directory));
 
 3395                     pendingMerges.Clear();
 
 3397                     foreach(MergePolicy.OneMerge merge in runningMerges)
 
 3399                         if (infoStream != null)
 
 3400                             Message(
"now abort running merge " + merge.SegString(directory));
 
 3415                     while (runningMerges.Count > 0)
 
 3417                         if (infoStream != null)
 
 3418                             Message(
"now wait for " + runningMerges.Count + 
" running merge to abort");
 
 3423                     System.Threading.Monitor.PulseAll(
this);
 
 3425                     System.Diagnostics.Debug.Assert(0 == mergingSegments.Count);
 
 3427                     if (infoStream != null)
 
 3428                         Message(
"all running merges have aborted");
 
 3447         public virtual void  WaitForMerges()
 
 3455                 while (pendingMerges.Count > 0 || runningMerges.Count > 0)
 
 3461                 System.Diagnostics.Debug.Assert(0 == mergingSegments.Count);
 
 3470         private void  Checkpoint()
 
 3475                 deleter.Checkpoint(segmentInfos, 
false);
 
 3479         private void  FinishAddIndexes()
 
 3484         private void  BlockAddIndexes(
bool includePendingClose)
 
 3489             bool success = 
false;
 
 3495                 EnsureOpen(includePendingClose);
 
 3505         private void  ResumeAddIndexes()
 
 3510         private void  ResetMergeExceptions()
 
 3514                 mergeExceptions = 
new List<MergePolicy.OneMerge>();
 
 3519         private void  NoDupDirs(
Directory[] dirs)
 
 3521             HashSet<Directory> dups = 
new HashSet<Directory>();
 
 3522             for (
int i = 0; i < dirs.Length; i++)
 
 3524                 if (dups.Contains(dirs[i]))
 
 3526                     throw new System.ArgumentException(
"Directory " + dirs[i] + 
" appears more than once");
 
 3528                 if (dirs[i] == directory)
 
 3529                     throw new System.ArgumentException(
"Cannot add directory to itself");
 
 3583         public virtual void  AddIndexesNoOptimize(params 
Directory[] dirs)
 
 3591             docWriter.PauseAllThreads();
 
 3595                 if (infoStream != null)
 
 3596                     Message(
"flush at addIndexesNoOptimize");
 
 3597                 Flush(
true, 
false, 
true);
 
 3599                 bool success = 
false;
 
 3601                 StartTransaction(
false);
 
 3611                         for (
int i = 0; i < dirs.Length; i++)
 
 3613                             if (directory == dirs[i])
 
 3616                                 throw new System.ArgumentException(
"Cannot add this index to itself");
 
 3621                             for (
int j = 0; j < sis.Count; j++)
 
 3624                                 System.
Diagnostics.Debug.Assert(!segmentInfos.Contains(info), 
"dup info dir=" + info.
dir + 
" name=" + info.
name);
 
 3626                                 segmentInfos.Add(info); 
 
 3632                     docWriter.UpdateFlushedDocCount(docCount);
 
 3643                     ResolveExternalSegments();
 
 3653                         CommitTransaction();
 
 3657                         RollbackTransaction();
 
 3661             catch (System.OutOfMemoryException oom)
 
 3663                 HandleOOM(oom, 
"addIndexesNoOptimize");
 
 3667                 if (docWriter != null)
 
 3669                     docWriter.ResumeAllThreads();
 
 3674         private bool HasExternalSegments()
 
 3676             return segmentInfos.HasExternalSegments(directory);
 
 3686         private void  ResolveExternalSegments()
 
 3695                 SegmentInfo info = null;
 
 3696                 MergePolicy.OneMerge merge = null;
 
 3701                         throw new MergePolicy.MergeAbortedException(
"rollback() was called or addIndexes* hit an unhandled exception");
 
 3703                     int numSegments = segmentInfos.Count;
 
 3706                     for (
int i = 0; i < numSegments; i++)
 
 3708                         info = segmentInfos.Info(i);
 
 3709                         if (info.dir != directory)
 
 3712                             MergePolicy.OneMerge newMerge = 
new MergePolicy.OneMerge(segmentInfos.Range(i, 1 + i), mergePolicy is LogMergePolicy && UseCompoundFile);
 
 3718                             if (RegisterMerge(newMerge))
 
 3725                                 pendingMerges.Remove(merge);    
 
 3726                                 runningMerges.Add(merge);
 
 3732                     if (!done && merge == null)
 
 3738                         merge = GetNextExternalMerge();
 
 3740                     if (!done && merge == null)
 
 3758                 mergeScheduler.Merge(
this);
 
 3787             docWriter.PauseAllThreads();
 
 3802                 System.String mergedName = null;
 
 3805                 bool success = 
false;
 
 3809                     Flush(
true, 
false, 
true);
 
 3824                 StartTransaction(
true);
 
 3828                     mergedName = NewSegmentName();
 
 3834                         if (segmentInfos.Count == 1)
 
 3845                         if (sReader != null)
 
 3846                             merger.
Add(sReader);
 
 3848                         for (
int i = 0; i < readers.Length; i++)
 
 3850                             merger.
Add(readers[i]);
 
 3852                         int docCount = merger.
Merge(); 
 
 3856                             segmentInfos.Clear(); 
 
 3857                             info = 
new SegmentInfo(mergedName, docCount, directory, 
false, 
true, - 1, null, 
false, merger.HasProx());
 
 3858                             SetDiagnostics(info, 
"addIndexes(params IndexReader[])");
 
 3859                             segmentInfos.Add(info);
 
 3863                         docWriter.UpdateFlushedDocCount(docCount);
 
 3869                         if (sReader != null)
 
 3871                             readerPool.Release(sReader);
 
 3879                         if (infoStream != null)
 
 3880                             Message(
"hit exception in addIndexes during merge");
 
 3881                         RollbackTransaction();
 
 3885                         CommitTransaction();
 
 3889                 if (mergePolicy is LogMergePolicy && UseCompoundFile)
 
 3892                     IList<string> files = null;
 
 3900                         if (segmentInfos.Contains(info))
 
 3902                             files = info.
Files();
 
 3903                             deleter.IncRef(files);
 
 3912                         StartTransaction(
false);
 
 3919                                 info.SetUseCompoundFile(
true);
 
 3928                                 deleter.DecRef(files);
 
 3933                                 if (infoStream != null)
 
 3934                                     Message(
"hit exception building compound file in addIndexes during merge");
 
 3936                                 RollbackTransaction();
 
 3940                                 CommitTransaction();
 
 3946             catch (System.OutOfMemoryException oom)
 
 3948                 HandleOOM(oom, 
"addIndexes(params IndexReader[])");
 
 3952                 if (docWriter != null)
 
 3954                     docWriter.ResumeAllThreads();
 
 3964         protected  virtual void  DoAfterFlush()
 
 3972         protected virtual void DoBeforeFlush() 
 
 3985         public void  PrepareCommit()
 
 3988             PrepareCommit(null);
 
 4021         private void PrepareCommit(IDictionary<string, string> commitUserData)
 
 4025                 throw new System.SystemException(
"this writer hit an OutOfMemoryError; cannot commit");
 
 4028             if (pendingCommit != null)
 
 4029                 throw new System.SystemException(
"prepareCommit was already called with no corresponding call to commit");
 
 4031             if (infoStream != null)
 
 4032                 Message(
"prepareCommit: flush");
 
 4034             Flush(
true, 
true, 
true);
 
 4036             StartCommit(0, commitUserData);
 
 4040         private Object commitLock = 
new Object();
 
 4042         private void  Commit(
long sizeInBytes)
 
 4045                 StartCommit(sizeInBytes, null);
 
 4081         public void  Commit()
 
 4095         public void Commit(IDictionary<string, string> commitUserData)
 
 4099             if (infoStream != null)
 
 4101                 Message(
"commit: start");
 
 4106                 if (infoStream != null)
 
 4108                     Message(
"commit: enter lock");
 
 4110                 if (pendingCommit == null)
 
 4112                     if (infoStream != null)
 
 4114                         Message(
"commit: now prepare");
 
 4116                     PrepareCommit(commitUserData);
 
 4118                 else if (infoStream != null)
 
 4120                     Message(
"commit: already prepared");
 
 4127         private void  FinishCommit()
 
 4132                 if (pendingCommit != null)
 
 4136                         if (infoStream != null)
 
 4137                             Message(
"commit: pendingCommit != null");
 
 4138                         pendingCommit.FinishCommit(directory);
 
 4139                         if (infoStream != null)
 
 4140                             Message(
"commit: wrote segments file \"" + pendingCommit.GetCurrentSegmentFileName() + 
"\"");
 
 4141                         lastCommitChangeCount = pendingCommitChangeCount;
 
 4142                         segmentInfos.UpdateGeneration(pendingCommit);
 
 4143                         segmentInfos.UserData = pendingCommit.UserData;
 
 4144                         SetRollbackSegmentInfos(pendingCommit);
 
 4145                         deleter.Checkpoint(pendingCommit, 
true);
 
 4149                         deleter.DecRef(pendingCommit);
 
 4150                         pendingCommit = null;
 
 4151                         System.Threading.Monitor.PulseAll(
this);
 
 4154                 else if (infoStream != null)
 
 4156                     Message(
"commit: pendingCommit == null; skip");
 
 4159                 if (infoStream != null)
 
 4161                     Message(
"commit: done");
 
 4178         public  void  Flush(
bool triggerMerge, 
bool flushDocStores, 
bool flushDeletes)
 
 4182             if (DoFlush(flushDocStores, flushDeletes) && triggerMerge)
 
 4189         private bool DoFlush(
bool flushDocStores, 
bool flushDeletes)
 
 4197                         return DoFlushInternal(flushDocStores, flushDeletes);
 
 4201                         if (docWriter.DoBalanceRAM())
 
 4203                             docWriter.BalanceRAM();
 
 4209                     docWriter.ClearFlushPending();
 
 4217         private bool DoFlushInternal(
bool flushDocStores, 
bool flushDeletes)
 
 4223                     throw new System.SystemException(
"this writer hit an OutOfMemoryError; cannot flush");
 
 4228                 System.Diagnostics.Debug.Assert(TestPoint(
"startDoFlush"));
 
 4237                 flushDeletes |= docWriter.DoApplyDeletes();
 
 4242                 if (infoStream != null)
 
 4244                     Message(
"flush: now pause all indexing threads");
 
 4246                 if (docWriter.PauseAllThreads())
 
 4248                     docWriter.ResumeAllThreads();
 
 4255                     SegmentInfo newSegment = null;
 
 4257                     int numDocs = docWriter.NumDocsInRAM;
 
 4260                     bool flushDocs = numDocs > 0;
 
 4262                     System.String docStoreSegment = docWriter.DocStoreSegment;
 
 4264                     System.Diagnostics.Debug.Assert(docStoreSegment != null || numDocs == 0, 
"dss=" + docStoreSegment + 
" numDocs=" + numDocs);
 
 4266                     if (docStoreSegment == null)
 
 4267                         flushDocStores = 
false;
 
 4269                     int docStoreOffset = docWriter.DocStoreOffset;
 
 4271                     bool docStoreIsCompoundFile = 
false;
 
 4273                     if (infoStream != null)
 
 4275                         Message(
"  flush: segment=" + docWriter.Segment + 
" docStoreSegment=" + docWriter.DocStoreSegment + 
" docStoreOffset=" + docStoreOffset + 
" flushDocs=" + flushDocs + 
" flushDeletes=" + flushDeletes + 
" flushDocStores=" + flushDocStores + 
" numDocs=" + numDocs + 
" numBufDelTerms=" + docWriter.GetNumBufferedDeleteTerms());
 
 4276                         Message(
"  index before flush " + SegString());
 
 4282                     if (flushDocStores && (!flushDocs || !docWriter.Segment.Equals(docWriter.DocStoreSegment)))
 
 4285                         if (infoStream != null)
 
 4286                             Message(
"  flush shared docStore segment " + docStoreSegment);
 
 4288                         docStoreIsCompoundFile = FlushDocStores();
 
 4289                         flushDocStores = 
false;
 
 4292                     System.String segment = docWriter.Segment;
 
 4295                     System.Diagnostics.Debug.Assert(segment != null || !flushDocs);
 
 4300                         bool success = 
false;
 
 4301                         int flushedDocCount;
 
 4305                             flushedDocCount = docWriter.Flush(flushDocStores);
 
 4306                             if (infoStream != null)
 
 4308                                 Message(
"flushedFiles=" + docWriter.GetFlushedFiles());
 
 4316                                 if (infoStream != null)
 
 4317                                     Message(
"hit exception flushing segment " + segment);
 
 4318                                 deleter.Refresh(segment);
 
 4322                         if (0 == docStoreOffset && flushDocStores)
 
 4327                             System.Diagnostics.Debug.Assert(docStoreSegment != null);
 
 4328                             System.Diagnostics.Debug.Assert(docStoreSegment.Equals(segment));
 
 4329                             docStoreOffset = - 1;
 
 4330                             docStoreIsCompoundFile = 
false;
 
 4331                             docStoreSegment = null;
 
 4337                         newSegment = 
new SegmentInfo(segment, flushedDocCount, directory, 
false, 
true, docStoreOffset, docStoreSegment, docStoreIsCompoundFile, docWriter.HasProx());
 
 4338                         SetDiagnostics(newSegment, 
"flush");
 
 4341                     docWriter.PushDeletes();
 
 4345                         segmentInfos.Add(newSegment);
 
 4349                     if (flushDocs && mergePolicy.UseCompoundFile(segmentInfos, newSegment))
 
 4352                         bool success = 
false;
 
 4355                             docWriter.CreateCompoundFile(segment);
 
 4362                                 if (infoStream != null)
 
 4363                                     Message(
"hit exception creating compound file for newly flushed segment " + segment);
 
 4364                                 deleter.DeleteFile(segment + 
"." + IndexFileNames.COMPOUND_FILE_EXTENSION);
 
 4368                         newSegment.SetUseCompoundFile(
true);
 
 4384                 catch (System.OutOfMemoryException oom)
 
 4386                     HandleOOM(oom, 
"doFlush");
 
 4392                     docWriter.ResumeAllThreads();
 
 4400         public long RamSizeInBytes()
 
 4403             return docWriter.GetRAMUsed();
 
 4409         public int NumRamDocs()
 
 4414                 return docWriter.NumDocsInRAM;
 
 4418         private int EnsureContiguousMerge(
MergePolicy.OneMerge merge)
 
 4421             int first = segmentInfos.IndexOf(merge.segments.Info(0));
 
 4423                 throw new MergePolicy.MergeException(
"could not find segment " + merge.segments.Info(0).name + 
" in current index " + SegString(), directory);
 
 4425             int numSegments = segmentInfos.Count;
 
 4427             int numSegmentsToMerge = merge.segments.Count;
 
 4428             for (
int i = 0; i < numSegmentsToMerge; i++)
 
 4432                 if (first + i >= numSegments || !segmentInfos.Info(first + i).Equals(info))
 
 4434                     if (segmentInfos.IndexOf(info) == - 1)
 
 4435                         throw new MergePolicy.MergeException(
"MergePolicy selected a segment (" + info.
name + 
") that is not in the current index " + SegString(), directory);
 
 4437                         throw new MergePolicy.MergeException(
"MergePolicy selected non-contiguous segments to merge (" + merge.SegString(directory) + 
" vs " + SegString() + 
"), which IndexWriter (currently) cannot handle", directory);
 
 4454         private void  CommitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergeReader)
 
 4459                 System.Diagnostics.Debug.Assert(TestPoint(
"startCommitMergeDeletes"));
 
 4461                 SegmentInfos sourceSegments = merge.segments;
 
 4463                 if (infoStream != null)
 
 4464                     Message(
"commitMergeDeletes " + merge.SegString(directory));
 
 4471                 for (
int i = 0; i < sourceSegments.Count; i++)
 
 4473                     SegmentInfo info = sourceSegments.Info(i);
 
 4475                     SegmentReader previousReader = merge.readersClone[i];
 
 4476                     SegmentReader currentReader = merge.readers[i];
 
 4477                     if (previousReader.HasDeletions)
 
 4487                         if (currentReader.NumDeletedDocs > previousReader.NumDeletedDocs)
 
 4492                             for (
int j = 0; j < docCount; j++)
 
 4494                                 if (previousReader.IsDeleted(j))
 
 4496                                     System.Diagnostics.Debug.Assert(currentReader.IsDeleted(j));
 
 4500                                     if (currentReader.IsDeleted(j))
 
 4502                                         mergeReader.DoDelete(docUpto);
 
 4511                             docUpto += docCount - previousReader.NumDeletedDocs;
 
 4514                     else if (currentReader.HasDeletions)
 
 4518                         for (
int j = 0; j < docCount; j++)
 
 4520                             if (currentReader.IsDeleted(j))
 
 4522                                 mergeReader.DoDelete(docUpto);
 
 4530                         docUpto += info.docCount;
 
 4533                 System.Diagnostics.Debug.Assert(mergeReader.NumDeletedDocs == delCount);
 
 4535                 mergeReader.hasChanges = delCount > 0;
 
 4540         private bool CommitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, 
int mergedDocCount, SegmentReader mergedReader)
 
 4545                 System.Diagnostics.Debug.Assert(TestPoint(
"startCommitMerge"));
 
 4549                     throw new System.SystemException(
"this writer hit an OutOfMemoryError; cannot complete merge");
 
 4552                 if (infoStream != null)
 
 4553                     Message(
"commitMerge: " + merge.SegString(directory) + 
" index=" + SegString());
 
 4555                 System.Diagnostics.Debug.Assert(merge.registerDone);
 
 4563                 if (merge.IsAborted())
 
 4565                     if (infoStream != null)
 
 4566                         Message(
"commitMerge: skipping merge " + merge.SegString(directory) + 
": it was aborted");
 
 4571                 int start = EnsureContiguousMerge(merge);
 
 4573                 CommitMergedDeletes(merge, mergedReader);
 
 4574                 docWriter.RemapDeletes(segmentInfos, merger.GetDocMaps(), merger.GetDelCounts(), merge, mergedDocCount);
 
 4580                 SetMergeDocStoreIsCompoundFile(merge);
 
 4582                 merge.info.HasProx = merger.HasProx();
 
 4584                 segmentInfos.RemoveRange(start, start + merge.segments.Count - start);
 
 4585                 System.Diagnostics.Debug.Assert(!segmentInfos.Contains(merge.info));
 
 4586                 segmentInfos.Insert(start, merge.info);
 
 4588                 CloseMergeReaders(merge, 
false);
 
 4597                 readerPool.Clear(merge.segments);
 
 4602                     segmentsToOptimize.Add(merge.info);
 
 4608         private void  HandleMergeException(System.Exception t, MergePolicy.OneMerge merge)
 
 4611             if (infoStream != null)
 
 4613                 Message(
"handleMergeException: merge=" + merge.SegString(directory) + 
" exc=" + t);
 
 4619             merge.SetException(t);
 
 4620             AddMergeException(merge);
 
 4622             if (t is MergePolicy.MergeAbortedException)
 
 4630                 if (merge.isExternal)
 
 4633             else if (t is System.IO.IOException || t is System.SystemException || t is System.ApplicationException)
 
 4640                 System.Diagnostics.Debug.Fail(
"Exception is not expected type!");
 
 4641                 throw new System.SystemException(null, t);
 
 4655             bool success = 
false;
 
 4665                         if (infoStream != null)
 
 4667                             Message(
"now merge\n  merge=" + merge.SegString(directory) + 
"\n  merge=" + merge + 
"\n  index=" + SegString());
 
 4671                         MergeSuccess(merge);
 
 4674                     catch (System.Exception t)
 
 4676                         HandleMergeException(t, merge);
 
 4687                             if (infoStream != null)
 
 4688                                 Message(
"hit exception during merge");
 
 4689                             if (merge.info != null && !segmentInfos.Contains(merge.info))
 
 4690                                 deleter.Refresh(merge.info.name);
 
 4696                         if (success && !merge.IsAborted() && !closed && !closing)
 
 4697                             UpdatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
 
 4701             catch (System.OutOfMemoryException oom)
 
 4703                 HandleOOM(oom, 
"merge");
 
 4708         internal virtual void  MergeSuccess(MergePolicy.OneMerge merge)
 
         // Validates and registers a merge: refuses aborted merges, skips
         // merges whose segments are already merging or no longer live, then
         // queues the merge on pendingMerges and marks its segments as busy.
         // NOTE(review): many original lines (braces, early returns, abort
         // check) were dropped by the extraction; fragments kept verbatim.
 4719         internal bool RegisterMerge(MergePolicy.OneMerge merge)
 
 4724                 if (merge.registerDone)
 
                 // Refuse to register a merge that was aborted before it started.
 4730                     throw new MergePolicy.MergeAbortedException(
"merge is aborted: " + merge.SegString(directory));
 
 4733                 int count = merge.segments.Count;
 
 4734                 bool isExternal = 
false;
 
 4735                 for (
int i = 0; i < count; i++)
 
 4737                     SegmentInfo info = merge.segments.Info(i);
 
                     // A segment may participate in only one merge at a time.
 4738                     if (mergingSegments.Contains(info))
 
 4742                     if (segmentInfos.IndexOf(info) == -1)
 
 4746                     if (info.dir != directory)
 
                     // Propagate optimize intent when a source segment belongs
                     // to a pending Optimize() request.
 4750                     if (segmentsToOptimize.Contains(info))
 
 4752                         merge.optimize = 
true;
 
 4753                         merge.maxNumSegmentsOptimize = optimizeMaxNumSegments;
 
 4757                 EnsureContiguousMerge(merge);
 
 4759                 pendingMerges.AddLast(merge);
 
 4761                 if (infoStream != null)
 
 4762                     Message(
"add merge to pendingMerges: " + merge.SegString(directory) + 
" [total " + pendingMerges.Count + 
" pending]");
 
 4764                 merge.mergeGen = mergeGen;
 
 4765                 merge.isExternal = isExternal;
 
                 // From here on these segments are off-limits to other merges
                 // until MergeFinish releases them.
 4771                 for (
int i = 0; i < count; i++)
 
 4773                     SegmentInfo si = merge.segments.Info(i);
 
 4774                     mergingSegments.Add(si);
 
 4778                 merge.registerDone = 
true;
 
         // Wrapper around _MergeInit that tracks success so cleanup (likely
         // MergeFinish) can run on failure; most of the wrapper body was
         // dropped by the extraction — TODO confirm against full source.
 4786         internal void  MergeInit(MergePolicy.OneMerge merge)
 
 4790                 bool success = 
false;
 
         // Initializes a registered merge: validates state, decides whether
         // shared doc stores (stored fields / term vectors) must be merged,
         // optionally flushes the in-RAM doc store, allocates the target
         // SegmentInfo and records diagnostics on it. Many structural lines
         // (braces, else-branches, early returns) are missing from this
         // extraction; the surviving fragments are preserved verbatim.
 4806         private void  _MergeInit(MergePolicy.OneMerge merge)
 
 4811                 System.Diagnostics.Debug.Assert(TestPoint(
"startMergeInit"));
 
 4813                 System.Diagnostics.Debug.Assert(merge.registerDone);
 
 4814                 System.Diagnostics.Debug.Assert(!merge.optimize || merge.maxNumSegmentsOptimize > 0);
 
                 // Writer previously hit OOM: refuse to mutate the index.
 4818                     throw new System.SystemException(
"this writer hit an OutOfMemoryError; cannot merge");
 
 4821                 if (merge.info != null)
 
 4825                 if (merge.IsAborted())
 
 4830                 SegmentInfos sourceSegments = merge.segments;
 
 4831                 int end = sourceSegments.Count;
 
 4839                 System.String lastDocStoreSegment = null;
 
                 // Decide whether the merge must also merge the doc stores:
                 // required when any source segment has deletions, owns its
                 // own doc store, or the sources reference different or
                 // non-contiguous shared doc stores.
 4842                 bool mergeDocStores = 
false;
 
 4843                 bool doFlushDocStore = 
false;
 
 4844                 System.String currentDocStoreSegment = docWriter.DocStoreSegment;
 
 4848                 for (
int i = 0; i < end; i++)
 
 4850                     SegmentInfo si = sourceSegments.Info(i);
 
 4853                     if (si.HasDeletions())
 
 4854                         mergeDocStores = 
true;
 
 4858                     if (- 1 == si.DocStoreOffset)
 
 4859                         mergeDocStores = 
true;
 
 4863                     System.String docStoreSegment = si.DocStoreSegment;
 
 4864                     if (docStoreSegment == null)
 
 4865                         mergeDocStores = 
true;
 
 4866                     else if (lastDocStoreSegment == null)
 
 4867                         lastDocStoreSegment = docStoreSegment;
 
 4868                     else if (!lastDocStoreSegment.Equals(docStoreSegment))
 
 4869                         mergeDocStores = 
true;
 
                     // Segments sharing a doc store must be contiguous in it.
 4876                         next = si.DocStoreOffset + si.docCount;
 
 4877                     else if (next != si.DocStoreOffset)
 
 4878                         mergeDocStores = 
true;
 
 4880                         next = si.DocStoreOffset + si.docCount;
 
 4884                     if (lastDir != si.dir)
 
 4885                         mergeDocStores = 
true;
 
                     // A source still references the doc store being written
                     // by DocumentsWriter: it must be flushed before merging.
 4889                     if (si.DocStoreOffset != - 1 && currentDocStoreSegment != null && si.DocStoreSegment.Equals(currentDocStoreSegment))
 
 4891                         doFlushDocStore = 
true;
 
                 // With a segment warmer installed the merged reader cannot
                 // share the live doc store, so force a private copy.
 4898                 if (!mergeDocStores && mergedSegmentWarmer != null && currentDocStoreSegment != null && lastDocStoreSegment != null && lastDocStoreSegment.Equals(currentDocStoreSegment))
 
 4900                     mergeDocStores = 
true;
 
 4904                 System.String docStoreSegment2;
 
 4905                 bool docStoreIsCompoundFile;
 
                     // Merging doc stores: the new segment gets its own store.
 4909                     docStoreOffset = - 1;
 
 4910                     docStoreSegment2 = null;
 
 4911                     docStoreIsCompoundFile = 
false;
 
                     // Not merging: inherit the shared store of the sources.
 4915                     SegmentInfo si = sourceSegments.Info(0);
 
 4916                     docStoreOffset = si.DocStoreOffset;
 
 4917                     docStoreSegment2 = si.DocStoreSegment;
 
 4918                     docStoreIsCompoundFile = si.DocStoreIsCompoundFile;
 
 4921                 if (mergeDocStores && doFlushDocStore)
 
 4931                     if (infoStream != null)
 
 4932                         Message(
"now flush at merge");
 
 4933                     DoFlush(
true, 
false);
 
 4936                 merge.mergeDocStores = mergeDocStores;
 
                 // Allocate the target segment with 0 docs for now; docCount
                 // is set after the actual merge completes (see MergeMiddle).
 4941                 merge.info = 
new SegmentInfo(NewSegmentName(), 0, directory, 
false, 
true, docStoreOffset, docStoreSegment2, docStoreIsCompoundFile, 
false);
 
 4944                 IDictionary<string, string> details = 
new Dictionary<string, string>();
 
 4945                 details[
"optimize"] = merge.optimize + 
"";
 
 4946                 details[
"mergeFactor"] = end + 
"";
 
 4947                 details[
"mergeDocStores"] = mergeDocStores + 
"";
 
 4948                 SetDiagnostics(merge.info, 
"merge", details);
 
                 // Also lock the not-yet-committed target segment so no other
                 // merge can involve it.
 4954                 mergingSegments.Add(merge.info);
 
 
 4958         private void  SetDiagnostics(SegmentInfo info, System.String source)
 
 4960             SetDiagnostics(info, source, null);
 
 4963         private void SetDiagnostics(SegmentInfo info, System.String source, IDictionary<string, string> details)
 
 4965             IDictionary<string, string> diagnostics = 
new Dictionary<string,string>();
 
 4966             diagnostics[
"source"] = source;
 
 4967             diagnostics[
"lucene.version"] = 
Constants.LUCENE_VERSION;
 
 4968             diagnostics[
"os"] = 
Constants.OS_NAME + 
"";
 
 4969             diagnostics[
"os.arch"] = 
Constants.OS_ARCH + 
"";
 
 4970             diagnostics[
"os.version"] = 
Constants.OS_VERSION + 
"";
 
 4971             diagnostics[
"java.version"] = 
Constants.JAVA_VERSION + 
"";
 
 4972             diagnostics[
"java.vendor"] = 
Constants.JAVA_VENDOR + 
"";
 
 4973             if (details != null)
 
 4977                 foreach (
string key 
in details.Keys)
 
 4979                     diagnostics[key] = details[key];
 
 4982             info.Diagnostics = diagnostics;
 
         // Releases the registration taken by RegisterMerge: wakes waiting
         // threads and removes the merge's segments from mergingSegments.
         // Structural lines (braces, lock) were dropped by the extraction.
 4988         internal void  MergeFinish(MergePolicy.OneMerge merge)
 
                 // Wake threads blocked (e.g. in DoWait) waiting on merge state.
 4995                 System.Threading.Monitor.PulseAll(
this);
 
 4999                 if (merge.registerDone)
 
 5001                     SegmentInfos sourceSegments = merge.segments;
 
 5002                     int end = sourceSegments.Count;
 
 5003                     for (
int i = 0; i < end; i++)
 
 5004                         mergingSegments.Remove(sourceSegments.Info(i));
 
 5005                     if(merge.info != null)
 
 5006                         mergingSegments.Remove(merge.info);
 
 5007                     merge.registerDone = 
false;
 
 5010                 runningMerges.Remove(merge);
 
 
         // If any committed segment shares the merged segment's doc store and
         // stores it as a compound file, mark the merged segment's doc store
         // as compound too, so file lookups use the right extension.
 5014         private void SetMergeDocStoreIsCompoundFile(MergePolicy.OneMerge merge)
 
 5018                 string mergeDocStoreSegment = merge.info.DocStoreSegment;
 
 5019                 if (mergeDocStoreSegment != null && !merge.info.DocStoreIsCompoundFile)
 
 5021                     int size = segmentInfos.Count;
 
 5022                     for (
int i = 0; i < size; i++)
 
 5024                         SegmentInfo info = segmentInfos.Info(i);
 
 5025                         string docStoreSegment = info.DocStoreSegment;
 
 5026                         if (docStoreSegment != null &&
 
 5027                             docStoreSegment.Equals(mergeDocStoreSegment) &&
 
 5028                             info.DocStoreIsCompoundFile)
 
 5030                             merge.info.DocStoreIsCompoundFile = 
true;
 
 
         // Releases the pooled readers and closes the clones opened by
         // MergeMiddle. With suppressExceptions=true (failure path) errors are
         // swallowed so the original exception propagates; with false (success
         // path) errors surface. try/catch scaffolding missing from extraction.
 5038         private void CloseMergeReaders(MergePolicy.OneMerge merge, 
bool suppressExceptions)
 
 5042                 int numSegments = merge.segments.Count;
 
 5043                 if (suppressExceptions)
 
 5047                     for (
int i = 0; i < numSegments; i++)
 
 5049                         if (merge.readers[i] != null)
 
 5053                                 readerPool.Release(merge.readers[i], 
false);
 
 5058                             merge.readers[i] = null;
 
 5061                         if (merge.readersClone[i] != null)
 
 5065                                 merge.readersClone[i].Close();
 
                             // The clone must be fully released by now.
 5072                             System.Diagnostics.Debug.Assert(merge.readersClone[i].RefCount == 0); 
 
 5073                             merge.readersClone[i] = null;
 
 5079                     for (
int i = 0; i < numSegments; i++)
 
 5081                         if (merge.readers[i] != null)
 
 5083                             readerPool.Release(merge.readers[i], 
true);
 
 5084                             merge.readers[i] = null;
 
 5087                         if (merge.readersClone[i] != null)
 
 5089                             merge.readersClone[i].Close();
 
 5091                             System.Diagnostics.Debug.Assert(merge.readersClone[i].RefCount == 0);
 
 5092                             merge.readersClone[i] = null;
 
 
         // Performs the actual merge work: opens pooled SegmentReaders (and
         // clones) for the source segments, runs SegmentMerger, optionally
         // builds the compound file, then warms and commits the merged
         // reader. Returns the merged doc count. Heavily elided by the
         // extraction (try/catch/finally scaffolding missing); the surviving
         // fragments are preserved verbatim.
 5104         private int MergeMiddle(MergePolicy.OneMerge merge)
 
 5107             merge.CheckAborted(directory);
 
 5109             System.String mergedName = merge.info.name;
 
 5111             SegmentMerger merger = null;
 
 5113             int mergedDocCount = 0;
 
 5115             SegmentInfos sourceSegments = merge.segments;
 
 5116             int numSegments = sourceSegments.Count;
 
 5118             if (infoStream != null)
 
 5119                 Message(
"merging " + merge.SegString(directory));
 
 5121             merger = 
new SegmentMerger(
this, mergedName, merge);
 
 5123             merge.readers = 
new SegmentReader[numSegments];
 
 5124             merge.readersClone = 
new SegmentReader[numSegments];
 
 5126             bool mergeDocStores = 
false;
 
 5128             String currentDocStoreSegment;
 
 5130                 currentDocStoreSegment = docWriter.DocStoreSegment;
 
 5132             bool currentDSSMerged = 
false;
 
 5136             bool success = 
false;
 
 5139                 int totDocCount = 0;
 
 5141                 for (
int i = 0; i < numSegments; i++)
 
 5144                     SegmentInfo info = sourceSegments.Info(i);
 
                     // Hold a pooled reader for each source segment...
 5148                     SegmentReader reader = merge.readers[i] = readerPool.Get(info, merge.mergeDocStores, MERGE_READ_BUFFER_SIZE, -1);
 
                     // ...and merge from a clone so concurrent deletes against
                     // the live reader don't race with the merge.
 5153                     SegmentReader clone = merge.readersClone[i] = (SegmentReader)reader.Clone(
true);
 
 5156                     if (clone.HasDeletions)
 
 5158                         mergeDocStores = 
true;
 
 5161                     if (info.DocStoreOffset != -1 && currentDocStoreSegment != null)
 
 5163                         currentDSSMerged |= currentDocStoreSegment.Equals(info.DocStoreSegment);
 
 5166                     totDocCount += clone.NumDocs();
 
 5169                 if (infoStream != null)
 
 5171                     Message(
"merge: total " + totDocCount + 
" docs");
 
 5174                 merge.CheckAborted(directory);
 
                 // Deletions appeared after _MergeInit decided not to merge
                 // doc stores: upgrade the plan now.
 5178                 if (mergeDocStores && !merge.mergeDocStores)
 
 5180                     merge.mergeDocStores = 
true;
 
 5183                         if (currentDSSMerged)
 
 5185                             if (infoStream != null)
 
 5187                                 Message(
"now flush at mergeMiddle");
 
 5189                             DoFlush(
true, 
false);
 
 5193                     for (
int i = 0; i < numSegments; i++)
 
 5195                         merge.readersClone[i].OpenDocStores();
 
                     // The target segment now owns a private doc store.
 5199                     merge.info.SetDocStore(-1, null, 
false);
 
                 // This is where the time-consuming merge actually happens.
 5204                 mergedDocCount = merge.info.docCount = merger.Merge(merge.mergeDocStores);
 
 5206                 System.Diagnostics.Debug.Assert(mergedDocCount == totDocCount);
 
 5208                 if (merge.useCompoundFile)
 
 5212                     string compoundFileName = IndexFileNames.SegmentFileName(mergedName, IndexFileNames.COMPOUND_FILE_EXTENSION);
 
 5216                         if (infoStream != null)
 
 5218                             Message(
"create compound file " + compoundFileName);
 
 5220                         merger.CreateCompoundFile(compoundFileName);
 
 5223                     catch (System.IO.IOException ioe)
 
                             // An abort can surface as an IOException mid-CFS;
                             // only report genuine failures.
 5227                             if (merge.IsAborted())
 
 5235                                 HandleMergeException(ioe, merge);
 
 5241                         HandleMergeException(t, merge);
 
 5247                             if (infoStream != null)
 
 5249                                 Message(
"hit exception creating compound file during merge");
 
                                 // Roll back the partially-written CFS.
 5254                                 deleter.DeleteFile(compoundFileName);
 
 5255                                 deleter.DeleteNewFiles(merger.GetMergedFiles());
 
                         // Non-CFS intermediate files are no longer needed.
 5267                         deleter.DeleteNewFiles(merger.GetMergedFiles());
 
 5269                         if (merge.IsAborted())
 
 5271                             if (infoStream != null)
 
 5273                                 Message(
"abort merge after building CFS");
 
 5275                             deleter.DeleteFile(compoundFileName);
 
 5280                     merge.info.SetUseCompoundFile(
true);
 
 5283                 int termsIndexDivisor;
 
                 // The reader can only be warmed when it won't touch the doc
                 // store currently being written by DocumentsWriter.
 5289                 bool canWarm = merge.info.DocStoreSegment == null || currentDocStoreSegment == null || !merge.info.DocStoreSegment.Equals(currentDocStoreSegment);
 
 5291                 if (poolReaders && mergedSegmentWarmer != null && canWarm)
 
 5296                     termsIndexDivisor = readerTermsIndexDivisor;
 
 5297                     loadDocStores = 
true;
 
 5301                     termsIndexDivisor = -1;
 
 5302                     loadDocStores = 
false;
 
 5309                 SegmentReader mergedReader = readerPool.Get(merge.info, loadDocStores, 
BufferedIndexInput.BUFFER_SIZE, termsIndexDivisor);
 
 5312                     if (poolReaders && mergedSegmentWarmer != null)
 
 5314                         mergedSegmentWarmer.Warm(mergedReader);
 
 5316                     if (!CommitMerge(merge, merger, mergedDocCount, mergedReader))
 
 5326                         readerPool.Release(mergedReader);
 
                     // Failure path: release readers while suppressing errors
                     // so the original exception propagates.
 5338                     CloseMergeReaders(merge, 
true);
 
 5342             return mergedDocCount;
 
 
 5345         internal virtual void  AddMergeException(MergePolicy.OneMerge merge)
 
 5349                 System.Diagnostics.Debug.Assert(merge.GetException() != null);
 
 5350                 if (!mergeExceptions.Contains(merge) && mergeGen == merge.mergeGen)
 
 5351                     mergeExceptions.Add(merge);
 
         // Flushes buffered delete terms/queries against the current segments
         // via DocumentsWriter; returns whether anything changed. The return
         // statement and try/finally scaffolding were dropped by the
         // extraction; fragments kept verbatim.
 5356         private bool ApplyDeletes()
 
 5360                 System.Diagnostics.Debug.Assert(TestPoint(
"startApplyDeletes"));
 
 5361                 flushDeletesCount++;
 
 5363                 bool success = 
false;
 
 5367                     changed = docWriter.ApplyDeletes(segmentInfos);
 
 5372                     if (!success && infoStream != null)
 
 5374                         Message(
"hit exception flushing deletes");
 
 
 5385         internal int GetBufferedDeleteTermsSize()
 
 5389                 return docWriter.GetBufferedDeleteTerms().Count;
 
 5394         internal int GetNumBufferedDeleteTerms()
 
 5398                 return docWriter.GetNumBufferedDeleteTerms();
 
 5405             return segmentInfos.Count > 0 ? segmentInfos.Info(segmentInfos.Count - 1) : null;
 
 5408         public virtual System.String SegString()
 
 5412                 return SegString(segmentInfos);
 
 5420                 System.Text.StringBuilder buffer = 
new System.Text.StringBuilder();
 
 5421                 int count = infos.Count;
 
 5422                 for (
int i = 0; i < count; i++)
 
 5428                     SegmentInfo info = infos.
Info(i);
 
 5429                     buffer.Append(info.SegString(directory));
 
 5430                     if (info.dir != directory)
 
 5431                         buffer.Append(
"**");
 
         // Files confirmed fsync'd to stable storage; also used as the monitor
         // guarding both sets (see StartSync/FinishSync/WaitForAllSynced).
 5438         private HashSet<string> synced = 
new HashSet<string>();
 
         // Files some thread is currently fsync'ing (accessed under lock(synced)).
 5441         private HashSet<string> syncing = 
new HashSet<string>();
 
 5443         private bool StartSync(System.String fileName, ICollection<string> pending)
 
 5447                 if (!synced.Contains(fileName))
 
 5449                     if (!syncing.Contains(fileName))
 
 5451                         syncing.Add(fileName);
 
 5456                         pending.Add(fileName);
 
 5465         private void  FinishSync(System.String fileName, 
bool success)
 
 5469                 System.Diagnostics.Debug.Assert(syncing.Contains(fileName));
 
 5470                 syncing.Remove(fileName);
 
 5472                     synced.Add(fileName);
 
 5473                 System.Threading.Monitor.PulseAll(synced);
 
 5478         private bool WaitForAllSynced(ICollection<System.String> syncing)
 
 5482                 IEnumerator<string> it = syncing.GetEnumerator();
 
 5483                 while (it.MoveNext())
 
 5485                     System.String fileName = it.Current;
 
 5486                     while (!synced.Contains(fileName))
 
 5488                         if (!syncing.Contains(fileName))
 
 5493                             System.Threading.Monitor.Wait(synced);
 
 5501         private void  DoWait()
 
 5511                 System.Threading.Monitor.Wait(
this, TimeSpan.FromMilliseconds(1000));
 
         // Phase 1 of the two-phase commit: fsyncs every file referenced by a
         // snapshot of segmentInfos, then writes the pending segments_N via
         // PrepareCommit without making it visible. A later FinishCommit
         // completes phase 2. The extraction dropped the try/finally
         // scaffolding; surviving fragments kept verbatim.
 5522         private void StartCommit(
long sizeInBytes, IDictionary<string, string> commitUserData)
 
 5525             System.Diagnostics.Debug.Assert(TestPoint(
"startStartCommit"));
 
                 // Writer previously hit OOM: refuse to commit.
 5532                 throw new System.SystemException(
"this writer hit an OutOfMemoryError; cannot commit");
 
 5538                 if (infoStream != null)
 
 5539                     Message(
"startCommit(): start sizeInBytes=" + sizeInBytes);
 
 5541                 SegmentInfos toSync = null;
 
 5549                     BlockAddIndexes(
false);
 
 5553                     System.Diagnostics.Debug.Assert(!HasExternalSegments());
 
 5558                         System.Diagnostics.Debug.Assert(lastCommitChangeCount <= changeCount);
 
 5559                         myChangeCount = changeCount;
 
                         // Nothing changed since the last commit: skip.
 5561                         if (changeCount == lastCommitChangeCount)
 
 5563                             if (infoStream != null)
 
 5564                                 Message(
"  skip startCommit(): no changes pending");
 
 5574                         if (infoStream != null)
 
 5575                             Message(
"startCommit index=" + SegString(segmentInfos) + 
" changeCount=" + changeCount);
 
 5577                         readerPool.Commit();
 
                         // Commit a private snapshot so concurrent mutations of
                         // segmentInfos cannot affect this commit.
 5585                         toSync = (SegmentInfos) segmentInfos.Clone();
 
                         // Drop the last segment if it references the doc store
                         // still being written by DocumentsWriter.
 5586                         string dss = docWriter.DocStoreSegment;
 
 5591                                 String dss2 = toSync.Info(toSync.Count - 1).DocStoreSegment;
 
 5592                                 if (dss2 == null || !dss2.Equals(dss))
 
 5596                                 toSync.RemoveAt(toSync.Count - 1);
 
 5601                         if (commitUserData != null)
 
 5602                             toSync.UserData = commitUserData;
 
                         // Protect the snapshot's files from deletion while we
                         // sync them (balanced by DecRef below).
 5604                         deleter.IncRef(toSync, 
false);
 
 5606                         ICollection<string> files = toSync.Files(directory, 
false);
 
 5607                         foreach(
string fileName 
in files)
 
 5609                             System.Diagnostics.Debug.Assert(directory.FileExists(fileName), 
"file " + fileName + 
" does not exist");
 
 5615                             System.Diagnostics.Debug.Assert(deleter.Exists(fileName));
 
 5624                 System.Diagnostics.Debug.Assert(TestPoint(
"midStartCommit"));
 
 5626                 bool setPending = 
false;
 
                         // Sync every referenced file; files another thread is
                         // already syncing are collected into 'pending'.
 5633                         ICollection<string> pending = 
new List<string>();
 
 5635                         IEnumerator<string> it = toSync.Files(directory, 
false).GetEnumerator();
 
 5636                         while (it.MoveNext())
 
 5638                             string fileName = it.Current;
 
 5639                             if (StartSync(fileName, pending))
 
 5641                                 bool success = 
false;
 
 5646                                     System.Diagnostics.Debug.Assert(directory.FileExists(fileName), 
"file '" + fileName + 
"' does not exist dir=" + directory);
 
 5647                                     if (infoStream != null)
 
 5648                                         Message(
"now sync " + fileName);
 
 5649                                     directory.Sync(fileName);
 
 5654                                     FinishSync(fileName, success);
 
                         // Wait for syncs owned by other threads to complete.
 5665                         if (WaitForAllSynced(pending))
 
 5669                     System.Diagnostics.Debug.Assert(TestPoint(
"midStartCommit2"));
 
                             // A newer commit superseded ours while syncing.
 5680                             if (myChangeCount <= lastCommitChangeCount)
 
 5682                                 if (infoStream != null)
 
 5684                                     Message(
"sync superseded by newer infos");
 
 5688                             else if (pendingCommit == null)
 
 5692                                 if (segmentInfos.Generation > toSync.Generation)
 
 5693                                     toSync.UpdateGeneration(segmentInfos);
 
 5695                                 bool success = 
false;
 
                                         // Writes segments_N without making it
                                         // visible yet (two-phase commit).
 5704                                         toSync.PrepareCommit(directory);
 
 5712                                         segmentInfos.UpdateGeneration(toSync);
 
 5715                                     System.Diagnostics.Debug.Assert(pendingCommit == null);
 
 5717                                     pendingCommit = toSync;
 
 5718                                     pendingCommitChangeCount = (uint) myChangeCount;
 
 5723                                     if (!success && infoStream != null)
 
 5724                                         Message(
"hit exception committing segments file");
 
 5736                     if (infoStream != null)
 
 5737                         Message(
"done all syncs");
 
 5739                     System.Diagnostics.Debug.Assert(TestPoint(
"midStartCommitSuccess"));
 
                             // Balance the IncRef taken on the snapshot above.
 5746                             deleter.DecRef(toSync);
 
 5750             catch (System.OutOfMemoryException oom)
 
 5752                 HandleOOM(oom, 
"startCommit");
 
 5754             System.Diagnostics.Debug.Assert(TestPoint(
"finishStartCommit"));
 
 
 5765             return directory.MakeLock(WRITE_LOCK_NAME).IsLocked();
 
 5787             private System.String name;
 
 5813                 get { 
return limit; }
 
 5816             public override System.String ToString()
 
 5818                 return name + 
":" + limit;
 
 5831                 LIMITED = 
new MaxFieldLength(
"LIMITED", Lucene.Net.Index.IndexWriter.DEFAULT_MAX_FIELD_LENGTH);
 
 5861             set { mergedSegmentWarmer = value; }
 
 5862             get { 
return mergedSegmentWarmer; }
 
         // Logs which operation hit OutOfMemory. The remaining lines (which
         // presumably set hitOOM and rethrow — see the hitOOM checks in
         // _MergeInit/StartCommit) were dropped by the extraction; TODO
         // confirm against the full source.
 5865         private void  HandleOOM(System.OutOfMemoryException oom, System.String location)
 
 5867             if (infoStream != null)
 
 5869                 Message(
"hit OutOfMemoryError inside " + location)
 
         // Test-instrumentation hook: asserted at named internal checkpoints
         // (e.g. "startMergeInit", "midStartCommit") so test subclasses can
         // observe/interfere. Body missing from this extraction.
 5887         public  virtual bool TestPoint(System.String name)
 
 5896                 if (!infos.
Equals(segmentInfos))
 
 5902                 else if (infos.
Generation != segmentInfos.Generation)
 
 5910                     return !docWriter.AnyChanges;
 
         // Reports whether the writer has been closed; body missing from this
         // extraction (presumably returns the 'closed' flag — TODO confirm).
 5915         internal virtual bool IsClosed()
 
 5925             MAX_TERM_LENGTH = DocumentsWriter.MAX_TERM_LENGTH;