Lucene.Net  3.0.3
Lucene.Net is a port of the Lucene search engine library, written in C# and targeted at .NET runtime users.
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Properties Pages
SegmentReader.cs
Go to the documentation of this file.
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one or more
3  * contributor license agreements. See the NOTICE file distributed with
4  * this work for additional information regarding copyright ownership.
5  * The ASF licenses this file to You under the Apache License, Version 2.0
6  * (the "License"); you may not use this file except in compliance with
7  * the License. You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 using System;
19 using System.Linq;
20 using Lucene.Net.Support;
21 using Lucene.Net.Util;
22 using Document = Lucene.Net.Documents.Document;
23 using FieldSelector = Lucene.Net.Documents.FieldSelector;
24 using BufferedIndexInput = Lucene.Net.Store.BufferedIndexInput;
25 using Directory = Lucene.Net.Store.Directory;
26 using IndexInput = Lucene.Net.Store.IndexInput;
27 using IndexOutput = Lucene.Net.Store.IndexOutput;
28 using BitVector = Lucene.Net.Util.BitVector;
29 using DefaultSimilarity = Lucene.Net.Search.DefaultSimilarity;
30 
31 namespace Lucene.Net.Index
32 {
33 
34  /// <version> $Id
35  /// </version>
36  /// <summary> <p/><b>NOTE:</b> This API is new and still experimental
37  /// (subject to change suddenly in the next release)<p/>
38  /// </summary>
39  public class SegmentReader : IndexReader
40  {
        /// <summary>Creates an empty SegmentReader; real state is installed by the
        /// static <c>Get(...)</c> factory methods.</summary>
        public SegmentReader()
        {
            InitBlock();
        }
        // Initializes state that needs "this" and therefore cannot be set up
        // in a field initializer: the per-thread FieldsReader cache.
        private void InitBlock()
        {
            fieldsReaderLocal = new FieldsReaderLocal(this);
        }
        // True when this reader rejects all modifications (deletes, norm changes).
        protected internal bool readOnly;

        // Segment metadata this reader was opened against.
        private SegmentInfo si;
        // Buffer size used for all IndexInputs opened by this reader.
        private int readBufferSize;

        // Per-thread FieldsReader clones (stored documents are not thread-safe to share).
        internal CloseableThreadLocal<FieldsReader> fieldsReaderLocal;

        // Bit per doc; set bits mark deleted docs. Null when the segment has no deletions.
        internal BitVector deletedDocs = null;
        // Ref-count for deletedDocs, shared across clones for copy-on-write.
        internal Ref deletedDocsRef = null;
        // True when deletedDocs has un-committed changes.
        private bool deletedDocsDirty = false;
        // True when any Norm has un-committed changes.
        private bool normsDirty = false;
        // Deletes applied since the last commit (not yet reflected in si.delCount).
        private int pendingDeleteCount;

        // Snapshot of dirty-state taken by StartCommit so a failed commit can roll back.
        private bool rollbackHasChanges = false;
        private bool rollbackDeletedDocsDirty = false;
        private bool rollbackNormsDirty = false;
        private SegmentInfo rollbackSegmentInfo;
        private int rollbackPendingDeleteCount;

        // optionally used for the .nrm file shared by multiple norms
        private IndexInput singleNormStream;
        private Ref singleNormRef;

        // Shared, immutable core (postings, field infos, stored fields) — see CoreReaders.
        internal CoreReaders core;
74 
        // Holds core readers that are shared (unchanged) when
        // SegmentReader is cloned or reopened
        public /*internal*/ sealed class CoreReaders
        {
            // Counts how many other readers share the core objects
            // (freqStream, proxStream, tis, etc.) of this reader;
            // when this ref drops to 0, these core objects may be
            // closed. A given instance of SegmentReader may be
            // closed, even though it shares core objects with other
            // SegmentReaders:
            private readonly Ref ref_Renamed = new Ref();

            internal System.String segment;
            internal FieldInfos fieldInfos;
            // Postings streams (.frq / .prx); proxStream is null when no field has positions.
            internal IndexInput freqStream;
            internal IndexInput proxStream;
            // Terms dictionary opened WITHOUT its in-memory index (termsIndexDivisor == -1).
            internal TermInfosReader tisNoIndex;

            internal Directory dir;
            // Directory the per-segment files live in: cfsReader when compound, else dir.
            internal Directory cfsDir;
            internal int readBufferSize;
            internal int termsIndexDivisor;

            // The reader that first opened this core; used to purge its FieldCache entries.
            internal SegmentReader origInstance;

            // Terms dictionary WITH its index loaded (null when tisNoIndex is used).
            internal TermInfosReader tis;
            // Shared originals; each thread/doc-access clones these (see FieldsReaderLocal).
            internal FieldsReader fieldsReaderOrig;
            internal TermVectorsReader termVectorsReaderOrig;
            internal CompoundFileReader cfsReader;
            // Separate CFS for shared doc stores (.cfx), when docStore is compound.
            internal CompoundFileReader storeCFSReader;

            /// <summary>Opens the segment-level files (field infos, terms dictionary,
            /// postings). Doc stores are opened lazily via OpenDocStores. On any
            /// failure everything opened so far is closed via DecRef.</summary>
            internal CoreReaders(SegmentReader origInstance, Directory dir, SegmentInfo si, int readBufferSize, int termsIndexDivisor)
            {
                segment = si.name;
                this.readBufferSize = readBufferSize;
                this.dir = dir;

                bool success = false;

                try
                {
                    Directory dir0 = dir;
                    if (si.GetUseCompoundFile())
                    {
                        cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
                        dir0 = cfsReader;
                    }
                    cfsDir = dir0;

                    fieldInfos = new FieldInfos(cfsDir, segment + "." + IndexFileNames.FIELD_INFOS_EXTENSION);

                    this.termsIndexDivisor = termsIndexDivisor;
                    var reader = new TermInfosReader(cfsDir, segment, fieldInfos, readBufferSize, termsIndexDivisor);
                    if (termsIndexDivisor == -1)
                    {
                        // -1 means "do not load the terms index"; it can be loaded
                        // later through LoadTermsIndex.
                        tisNoIndex = reader;
                    }
                    else
                    {
                        tis = reader;
                        tisNoIndex = null;
                    }

                    // make sure that all index files have been read or are kept open
                    // so that if an index update removes them we'll still have them
                    freqStream = cfsDir.OpenInput(segment + "." + IndexFileNames.FREQ_EXTENSION, readBufferSize);

                    proxStream = fieldInfos.HasProx() ? cfsDir.OpenInput(segment + "." + IndexFileNames.PROX_EXTENSION, readBufferSize) : null;
                    success = true;
                }
                finally
                {
                    if (!success)
                    {
                        DecRef();
                    }
                }

                // Must assign this at the end -- if we hit an
                // exception above core, we don't want to attempt to
                // purge the FieldCache (will hit NPE because core is
                // not assigned yet).
                this.origInstance = origInstance;
            }

            internal TermVectorsReader GetTermVectorsReaderOrig()
            {
                lock (this)
                {
                    return termVectorsReaderOrig;
                }
            }

            internal FieldsReader GetFieldsReaderOrig()
            {
                lock (this)
                {
                    return fieldsReaderOrig;
                }
            }

            internal void IncRef()
            {
                lock (this)
                {
                    ref_Renamed.IncRef();
                }
            }

            internal Directory GetCFSReader()
            {
                lock (this)
                {
                    return cfsReader;
                }
            }

            // Returns whichever terms reader is open (with or without index).
            internal TermInfosReader GetTermsReader()
            {
                lock (this)
                {
                    if (tis != null)
                    {
                        return tis;
                    }
                    else
                    {
                        return tisNoIndex;
                    }
                }
            }

            internal bool TermsIndexIsLoaded()
            {
                lock (this)
                {
                    return tis != null;
                }
            }

            // NOTE: only called from IndexWriter when a near
            // real-time reader is opened, or applyDeletes is run,
            // sharing a segment that's still being merged. This
            // method is not fully thread safe, and relies on the
            // synchronization in IndexWriter
            internal void LoadTermsIndex(SegmentInfo si, int termsIndexDivisor)
            {
                lock (this)
                {
                    if (tis == null)
                    {
                        Directory dir0;
                        if (si.GetUseCompoundFile())
                        {
                            // In some cases, we were originally opened when CFS
                            // was not used, but then we are asked to open the
                            // terms reader with index, the segment has switched
                            // to CFS
                            if (cfsReader == null)
                            {
                                cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
                            }
                            dir0 = cfsReader;
                        }
                        else
                        {
                            dir0 = dir;
                        }

                        tis = new TermInfosReader(dir0, segment, fieldInfos, readBufferSize, termsIndexDivisor);
                    }
                }
            }

            /// <summary>Drops one reference; when the count reaches zero all shared
            /// readers/streams are closed and this core's FieldCache entries purged.</summary>
            internal void DecRef()
            {
                lock (this)
                {

                    if (ref_Renamed.DecRef() == 0)
                    {

                        // close everything, nothing is shared anymore with other readers
                        if (tis != null)
                        {
                            tis.Dispose();
                            // null so if an app hangs on to us we still free most ram
                            tis = null;
                        }

                        if (tisNoIndex != null)
                        {
                            tisNoIndex.Dispose();
                        }

                        if (freqStream != null)
                        {
                            freqStream.Close();
                        }

                        if (proxStream != null)
                        {
                            proxStream.Close();
                        }

                        if (termVectorsReaderOrig != null)
                        {
                            termVectorsReaderOrig.Dispose();
                        }

                        if (fieldsReaderOrig != null)
                        {
                            fieldsReaderOrig.Dispose();
                        }

                        if (cfsReader != null)
                        {
                            cfsReader.Close();
                        }

                        if (storeCFSReader != null)
                        {
                            storeCFSReader.Close();
                        }

                        // Force FieldCache to evict our entries at this point
                        if (origInstance != null)
                        {
                            Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(origInstance);
                        }
                    }
                }
            }

            /// <summary>Opens the stored-fields (.fdt/.fdx) and term-vector readers,
            /// resolving the correct directory for shared/compound doc stores.
            /// Idempotent: a second call is a no-op.</summary>
            internal void OpenDocStores(SegmentInfo si)
            {
                lock (this)
                {

                    System.Diagnostics.Debug.Assert(si.name.Equals(segment));

                    if (fieldsReaderOrig == null)
                    {
                        Directory storeDir;
                        if (si.DocStoreOffset != -1)
                        {
                            // Doc stores are shared with other segments (.cfx or raw files).
                            if (si.DocStoreIsCompoundFile)
                            {
                                System.Diagnostics.Debug.Assert(storeCFSReader == null);
                                storeCFSReader = new CompoundFileReader(dir, si.DocStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION, readBufferSize);
                                storeDir = storeCFSReader;
                                System.Diagnostics.Debug.Assert(storeDir != null);
                            }
                            else
                            {
                                storeDir = dir;
                                System.Diagnostics.Debug.Assert(storeDir != null);
                            }
                        }
                        else if (si.GetUseCompoundFile())
                        {
                            // In some cases, we were originally opened when CFS
                            // was not used, but then we are asked to open doc
                            // stores after the segment has switched to CFS
                            if (cfsReader == null)
                            {
                                cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
                            }
                            storeDir = cfsReader;
                            System.Diagnostics.Debug.Assert(storeDir != null);
                        }
                        else
                        {
                            storeDir = dir;
                            System.Diagnostics.Debug.Assert(storeDir != null);
                        }

                        string storesSegment = si.DocStoreOffset != -1 ? si.DocStoreSegment : segment;

                        fieldsReaderOrig = new FieldsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.DocStoreOffset, si.docCount);

                        // Verify two sources of "maxDoc" agree:
                        if (si.DocStoreOffset == -1 && fieldsReaderOrig.Size() != si.docCount)
                        {
                            throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + fieldsReaderOrig.Size() + " but segmentInfo shows " + si.docCount);
                        }

                        if (fieldInfos.HasVectors())
                        {
                            // open term vector files only as needed
                            termVectorsReaderOrig = new TermVectorsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.DocStoreOffset, si.docCount);
                        }
                    }
                }
            }

            // Exposed for NUnit tests only.
            public FieldInfos fieldInfos_ForNUnit
            {
                get { return fieldInfos; }
            }
        }
378 
379  /// <summary> Sets the initial value </summary>
380  private class FieldsReaderLocal : CloseableThreadLocal<FieldsReader>
381  {
382  public FieldsReaderLocal(SegmentReader enclosingInstance)
383  {
384  InitBlock(enclosingInstance);
385  }
386  private void InitBlock(SegmentReader enclosingInstance)
387  {
388  this.enclosingInstance = enclosingInstance;
389  }
390  private SegmentReader enclosingInstance;
391  public SegmentReader Enclosing_Instance
392  {
393  get
394  {
395  return enclosingInstance;
396  }
397 
398  }
399  public /*protected internal*/ override FieldsReader InitialValue()
400  {
401  return (FieldsReader) Enclosing_Instance.core.GetFieldsReaderOrig().Clone();
402  }
403  }
404 
405  public /*internal*/ class Ref
406  {
407  private int refCount = 1;
408 
409  public override System.String ToString()
410  {
411  return "refcount: " + refCount;
412  }
413 
414  public virtual int RefCount()
415  {
416  lock (this)
417  {
418  return refCount;
419  }
420  }
421 
422  public virtual int IncRef()
423  {
424  lock (this)
425  {
426  System.Diagnostics.Debug.Assert(refCount > 0);
427  refCount++;
428  return refCount;
429  }
430  }
431 
432  public virtual int DecRef()
433  {
434  lock (this)
435  {
436  System.Diagnostics.Debug.Assert(refCount > 0);
437  refCount--;
438  return refCount;
439  }
440  }
441  }
442 
        /// <summary> Byte[] referencing is used because a new norm object needs
        /// to be created for each clone, and the byte array is all
        /// that is needed for sharing between cloned readers. The
        /// current norm referencing is for sharing between readers
        /// whereas the byte[] referencing is for copy on write which
        /// is independent of reader references (i.e. incRef, decRef).
        /// </summary>

        public /*internal*/ sealed class Norm : System.ICloneable
        {
            // Binds this Norm to the SegmentReader that owns it.
            private void InitBlock(SegmentReader enclosingInstance)
            {
                this.enclosingInstance = enclosingInstance;
            }
            private SegmentReader enclosingInstance;
            public SegmentReader Enclosing_Instance
            {
                get
                {
                    return enclosingInstance;
                }
            }
            // Number of readers (original plus clones) referencing this Norm.
            internal /*private*/ int refCount = 1;

            // If this instance is a clone, the originalNorm
            // references the Norm that has a real open IndexInput:
            private Norm origNorm;

            // Source stream for the norm bytes; nulled once bytes are fully
            // cached (CloseInput) and always null on clones.
            private IndexInput in_Renamed;
            // Absolute position in the norm stream where this field's bytes start.
            private readonly long normSeek;

            // null until bytes is set
            private Ref bytesRef;
            // Cached norm bytes (one per doc); null until loaded by Bytes().
            internal /*private*/ byte[] bytes;
            // True when this Norm has un-committed modifications (see CopyOnWrite).
            internal /*private*/ bool dirty;
            // Field number this Norm covers.
            internal /*private*/ int number;
            // Saved dirty flag so a failed commit can be rolled back.
            internal /*private*/ bool rollbackDirty;

            public Norm(SegmentReader enclosingInstance, IndexInput in_Renamed, int number, long normSeek)
            {
                InitBlock(enclosingInstance);
                this.in_Renamed = in_Renamed;
                this.number = number;
                this.normSeek = normSeek;
            }

            /// <summary>Adds a reference; must be balanced by a later DecRef().</summary>
            public void IncRef()
            {
                lock (this)
                {
                    System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));
                    refCount++;
                }
            }

            // Closes (or un-shares) the underlying norm stream. A stream equal to
            // the reader's singleNormStream is shared by several fields and is only
            // closed when its own ref count reaches zero.
            private void CloseInput()
            {
                if (in_Renamed != null)
                {
                    if (in_Renamed != Enclosing_Instance.singleNormStream)
                    {
                        // It's private to us -- just close it
                        in_Renamed.Dispose();
                    }
                    else
                    {
                        // We are sharing this with others -- decRef and
                        // maybe close the shared norm stream
                        if (Enclosing_Instance.singleNormRef.DecRef() == 0)
                        {
                            Enclosing_Instance.singleNormStream.Dispose();
                            Enclosing_Instance.singleNormStream = null;
                        }
                    }

                    in_Renamed = null;
                }
            }

            /// <summary>Drops a reference; on the last one, releases the origNorm
            /// (for clones) or the input stream, and un-shares the byte array.</summary>
            public void DecRef()
            {
                lock (this)
                {
                    System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));

                    if (--refCount == 0)
                    {
                        if (origNorm != null)
                        {
                            origNorm.DecRef();
                            origNorm = null;
                        }
                        else
                        {
                            CloseInput();
                        }

                        if (bytes != null)
                        {
                            System.Diagnostics.Debug.Assert(bytesRef != null);
                            bytesRef.DecRef();
                            bytes = null;
                            bytesRef = null;
                        }
                        else
                        {
                            System.Diagnostics.Debug.Assert(bytesRef == null);
                        }
                    }
                }
            }

            // Load bytes but do not cache them if they were not
            // already cached
            public void Bytes(byte[] bytesOut, int offset, int len)
            {
                lock (this)
                {
                    System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));
                    if (bytes != null)
                    {
                        // Already cached -- copy from cache:
                        System.Diagnostics.Debug.Assert(len <= Enclosing_Instance.MaxDoc);
                        Array.Copy(bytes, 0, bytesOut, offset, len);
                    }
                    else
                    {
                        // Not cached
                        if (origNorm != null)
                        {
                            // Ask origNorm to load
                            origNorm.Bytes(bytesOut, offset, len);
                        }
                        else
                        {
                            // We are orig -- read ourselves from disk:
                            lock (in_Renamed)
                            {
                                in_Renamed.Seek(normSeek);
                                in_Renamed.ReadBytes(bytesOut, offset, len, false);
                            }
                        }
                    }
                }
            }

            // Load & cache full bytes array. Returns bytes.
            public byte[] Bytes()
            {
                lock (this)
                {
                    System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));
                    if (bytes == null)
                    {
                        // value not yet read
                        System.Diagnostics.Debug.Assert(bytesRef == null);
                        if (origNorm != null)
                        {
                            // Ask origNorm to load so that for a series of
                            // reopened readers we share a single read-only
                            // byte[]
                            bytes = origNorm.Bytes();
                            bytesRef = origNorm.bytesRef;
                            bytesRef.IncRef();

                            // Once we've loaded the bytes we no longer need
                            // origNorm:
                            origNorm.DecRef();
                            origNorm = null;
                        }
                        else
                        {
                            // We are the origNorm, so load the bytes for real
                            // ourself:
                            int count = Enclosing_Instance.MaxDoc;
                            bytes = new byte[count];

                            // Since we are orig, in must not be null
                            System.Diagnostics.Debug.Assert(in_Renamed != null);

                            // Read from disk.
                            lock (in_Renamed)
                            {
                                in_Renamed.Seek(normSeek);
                                in_Renamed.ReadBytes(bytes, 0, count, false);
                            }

                            bytesRef = new Ref();
                            CloseInput();
                        }
                    }

                    return bytes;
                }
            }

            // Only for testing
            public /*internal*/ Ref BytesRef()
            {
                return bytesRef;
            }

            // Called if we intend to change a norm value. We make a
            // private copy of bytes if it's shared with others:
            public byte[] CopyOnWrite()
            {
                lock (this)
                {
                    System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));
                    Bytes();
                    System.Diagnostics.Debug.Assert(bytes != null);
                    System.Diagnostics.Debug.Assert(bytesRef != null);
                    if (bytesRef.RefCount() > 1)
                    {
                        // I cannot be the origNorm for another norm
                        // instance if I'm being changed. Ie, only the
                        // "head Norm" can be changed:
                        System.Diagnostics.Debug.Assert(refCount == 1);
                        Ref oldRef = bytesRef;
                        bytes = Enclosing_Instance.CloneNormBytes(bytes);
                        bytesRef = new Ref();
                        oldRef.DecRef();
                    }
                    dirty = true;
                    return bytes;
                }
            }

            // Returns a copy of this Norm instance that shares
            // IndexInput & bytes with the original one
            public System.Object Clone()
            {
                lock (this) //LUCENENET-375
                {
                    System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));

                    Norm clone;
                    try
                    {
                        clone = (Norm) base.MemberwiseClone();
                    }
                    catch (System.Exception cnse)
                    {
                        // Cannot happen
                        throw new System.SystemException("unexpected CloneNotSupportedException", cnse);
                    }
                    clone.refCount = 1;

                    if (bytes != null)
                    {
                        System.Diagnostics.Debug.Assert(bytesRef != null);
                        System.Diagnostics.Debug.Assert(origNorm == null);

                        // Clone holds a reference to my bytes:
                        clone.bytesRef.IncRef();
                    }
                    else
                    {
                        System.Diagnostics.Debug.Assert(bytesRef == null);
                        if (origNorm == null)
                        {
                            // I become the origNorm for the clone:
                            clone.origNorm = this;
                        }
                        clone.origNorm.IncRef();
                    }

                    // Only the origNorm will actually readBytes from in:
                    clone.in_Renamed = null;

                    return clone;
                }
            }

            // Flush all pending changes to the next generation
            // separate norms file.
            public void ReWrite(SegmentInfo si)
            {
                System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0), "refCount=" + refCount + " origNorm=" + origNorm);

                // NOTE: norms are re-written in regular directory, not cfs
                si.AdvanceNormGen(this.number);
                string normFileName = si.GetNormFileName(this.number);
                IndexOutput @out = enclosingInstance.Directory().CreateOutput(normFileName);
                bool success = false;
                try
                {
                    try
                    {
                        @out.WriteBytes(bytes, enclosingInstance.MaxDoc);
                    }
                    finally
                    {
                        @out.Close();
                    }
                    success = true;
                }
                finally
                {
                    if (!success)
                    {
                        // Remove the partially-written file so we don't leave a
                        // corrupt norms file behind; keep throwing the original
                        // exception.
                        try
                        {
                            enclosingInstance.Directory().DeleteFile(normFileName);
                        }
                        catch (Exception)
                        {
                            // suppress this so we keep throwing the
                            // original exception
                        }
                    }
                }
                this.dirty = false;
            }
        }
756 
        // Per-field norms, keyed by field name; populated by OpenNorms.
        internal System.Collections.Generic.IDictionary<string, Norm> norms = new HashMap<string, Norm>();
758 
        /// <summary>Opens a SegmentReader over the segment's own directory using the
        /// default buffer size, with doc stores opened eagerly.</summary>
        /// <throws> CorruptIndexException if the index is corrupt </throws>
        /// <throws> IOException if there is a low-level IO error </throws>
        public static SegmentReader Get(bool readOnly, SegmentInfo si, int termInfosIndexDivisor)
        {
            return Get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
        }
765 
        /// <summary>Factory: opens a (read-only or writable) SegmentReader over
        /// <paramref name="si"/>, loading core readers, deleted docs and norms.
        /// On failure, everything opened so far is closed before rethrowing.</summary>
        /// <throws> CorruptIndexException if the index is corrupt </throws>
        /// <throws> IOException if there is a low-level IO error </throws>
        public static SegmentReader Get(bool readOnly, Directory dir, SegmentInfo si, int readBufferSize, bool doOpenStores, int termInfosIndexDivisor)
        {
            SegmentReader instance = readOnly ? new ReadOnlySegmentReader() : new SegmentReader();
            instance.readOnly = readOnly;
            instance.si = si;
            instance.readBufferSize = readBufferSize;

            bool success = false;

            try
            {
                instance.core = new CoreReaders(instance, dir, si, readBufferSize, termInfosIndexDivisor);
                if (doOpenStores)
                {
                    instance.core.OpenDocStores(si);
                }
                instance.LoadDeletedDocs();
                instance.OpenNorms(instance.core.cfsDir, readBufferSize);
                success = true;
            }
            finally
            {

                // With lock-less commits, it's entirely possible (and
                // fine) to hit a FileNotFound exception above. In
                // this case, we want to explicitly close any subset
                // of things that were opened so that we don't have to
                // wait for a GC to do so.
                if (!success)
                {
                    instance.DoClose();
                }
            }
            return instance;
        }
803 
        /// <summary>Opens the stored-fields / term-vector readers for this segment
        /// (no-op if already open). See CoreReaders.OpenDocStores.</summary>
        internal virtual void OpenDocStores()
        {
            core.OpenDocStores(si);
        }
808 
        // Debug-only sanity check: verifies the deleted-docs BitVector agrees
        // with the SegmentInfo's delete count. Always returns true so it can be
        // used inside Debug.Assert (runs only in DEBUG builds).
        private bool CheckDeletedCounts()
        {
            int recomputedCount = deletedDocs.GetRecomputedCount();

            System.Diagnostics.Debug.Assert(deletedDocs.Count() == recomputedCount, "deleted count=" + deletedDocs.Count() + " vs recomputed count=" + recomputedCount);

            System.Diagnostics.Debug.Assert(si.GetDelCount() == recomputedCount, "delete count mismatch: info=" + si.GetDelCount() + " vs BitVector=" + recomputedCount);

            // Verify # deletes does not exceed maxDoc for this
            // segment:
            System.Diagnostics.Debug.Assert(si.GetDelCount() <= MaxDoc, "delete count mismatch: " + recomputedCount + ") exceeds max doc (" + MaxDoc + ") for segment " + si.name);

            return true;
        }
823 
        /// <summary>Loads the segment's deletions (.del file) into deletedDocs,
        /// creating a fresh ref; no-op when the segment has no deletions.</summary>
        private void LoadDeletedDocs()
        {
            // NOTE: the bitvector is stored using the regular directory, not cfs
            //if(HasDeletions(si))
            if (si.HasDeletions())
            {
                deletedDocs = new BitVector(Directory(), si.GetDelFileName());
                deletedDocsRef = new Ref();

                System.Diagnostics.Debug.Assert(CheckDeletedCounts());
            }
            else
                System.Diagnostics.Debug.Assert(si.GetDelCount() == 0);
        }
838 
839  /// <summary> Clones the norm bytes. May be overridden by subclasses. New and experimental.</summary>
840  /// <param name="bytes">Byte array to clone
841  /// </param>
842  /// <returns> New BitVector
843  /// </returns>
844  protected internal virtual byte[] CloneNormBytes(byte[] bytes)
845  {
846  var cloneBytes = new byte[bytes.Length];
847  Array.Copy(bytes, 0, cloneBytes, 0, bytes.Length);
848  return cloneBytes;
849  }
850 
        /// <summary> Clones the deleteDocs BitVector. May be overridden by subclasses. New and experimental.</summary>
        /// <param name="bv">BitVector to clone
        /// </param>
        /// <returns> New BitVector
        /// </returns>
        protected internal virtual BitVector CloneDeletedDocs(BitVector bv)
        {
            return (BitVector) bv.Clone();
        }
860 
        /// <summary>Clones this reader, preserving its current readOnly mode.
        /// Any exception is wrapped in a SystemException.</summary>
        public override System.Object Clone()
        {
            lock (this)
            {
                try
                {
                    return Clone(readOnly); // Preserve current readOnly
                }
                catch (System.Exception ex)
                {
                    throw new System.SystemException(ex.Message, ex);
                }
            }
        }
875 
        /// <summary>Clones this reader into a new one with the requested readOnly
        /// mode, sharing core readers via ReopenSegment's clone path.</summary>
        public override IndexReader Clone(bool openReadOnly)
        {
            lock (this)
            {
                return ReopenSegment(si, true, openReadOnly);
            }
        }
883 
        /// <summary>Core of Clone/Reopen: returns either this same reader (when
        /// nothing changed and no clone was requested) or a new SegmentReader that
        /// shares this one's core and, where possible, its deleted docs and norms.
        /// Pending changes migrate to the new reader when it is opened writable.</summary>
        /// <param name="si">the (possibly newer) SegmentInfo to open against</param>
        /// <param name="doClone">true to force a clone even when nothing changed</param>
        /// <param name="openReadOnly">readOnly mode of the returned reader</param>
        internal virtual SegmentReader ReopenSegment(SegmentInfo si, bool doClone, bool openReadOnly)
        {
            lock (this)
            {
                // Deletions are up to date when both infos agree on having (or not
                // having) deletions and, if present, point at the same .del file.
                bool deletionsUpToDate = (this.si.HasDeletions() == si.HasDeletions()) && (!si.HasDeletions() || this.si.GetDelFileName().Equals(si.GetDelFileName()));
                bool normsUpToDate = true;

                bool[] fieldNormsChanged = new bool[core.fieldInfos.Size()];
                int fieldCount = core.fieldInfos.Size();
                for (int i = 0; i < fieldCount; i++)
                {
                    if (!this.si.GetNormFileName(i).Equals(si.GetNormFileName(i)))
                    {
                        normsUpToDate = false;
                        fieldNormsChanged[i] = true;
                    }
                }

                // if we're cloning we need to run through the reopenSegment logic
                // also if both old and new readers aren't readonly, we clone to avoid sharing modifications
                if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly)
                {
                    return this;
                }

                // When cloning, the incoming SegmentInfos should not
                // have any changes in it:
                System.Diagnostics.Debug.Assert(!doClone || (normsUpToDate && deletionsUpToDate));

                // clone reader
                SegmentReader clone = openReadOnly ? new ReadOnlySegmentReader() : new SegmentReader();

                bool success = false;
                try
                {
                    core.IncRef();
                    clone.core = core;
                    clone.readOnly = openReadOnly;
                    clone.si = si;
                    clone.readBufferSize = readBufferSize;

                    if (!openReadOnly && hasChanges)
                    {
                        // My pending changes transfer to the new reader
                        clone.pendingDeleteCount = pendingDeleteCount;
                        clone.deletedDocsDirty = deletedDocsDirty;
                        clone.normsDirty = normsDirty;
                        clone.hasChanges = hasChanges;
                        hasChanges = false;
                    }

                    if (doClone)
                    {
                        // Clone always shares the current deleted docs (copy-on-write
                        // kicks in later if either side modifies them).
                        if (deletedDocs != null)
                        {
                            deletedDocsRef.IncRef();
                            clone.deletedDocs = deletedDocs;
                            clone.deletedDocsRef = deletedDocsRef;
                        }
                    }
                    else
                    {
                        if (!deletionsUpToDate)
                        {
                            // load deleted docs
                            System.Diagnostics.Debug.Assert(clone.deletedDocs == null);
                            clone.LoadDeletedDocs();
                        }
                        else if (deletedDocs != null)
                        {
                            deletedDocsRef.IncRef();
                            clone.deletedDocs = deletedDocs;
                            clone.deletedDocsRef = deletedDocsRef;
                        }
                    }

                    clone.norms = new HashMap<string, Norm>();

                    // Clone norms
                    for (int i = 0; i < fieldNormsChanged.Length; i++)
                    {

                        // Clone unchanged norms to the cloned reader
                        if (doClone || !fieldNormsChanged[i])
                        {
                            System.String curField = core.fieldInfos.FieldInfo(i).name;
                            Norm norm = this.norms[curField];
                            if (norm != null)
                                clone.norms[curField] = (Norm) norm.Clone();
                        }
                    }

                    // If we are not cloning, then this will open anew
                    // any norms that have changed:
                    clone.OpenNorms(si.GetUseCompoundFile() ? core.GetCFSReader() : Directory(), readBufferSize);

                    success = true;
                }
                finally
                {
                    if (!success)
                    {
                        // An exception occured during reopen, we have to decRef the norms
                        // that we incRef'ed already and close singleNormsStream and FieldsReader
                        clone.DecRef();
                    }
                }

                return clone;
            }
        }
995 
996  protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
997  {
998  if (hasChanges)
999  {
1000  StartCommit();
1001  bool success = false;
1002  try
1003  {
1004  CommitChanges(commitUserData);
1005  success = true;
1006  }
1007  finally
1008  {
1009  if (!success)
1010  {
1011  RollbackCommit();
1012  }
1013  }
1014  }
1015  }
1016 
        /// <summary>Writes pending deletions to a new-generation .del file and
        /// rewrites any dirty norms, then updates the SegmentInfo counters and
        /// clears the dirty flags. A failed .del write is cleaned up so the
        /// original exception propagates.</summary>
        private void CommitChanges(System.Collections.Generic.IDictionary<string, string> commitUserData)
        {
            if (deletedDocsDirty)
            { // re-write deleted
                si.AdvanceDelGen();

                // We can write directly to the actual name (vs to a
                // .tmp & renaming it) because the file is not live
                // until segments file is written:
                string delFileName = si.GetDelFileName();
                bool success = false;
                try
                {
                    deletedDocs.Write(Directory(), delFileName);
                    success = true;
                }
                finally
                {
                    if (!success)
                    {
                        try
                        {
                            Directory().DeleteFile(delFileName);
                        }
                        catch (Exception)
                        {
                            // suppress this so we keep throwing the
                            // original exception
                        }
                    }
                }

                si.SetDelCount(si.GetDelCount() + pendingDeleteCount);
                pendingDeleteCount = 0;
                System.Diagnostics.Debug.Assert(deletedDocs.Count() == si.GetDelCount(), "delete count mismatch during commit: info=" + si.GetDelCount() + " vs BitVector=" + deletedDocs.Count());
            }
            else
            {
                System.Diagnostics.Debug.Assert(pendingDeleteCount == 0);
            }

            if (normsDirty)
            { // re-write norms
                si.SetNumFields(core.fieldInfos.Size());
                foreach (Norm norm in norms.Values)
                {
                    if (norm.dirty)
                    {
                        norm.ReWrite(si);
                    }
                }
            }
            deletedDocsDirty = false;
            normsDirty = false;
            hasChanges = false;
        }
1073 
        /// <summary>Returns the calling thread's private FieldsReader clone.</summary>
        internal virtual FieldsReader GetFieldsReader()
        {
            return fieldsReaderLocal.Get();
        }
1078 
        /// <summary>Releases this reader's resources: per-thread caches, the
        /// deleted-docs ref, all norms, and one reference on the shared core
        /// (which closes the underlying files when the last reader lets go).</summary>
        protected internal override void DoClose()
        {
            termVectorsLocal.Close();
            fieldsReaderLocal.Close();

            if (deletedDocs != null)
            {
                deletedDocsRef.DecRef();
                // null so if an app hangs on to us we still free most ram
                deletedDocs = null;
            }

            foreach (Norm norm in norms.Values)
            {
                norm.DecRef();
            }
            // core can be null when construction failed part-way (see Get).
            if (core != null)
            {
                core.DecRef();
            }
        }
1100 
        /// <summary>True when this reader currently has any deleted docs loaded.</summary>
        public override bool HasDeletions
        {
            get
            {
                // Don't call ensureOpen() here (it could affect performance)
                return deletedDocs != null;
            }
        }
1115 
        // True when the segment's files are packed into a compound (.cfs) file.
        internal static bool UsesCompoundFile(SegmentInfo si)
        {
            return si.GetUseCompoundFile();
        }
1120 
        // True when any field has norms written to a separate generation (.sNNN) file.
        internal static bool HasSeparateNorms(SegmentInfo si)
        {
            return si.HasSeparateNorms();
        }
1125 
        /// <summary>Marks a doc deleted, lazily creating the deletions BitVector
        /// and copy-on-writing it when it is still shared with other readers.
        /// pendingDeleteCount only grows when the doc was not already deleted.</summary>
        protected internal override void DoDelete(int docNum)
        {
            if (deletedDocs == null)
            {
                deletedDocs = new BitVector(MaxDoc);
                deletedDocsRef = new Ref();
            }
            // there is more than 1 SegmentReader with a reference to this
            // deletedDocs BitVector so decRef the current deletedDocsRef,
            // clone the BitVector, create a new deletedDocsRef
            if (deletedDocsRef.RefCount() > 1)
            {
                Ref oldRef = deletedDocsRef;
                deletedDocs = CloneDeletedDocs(deletedDocs);
                deletedDocsRef = new Ref();
                oldRef.DecRef();
            }
            deletedDocsDirty = true;
            if (!deletedDocs.GetAndSet(docNum))
                pendingDeleteCount++;
        }
1147 
        /// <summary>Discards all deletions for this segment: drops the in-memory
        /// BitVector and resets the SegmentInfo's delete generation and count.</summary>
        protected internal override void DoUndeleteAll()
        {
            deletedDocsDirty = false;
            if (deletedDocs != null)
            {
                System.Diagnostics.Debug.Assert(deletedDocsRef != null);
                deletedDocsRef.DecRef();
                deletedDocs = null;
                deletedDocsRef = null;
                pendingDeleteCount = 0;
                si.ClearDelGen();
                si.SetDelCount(0);
            }
            else
            {
                System.Diagnostics.Debug.Assert(deletedDocsRef == null);
                System.Diagnostics.Debug.Assert(pendingDeleteCount == 0);
            }
        }
1167 
        // Lists all files belonging to this segment (delegates to SegmentInfo).
        internal virtual System.Collections.Generic.IList<string> Files()
        {
            return si.Files();
        }
1172 
        /// <summary>Returns an enumerator over all terms in this segment.</summary>
        public override TermEnum Terms()
        {
            EnsureOpen();
            return core.GetTermsReader().Terms();
        }
1178 
1179  public override TermEnum Terms(Term t)
1180  {
1181  EnsureOpen();
1182  return core.GetTermsReader().Terms(t);
1183  }
1184 
        /// <summary>Returns the field infos shared by this segment's core readers. </summary>
        public /*internal*/ virtual FieldInfos FieldInfos()
        {
            return core.fieldInfos;
        }
1189 
1190  public override Document Document(int n, FieldSelector fieldSelector)
1191  {
1192  EnsureOpen();
1193  return GetFieldsReader().Doc(n, fieldSelector);
1194  }
1195 
1196  public override bool IsDeleted(int n)
1197  {
1198  lock (this)
1199  {
1200  return (deletedDocs != null && deletedDocs.Get(n));
1201  }
1202  }
1203 
1204  public override TermDocs TermDocs(Term term)
1205  {
1206  if (term == null)
1207  {
1208  return new AllTermDocs(this);
1209  }
1210  else
1211  {
1212  return base.TermDocs(term);
1213  }
1214  }
1215 
        /// <summary>Returns an unpositioned TermDocs enumerator for this segment. </summary>
        public override TermDocs TermDocs()
        {
            EnsureOpen();
            return new SegmentTermDocs(this);
        }

        /// <summary>Returns an unpositioned TermPositions enumerator for this segment. </summary>
        public override TermPositions TermPositions()
        {
            EnsureOpen();
            return new SegmentTermPositions(this);
        }
1227 
1228  public override int DocFreq(Term t)
1229  {
1230  EnsureOpen();
1231  TermInfo ti = core.GetTermsReader().Get(t);
1232  if (ti != null)
1233  return ti.docFreq;
1234  else
1235  return 0;
1236  }
1237 
1238  public override int NumDocs()
1239  {
1240  // Don't call ensureOpen() here (it could affect performance)
1241  int n = MaxDoc;
1242  if (deletedDocs != null)
1243  n -= deletedDocs.Count();
1244  return n;
1245  }
1246 
        /// <summary>One greater than the largest document number in this segment
        /// (the segment's docCount, deletions included). </summary>
        public override int MaxDoc
        {
            get
            {
                // Don't call ensureOpen() here (it could affect performance)
                return si.docCount;
            }
        }
1255 
1256  /// <seealso cref="IndexReader.GetFieldNames(IndexReader.FieldOption)">
1257  /// </seealso>
1258  public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldOption)
1259  {
1260  EnsureOpen();
1261 
1262  System.Collections.Generic.ISet<string> fieldSet = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<string>();
1263  for (int i = 0; i < core.fieldInfos.Size(); i++)
1264  {
1265  FieldInfo fi = core.fieldInfos.FieldInfo(i);
1266  if (fieldOption == IndexReader.FieldOption.ALL)
1267  {
1268  fieldSet.Add(fi.name);
1269  }
1270  else if (!fi.isIndexed && fieldOption == IndexReader.FieldOption.UNINDEXED)
1271  {
1272  fieldSet.Add(fi.name);
1273  }
1274  else if (fi.omitTermFreqAndPositions && fieldOption == IndexReader.FieldOption.OMIT_TERM_FREQ_AND_POSITIONS)
1275  {
1276  fieldSet.Add(fi.name);
1277  }
1278  else if (fi.storePayloads && fieldOption == IndexReader.FieldOption.STORES_PAYLOADS)
1279  {
1280  fieldSet.Add(fi.name);
1281  }
1282  else if (fi.isIndexed && fieldOption == IndexReader.FieldOption.INDEXED)
1283  {
1284  fieldSet.Add(fi.name);
1285  }
1286  else if (fi.isIndexed && fi.storeTermVector == false && fieldOption == IndexReader.FieldOption.INDEXED_NO_TERMVECTOR)
1287  {
1288  fieldSet.Add(fi.name);
1289  }
1290  else if (fi.storeTermVector == true && fi.storePositionWithTermVector == false && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR)
1291  {
1292  fieldSet.Add(fi.name);
1293  }
1294  else if (fi.isIndexed && fi.storeTermVector && fieldOption == IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR)
1295  {
1296  fieldSet.Add(fi.name);
1297  }
1298  else if (fi.storePositionWithTermVector && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION)
1299  {
1300  fieldSet.Add(fi.name);
1301  }
1302  else if (fi.storeOffsetWithTermVector && fi.storePositionWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET)
1303  {
1304  fieldSet.Add(fi.name);
1305  }
1306  else if ((fi.storeOffsetWithTermVector && fi.storePositionWithTermVector) && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET)
1307  {
1308  fieldSet.Add(fi.name);
1309  }
1310  }
1311  return fieldSet;
1312  }
1313 
1314 
        /// <summary>Returns true when this segment has norms loaded for
        /// <paramref name="field"/>. </summary>
        public override bool HasNorms(System.String field)
        {
            lock (this)
            {
                EnsureOpen();
                return norms.ContainsKey(field);
            }
        }
1323 
        // can return null if norms aren't stored
        /// <summary>Returns the raw norm bytes for <paramref name="field"/>, or null when the
        /// field is not indexed or norms were not stored.
        /// NOTE(review): assumes the norms map's indexer yields null for a missing key
        /// (Java-HashMap style) rather than throwing — confirm against the field's declared type.
        /// </summary>
        protected internal virtual byte[] GetNorms(System.String field)
        {
            lock (this)
            {
                Norm norm = norms[field];
                if (norm == null)
                    return null; // not indexed, or norms not stored
                return norm.Bytes();
            }
        }
1335 
1336  // returns fake norms if norms aren't available
1337  public override byte[] Norms(System.String field)
1338  {
1339  lock (this)
1340  {
1341  EnsureOpen();
1342  byte[] bytes = GetNorms(field);
1343  return bytes;
1344  }
1345  }
1346 
        /// <summary>Sets the norm byte of <paramref name="doc"/> for <paramref name="field"/>,
        /// copy-on-writing the norm array and marking norms dirty. Silently ignores fields
        /// with no norm entry.
        /// NOTE(review): assumes the norms map's indexer yields null for a missing key
        /// (Java-HashMap style) rather than throwing — confirm against the field's declared type.
        /// </summary>
        protected internal override void DoSetNorm(int doc, System.String field, byte value_Renamed)
        {
            Norm norm = norms[field];
            if (norm == null)
                // not an indexed field
                return ;

            normsDirty = true;
            norm.CopyOnWrite()[doc] = value_Renamed; // set the value
        }
1357 
1358  /// <summary>Read norms into a pre-allocated array. </summary>
1359  public override void Norms(System.String field, byte[] bytes, int offset)
1360  {
1361  lock (this)
1362  {
1363 
1364  EnsureOpen();
1365  Norm norm = norms[field];
1366  if (norm == null)
1367  {
1368  for (int i = offset; i < bytes.Length; i++)
1369  {
1370  bytes[i] = (byte) DefaultSimilarity.EncodeNorm(1.0f);
1371  }
1372  return ;
1373  }
1374 
1375  norm.Bytes(bytes, offset, MaxDoc);
1376  }
1377  }
1378 
1379 
        /// <summary>Opens norm streams for every indexed field that stores norms, populating
        /// the <c>norms</c> map. Fields already present in the map (e.g. when re-opening)
        /// are skipped. Norms in the shared .nrm file reuse one IndexInput with per-field
        /// seek offsets; separate norm files each get their own stream. </summary>
        /// <param name="cfsDir">directory to read non-separate norm files from</param>
        /// <param name="readBufferSize">buffer size for the shared norm stream</param>
        private void OpenNorms(Directory cfsDir, int readBufferSize)
        {
            long nextNormSeek = SegmentMerger.NORMS_HEADER.Length; //skip header (header unused for now)
            int maxDoc = MaxDoc;
            for (int i = 0; i < core.fieldInfos.Size(); i++)
            {
                FieldInfo fi = core.fieldInfos.FieldInfo(i);
                if (norms.ContainsKey(fi.name))
                {
                    // in case this SegmentReader is being re-opened, we might be able to
                    // reuse some norm instances and skip loading them here
                    continue;
                }
                if (fi.isIndexed && !fi.omitNorms)
                {
                    Directory d = Directory();
                    System.String fileName = si.GetNormFileName(fi.number);
                    // Separate norms live in this reader's directory; otherwise read from cfsDir.
                    if (!si.HasSeparateNorms(fi.number))
                    {
                        d = cfsDir;
                    }

                    // singleNormFile means multiple norms share this file
                    bool singleNormFile = fileName.EndsWith("." + IndexFileNames.NORMS_EXTENSION);
                    IndexInput normInput = null;
                    long normSeek;

                    if (singleNormFile)
                    {
                        normSeek = nextNormSeek;
                        if (singleNormStream == null)
                        {
                            singleNormStream = d.OpenInput(fileName, readBufferSize);
                            singleNormRef = new Ref();
                        }
                        else
                        {
                            singleNormRef.IncRef();
                        }
                        // All norms in the .nrm file can share a single IndexInput since
                        // they are only used in a synchronized context.
                        // If this were to change in the future, a clone could be done here.
                        normInput = singleNormStream;
                    }
                    else
                    {
                        normSeek = 0;
                        normInput = d.OpenInput(fileName);
                    }

                    norms[fi.name] = new Norm(this, normInput, fi.number, normSeek);
                    nextNormSeek += maxDoc; // increment also if some norms are separate
                }
            }
        }
1435 
        /// <summary>Returns whether the shared core has its terms index loaded. </summary>
        public /*internal*/ virtual bool TermsIndexLoaded()
        {
            return core.TermsIndexIsLoaded();
        }
1440 
        // NOTE: only called from IndexWriter when a near
        // real-time reader is opened, or applyDeletes is run,
        // sharing a segment that's still being merged. This
        // method is not thread safe, and relies on the
        // synchronization in IndexWriter
        /// <summary>Loads the terms index for this segment into the shared core. </summary>
        /// <param name="termsIndexDivisor">divisor controlling how many index terms are loaded</param>
        internal virtual void LoadTermsIndex(int termsIndexDivisor)
        {
            core.LoadTermsIndex(si, termsIndexDivisor);
        }
1450 
1451  // for testing only
1452  public /*internal*/ virtual bool NormsClosed()
1453  {
1454  if (singleNormStream != null)
1455  {
1456  return false;
1457  }
1458  return norms.Values.All(norm => norm.refCount <= 0);
1459  }
1460 
        // for testing only
        /// <summary>Returns true when the Norm for <paramref name="field"/> has a zero ref
        /// count. NOTE(review): throws/returns-null for an unknown field depending on the
        /// norms map's indexer semantics — test-only, callers pass known fields. </summary>
        public /*internal*/ virtual bool NormsClosed(System.String field)
        {
            return norms[field].refCount == 0;
        }
1466 
        /// <summary> Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.</summary>
        /// <returns> TermVectorsReader for the calling thread, or null when this segment has
        /// no term vectors or the reader could not be cloned
        /// </returns>
        internal virtual TermVectorsReader GetTermVectorsReader()
        {
            TermVectorsReader tvReader = termVectorsLocal.Get();
            if (tvReader == null)
            {
                TermVectorsReader orig = core.GetTermVectorsReaderOrig();
                if (orig == null)
                {
                    // Segment has no term vectors at all.
                    return null;
                }
                else
                {
                    try
                    {
                        tvReader = (TermVectorsReader) orig.Clone();
                    }
                    catch (System.Exception)
                    {
                        // Clone failure is treated as "no term vectors available";
                        // mirrors the Java original's CloneNotSupportedException handling.
                        return null;
                    }
                }
                termVectorsLocal.Set(tvReader);
            }
            return tvReader;
        }
1495 
        /// <summary>Returns the shared, un-cloned TermVectorsReader from the core. </summary>
        internal virtual TermVectorsReader GetTermVectorsReaderOrig()
        {
            return core.GetTermVectorsReaderOrig();
        }
1500 
1501  /// <summary>Return a term frequency vector for the specified document and field. The
1502  /// vector returned contains term numbers and frequencies for all terms in
1503  /// the specified field of this document, if the field had storeTermVector
1504  /// flag set. If the flag was not set, the method returns null.
1505  /// </summary>
1506  /// <throws> IOException </throws>
1507  public override ITermFreqVector GetTermFreqVector(int docNumber, System.String field)
1508  {
1509  // Check if this field is invalid or has no stored term vector
1510  EnsureOpen();
1511  FieldInfo fi = core.fieldInfos.FieldInfo(field);
1512  if (fi == null || !fi.storeTermVector)
1513  return null;
1514 
1515  TermVectorsReader termVectorsReader = GetTermVectorsReader();
1516  if (termVectorsReader == null)
1517  return null;
1518 
1519  return termVectorsReader.Get(docNumber, field);
1520  }
1521 
1522 
1523  public override void GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
1524  {
1525  EnsureOpen();
1526  FieldInfo fi = core.fieldInfos.FieldInfo(field);
1527  if (fi == null || !fi.storeTermVector)
1528  return;
1529 
1530  TermVectorsReader termVectorsReader = GetTermVectorsReader();
1531  if (termVectorsReader == null)
1532  {
1533  return;
1534  }
1535  termVectorsReader.Get(docNumber, field, mapper);
1536  }
1537 
1538 
1539  public override void GetTermFreqVector(int docNumber, TermVectorMapper mapper)
1540  {
1541  EnsureOpen();
1542 
1543  TermVectorsReader termVectorsReader = GetTermVectorsReader();
1544  if (termVectorsReader == null)
1545  return ;
1546 
1547  termVectorsReader.Get(docNumber, mapper);
1548  }
1549 
1550  /// <summary>Return an array of term frequency vectors for the specified document.
1551  /// The array contains a vector for each vectorized field in the document.
1552  /// Each vector vector contains term numbers and frequencies for all terms
1553  /// in a given vectorized field.
1554  /// If no such fields existed, the method returns null.
1555  /// </summary>
1556  /// <throws> IOException </throws>
1557  public override ITermFreqVector[] GetTermFreqVectors(int docNumber)
1558  {
1559  EnsureOpen();
1560 
1561  TermVectorsReader termVectorsReader = GetTermVectorsReader();
1562  if (termVectorsReader == null)
1563  return null;
1564 
1565  return termVectorsReader.Get(docNumber);
1566  }
1567 
        /// <summary> Return the name of the segment this reader is reading.</summary>
        public virtual string SegmentName
        {
            get { return core.segment; }
        }

        /// <summary> Return the SegmentInfo of the segment this reader is reading.</summary>
        // Setter is internal: used when the reader is re-targeted to an updated SegmentInfo.
        internal virtual SegmentInfo SegmentInfo
        {
            get { return si; }
            set { si = value; }
        }
1580 
1581  internal virtual void StartCommit()
1582  {
1583  rollbackSegmentInfo = (SegmentInfo)si.Clone();
1584  rollbackHasChanges = hasChanges;
1585  rollbackDeletedDocsDirty = deletedDocsDirty;
1586  rollbackNormsDirty = normsDirty;
1587  rollbackPendingDeleteCount = pendingDeleteCount;
1588  foreach(Norm norm in norms.Values)
1589  {
1590  norm.rollbackDirty = norm.dirty;
1591  }
1592  }
1593 
1594  internal virtual void RollbackCommit()
1595  {
1596  si.Reset(rollbackSegmentInfo);
1597  hasChanges = rollbackHasChanges;
1598  deletedDocsDirty = rollbackDeletedDocsDirty;
1599  normsDirty = rollbackNormsDirty;
1600  pendingDeleteCount = rollbackPendingDeleteCount;
1601  foreach(Norm norm in norms.Values)
1602  {
1603  norm.dirty = norm.rollbackDirty;
1604  }
1605  }
1606 
        /// <summary>Returns the directory this index resides in. </summary>
        public override Directory Directory()
        {
            // Don't ensureOpen here -- in certain cases, when a
            // cloned/reopened reader needs to commit, it may call
            // this method on the closed original reader
            return core.dir;
        }
1615 
        // This is necessary so that cloned SegmentReaders (which
        // share the underlying postings data) will map to the
        // same entry in the FieldCache. See LUCENE-1579.

        /// <summary>Key identifying this reader's shared postings data in the FieldCache;
        /// clones sharing the same core return the same key. </summary>
        public override object FieldCacheKey
        {
            get { return core.freqStream; }
        }

        /// <summary>Key identifying this reader's deletions state (the deletions bit
        /// vector itself; may be null when there are no deletions). </summary>
        public override object DeletesCacheKey
        {
            get { return deletedDocs; }
        }


        /// <summary>Number of unique terms in this segment, as reported by the terms reader. </summary>
        public override long UniqueTermCount
        {
            get { return core.GetTermsReader().Size(); }
        }
1635 
        /// <summary> Lotsa tests did hacks like:<br/>
        /// SegmentReader reader = (SegmentReader) IndexReader.open(dir);<br/>
        /// They broke. This method serves as a hack to keep hacks working
        /// We do it with R/W access for the tests (BW compatibility)
        /// </summary>
        [Obsolete("Remove this when tests are fixed!")]
        public /*internal*/ static SegmentReader GetOnlySegmentReader(Directory dir)
        {
            // Opens the directory read/write (readOnly: false) and unwraps the single segment.
            return GetOnlySegmentReader(IndexReader.Open(dir,false));
        }
1646 
1647  public /*internal*/ static SegmentReader GetOnlySegmentReader(IndexReader reader)
1648  {
1649  var onlySegmentReader = reader as SegmentReader;
1650  if (onlySegmentReader != null)
1651  return onlySegmentReader;
1652 
1653  if (reader is DirectoryReader)
1654  {
1655  IndexReader[] subReaders = reader.GetSequentialSubReaders();
1656  if (subReaders.Length != 1)
1657  {
1658  throw new System.ArgumentException(reader + " has " + subReaders.Length + " segments instead of exactly one");
1659  }
1660 
1661  return (SegmentReader) subReaders[0];
1662  }
1663 
1664  throw new System.ArgumentException(reader + " is not a SegmentReader or a single-segment DirectoryReader");
1665  }
1666 
        /// <summary>The terms-index divisor the shared core was opened with. </summary>
        public override int TermInfosIndexDivisor
        {
            get { return core.termsIndexDivisor; }
        }
1671 
        // The *_ForNUnit properties below expose internal state for the NUnit test suite
        // only; they are not part of the public API contract.

        /// <summary>Test-only access to the per-field norms map. </summary>
        public System.Collections.Generic.IDictionary<string, Norm> norms_ForNUnit
        {
            get { return norms; }
        }

        /// <summary>Test-only access to the deletions bit vector (may be null). </summary>
        public BitVector deletedDocs_ForNUnit
        {
            get { return deletedDocs; }
        }

        /// <summary>Test-only access to the shared core readers. </summary>
        public CoreReaders core_ForNUnit
        {
            get { return core; }
        }

        /// <summary>Test-only access to the deletions ref counter (may be null). </summary>
        public Ref deletedDocsRef_ForNUnit
        {
            get { return deletedDocsRef; }
        }
1691  }
1692 }