19 using Lucene.Net.Documents;
20 using Lucene.Net.Support;
24 namespace Lucene.Net.Index
// Initializes the per-thread PerDoc free list (ported-from-Java initializer pattern).
// NOTE(review): source is fragmentary — interior lines (braces, other statements) are elided.
37 private void InitBlock()
// Start the reusable-PerDoc pool with room for a single entry; grown later in GetPerDoc.
39 docFreeList =
new PerDoc[1];
// Boost applied at document level — presumably multiplied into field boosts; TODO confirm against callers.
42 internal float docBoost;
// Generation counter incremented once per processed document (see ProcessDocument below).
43 internal int fieldGen;
// Number of fields seen in the CURRENT document (resets per doc — TODO confirm; reset line not visible here).
50 internal int fieldCount;
// Bitmask for the power-of-two field hash table; kept equal to fieldHash.Length - 1 (see Rehash).
54 internal int hashMask = 1;
// Total distinct fields ever added to fieldHash across all documents on this thread.
55 internal int totalFieldCount;
// Constructor body fragment: wires this per-thread processor to the shared
// DocFieldProcessor and registers per-thread consumers.
// NOTE(review): constructor signature and surrounding braces are elided from this view.
64 this.docState = threadState.docState;
65 this.docFieldProcessor = docFieldProcessor;
// Share the processor-wide FieldInfos so field numbering is consistent across threads.
66 this.fieldInfos = docFieldProcessor.fieldInfos;
// Ask the downstream consumer chain for a per-thread instance bound to this thread.
67 this.consumer = docFieldProcessor.consumer.
AddThread(
this);
// Stored-fields writer also gets a per-thread instance, keyed on the shared docState.
68 fieldsWriter = docFieldProcessor.fieldsWriter.
AddThread(docState);
// Aborts any in-flight document state; iterates every hash bucket
// (loop body elided in this fragment — presumably aborts each chained per-field; TODO confirm).
71 public override void Abort()
73 for (
int i = 0; i < fieldHash.Length; i++)
// Fragment of a method that collects the per-field consumers from every hash chain
// into `fields`; the enclosing signature and chain-walk lines are elided from this view.
91 for (
int i = 0; i < fieldHash.Length; i++)
// Each live per-field entry contributes its consumer to the result collection.
96 fields.Add(field.consumer);
// Sanity check: the hash table must contain exactly totalFieldCount entries.
100 System.Diagnostics.Debug.Assert(fields.Count == totalFieldCount);
// Fragment of a purge pass over the field hash table: unlinks per-field entries whose
// lastGen is -1 (not seen recently) and resets survivors' lastGen to -1 for the next cycle.
// NOTE(review): enclosing method signature and several interior lines are elided.
111 for (
int i = 0; i < fieldHash.Length; i++)
// Walk the collision chain of bucket i.
116 while (perField != null)
// -1 marks a field not used since the last flush cycle — candidate for removal.
119 if (perField.lastGen == - 1)
// Unlink: either the bucket head or the predecessor's next pointer is spliced past it.
126 if (lastPerField == null)
127 fieldHash[i] = perField.next;
129 lastPerField.next = perField.next;
// Optional diagnostics: log each purged field by name.
131 if (state.docWriter.infoStream != null)
132 state.docWriter.infoStream.WriteLine(
" purge field=" + perField.fieldInfo.name);
// Survivor: reset its generation so it must be re-touched before the next purge.
139 perField.lastGen = - 1;
140 lastPerField = perField;
// Advance along the chain.
143 perField = perField.next;
// Doubles the field hash table and redistributes every chained entry.
// NOTE(review): the inner loop that walks each bucket's collision chain
// (advancing fp0 via nextFP0) is elided from this fragment.
148 private void Rehash()
// Table size stays a power of two, so (size - 1) remains a valid bitmask.
150 int newHashSize = (fieldHash.Length * 2);
// Guards against overflow/zero-size wraparound when doubling.
151 System.Diagnostics.Debug.Assert(newHashSize > fieldHash.Length);
153 DocFieldProcessorPerField[] newHashArray =
new DocFieldProcessorPerField[newHashSize];
156 int newHashMask = newHashSize - 1;
// Rehash all entries from every old bucket into the new table.
157 for (
int j = 0; j < fieldHash.Length; j++)
159 DocFieldProcessorPerField fp0 = fieldHash[j];
// Recompute the bucket using the new mask, then push fp0 onto that bucket's chain head.
162 int hashPos2 = fp0.fieldInfo.name.GetHashCode() & newHashMask;
163 DocFieldProcessorPerField nextFP0 = fp0.next;
164 fp0.next = newHashArray[hashPos2];
165 newHashArray[hashPos2] = fp0;
// Publish the new table and mask together.
170 fieldHash = newHashArray;
171 hashMask = newHashMask;
// Fragment of the main per-document processing routine: buckets the document's fields
// by name into fieldHash, accumulates each field's occurrences, then hands sorted
// per-field batches to the consumer chain. Roughly 60 interior lines (braces, the
// `fp` lookup result handling, new-field creation, array allocations) are elided.
177 consumer.StartDocument();
178 fieldsWriter.StartDocument();
// Test hook used by DocumentsWriter's unit tests to assert call ordering.
182 System.Diagnostics.Debug.Assert(docFieldProcessor.docWriter.writer.TestPoint(
"DocumentsWriter.ThreadState.init start"));
// One generation per document: lets us detect "first occurrence of this field in THIS doc".
186 int thisFieldGen = fieldGen++;
189 int numDocFields = docFields.Count;
// Pass 1: for each field instance in the document, find-or-create its per-field entry.
196 for (
int i = 0; i < numDocFields; i++)
199 System.String fieldName = field.
Name;
// Open-chained hash lookup by field name.
202 int hashPos = fieldName.GetHashCode() & hashMask;
204 while (fp != null && !fp.fieldInfo.name.Equals(fieldName))
// (New-entry path, creation elided): link the fresh entry at the bucket head.
220 fp.next = fieldHash[hashPos];
221 fieldHash[hashPos] = fp;
// Keep load factor under 50%; Rehash doubles the table.
224 if (totalFieldCount >= fieldHash.Length / 2)
// First time this field is seen in the current document:
234 if (thisFieldGen != fp.lastGen)
// Grow the per-document `fields` array by doubling when full.
240 if (fieldCount == fields.Length)
242 int newSize = fields.Length * 2;
244 Array.Copy(fields, 0, newArray, 0, fieldCount);
248 fields[fieldCount++] = fp;
249 fp.lastGen = thisFieldGen;
// Accumulate this occurrence onto the per-field entry, growing its array as needed.
252 if (fp.fieldCount == fp.fields.Length)
255 Array.Copy(fp.fields, 0, newArray, 0, fp.fieldCount);
256 fp.fields = newArray;
259 fp.fields[fp.fieldCount++] = field;
// Stored-fields path runs per occurrence (conditional guard elided — TODO confirm it
// checks field.IsStored()).
262 fieldsWriter.AddField(field, fp.fieldInfo);
// Pass 2: process fields in deterministic (name-sorted) order so term vectors etc.
// are consistent across segments.
272 QuickSort(fields, 0, fieldCount - 1);
274 for (
int i = 0; i < fieldCount; i++)
275 fields[i].consumer.ProcessFields(fields[i].fields, fields[i].fieldCount);
// If the analyzer produced a term longer than MAX_TERM_LENGTH, warn once per document.
277 if (docState.maxTermPrefix != null && docState.infoStream != null)
279 docState.infoStream.WriteLine(
"WARNING: document contains at least one immense term (longer than the max length " +
DocumentsWriter.MAX_TERM_LENGTH +
"), all of which were skipped. Please correct the analyzer to not produce such terms. The prefix of the first immense term is: '" + docState.maxTermPrefix +
"...'");
// Clear the flag so the next document starts fresh.
280 docState.maxTermPrefix = null;
// Fragment of the end-of-document path: when both downstream writers produced output,
// wrap them in a pooled PerDoc pair so they flush together. Earlier branches
// (one == null, and this `two == null` short-circuit) are elided from this view.
289 else if (two == null)
// Both halves present: bundle them under one PerDoc carrying the current docID.
295 PerDoc both = GetPerDoc();
296 both.docID = docState.docID;
// Both sub-writers must agree on the document they belong to.
297 System.Diagnostics.Debug.Assert(one.docID == docState.docID);
298 System.Diagnostics.Debug.Assert(two.docID == docState.docID);
// Fragment of an in-place quicksort over DocFieldProcessorPerField[], ordered by
// ordinal comparison of fieldInfo.name. Uses median-of-three pivot selection.
// NOTE(review): the base-case guards, swap tails, and pointer-advance lines are elided.
// Two-element range: swap if out of order (swap's second half elided).
309 else if (hi == 1 + lo)
311 if (String.CompareOrdinal(array[lo].fieldInfo.name, array[hi].fieldInfo.name) > 0)
314 array[lo] = array[hi];
// Median-of-three: order lo/mid/hi so array[mid] is the median pivot.
322 if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
324 DocFieldProcessorPerField tmp = array[lo];
325 array[lo] = array[mid];
329 if (String.CompareOrdinal(array[mid].fieldInfo.name, array[hi].fieldInfo.name) > 0)
331 DocFieldProcessorPerField tmp = array[mid];
332 array[mid] = array[hi];
335 if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
337 DocFieldProcessorPerField tmp2 = array[lo];
338 array[lo] = array[mid];
// Partition around the median; scan from both ends swapping out-of-place elements.
349 DocFieldProcessorPerField partition = array[mid];
353 while (String.CompareOrdinal(array[right].fieldInfo.name, partition.fieldInfo.name) > 0)
356 while (left < right && String.CompareOrdinal(array[left].fieldInfo.name, partition.fieldInfo.name) <= 0)
361 DocFieldProcessorPerField tmp = array[left];
362 array[left] = array[right];
// Recurse on both halves of the partition point.
372 QuickSort(array, lo, left);
373 QuickSort(array, left + 1, hi);
// Simple object pool for PerDoc instances, avoiding per-document allocation.
// freeCount = entries currently available in docFreeList; allocCount = total ever created.
376 internal PerDoc[] docFreeList;
377 internal int freeCount;
378 internal int allocCount;

// Returns a pooled PerDoc, allocating a new one when the free list is empty.
// NOTE(review): the `if (freeCount == 0)` guard around the alloc path is elided.
380 internal PerDoc GetPerDoc()
// Pool exhausted and more live PerDocs than the list can hold: grow the backing array.
387 if (allocCount > docFreeList.Length)
// Growth happens one allocation at a time, hence exactly length + 1 here.
392 System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
393 docFreeList =
new PerDoc[
ArrayUtil.GetNextSize(allocCount)];
395 return new PerDoc(
this);
// Fast path (guard elided): pop the most recently freed instance.
398 return docFreeList[--freeCount];
// Returns a PerDoc to the pool; GetPerDoc pre-sizes docFreeList so a slot is always free.
// NOTE(review): any locking around the push is elided from this fragment — verify
// thread-safety expectations against the full file.
402 internal void FreePerDoc(PerDoc perDoc)
406 System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
407 docFreeList[freeCount++] = perDoc;
// Composite DocWriter pairing two downstream DocWriters ("one" and "two") so a
// document's buffered output is finished or aborted as a unit, then recycled via
// the enclosing thread's pool. NOTE(review): Finish/Abort bodies are mostly elided
// (presumably they delegate to one/two with try/finally before FreePerDoc — confirm).
411 internal class PerDoc:DocumentsWriter.DocWriter
413 public PerDoc(DocFieldProcessorPerThread enclosingInstance)
415 InitBlock(enclosingInstance);
// Ported-from-Java inner-class plumbing: captures the enclosing per-thread instance.
417 private void InitBlock(DocFieldProcessorPerThread enclosingInstance)
419 this.enclosingInstance = enclosingInstance;
421 private DocFieldProcessorPerThread enclosingInstance;
422 public DocFieldProcessorPerThread Enclosing_Instance
426 return enclosingInstance;
// The two halves of this document's buffered output.
431 internal DocumentsWriter.DocWriter one;
432 internal DocumentsWriter.DocWriter two;
// RAM accounting is simply the sum of both halves.
434 public override long SizeInBytes()
436 return one.SizeInBytes() + two.SizeInBytes();
// On finish, recycle this wrapper back to the enclosing thread's pool.
439 public override void Finish()
454 Enclosing_Instance.FreePerDoc(
this);
// On abort, the wrapper is likewise returned to the pool.
458 public override void Abort()
473 Enclosing_Instance.FreePerDoc(
this);