fieldsreader.java

lucene-2.4.0 is a full-text search toolkit (Apache Lucene)
Language: Java
Page 1 of 2
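FieldsReader is internal to Lucene; applications reach it indirectly through IndexReader.document(...), which decodes the stored fields that IndexWriter wrote into a segment's .fdt/.fdx files. As a point of reference for the listing below, here is a minimal sketch of that round trip against the public Lucene 2.4 API; the RAMDirectory, the field name "title", and the sample value are illustrative assumptions, not part of this file.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class StoredFieldRoundTrip {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();

    // Index one document with a stored field; FieldsWriter puts the value
    // into <segment>.fdt and an 8-byte pointer per document into <segment>.fdx.
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(),
        true, IndexWriter.MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    doc.add(new Field("title", "Lucene in Action",
        Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    // Reading the stored field back goes through FieldsReader.doc(...)
    // under the hood of IndexReader.document(int).
    IndexReader reader = IndexReader.open(dir);
    System.out.println(reader.document(0).get("title"));   // prints "Lucene in Action"
    reader.close();
  }
}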
package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.util.CloseableThreadLocal;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.Reader;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

/**
 * Class responsible for access to stored document fields.
 * <p/>
 * It uses &lt;segment&gt;.fdt and &lt;segment&gt;.fdx; files.
 *
 * @version $Id: FieldsReader.java 695184 2008-09-14 10:32:59Z mikemccand $
 */
final class FieldsReader {
  private final FieldInfos fieldInfos;

  // The main fieldStream, used only for cloning.
  private final IndexInput cloneableFieldsStream;

  // This is a clone of cloneableFieldsStream used for reading documents.
  // It should not be cloned outside of a synchronized context.
  private final IndexInput fieldsStream;

  private final IndexInput indexStream;
  private int numTotalDocs;
  private int size;
  private boolean closed;
  private final int format;
  private final int formatSize;

  // The docID offset where our docs begin in the index
  // file.  This will be 0 if we have our own private file.
  private int docStoreOffset;

  private CloseableThreadLocal fieldsStreamTL = new CloseableThreadLocal();

  FieldsReader(Directory d, String segment, FieldInfos fn) throws IOException {
    this(d, segment, fn, BufferedIndexInput.BUFFER_SIZE, -1, 0);
  }

  FieldsReader(Directory d, String segment, FieldInfos fn, int readBufferSize) throws IOException {
    this(d, segment, fn, readBufferSize, -1, 0);
  }

  FieldsReader(Directory d, String segment, FieldInfos fn, int readBufferSize, int docStoreOffset, int size) throws IOException {
    boolean success = false;

    try {
      fieldInfos = fn;

      cloneableFieldsStream = d.openInput(segment + "." + IndexFileNames.FIELDS_EXTENSION, readBufferSize);
      indexStream = d.openInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);

      // First version of fdx did not include a format
      // header, but, the first int will always be 0 in that
      // case
      int firstInt = indexStream.readInt();
      if (firstInt == 0)
        format = 0;
      else
        format = firstInt;

      if (format > FieldsWriter.FORMAT_CURRENT)
        throw new CorruptIndexException("Incompatible format version: " + format + " expected "
                                        + FieldsWriter.FORMAT_CURRENT + " or lower");

      if (format > FieldsWriter.FORMAT)
        formatSize = 4;
      else
        formatSize = 0;

      if (format < FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
        cloneableFieldsStream.setModifiedUTF8StringsMode();

      fieldsStream = (IndexInput) cloneableFieldsStream.clone();

      final long indexSize = indexStream.length() - formatSize;

      if (docStoreOffset != -1) {
        // We read only a slice out of this shared fields file
        this.docStoreOffset = docStoreOffset;
        this.size = size;

        // Verify the file is long enough to hold all of our
        // docs
        assert ((int) (indexSize / 8)) >= size + this.docStoreOffset: "indexSize=" + indexSize + " size=" + size + " docStoreOffset=" + docStoreOffset;
      } else {
        this.docStoreOffset = 0;
        this.size = (int) (indexSize >> 3);
      }

      numTotalDocs = (int) (indexSize >> 3);
      success = true;
    } finally {
      // With lock-less commits, it's entirely possible (and
      // fine) to hit a FileNotFound exception above. In
      // this case, we want to explicitly close any subset
      // of things that were opened so that we don't have to
      // wait for a GC to do so.
      if (!success) {
        close();
      }
    }
  }

  /**
   * @throws AlreadyClosedException if this FieldsReader is closed
   */
  protected final void ensureOpen() throws AlreadyClosedException {
    if (closed) {
      throw new AlreadyClosedException("this FieldsReader is closed");
    }
  }

  /**
   * Closes the underlying {@link org.apache.lucene.store.IndexInput} streams, including any ones associated with a
   * lazy implementation of a Field.  This means that the Fields values will not be accessible.
   *
   * @throws IOException
   */
  final void close() throws IOException {
    if (!closed) {
      if (fieldsStream != null) {
        fieldsStream.close();
      }
      if (cloneableFieldsStream != null) {
        cloneableFieldsStream.close();
      }
      if (indexStream != null) {
        indexStream.close();
      }
      fieldsStreamTL.close();
      closed = true;
    }
  }

  final int size() {
    return size;
  }

  private final void seekIndex(int docID) throws IOException {
    indexStream.seek(formatSize + (docID + docStoreOffset) * 8L);
  }

  boolean canReadRawDocs() {
    return format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES;
  }

  final Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    seekIndex(n);
    long position = indexStream.readLong();
    fieldsStream.seek(position);

    Document doc = new Document();
    int numFields = fieldsStream.readVInt();
    for (int i = 0; i < numFields; i++) {
      int fieldNumber = fieldsStream.readVInt();
      FieldInfo fi = fieldInfos.fieldInfo(fieldNumber);
      FieldSelectorResult acceptField = fieldSelector == null ? FieldSelectorResult.LOAD : fieldSelector.accept(fi.name);

      byte bits = fieldsStream.readByte();
      assert bits <= FieldsWriter.FIELD_IS_COMPRESSED + FieldsWriter.FIELD_IS_TOKENIZED + FieldsWriter.FIELD_IS_BINARY;

      boolean compressed = (bits & FieldsWriter.FIELD_IS_COMPRESSED) != 0;
      boolean tokenize = (bits & FieldsWriter.FIELD_IS_TOKENIZED) != 0;
      boolean binary = (bits & FieldsWriter.FIELD_IS_BINARY) != 0;
      //TODO: Find an alternative approach here if this list continues to grow beyond the
      //list of 5 or 6 currently here.  See Lucene 762 for discussion
      if (acceptField.equals(FieldSelectorResult.LOAD)) {
        addField(doc, fi, binary, compressed, tokenize);
      }
      else if (acceptField.equals(FieldSelectorResult.LOAD_FOR_MERGE)) {
        addFieldForMerge(doc, fi, binary, compressed, tokenize);
      }
      else if (acceptField.equals(FieldSelectorResult.LOAD_AND_BREAK)){
        addField(doc, fi, binary, compressed, tokenize);
        break;//Get out of this loop
      }
      else if (acceptField.equals(FieldSelectorResult.LAZY_LOAD)) {
        addFieldLazy(doc, fi, binary, compressed, tokenize);
      }
      else if (acceptField.equals(FieldSelectorResult.SIZE)){
        skipField(binary, compressed, addFieldSize(doc, fi, binary, compressed));
      }
      else if (acceptField.equals(FieldSelectorResult.SIZE_AND_BREAK)){
        addFieldSize(doc, fi, binary, compressed);
        break;
      }
      else {
        skipField(binary, compressed);
      }
    }

    return doc;
  }

  /** Returns the length in bytes of each raw document in a
   *  contiguous range of length numDocs starting with
   *  startDocID.  Returns the IndexInput (the fieldStream),
   *  already seeked to the starting point for startDocID.*/
  final IndexInput rawDocs(int[] lengths, int startDocID, int numDocs) throws IOException {
    seekIndex(startDocID);
    long startOffset = indexStream.readLong();
    long lastOffset = startOffset;
    int count = 0;
    while (count < numDocs) {
      final long offset;
      final int docID = docStoreOffset + startDocID + count + 1;
      assert docID <= numTotalDocs;
      if (docID < numTotalDocs)
        offset = indexStream.readLong();
      else
        offset = fieldsStream.length();
      lengths[count++] = (int) (offset - lastOffset);
      lastOffset = offset;
    }

    fieldsStream.seek(startOffset);

    return fieldsStream;
  }

  /**
   * Skip the field.  We still have to read some of the information about the field, but can skip past the actual content.
   * This will have the most payoff on large fields.
   */
  private void skipField(boolean binary, boolean compressed) throws IOException {
    skipField(binary, compressed, fieldsStream.readVInt());
  }

  private void skipField(boolean binary, boolean compressed, int toRead) throws IOException {
    if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES || binary || compressed) {
      fieldsStream.seek(fieldsStream.getFilePointer() + toRead);
    } else {
      // We need to skip chars.  This will slow us down, but still better
      fieldsStream.skipChars(toRead);
    }
  }

  private void addFieldLazy(Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize) throws IOException {
    if (binary) {
      int toRead = fieldsStream.readVInt();
      long pointer = fieldsStream.getFilePointer();
      if (compressed) {
        //was: doc.add(new Fieldable(fi.name, uncompress(b), Fieldable.Store.COMPRESS));
        doc.add(new LazyField(fi.name, Field.Store.COMPRESS, toRead, pointer, binary));
      } else {
        //was: doc.add(new Fieldable(fi.name, b, Fieldable.Store.YES));
        doc.add(new LazyField(fi.name, Field.Store.YES, toRead, pointer, binary));
      }
      //Need to move the pointer ahead by toRead positions
      fieldsStream.seek(pointer + toRead);
    } else {
      Field.Store store = Field.Store.YES;
      Field.Index index = getIndexType(fi, tokenize);
      Field.TermVector termVector = getTermVectorType(fi);

      Fieldable f;
      if (compressed) {
        store = Field.Store.COMPRESS;
        int toRead = fieldsStream.readVInt();
        long pointer = fieldsStream.getFilePointer();
        f = new LazyField(fi.name, store, toRead, pointer, binary);
        //skip over the part that we aren't loading
        fieldsStream.seek(pointer + toRead);
        f.setOmitNorms(fi.omitNorms);
      } else {
        int length = fieldsStream.readVInt();
        long pointer = fieldsStream.getFilePointer();
        //Skip ahead of where we are by the length of what is stored
        if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
          fieldsStream.seek(pointer + length);
        else
          fieldsStream.skipChars(length);
        f = new LazyField(fi.name, store, index, termVector, length, pointer, binary);
        f.setOmitNorms(fi.omitNorms);
      }
      doc.add(f);
    }
  }

  // in merge mode we don't uncompress the data of a compressed field
  private void addFieldForMerge(Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize) throws IOException {
    Object data;

    if (binary || compressed) {
      int toRead = fieldsStream.readVInt();
      final byte[] b = new byte[toRead];
      fieldsStream.readBytes(b, 0, b.length);
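The listing breaks off here at the end of page 1, inside addFieldForMerge. Of the methods already shown, doc(int, FieldSelector) is the main entry point: it dispatches on the FieldSelectorResult returned for each field, and the LAZY_LOAD branch (addFieldLazy) records only a file pointer and length in a LazyField rather than reading the bytes immediately. Below is a small sketch of how a caller might drive that path through the public API; the field names "title" and "body" and the helper class are illustrative assumptions, not part of this file.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader;

import java.io.IOException;

class LazyStoredFieldAccess {
  // Load "title" eagerly and defer every other stored field; deferred fields
  // come back as lazy Fieldables whose bytes are read from the .fdt file
  // only when the value is first requested.
  static String readBodyLazily(IndexReader reader, int docID) throws IOException {
    FieldSelector selector = new FieldSelector() {
      public FieldSelectorResult accept(String fieldName) {
        return "title".equals(fieldName)
            ? FieldSelectorResult.LOAD
            : FieldSelectorResult.LAZY_LOAD;
      }
    };
    Document doc = reader.document(docID, selector);  // FieldsReader.doc(...) runs here
    Fieldable body = doc.getFieldable("body");        // lazy handle; no field bytes read yet
    return body == null ? null : body.stringValue();  // the value is loaded on this call
  }
}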
