
FieldsReader.java

lucene-2.4.0 is a full-text search toolkit.
Language: Java
Page 1 of 2
package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.util.CloseableThreadLocal;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.Reader;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

/**
 * Class responsible for access to stored document fields.
 * <p/>
 * It uses &lt;segment&gt;.fdt and &lt;segment&gt;.fdx; files.
 *
 * @version $Id: FieldsReader.java 695184 2008-09-14 10:32:59Z mikemccand $
 */
final class FieldsReader {
  private final FieldInfos fieldInfos;

  // The main fieldStream, used only for cloning.
  private final IndexInput cloneableFieldsStream;

  // This is a clone of cloneableFieldsStream used for reading documents.
  // It should not be cloned outside of a synchronized context.
  private final IndexInput fieldsStream;

  private final IndexInput indexStream;
  private int numTotalDocs;
  private int size;
  private boolean closed;
  private final int format;
  private final int formatSize;

  // The docID offset where our docs begin in the index
  // file.  This will be 0 if we have our own private file.
  private int docStoreOffset;

  private CloseableThreadLocal fieldsStreamTL = new CloseableThreadLocal();

  FieldsReader(Directory d, String segment, FieldInfos fn) throws IOException {
    this(d, segment, fn, BufferedIndexInput.BUFFER_SIZE, -1, 0);
  }

  FieldsReader(Directory d, String segment, FieldInfos fn, int readBufferSize) throws IOException {
    this(d, segment, fn, readBufferSize, -1, 0);
  }

  FieldsReader(Directory d, String segment, FieldInfos fn, int readBufferSize, int docStoreOffset, int size) throws IOException {
    boolean success = false;

    try {
      fieldInfos = fn;

      cloneableFieldsStream = d.openInput(segment + "." + IndexFileNames.FIELDS_EXTENSION, readBufferSize);
      indexStream = d.openInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);

      // First version of fdx did not include a format
      // header, but, the first int will always be 0 in that
      // case
      int firstInt = indexStream.readInt();
      if (firstInt == 0)
        format = 0;
      else
        format = firstInt;

      if (format > FieldsWriter.FORMAT_CURRENT)
        throw new CorruptIndexException("Incompatible format version: " + format + " expected "
                                        + FieldsWriter.FORMAT_CURRENT + " or lower");

      if (format > FieldsWriter.FORMAT)
        formatSize = 4;
      else
        formatSize = 0;

      if (format < FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
        cloneableFieldsStream.setModifiedUTF8StringsMode();

      fieldsStream = (IndexInput) cloneableFieldsStream.clone();

      final long indexSize = indexStream.length() - formatSize;

      if (docStoreOffset != -1) {
        // We read only a slice out of this shared fields file
        this.docStoreOffset = docStoreOffset;
        this.size = size;

        // Verify the file is long enough to hold all of our
        // docs
        assert ((int) (indexSize / 8)) >= size + this.docStoreOffset: "indexSize=" + indexSize + " size=" + size + " docStoreOffset=" + docStoreOffset;
      } else {
        this.docStoreOffset = 0;
        this.size = (int) (indexSize >> 3);
      }

      numTotalDocs = (int) (indexSize >> 3);
      success = true;
    } finally {
      // With lock-less commits, it's entirely possible (and
      // fine) to hit a FileNotFound exception above. In
      // this case, we want to explicitly close any subset
      // of things that were opened so that we don't have to
      // wait for a GC to do so.
      if (!success) {
        close();
      }
    }
  }

  /**
   * @throws AlreadyClosedException if this FieldsReader is closed
   */
  protected final void ensureOpen() throws AlreadyClosedException {
    if (closed) {
      throw new AlreadyClosedException("this FieldsReader is closed");
    }
  }

  /**
   * Closes the underlying {@link org.apache.lucene.store.IndexInput} streams, including any ones associated with a
   * lazy implementation of a Field.  This means that the Fields values will not be accessible.
   *
   * @throws IOException
   */
  final void close() throws IOException {
    if (!closed) {
      if (fieldsStream != null) {
        fieldsStream.close();
      }
      if (cloneableFieldsStream != null) {
        cloneableFieldsStream.close();
      }
      if (indexStream != null) {
        indexStream.close();
      }
      fieldsStreamTL.close();
      closed = true;
    }
  }

  final int size() {
    return size;
  }

  private final void seekIndex(int docID) throws IOException {
    indexStream.seek(formatSize + (docID + docStoreOffset) * 8L);
  }

  boolean canReadRawDocs() {
    return format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES;
  }

  final Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    seekIndex(n);
    long position = indexStream.readLong();
    fieldsStream.seek(position);

    Document doc = new Document();
    int numFields = fieldsStream.readVInt();
    for (int i = 0; i < numFields; i++) {
      int fieldNumber = fieldsStream.readVInt();
      FieldInfo fi = fieldInfos.fieldInfo(fieldNumber);
      FieldSelectorResult acceptField = fieldSelector == null ? FieldSelectorResult.LOAD : fieldSelector.accept(fi.name);

      byte bits = fieldsStream.readByte();
      assert bits <= FieldsWriter.FIELD_IS_COMPRESSED + FieldsWriter.FIELD_IS_TOKENIZED + FieldsWriter.FIELD_IS_BINARY;

      boolean compressed = (bits & FieldsWriter.FIELD_IS_COMPRESSED) != 0;
      boolean tokenize = (bits & FieldsWriter.FIELD_IS_TOKENIZED) != 0;
      boolean binary = (bits & FieldsWriter.FIELD_IS_BINARY) != 0;
      //TODO: Find an alternative approach here if this list continues to grow beyond the
      //list of 5 or 6 currently here.  See Lucene 762 for discussion
      if (acceptField.equals(FieldSelectorResult.LOAD)) {
        addField(doc, fi, binary, compressed, tokenize);
      }
      else if (acceptField.equals(FieldSelectorResult.LOAD_FOR_MERGE)) {
        addFieldForMerge(doc, fi, binary, compressed, tokenize);
      }
      else if (acceptField.equals(FieldSelectorResult.LOAD_AND_BREAK)){
        addField(doc, fi, binary, compressed, tokenize);
        break;//Get out of this loop
      }
      else if (acceptField.equals(FieldSelectorResult.LAZY_LOAD)) {
        addFieldLazy(doc, fi, binary, compressed, tokenize);
      }
      else if (acceptField.equals(FieldSelectorResult.SIZE)){
        skipField(binary, compressed, addFieldSize(doc, fi, binary, compressed));
      }
      else if (acceptField.equals(FieldSelectorResult.SIZE_AND_BREAK)){
        addFieldSize(doc, fi, binary, compressed);
        break;
      }
      else {
        skipField(binary, compressed);
      }
    }

    return doc;
  }

  /** Returns the length in bytes of each raw document in a
   *  contiguous range of length numDocs starting with
   *  startDocID.  Returns the IndexInput (the fieldStream),
   *  already seeked to the starting point for startDocID.*/
  final IndexInput rawDocs(int[] lengths, int startDocID, int numDocs) throws IOException {
    seekIndex(startDocID);
    long startOffset = indexStream.readLong();
    long lastOffset = startOffset;
    int count = 0;
    while (count < numDocs) {
      final long offset;
      final int docID = docStoreOffset + startDocID + count + 1;
      assert docID <= numTotalDocs;
      if (docID < numTotalDocs)
        offset = indexStream.readLong();
      else
        offset = fieldsStream.length();
      lengths[count++] = (int) (offset - lastOffset);
      lastOffset = offset;
    }

    fieldsStream.seek(startOffset);

    return fieldsStream;
  }

  /**
   * Skip the field.  We still have to read some of the information about the field, but can skip past the actual content.
   * This will have the most payoff on large fields.
   */
  private void skipField(boolean binary, boolean compressed) throws IOException {
    skipField(binary, compressed, fieldsStream.readVInt());
  }

  private void skipField(boolean binary, boolean compressed, int toRead) throws IOException {
    if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES || binary || compressed) {
      fieldsStream.seek(fieldsStream.getFilePointer() + toRead);
    } else {
      // We need to skip chars.  This will slow us down, but still better
      fieldsStream.skipChars(toRead);
    }
  }

  private void addFieldLazy(Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize) throws IOException {
    if (binary) {
      int toRead = fieldsStream.readVInt();
      long pointer = fieldsStream.getFilePointer();
      if (compressed) {
        //was: doc.add(new Fieldable(fi.name, uncompress(b), Fieldable.Store.COMPRESS));
        doc.add(new LazyField(fi.name, Field.Store.COMPRESS, toRead, pointer, binary));
      } else {
        //was: doc.add(new Fieldable(fi.name, b, Fieldable.Store.YES));
        doc.add(new LazyField(fi.name, Field.Store.YES, toRead, pointer, binary));
      }
      //Need to move the pointer ahead by toRead positions
      fieldsStream.seek(pointer + toRead);
    } else {
      Field.Store store = Field.Store.YES;
      Field.Index index = getIndexType(fi, tokenize);
      Field.TermVector termVector = getTermVectorType(fi);

      Fieldable f;
      if (compressed) {
        store = Field.Store.COMPRESS;
        int toRead = fieldsStream.readVInt();
        long pointer = fieldsStream.getFilePointer();
        f = new LazyField(fi.name, store, toRead, pointer, binary);
        //skip over the part that we aren't loading
        fieldsStream.seek(pointer + toRead);
        f.setOmitNorms(fi.omitNorms);
      } else {
        int length = fieldsStream.readVInt();
        long pointer = fieldsStream.getFilePointer();
        //Skip ahead of where we are by the length of what is stored
        if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
          fieldsStream.seek(pointer + length);
        else
          fieldsStream.skipChars(length);
        f = new LazyField(fi.name, store, index, termVector, length, pointer, binary);
        f.setOmitNorms(fi.omitNorms);
      }

      doc.add(f);
    }
  }

  // in merge mode we don't uncompress the data of a compressed field
  private void addFieldForMerge(Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize) throws IOException {
    Object data;

    if (binary || compressed) {
      int toRead = fieldsStream.readVInt();
      final byte[] b = new byte[toRead];
      fieldsStream.readBytes(b, 0, b.length);
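The listing above breaks off partway through addFieldForMerge() (this is page 1 of 2). FieldsReader itself is package-private, so applications never construct it directly: stored fields are requested through IndexReader.document(int, FieldSelector), which delegates to the doc() method shown above and dispatches on the FieldSelectorResult branches (LOAD, LAZY_LOAD, SIZE, and so on). The sketch below is a minimal, illustrative Lucene 2.4-era usage example under that assumption; the index path, the "title" field name, and the document number are placeholders, and the anonymous FieldSelector exists only to show which branch each result triggers, not something defined in this file.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class StoredFieldsExample {
  public static void main(String[] args) throws Exception {
    // Placeholder path; point this at an existing Lucene 2.4 index
    // that contains at least one document.
    Directory dir = FSDirectory.getDirectory("/path/to/index");
    IndexReader reader = IndexReader.open(dir);
    try {
      // Eagerly load only the "title" field and defer everything else.
      // LOAD corresponds to addField() and LAZY_LOAD to addFieldLazy()
      // in FieldsReader.doc().
      FieldSelector selector = new FieldSelector() {
        public FieldSelectorResult accept(String fieldName) {
          return "title".equals(fieldName)
              ? FieldSelectorResult.LOAD
              : FieldSelectorResult.LAZY_LOAD;
        }
      };

      // Served internally by FieldsReader for the segment holding doc 0.
      Document doc = reader.document(0, selector);
      System.out.println("title = " + doc.get("title"));
    } finally {
      reader.close();
      dir.close();
    }
  }
}

With LAZY_LOAD, the returned field is backed by the LazyField created in addFieldLazy(): only the length and file pointer are recorded up front, and the value is read from the .fdt file when the application actually asks for it.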
