亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關于我們
? 蟲蟲下載站

?? kohonennetwork.java

?? java neural networkin
?? JAVA
字號:
/**
 * KohonenNeuralNetwork
 * Copyright 2005 by Jeff Heaton(jeff@jeffheaton.com)
 *
 * Example program from Chapter 6
 * Programming Neural Networks in Java
 * http://www.heatonresearch.com/articles/series/1/
 *
 * This software is copyrighted. You may use it in programs
 * of your own, without restriction, but you may not
 * publish the source code without the author's permission.
 * For more information on distributing this code, please
 * visit:
 *    http://www.heatonresearch.com/hr_legal.php
 *
 * @author Jeff Heaton
 * @version 1.1
 */

public class KohonenNetwork extends Network {

  /**
   * The weights of the output neurons based on the input from the
   * input neurons. Row i holds the weight vector of output neuron i;
   * the extra final column (index inputNeuronCount) holds the
   * synthetic weight used by the normalization scheme.
   */
  double outputWeights[][];

  /**
   * The learning method (0=additive, 1=subtractive).
   */
  protected int learnMethod = 1;

  /**
   * The learning rate.
   */
  protected double learnRate = 0.5;

  /**
   * Training stops once the error drops below this threshold.
   */
  protected double quitError = 0.1;

  /**
   * How many retries before quit.
   */
  protected int retries = 10000;

  /**
   * Reduction factor applied to the learning rate each iteration.
   */
  protected double reduction = .99;

  /**
   * The owner object, to report training progress to.
   */
  protected NeuralReportable owner;

  /**
   * Set to true to abort learning.
   */
  public boolean halt = false;

  /**
   * The training set.
   */
  protected TrainingSet train;

  /**
   * The constructor.
   *
   * @param inputCount Number of input neurons
   * @param outputCount Number of output neurons
   * @param owner The owner object, for updates.
   */
  public KohonenNetwork(int inputCount,int outputCount,NeuralReportable owner)
  {
    totalError = 1.0 ;

    this.inputNeuronCount = inputCount;
    this.outputNeuronCount = outputCount;
    // one extra weight slot per output neuron for the synthetic input
    this.outputWeights = new double[outputNeuronCount][inputNeuronCount+1];
    this.output = new double[outputNeuronCount];
    this.owner = owner;
  }

  /**
   * Set the training set to use.
   *
   * @param set The training set to use.
   */
  public void setTrainingSet(TrainingSet set)
  {
    train = set;
  }

  /**
   * Copy the weights from one network to another. Both networks must
   * have been constructed with the same input and output counts.
   *
   * @param dest The destination for the weights.
   * @param source The network to copy the weights from.
   */
  public static void copyWeights( KohonenNetwork dest , KohonenNetwork source )
  {
    for ( int i=0;i<source.outputWeights.length;i++ ) {
      System.arraycopy(source.outputWeights[i],
                       0,
                       dest.outputWeights[i],
                       0,
                       source.outputWeights[i].length);
    }
  }


  /**
   * Clear the weights and reset the total error.
   */
  public void clearWeights()
  {
    totalError = 1.0;
    for ( int y=0;y<outputWeights.length;y++ )
      for ( int x=0;x<outputWeights[0].length;x++ )
        outputWeights[y][x]=0;
  }

  /**
   * Normalize the input, producing a multiplicative normalization
   * factor and a synthetic last input.
   *
   * @param input input pattern
   * @param normfac one-element array that receives the normalization factor
   * @param synth one-element array that receives the synthetic last input
   */
  void normalizeInput(
                     final double input[] ,
                     double normfac[] ,
                     double synth[]
                     )
  {
    double length ;

    // NOTE(review): vectorLength is inherited from Network and appears to
    // return the squared magnitude, since 1/sqrt of it is taken below --
    // confirm against the Network superclass.
    length = vectorLength ( input ) ;
    // just in case it gets too small (avoid division by zero)
    if ( length < 1.E-30 )
      length = 1.E-30 ;


    normfac[0] = 1.0 / Math.sqrt ( length ) ;
    synth[0] = 0.0 ;


  }

  /**
   * Normalize a weight vector in place. The final (synthetic) weight
   * is always reset to zero.
   *
   * @param w Input weights; length must be inputNeuronCount+1.
   */
  void normalizeWeight( double w[] )
  {
    int i ;
    double len ;

    len = vectorLength ( w ) ;
    // just in case it gets too small (avoid division by zero)
    if ( len < 1.E-30 )
      len = 1.E-30 ;


    len = 1.0 / Math.sqrt ( len ) ;
    for ( i=0 ; i<inputNeuronCount ; i++ )
      w[i] *= len ;
    w[inputNeuronCount] = 0;


  }


  /**
   * Try an input pattern. This can be used to present an input pattern
   * to the network; it fills in the output array. Usually it is best to
   * call winner to get the winning neuron though.
   *
   * @param input Input pattern.
   */
  void trial ( double input[] )
  {
    int i ;
    double normfac[]=new double[1], synth[]=new double[1], optr[];

    normalizeInput(input,normfac,synth) ;

    for ( i=0 ; i<outputNeuronCount; i++ ) {
      optr = outputWeights[i];
      output[i] = dotProduct( input , optr ) * normfac[0]
                  + synth[0] * optr[inputNeuronCount] ;
      // Remap to bipolar (-1,1 to 0,1)
      output[i] = 0.5 * (output[i] + 1.0) ;
      // account for rounding
      if ( output[i] > 1.0 )
        output[i] = 1.0 ;
      if ( output[i] < 0.0 )
        output[i] = 0.0 ;
    }
  }


  /**
   * Present an input pattern and get the
   * winning neuron. Also fills in the output array.
   *
   * @param input input pattern
   * @param normfac one-element array that receives the normalization factor
   * @param synth one-element array that receives the synthetic last input
   * @return The winning neuron number.
   */
  public int winner(double input[] ,double normfac[] ,double synth[])
  {
    int i, win=0;
    double biggest, optr[];

    normalizeInput( input , normfac , synth ) ;  // Normalize input

    biggest = -1.E30;
    for ( i=0 ; i<outputNeuronCount; i++ ) {
      optr = outputWeights[i];
      output[i] = dotProduct (input , optr ) * normfac[0]
                  + synth[0] * optr[inputNeuronCount] ;
      // Remap to bipolar (-1,1 to 0,1)
      output[i] = 0.5 * (output[i] + 1.0) ;
      if ( output[i] > biggest ) {
        biggest = output[i] ;
        win = i ;
      }
      // account for rounding
      if ( output[i] > 1.0 )
        output[i] = 1.0 ;
      if ( output[i] < 0.0 )
        output[i] = 0.0 ;
    }

    return win ;
  }

  /**
   * This method does much of the work of the learning process.
   * This method evaluates the weights against the training
   * set.
   *
   * @param rate learning rate
   * @param learn_method method(0=additive, 1=subtractive)
   * @param won holds how many times a given neuron won
   * @param bigerr one-element array that returns the error
   * @param correc returns the correction, one row per output neuron
   * @param work a work area (only used by the additive method)
   * @exception java.lang.RuntimeException
   */
  void evaluateErrors (
                      double rate ,
                      int learn_method ,
                      int won[],
                      double bigerr[] ,
                      double correc[][] ,
                      double work[])
  throws RuntimeException
  {
    int best, tset ;
    double dptr[], normfac[] = new double[1];
    double synth[]=new double[1], cptr[], wptr[], length, diff ;


    // reset correction and winner counts

    for ( int y=0;y<correc.length;y++ ) {
      for ( int x=0;x<correc[0].length;x++ ) {
        correc[y][x]=0;
      }
    }

    for ( int i=0;i<won.length;i++ )
      won[i]=0;

    bigerr[0] = 0.0 ;
    // loop through all training sets to determine correction
    for ( tset=0 ; tset<train.getTrainingSetCount(); tset++ ) {
      dptr = train.getInputSet(tset);
      best = winner ( dptr , normfac , synth ) ;
      won[best]++;
      wptr = outputWeights[best];
      cptr = correc[best];
      length = 0.0 ;

      for ( int i=0 ; i<inputNeuronCount ; i++ ) {
        diff = dptr[i] * normfac[0] - wptr[i] ;
        length += diff * diff ;
        if ( learn_method!=0 )
          cptr[i] += diff ;
        else
          work[i] = rate * dptr[i] * normfac[0] + wptr[i] ;
      }
      // same treatment for the synthetic last input
      diff = synth[0] - wptr[inputNeuronCount] ;
      length += diff * diff ;
      if ( learn_method!=0 )
        cptr[inputNeuronCount] += diff ;
      else
        work[inputNeuronCount] = rate * synth[0] + wptr[inputNeuronCount] ;

      // track the worst (largest) squared distance over all cases
      if ( length > bigerr[0] )
        bigerr[0] = length ;

      if ( learn_method==0 ) {
        // additive method: normalize the moved weights, then
        // accumulate the delta as the correction
        normalizeWeight( work ) ;
        for ( int i=0 ; i<=inputNeuronCount ; i++ )
          cptr[i] += work[i] - wptr[i] ;
      }

    }

    bigerr[0] = Math.sqrt ( bigerr[0] ) ;
  }

  /**
   * This method is called at the end of a training iteration.
   * This method adjusts the weights based on the previous trial.
   *
   * @param rate learning rate
   * @param learn_method method(0=additive, 1=subtractive)
   * @param won holds number of times each neuron won
   * @param bigcorr one-element array that returns the largest correction
   * @param correc holds the correction, one row per output neuron
   */
  void adjustWeights (
                     double rate ,
                     int learn_method ,
                     int won[] ,
                     double bigcorr[],
                     double correc[][]
                     )

  {
    double corr, cptr[], wptr[], length, f ;

    bigcorr[0] = 0.0 ;

    for ( int i=0 ; i<outputNeuronCount ; i++ ) {

      // neurons that never won are not adjusted
      if ( won[i]==0 )
        continue ;

      wptr = outputWeights[i];
      cptr = correc[i];

      // average the accumulated correction over this neuron's wins
      f = 1.0 / (double) won[i] ;
      if ( learn_method!=0 )
        f *= rate ;

      length = 0.0 ;

      for ( int j=0 ; j<=inputNeuronCount ; j++ ) {
        corr = f * cptr[j] ;
        wptr[j] += corr ;
        length += corr * corr ;
      }

      if ( length > bigcorr[0] )
        bigcorr[0] = length ;
    }
    // scale the correction
    bigcorr[0] = Math.sqrt ( bigcorr[0] ) / rate ;
  }




  /**
   * If no neuron wins, then force a winner: the never-winning neuron
   * with the strongest response is moved onto the training case that
   * is currently represented worst.
   *
   * @param won how many times each neuron won
   * @exception java.lang.RuntimeException
   */
  void forceWin(
               int won[]
               )
  throws RuntimeException
  {
    int i, tset, best, which=0;
    double dptr[], normfac[]=new double[1];
    double synth[] = new double[1], dist, optr[];

    // find the training case whose best response is weakest
    dist = 1.E30 ;
    for ( tset=0 ; tset<train.getTrainingSetCount() ; tset++ ) {
      dptr = train.getInputSet(tset);
      best = winner ( dptr , normfac , synth ) ;
      if ( output[best] < dist ) {
        dist = output[best] ;
        which = tset ;
      }
    }

    dptr = train.getInputSet(which);
    // called for its side effects: refills output, normfac and synth
    // for the chosen case (the winning index itself is not needed)
    winner ( dptr , normfac , synth ) ;

    // among neurons that never won, find the one responding strongest
    dist = -1.e30 ;
    i = outputNeuronCount;
    while ( (i--)>0 ) {
      if ( won[i]!=0 )
        continue ;
      if ( output[i] > dist ) {
        dist = output[i] ;
        which = i ;
      }
    }

    optr = outputWeights[which];

    // move that neuron directly onto the poorly-represented case
    System.arraycopy(dptr,
                     0,
                     optr,
                     0,
                     dptr.length);

    optr[inputNeuronCount] = synth[0] / normfac[0] ;
    normalizeWeight ( optr ) ;
  }



  /**
   * This method is called to train the network. It can run
   * for a very long time and will report progress back to the
   * owner object.
   *
   * @exception java.lang.RuntimeException
   */
  public void learn ()
  throws RuntimeException
  {
    int i, tset,iter,n_retry;
    int won[],winners ;
    double work[],correc[][],rate,best_err,dptr[];
    double bigerr[] = new double[1] ;
    double bigcorr[] = new double[1];
    KohonenNetwork bestnet;  // Preserve best here

    totalError = 1.0 ;

    // a null training case cannot be normalized multiplicatively
    for ( tset=0 ; tset<train.getTrainingSetCount(); tset++ ) {
      dptr = train.getInputSet(tset);
      if ( vectorLength( dptr ) < 1.E-30 ) {
        throw(new RuntimeException("Multiplicative normalization has null training case")) ;
      }

    }


    bestnet = new KohonenNetwork(inputNeuronCount,outputNeuronCount,owner) ;

    won = new int[outputNeuronCount];
    correc = new double[outputNeuronCount][inputNeuronCount+1];
    if ( learnMethod==0 )
      work = new double[inputNeuronCount+1];
    else
      work = null ;

    rate = learnRate;

    initialize () ;
    best_err = 1.e30 ;

// main loop:

    n_retry = 0 ;
    for ( iter=0 ; ; iter++ ) {

      evaluateErrors ( rate , learnMethod , won ,
                       bigerr , correc , work ) ;

      totalError = bigerr[0] ;

      // remember the best weights seen so far
      if ( totalError < best_err ) {
        best_err = totalError ;
        copyWeights ( bestnet , this ) ;
      }

      winners = 0 ;
      for ( i=0;i<won.length;i++ )
        if ( won[i]!=0 )
          winners++;


      if ( bigerr[0] < quitError )
        break ;


      // if some neurons never won, force a winner and re-evaluate
      if ( (winners < outputNeuronCount)  &&
           (winners < train.getTrainingSetCount()) ) {
        forceWin ( won ) ;
        continue ;
      }

      adjustWeights ( rate , learnMethod , won , bigcorr, correc ) ;

      owner.update(n_retry,totalError,best_err);
      if ( halt ) {
        owner.update(n_retry,totalError,best_err);
        break;
      }
      Thread.yield();

      // converged (tiny correction): restart with fresh random weights
      if ( bigcorr[0] < 1E-5 ) {
        if ( ++n_retry > retries )
          break ;
        initialize () ;
        iter = -1 ;
        rate = learnRate ;
        continue ;
      }

      if ( rate > 0.01 )
        rate *= reduction ;

    }

// done: restore the best weights seen and normalize them

    copyWeights( this , bestnet ) ;

    for ( i=0 ; i<outputNeuronCount ; i++ )
      normalizeWeight ( outputWeights[i] ) ;

    halt = true;
    n_retry++;
    owner.update(n_retry,totalError,best_err);
  }

  /**
   * Called to initialize the Kohonen network: clear, randomize, and
   * normalize every output neuron's weight vector.
   */
  public void initialize()
  {
    int i ;
    double optr[];

    clearWeights() ;
    randomizeWeights( outputWeights ) ;
    for ( i=0 ; i<outputNeuronCount ; i++ ) {
      optr = outputWeights[i];
      normalizeWeight( optr );
    }
  }
}

?? 快捷鍵說明

復制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
亚洲线精品一区二区三区八戒| 男女激情视频一区| 国产精品国产精品国产专区不片| 久久婷婷国产综合精品青草| 欧美电视剧免费观看| 日韩一区国产二区欧美三区| 欧美日韩精品久久久| 精品视频123区在线观看| 欧洲一区在线观看| 在线免费不卡视频| 欧美天天综合网| 717成人午夜免费福利电影| 91麻豆精品国产| 日韩一级免费一区| 2023国产精华国产精品| 久久精品免视看| 国产精品久久毛片a| 亚洲欧美日韩系列| 亚洲一二三四在线观看| 爽好久久久欧美精品| 老鸭窝一区二区久久精品| 国产在线看一区| 国产成人亚洲精品青草天美| 成人免费电影视频| 91丨porny丨在线| 欧美日韩一二区| 日韩视频在线永久播放| 26uuu欧美| 中文字幕视频一区二区三区久| 亚洲欧美日韩在线| 午夜久久久影院| 激情另类小说区图片区视频区| 国产成人午夜电影网| 一本色道a无线码一区v| 91麻豆精品国产91久久久久久久久| 日韩一区二区影院| 精品国产乱码久久久久久蜜臀| 国产婷婷一区二区| 自拍偷自拍亚洲精品播放| 一区二区三区四区中文字幕| 亚洲一区在线视频| 亚洲色图清纯唯美| 日韩精品亚洲一区二区三区免费| 热久久国产精品| 国产九色精品成人porny| 成人精品国产一区二区4080| 91在线视频在线| 欧美日韩一区二区三区高清| 日韩一区二区精品在线观看| 久久精品夜色噜噜亚洲a∨| 国产人久久人人人人爽| 亚洲精品乱码久久久久久日本蜜臀| 亚洲男人的天堂在线观看| 亚洲sss视频在线视频| 麻豆精品视频在线观看视频| 粉嫩蜜臀av国产精品网站| 久久久久久久综合| 日韩精品一区二区三区视频 | 欧美在线999| 91精品福利在线一区二区三区 | 久久精品一区二区三区av| 国产精品久久久久国产精品日日| 亚洲综合激情另类小说区| 免费在线成人网| 国产精品一品二品| 欧美日韩一区成人| 久久精品人人做| 国产精品乱人伦| 亚洲一二三区不卡| 国产一区二区电影| 欧美色爱综合网| 久久久精品蜜桃| 亚洲一级二级在线| 国产精品一色哟哟哟| 色婷婷狠狠综合| 久久久久久久久久久久电影 | 国产麻豆精品在线| 欧美吻胸吃奶大尺度电影| 精品国产伦一区二区三区观看方式| 亚洲男人的天堂在线观看| 国产在线视频不卡二| 欧美中文字幕久久| 国产精品毛片大码女人| 久久er精品视频| 欧美三级电影网| 亚洲欧洲色图综合| 国产精品一线二线三线| 欧美美女喷水视频| 亚洲欧美日韩国产手机在线 | 精品美女一区二区| 亚洲国产精品久久人人爱| 成人黄色777网| 精品99一区二区三区| 亚洲国产精品久久久久秋霞影院| 成人性生交大片| 精品久久久久久最新网址| 亚洲a一区二区| 91视频91自| 国产亚洲成年网址在线观看| 欧美日韩高清影院| 自拍偷拍国产亚洲| 国产精品99久久久久久似苏梦涵| 在线播放91灌醉迷j高跟美女| 国产精品久久久久久久久动漫| 日韩电影一二三区| 欧美日韩精品一区二区三区蜜桃| 亚洲精品一二三四区| 国产91精品入口| 久久亚洲综合色一区二区三区 | 麻豆精品国产91久久久久久| 94-欧美-setu| 亚洲猫色日本管| www.久久精品| 国产精品区一区二区三区| 精品夜夜嗨av一区二区三区| 91精品国产品国语在线不卡| 污片在线观看一区二区| 欧洲视频一区二区| 伊人色综合久久天天人手人婷| 972aa.com艺术欧美| 最新不卡av在线| 97超碰欧美中文字幕| 国产精品久久看| 99精品国产一区二区三区不卡| 亚洲国产精品传媒在线观看| 另类的小说在线视频另类成人小视频在线| 日韩欧美自拍偷拍| 国内精品国产三级国产a久久| 日韩色在线观看| 久久国产尿小便嘘嘘尿| 精品国产人成亚洲区| 国内精品在线播放| 久久人人爽爽爽人久久久| 国产成人小视频| 亚洲欧洲精品一区二区三区| 色av成人天堂桃色av| 一个色妞综合视频在线观看| 欧美性一区二区| 三级欧美韩日大片在线看| 欧美日韩国产综合一区二区 | 亚洲综合一区二区| 欧美三级在线视频| 日本成人中文字幕| 精品国产亚洲一区二区三区在线观看| 国产一区二区毛片| 国产精品国产三级国产aⅴ原创| 91黄色激情网站| 日韩电影在线一区二区三区| 欧美成人aa大片| 大白屁股一区二区视频| 国产精品视频在线看| 欧美日韩日本视频| 麻豆视频一区二区| 国产日韩精品一区二区浪潮av| 99综合影院在线| 亚洲国产精品一区二区久久恐怖片| 91精品国产品国语在线不卡| 国产一级精品在线| 一区二区三区四区视频精品免费| 欧美久久久久久久久中文字幕| 精品午夜久久福利影院| 中文字幕视频一区| 4438成人网| 丁香婷婷综合网| 
亚洲成人av在线电影| 欧美成人在线直播| 成人综合日日夜夜| 亚洲免费在线观看视频| 日韩一区国产二区欧美三区| 成人a免费在线看| 日韩精品一级二级 | 狠狠色2019综合网| 国产亚洲美州欧州综合国| 成年人国产精品| 首页国产欧美久久| 国产精品女人毛片| 欧美一区二区美女| av在线不卡网| 婷婷激情综合网| 国产丝袜美腿一区二区三区| 欧美日韩午夜精品| 国产v日产∨综合v精品视频| 亚洲不卡在线观看| 亚洲欧美一区二区三区国产精品| 久久久久99精品一区| 欧美成人福利视频| 欧美一级二级三级蜜桃| 欧美视频一区二| 国产麻豆91精品| 精品夜夜嗨av一区二区三区| 亚洲福利一区二区三区| 日本一区二区不卡视频| 日韩久久免费av| 欧美日韩午夜精品| 91视频免费观看| 国产精品一区二区果冻传媒| 黄色日韩网站视频| 亚洲成av人在线观看| 中文字幕五月欧美| 国产精品亲子乱子伦xxxx裸|