亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關于我們
? 蟲蟲下載站

?? algorithmrvm.java

?? 完整的模式識別庫
?? JAVA
?? 第 1 頁 / 共 4 頁
字號:
/*
 * @(#) AlgorithmRVM.java  v6.0 03/15/2005
 * created 02/09/03
 * Last edited: Ryan Irwin
 */

// import java packages
//
//import java.awt.*;
import java.util.*;

/**
 * Algorithm Relevance Vector Machines.
 *
 * GUI-driven demo of a Relevance Vector Machine (RVM) binary classifier.
 * Extends the (unseen here) {@code Algorithm} base class, which supplies the
 * shared members used below: {@code output_panel_d}, {@code pro_box_d},
 * {@code description_d}, {@code data_points_d}, {@code set1_d..set4_d},
 * {@code step_index_d}, {@code step_count}, {@code disableControl()},
 * {@code enableControl()}, {@code scaleToFitData()},
 * {@code computeDecisionRegions()} and {@code computeErrors()}.
 *
 * The numeric citations [1], [2], [3] in the comments refer to external
 * papers not reproduced in this file (presumably Tipping's sparse Bayesian
 * learning / RVM publications — TODO confirm against the full source).
 *
 * NOTE(review): this is page 1 of 4 of the original listing; the class body
 * (and the {@code trainFull()} method) is truncated at the end of this chunk.
 */
public class AlgorithmRVM extends Algorithm
{
    //-----------------------------------------------------------------
    //
    // static data members
    //
    //-----------------------------------------------------------------

    // default tuning parameters (see the matching instance fields below
    // for what each one controls)
    //
    // NOTE(review): initialize() hard-codes 1e4 and 1e-8 instead of using
    // DEF_ALPHA_THRESH / DEF_MIN_ALLOWED_WEIGHT — likely leftover debug
    // overrides; confirm which values are intended.
    //
    static final double DEF_ALPHA_THRESH = 1e12;
    static final double DEF_MIN_ALLOWED_WEIGHT = 1e-12;
    static final long DEF_MAX_RVM_ITS = (long) (1 << 30);
    static final long DEF_MAX_UPDATE_ITS = (long) (1 << 30);
    static final double DEF_MIN_THETA = 1e-8;
    static final double DEF_MOMENTUM = 0.85;
    static final long DEF_MAX_ADDITIONS = 1;
    static final boolean DEF_SAVE_RESTART = false;
    static final boolean DEF_LOAD_RESTART = false;

    // kernel selection constants
    //
    static final int KERNEL_TYPE_LINEAR = 0;
    static final int KERNEL_TYPE_RBF = 1;
    static final int KERNEL_TYPE_POLYNOMIAL = 2;
    static final int KERNEL_TYPE_DEFAULT = KERNEL_TYPE_LINEAR;

    // target labels for the two classes (0.0 = out-of-class, 1.0 = in-class)
    //
    // NOTE(review): new Double(...) is deprecated in modern Java; Double.valueOf
    // would be preferred if this code base were modernized.
    //
    static final Double CLASS_LABEL_1 = new Double(0.0);
    static final Double CLASS_LABEL_2 = new Double(1.0);

    //-----------------------------------------------------------------
    //
    // instance data members
    //
    //-----------------------------------------------------------------

    // when true, the (mostly commented-out) diagnostic prints are active
    //
    boolean debug_level_d = false;

    // RVM data member
    //
    // relevance vector weights, vectors, and labels
    //
    Vector<Vector<Double>> x_d = new Vector<Vector<Double>>();     // vectors_d
    Vector<Double> y_d = new Vector<Double>();                     // targets_d
    Vector<Vector<Double>> evalx_d = new Vector<Vector<Double>>(); // vectors_d
    Vector<Double> evaly_d = new Vector<Double>();                 // targets_d
    Matrix inv_hessian_d = new Matrix();   // A in [2]
    Vector<Double> weights_d = new Vector<Double>();      // w in [1]
    double bias_d = 0.0;

    // RVM parameters
    //
    //int kernel_type_d = KERNEL_TYPE_LINEAR;
    int kernel_type_d = KERNEL_TYPE_RBF;

    // vector of support region points
    //
    Vector<MyPoint> support_vectors_d = new Vector<MyPoint>();
    Vector<MyPoint> decision_regions_d = new Vector<MyPoint>();
    int output_canvas_d[][];

    /** tuning parameters: These are the only parameters that a user need
     * worry about prior to training. The default quantities are usually
     * sufficient. However, run-time performance and accuracy can be
     * influenced by appropriately tuning these parameters.
     *
     * maximum hyperparameter value allowed before pruning. decreasing this
     * value can speed up convergence of the model but may yield overpruning
     * and poor generalization. the value should always be greater than zero
     */
    double alpha_thresh_d;

    /**
     * minimum value of a weight allowed in the model. typically as the weight
     * decreases toward zero, it should be pruned.
     */
    double min_allowed_weight_d;

    /**
     * maximum number of training iterations to carry out before stopping.
     * adjusting this parameter can result in sub-optimal results
     */
    long max_rvm_its_d;

    /**
     * maximum number of iterations that are allowed to pass between
     * model updates (adding or pruning of a hyperparameter) before training
     * is terminated (for the full mode of training) or a vector is manually
     * added (for the incremental mode of training)
     */
    long max_update_its_d;

    /**
     * minimum value of the theta calculation (the divisor of equation
     * 17 in [3]) that will trigger a model addition (in the
     * incremental training mode).
     */
    double min_theta_d;

    /**
     * hyperparameter update momentum term. a larger value for this term
     * can lead to faster convergence, while too large a value can cause
     * oscillation. the value is typically on the range [0,1]
     */
    double momentum_d;

    /**
     * number of hyperparameters to add at a time. adding a small number of
     * hyperparameters at a time will yield a smoother movement through the
     * model space, but may increase the total convergence time.
     */
    long max_additions_d;

    /**
     * whether or not to create backup copies of training data. if true then
     * data will be occasionally saved to disk in the file provided. that
     * file can later be used to restart training in the middle of the
     * convergence process. *** the restart facility currently is available
     * only for incremental training ***
     */
    boolean save_restart_d;
    //Filename restart_save_file_d;

    /**
     * whether or not to bootstrap training from a restart file. if true then
     * the given restart file is read and training is continued from that point
     * forward. *** the restart facility currently is available
     * only for incremental training ***
     */
    boolean load_restart_d;
    // Filename restart_load_file_d;

    // model data
    //
    int num_samples_d;                        // number of remaining RVs
    Matrix A_d = new Matrix();                // hyperparameter matrix
    int dimA_d;                               // number of non-pruned params
    Matrix phi_d = new Matrix();              // working design matrix
    Vector<Double> curr_weights_d = new Vector<Double>(); // updated weights
    Vector<Double> last_rvm_weights_d = new Vector<Double>(); // stored weights for rvm pass

    // IRLS training quantities
    //
    Vector<Double> sigma_d = new Vector<Double>();           // error vector
    Matrix B_d = new Matrix();               // data-dependent "noise"
    Vector<Double> gradient_d = new Vector<Double>();// gradient w.r.t. weights
    Matrix hessian_d = new Matrix();         // hessian w.r.t. weights
    Matrix covar_cholesky_d = new Matrix();  // cholesky decomposition of covar
    Vector<Double> old_irls_weights_d = new Vector<Double>(); // stored weights for irls pass
    long last_changed_d;                 // counter for last time model changed

    // incremental training quantities
    //
    // NOTE(review): S_d is a raw Vector (untyped), unlike every other
    // collection in this class — its element type cannot be determined
    // from this chunk.
    //
    Vector S_d = new Vector();               // updates for incremental train
    // Vector hyperparams_d = new Vector();  // current hyperparameters
    // Vector weights_d;                     // current hyperparameters
    Vector<Double> last_hyperparams_d = new Vector<Double>();     // previous iterations hyperparams
    Vector<Double> twoback_hyperparams_d = new Vector<Double>(); // hyperparameters from two

    //-------------------------------------------------------------------
    //
    // classification functions
    //
    //--------------------------------------------------------------------

    /**
     * Overrides the initialize() method in the base class.  Initializes
     * member data and prepares for execution of first step.  This method
     * "resets" the algorithm.
     *
     * Sets the tuning parameters, registers the four step descriptions
     * (only once — guarded by description_d.size() == 0), binds the two
     * input data sets, clears any previous support vectors / decision
     * regions, and resets the step index to 0.
     *
     * @return  true on success; false when no output panel is attached
     */
    public boolean initialize()
    {
	// Debug
	//
	//  System.out.println("AlgorithmRVM : initialize()");

	// check the data points
	//
	if (output_panel_d == null)
	{
	    return false;
	}

	// tuning parameters
	//
	// NOTE(review): 1e4 / 1e-8 override the declared defaults
	// DEF_ALPHA_THRESH (1e12) / DEF_MIN_ALLOWED_WEIGHT (1e-12); and
	// max_update_its_d is assigned twice — the first assignment
	// (DEF_MAX_UPDATE_ITS) is a dead store immediately replaced by 100.
	// Both look like leftover experiment values; confirm intent.
	//
	alpha_thresh_d = 1e4;
	min_allowed_weight_d = 1e-8;
	max_rvm_its_d = DEF_MAX_RVM_ITS;
	max_update_its_d = DEF_MAX_UPDATE_ITS;
	max_update_its_d = 100;
	min_theta_d = DEF_MIN_THETA;
	momentum_d = DEF_MOMENTUM;
	max_additions_d = DEF_MAX_ADDITIONS;
	save_restart_d = DEF_SAVE_RESTART;
	load_restart_d = DEF_LOAD_RESTART;

	// add the process description for the RVM algorithm
	//
	if (description_d.size() == 0)
	{
	    String str = new String("   0. Initialize the original data.");
	    description_d.addElement(str);
	    str = new String("   1. Displaying the original data.");
	    description_d.addElement(str);
	    str = new String("   2. Computing the Relevance Vectors.");
	    description_d.addElement(str);
	    str = new String("   3. Computing the decision regions.");
	    description_d.addElement(str);
	}

	// append message to process box
	//
	pro_box_d.appendMessage("Relevance Vector Machine :" + "\n");

	// set the data points for this algorithm
	//
	//	set1_d = (Vector)data_points_d.dset1.clone();
	//	set2_d = (Vector)data_points_d.dset2.clone();
	//
	set1_d = data_points_d.dset1;
	set2_d = data_points_d.dset2;

	// reset values
	//
	support_vectors_d = new Vector<MyPoint>();
	decision_regions_d = new Vector<MyPoint>();
	step_count = 3;
	x_d = new Vector<Vector<Double>>();
	y_d = new Vector<Double>();

	// set the step index
	//
	step_index_d = 0;

	// append message to process box
	//
	pro_box_d.appendMessage((String)description_d.get(step_index_d));

	// exit gracefully
	//
	return true;
    }

    /**
     * Implementation of the run function from the Runnable interface.
     * Determines what the current step is and calls the appropriate method.
     * Controls are disabled during each step and re-enabled afterwards.
     * Note that step_index_d == 0 (and any value > 3) is a no-op here.
     */
    public void run()
    {
	// Debug
	//
	// System.out.println(algo_id + ": run()");

	if (step_index_d == 1)
        {
	    disableControl();
	    step1();
	    enableControl();
	}
	else if (step_index_d == 2)
	{
	    disableControl();
	    step2();
	    enableControl();
	}
        else if (step_index_d == 3)
	{
	    disableControl();
	    step3();
	    pro_box_d.appendMessage("   Algorithm Complete");
	    enableControl();
	}

	// exit gracefully
	//
	return;
    }

    /**
     * step one of the algorithm. Scales the display to fit the plot and
     * renders all four input data sets on the output panel.
     *
     * @return true
     */
    boolean step1()
    {
	// debug
	//
	// System.out.println(algo_id + ": step1()");

	pro_box_d.setProgressMin(0);
	pro_box_d.setProgressMax(1);
	pro_box_d.setProgressCurr(0);
	scaleToFitData();

	// Display original data
	//
	output_panel_d.addOutput(set1_d, Classify.PTYPE_INPUT, 
				 data_points_d.color_dset1);
	output_panel_d.addOutput(set2_d, Classify.PTYPE_INPUT,
				 data_points_d.color_dset2);
	output_panel_d.addOutput(set3_d, Classify.PTYPE_INPUT,
				 data_points_d.color_dset3);
	output_panel_d.addOutput(set4_d, Classify.PTYPE_INPUT, 
				 data_points_d.color_dset4);

	// step 1 completed
	//
	pro_box_d.setProgressCurr(1);
	output_panel_d.repaint();

	// exit gracefully
	//
	return true;
    }

    /**
     * step two of the algorithm. Trains the RVM on the current data
     * (via trainFull()) and displays the resulting relevance vectors.
     *
     * NOTE(review): the original comment said "Finds the PCA for the given
     * data", which contradicts the body — this step runs RVM training,
     * not PCA; comment likely copied from another algorithm class.
     *
     * @return true
     */
    boolean step2()
    {
	// Debug
	//
	// System.out.println("AlgorithmRVM : step2()");

	pro_box_d.setProgressMin(0);
	pro_box_d.setProgressMax(20);
	pro_box_d.setProgressCurr(0);

	trainFull();

	output_panel_d.addOutput(support_vectors_d, 
				 Classify.PTYPE_SUPPORT_VECTOR, Color.black);
	pro_box_d.setProgressCurr(20);
	output_panel_d.repaint();

	// exit gracefully
	//
	return true;
    }

    /**
     * step three of the algorithm. Computes and displays the decision
     * regions (drawn in orange), then computes the classification errors.
     *
     * @return true
     */
    boolean step3()
    {
	// Debug
	//
	// System.out.println("AlgorithmRVM : step3()");

	computeDecisionRegions();

	// display support vectors
	//
	output_panel_d.addOutput(decision_regions_d, Classify.PTYPE_INPUT, 
				 new Color(255, 200, 0));
	output_panel_d.repaint();

	computeErrors();

	// exit gracefully
	//
	return true;
    }

    /**
     * this method trains an RVM probabilistic classifier on the input data and
     * targets provided. the inputs and targets should have a one-to-one
     * correspondence and all targets should be either 0 (out-of-class) or
     * 1 (in-class). The training scheme follows that of [1] section 3.
     * It is assumed that the class data and targets are already set when
     * this method is called.
     *
     * the training algorithm in [1] for RVMs proceeds in three iterative steps
     *
     *   1. prune away any weights whose hyperparameters have gone to infinity
     *
     *   2. estimate most probable weights: in this step we find those
     *   weights that maximize equation (24) of [1].  The iteratively
     *   reweighted least squares algorithm is used to find w_mp
     *
     *   3. estimate the covariance of a Gaussian approximation to the
     *   posterior distribution (the posterior is what we want to
     *   model in the end) over the weights centered at the weights,
     *   w_mp.
     *
     *   4. estimate the hyperparameters that govern the weights. this
     *   is done by evaluating the derivative over the hyperparameters
     *   and finding the maximizing hyperparameters.
     *
     * 1, 2, 3, and 4 are carried out iteratively until a suitable convergence
     * criteria is satisfied.
     *
     * NOTE(review): this listing is page 1 of 4 — the body below is
     * truncated mid-loop; the remainder (hyperparameter update, convergence
     * test, loop close) is on the following pages.
     *
     * @return boolean value indicating status
     */
    public boolean trainFull()
    {
	// Debug
	//
	// System.out.println("AlgorithmRVM : trainFull()");

	// debugging information
	//
	//debug_level_d = true;

	// 0. initialize data structures for training
	//
	// NOTE(review): a failed initFullTrain() is only logged (and the log
	// line is commented out) — training proceeds regardless; confirm
	// whether an early return was intended.
	//
	if (!initFullTrain())
        {
	    // System.out.println("Error at initFullTrain ");
	}

	if (debug_level_d)
        {
	    // System.out.println("RVM training");
	}

	// NOTE(review): this print runs whenever debug_level_d is set —
	// it is the only live statement among these debug blocks.
	//
	if (debug_level_d)
	{
	    // Matrix.printDoubleVector(x_d);
	    Matrix.printDoubleVector(y_d);
	}

	// debugging information
	//
	if (debug_level_d)
	{
	    // System.out.println("RVM convergence loop");
	}

	// loop until convergence or until a maximum number of iterations has
	// been reached
	//
	long num_rvm_its = 0;
	boolean rvm_converged = false;
	while (!rvm_converged)
        {
	    // store the weights achieved on the last iteration so we
	    // can test for convergence later
	    //
	    //last_rvm_weights_d = (Vector)curr_weights_d.clone();
	    //
	    // NOTE(review): this stores a reference, not a copy (the clone
	    // is commented out), so last_rvm_weights_d aliases
	    // curr_weights_d — verify the later convergence test accounts
	    // for that.
	    //
	    last_rvm_weights_d = curr_weights_d;

	    // debugging information
	    //
	    if (debug_level_d)
	    {
		// System.out.println("RVM Iteration: " + num_rvm_its);
	    }

	    if (debug_level_d)
	    {
		// System.out.println("phi_d: ");
		// phi_d.printMatrix();
		// System.out.println("A_d: ");
		// A_d.printMatrix();
		// System.out.println("weights_d: ");
		// Matrix.printDoubleVector(weights_d);
		// System.out.println("curr_weights_d: ");
		// Matrix.printDoubleVector(curr_weights_d);
	    }

	    // 1. prune only after the first iteration (we use a
	    // weight of exactly 0.0 to trigger pruning so pruning on
	    // the first iteration would prune all weights
	    //
	    if (num_rvm_its > 0)
	    {
		pruneAndUpdate();
	    }

	    if (debug_level_d)
	    {
		// System.out.println("After pruning: ");
		// System.out.println("phi_d: ");
		// phi_d.printMatrix();
		// System.out.println("A_d: ");
		// A_d.printMatrix();
		// System.out.println("weights_d: ");
		// Matrix.printDoubleVector(weights_d);
		// System.out.println("curr_weights_d: ");
		// Matrix.printDoubleVector(curr_weights_d);
	        // System.out.println("curr_weights_d: ");
		// Matrix.printMatrix(curr);
	    }

	    // if all weights have been pruned, then there is nothing
	    // left to do and the process has converged (albeit to a
	    // pretty useless solution)
	    //
	    if (dimA_d == 0)
	    {
		rvm_converged = true;

		// debugging information
		//
		if (debug_level_d)
	     	{
		    // System.out.println("rvm convergence achieved");
		}

	       // conclude training
	       //
		break;
	    }

	    // 2. run a pass of IRLS training to estimate w_MP
	    //
	    irlsTrain();

	    // 3. estimate the covariance of the Gaussian
	    // approximation compute the variance vector. The
	    // covariance is the inverse of the Hessian matrix. Only
	    // the diagonal elements are needed to update the
	    // hyperparameters. From the Cholesky decomposition, we
	    // can efficiently find these values. After this function
	    // call, the diagonal elements of covar_cholesky will
	    // contain the negation of the diagonal elements of the
	    // estimated covariance.  The other elements of
	    // covar_cholesky are not meaningful.
	    //
	    // [listing truncated here — continued on page 2 of the original source]

?? 快捷鍵說明

復制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
日韩视频永久免费| 丰满亚洲少妇av| 午夜精品福利一区二区三区av | 成人美女视频在线看| 91国产视频在线观看| 欧美不卡在线视频| 亚洲综合成人在线视频| 国产一区二区在线影院| 欧美日韩在线电影| 亚洲欧洲国产日韩| 国产精品自在在线| 5月丁香婷婷综合| 一区二区三区成人| 国产成a人亚洲精| 日韩区在线观看| 午夜视频在线观看一区| 99视频精品在线| 国产欧美中文在线| 国产一区二区三区四区在线观看| 欧美在线不卡视频| 最新国产成人在线观看| 国产精品一区二区不卡| 欧美一级久久久| 视频在线在亚洲| 欧美日韩中字一区| 一区二区三区欧美在线观看| 成人午夜视频网站| 国产精品素人视频| 成人午夜精品在线| 国产精品乱码人人做人人爱| 国产精品综合在线视频| 国产夜色精品一区二区av| 国产一区二区女| 久久婷婷综合激情| 国产成人啪免费观看软件| 久久久99精品久久| 东方aⅴ免费观看久久av| 精品对白一区国产伦| 色婷婷综合视频在线观看| 欧美国产日韩在线观看| 粗大黑人巨茎大战欧美成人| 国产精品视频免费| 99久久综合色| 一区二区三区精品在线| 91天堂素人约啪| 亚洲一区二区三区四区中文字幕| 欧美视频你懂的| 人人狠狠综合久久亚洲| 日韩美女在线视频| 国产一区二区三区四区在线观看| 国产蜜臀av在线一区二区三区| 成人一区二区三区视频在线观看 | 2020国产精品自拍| 国产福利一区二区三区| 国产精品久久久久久久裸模| 99精品视频一区二区三区| 一区二区三区日韩在线观看| 91精品国产综合久久福利软件 | k8久久久一区二区三区| 亚洲美女精品一区| 欧美人与性动xxxx| 国产专区综合网| 中文字幕中文字幕在线一区| 欧美主播一区二区三区| 日本中文字幕一区| 亚洲国产精品传媒在线观看| 一本久道中文字幕精品亚洲嫩| 五月天欧美精品| 国产欧美精品区一区二区三区 | 亚洲成人av一区二区三区| 67194成人在线观看| 国产aⅴ综合色| 亚洲国产一区在线观看| 国产无一区二区| 欧美日韩综合在线| 久久精品国内一区二区三区| 国产精品对白交换视频| 欧美一级在线免费| 不卡一区中文字幕| 美女视频黄免费的久久 | 99r国产精品| 麻豆精品视频在线| 亚洲激情av在线| 久久精品男人天堂av| 91麻豆精品国产综合久久久久久 | 欧美性猛片xxxx免费看久爱| 国产在线观看一区二区| 亚洲18色成人| 国产精品免费久久| 精品国产电影一区二区| 欧美日韩一区二区三区高清| 成人手机在线视频| 极品尤物av久久免费看| 亚洲国产婷婷综合在线精品| 国产精品三级久久久久三级| 精品国产乱码久久久久久影片| 欧美性大战久久久久久久蜜臀| 成熟亚洲日本毛茸茸凸凹| 久久精品国产99久久6| 午夜视频一区二区| 亚洲一区二区三区四区五区黄 | 在线视频国内一区二区| 成人午夜激情在线| 国产成人免费视频一区| 久久精品国产久精国产爱| 亚洲国产婷婷综合在线精品| 亚洲乱码国产乱码精品精98午夜| 久久久不卡网国产精品一区| 精品久久久久久最新网址| 欧美精品一二三区| 欧美影院一区二区三区| 色噜噜夜夜夜综合网| aaa欧美日韩| 不卡在线视频中文字幕| 成人aaaa免费全部观看| 成人小视频在线| 成人国产精品免费网站| 不卡一区在线观看| 91婷婷韩国欧美一区二区| 色婷婷狠狠综合| 欧美亚洲动漫精品| 欧美精品日韩一本| 欧美一区二区三区视频在线| 欧美一区二区三区免费大片| 91麻豆精品国产无毒不卡在线观看| 欧美三级乱人伦电影| 欧美精品亚洲一区二区在线播放| 欧美丰满美乳xxx高潮www| 日韩一区二区三| 日韩欧美一级精品久久| 亚洲精品一区二区三区蜜桃下载| 精品久久久久久亚洲综合网| 欧美国产成人在线| 亚洲乱码国产乱码精品精的特点| 亚洲精品乱码久久久久久| 午夜视频在线观看一区二区三区| 日本美女一区二区| 国产成人免费视频网站| 色噜噜狠狠成人网p站| 91精品国产综合久久婷婷香蕉| 欧美刺激脚交jootjob| 日本一区二区三区在线观看| 亚洲欧美在线观看| 天天综合天天做天天综合| 久久99国产精品免费| 成人小视频在线观看| 欧美探花视频资源| 26uuu亚洲综合色| 中文字幕永久在线不卡| 亚洲va韩国va欧美va精品| 狠狠色综合播放一区二区| 成年人国产精品| 欧美一区二区大片| 国产精品久久久爽爽爽麻豆色哟哟| 一区二区三区成人| 国产白丝精品91爽爽久久| 欧美影院一区二区三区| 国产无遮挡一区二区三区毛片日本| 一区二区三区四区不卡视频| 六月丁香婷婷色狠狠久久| 99在线热播精品免费| 91精品福利在线一区二区三区| 久久中文字幕电影| 
亚洲午夜激情av| 国产69精品一区二区亚洲孕妇| 91在线视频网址| 欧美一区中文字幕| 亚洲精品欧美综合四区| 精品一区二区在线视频| 色视频成人在线观看免| 久久综合狠狠综合| 日韩电影在线观看网站| 91蜜桃免费观看视频| 久久久久久一级片| 日韩激情av在线| 一本色道久久综合狠狠躁的推荐 | 欧美一区二区大片| 亚洲综合视频在线观看| 国产91高潮流白浆在线麻豆 | 亚洲少妇30p| 国产精品一卡二| 欧美一区在线视频| 亚洲va在线va天堂| 在线观看欧美黄色| 亚洲色图视频免费播放| 国产白丝精品91爽爽久久| 欧美精品一区二区三区很污很色的 | 精品在线观看视频| 欧美日本高清视频在线观看| 亚洲免费在线看| 丁香天五香天堂综合| 日韩精品一区二| 奇米888四色在线精品| 欧美人妖巨大在线| 亚洲综合激情网| 在线一区二区三区四区五区| 专区另类欧美日韩| 97久久超碰国产精品| 成人免费一区二区三区视频 | 亚洲人成精品久久久久|