tfrc-sink.cc
        now, sample, drops);
    //printf ("time: %7.5f send_rate: %7.5f\n", now, sendrate);
    //printf ("time: %7.5f maxseq: %d\n", now, maxseq);
}

void TfrcSinkAgent::print_loss_all(int *sample)
{
    double now = Scheduler::instance().clock();
    printf ("%f: sample 0: %5d 1: %5d 2: %5d 3: %5d 4: %5d\n",
        now, sample[0], sample[1], sample[2], sample[3], sample[4]);
}

void TfrcSinkAgent::print_losses_all(int *losses)
{
    double now = Scheduler::instance().clock();
    printf ("%f: losses 0: %5d 1: %5d 2: %5d 3: %5d 4: %5d\n",
        now, losses[0], losses[1], losses[2], losses[3], losses[4]);
}

void TfrcSinkAgent::print_count_losses_all(int *count_losses)
{
    double now = Scheduler::instance().clock();
    printf ("%f: count? 0: %5d 1: %5d 2: %5d 3: %5d 4: %5d\n",
        now, count_losses[0], count_losses[1], count_losses[2],
        count_losses[3], count_losses[4]);
}

////////////////////////////////////////
// algo specific code
////////////////////////////////////////

///////////////////////////
// WALI Code
///////////////////////////

double TfrcSinkAgent::est_loss_WALI ()
{
    int i;
    double ave_interval1, ave_interval2;
    int ds;

    if (!init_WALI_flag) {
        init_WALI () ;
    }
    // sample[i] counts the number of packets since the i-th loss event
    // sample[0] contains the most recent sample.
    for (i = last_sample; i <= maxseq ; i ++) {
        sample[0]++;
        if (lossvec_[i%hsz] == LOST || lossvec_[i%hsz] == ECNLOST) {
            // new loss event
            // double now = Scheduler::instance().clock();
            sample_count ++;
            shift_array (sample, numsamples+1, 0);
            shift_array (losses, numsamples+1, 1);
            shift_array (count_losses, numsamples+1, 0);
            multiply_array(mult, numsamples+1, mult_factor_);
            shift_array (mult, numsamples+1, 1.0);
            mult_factor_ = 1.0;
        }
    }
    last_sample = maxseq+1 ;

    if (sample_count > numsamples+1)
        // The array of loss intervals is full.
        ds = numsamples+1;
    else
        ds = sample_count;

    if (sample_count == 1 && false_sample == 0)
        // no losses yet
        return 0;

    /* do we need to discount weights? */
    if (sample_count > 1 && discount && sample[0] > 0) {
        double ave = weighted_average1(1, ds, 1.0, mult, weights, sample,
            ShortIntervals_, losses, count_losses);
        //double ave = weighted_average(1, ds, 1.0, mult, weights, sample);
        int factor = 2;
        double ratio = (factor*ave)/sample[0];
        double min_ratio = 0.5;
        if (ratio < 1.0) {
            // the most recent loss interval is very large
            mult_factor_ = ratio;
            if (mult_factor_ < min_ratio)
                mult_factor_ = min_ratio;
        }
    }
    // Calculations including the most recent loss interval.
    ave_interval1 = weighted_average1(0, ds, mult_factor_, mult, weights, sample,
        ShortIntervals_, losses, count_losses);
    //ave_interval1 = weighted_average(0, ds, mult_factor_, mult, weights, sample);
    // The most recent loss interval does not end in a loss
    // event.  Include the most recent interval in the
    // calculations only if this increases the estimated loss
    // interval.
    ave_interval2 = weighted_average1(1, ds, mult_factor_, mult, weights, sample,
        ShortIntervals_, losses, count_losses);
    //ave_interval2 = weighted_average(1, ds, mult_factor_, mult, weights, sample);
    if (ave_interval2 > ave_interval1)
        ave_interval1 = ave_interval2;
    if (ave_interval1 > 0) {
        if (printLoss_ > 0) {
            print_loss(sample[0], ave_interval1);
            print_loss_all(sample);
            if (ShortIntervals_ > 0) {
                print_losses_all(losses);
                print_count_losses_all(count_losses);
            }
        }
        return 1/ave_interval1;
    } else return 999;
}
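// --------------------------------------------------------------------
// Illustrative sketch (not part of the original ns-2 agent): the core
// WALI idea with the discounting (mult_factor_/mult[]), smoothing
// (smooth_), and short-interval handling above stripped out.  The
// average loss interval is a weighted mean of the most recent loss
// intervals and the reported loss event rate is its inverse.  The
// weights below are the standard TFRC values for eight intervals
// (RFC 5348); the function name and fixed array size are assumptions
// made only for this example.
static double wali_loss_event_rate_sketch(const int *intervals, int count)
{
    static const double kWeights[8] = {1.0, 1.0, 1.0, 1.0, 0.8, 0.6, 0.4, 0.2};
    if (count > 8)
        count = 8;
    double wsum = 0.0, isum = 0.0;
    for (int i = 0; i < count; i++) {
        wsum += kWeights[i];
        isum += kWeights[i] * intervals[i];   // intervals[0] is the most recent
    }
    if (wsum == 0.0 || isum == 0.0)
        return 0.0;                           // no losses yet
    double ave_interval = isum / wsum;
    return 1.0 / ave_interval;                // loss event rate p
}
// Example: intervals {20, 50, 40, 60, 45, 55, 30, 50} give an average
// interval of about 43.5 packets, i.e. p of roughly 0.023.
// --------------------------------------------------------------------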
// Calculate the weighted average.
double TfrcSinkAgent::weighted_average(int start, int end, double factor,
    double *m, double *w, int *sample)
{
    int i;
    double wsum = 0;
    double answer = 0;
    if (smooth_ == 1 && start == 0) {
        if (end == numsamples+1) {
            // the array is full, but we don't want to use
            // the last loss interval in the array
            end = end-1;
        }
        // effectively shift the weight arrays
        for (i = start ; i < end; i++)
            if (i==0)
                wsum += m[i]*w[i+1];
            else
                wsum += factor*m[i]*w[i+1];
        for (i = start ; i < end; i++)
            if (i==0)
                answer += m[i]*w[i+1]*sample[i]/wsum;
            else
                answer += factor*m[i]*w[i+1]*sample[i]/wsum;
        return answer;
    } else {
        for (i = start ; i < end; i++)
            if (i==0)
                wsum += m[i]*w[i];
            else
                wsum += factor*m[i]*w[i];
        for (i = start ; i < end; i++)
            if (i==0)
                answer += m[i]*w[i]*sample[i]/wsum;
            else
                answer += factor*m[i]*w[i]*sample[i]/wsum;
        return answer;
    }
}

int TfrcSinkAgent::get_sample(int oldSample, int numLosses)
{
    int newSample;
    if (numLosses == 0) {
        newSample = oldSample;
    } else {
        newSample = (int) floor(oldSample / numLosses);
    }
    return newSample;
}

// Calculate the weighted average, factor*m[i]*w[i]*sample[i]/wsum.
// "factor" is "mult_factor_", for weighting the most recent interval
// when it is very large.
// "m[i]" is "mult[]", for old values of "mult_factor_".
//
// When ShortIntervals_ is 1, the length of a loss interval is
// "sample[i]/losses[i]" for short intervals, not just "sample[i]".
// This is equivalent to a loss event rate of "losses[i]/sample[i]",
// instead of "1/sample[i]".
//
// When ShortIntervals_ is 2, it is like ShortIntervals_ of 1,
// except that the number of losses per loss interval is at
// most 1460/byte-size-of-small-packets.
//
double TfrcSinkAgent::weighted_average1(int start, int end, double factor,
    double *m, double *w, int *sample, int ShortIntervals, int *losses,
    int *count_losses)
{
    int i;
    int ThisSample;
    double wsum = 0;
    double answer = 0;
    if (smooth_ == 1 && start == 0) {
        if (end == numsamples+1) {
            // the array is full, but we don't want to use
            // the last loss interval in the array
            end = end-1;
        }
        // effectively shift the weight arrays
        for (i = start ; i < end; i++)
            if (i==0)
                wsum += m[i]*w[i+1];
            else
                wsum += factor*m[i]*w[i+1];
        for (i = start ; i < end; i++) {
            ThisSample = sample[i];
            if (ShortIntervals == 1 && count_losses[i] == 1) {
                ThisSample = get_sample(sample[i], losses[i]);
            }
            if (ShortIntervals == 2 && count_losses[i] == 1) {
                int adjusted_losses = int(fsize_/size_);
                if (losses[i] < adjusted_losses) {
                    adjusted_losses = losses[i];
                }
                ThisSample = get_sample(sample[i], adjusted_losses);
            }
            if (i==0)
                answer += m[i]*w[i+1]*ThisSample/wsum;
                //answer += m[i]*w[i+1]*sample[i]/wsum;
            else
                answer += factor*m[i]*w[i+1]*ThisSample/wsum;
                //answer += factor*m[i]*w[i+1]*sample[i]/wsum;
        }
        return answer;
    } else {
        for (i = start ; i < end; i++)
            if (i==0)
                wsum += m[i]*w[i];
            else
                wsum += factor*m[i]*w[i];
        for (i = start ; i < end; i++) {
            ThisSample = sample[i];
            if (ShortIntervals == 1 && count_losses[i] == 1) {
                ThisSample = get_sample(sample[i], losses[i]);
            }
            if (ShortIntervals == 2 && count_losses[i] == 1) {
                ThisSample = get_sample(sample[i], 7);
                // Replace 7 by 1460/packet size.
            }
            if (i==0)
                answer += m[i]*w[i]*ThisSample/wsum;
                //answer += m[i]*w[i]*sample[i]/wsum;
            else
                answer += factor*m[i]*w[i]*ThisSample/wsum;
                //answer += factor*m[i]*w[i]*sample[i]/wsum;
        }
        return answer;
    }
}
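// --------------------------------------------------------------------
// Worked example (illustrative, not part of the original ns-2 agent):
// with ShortIntervals == 1, a "short" interval of 100 packets that saw
// 4 losses (count_losses[i] == 1) contributes get_sample(100, 4) == 25
// to the weighted average, i.e. a loss event rate of 4/100 instead of
// the usual 1/100.  The helper below just restates that arithmetic;
// its name is an assumption made only for this sketch.
static int short_interval_example_sketch()
{
    int sample = 100;        // packets in the loss interval
    int losses = 4;          // losses observed inside it
    // Same arithmetic as get_sample(): integer division, guarding losses == 0.
    int adjusted = (losses == 0) ? sample : sample / losses;
    return adjusted;         // 25: the interval counts as 25 packets
}
// --------------------------------------------------------------------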
// Shift array a[] up, starting with a[sz-2] -> a[sz-1].
void TfrcSinkAgent::shift_array(int *a, int sz, int defval)
{
    int i;
    for (i = sz-2 ; i >= 0 ; i--) {
        a[i+1] = a[i] ;
    }
    a[0] = defval;
}

void TfrcSinkAgent::shift_array(double *a, int sz, double defval)
{
    int i;
    for (i = sz-2 ; i >= 0 ; i--) {
        a[i+1] = a[i] ;
    }
    a[0] = defval;
}

// Multiply array by value, starting with array index 1.
// Array index 0 of the unshifted array contains the most recent interval.
void TfrcSinkAgent::multiply_array(double *a, int sz, double multiplier)
{
    int i;
    for (i = 1; i <= sz-1; i++) {
        double old = a[i];
        a[i] = old * multiplier ;
    }
}

/*
 * We just received our first loss, and need to adjust our history.
 */
double TfrcSinkAgent::adjust_history (double ts)
{
    int i;
    double p;
    for (i = maxseq; i >= 0 ; i --) {
        if (lossvec_[i%hsz] == LOST || lossvec_[i%hsz] == ECNLOST) {
            lossvec_[i%hsz] = NOT_RCVD;
        }
    }
    lastloss = ts;
    lastloss_round_id = round_id ;
    p = b_to_p(est_thput()*psize_, rtt_, tzero_, fsize_, 1);
    false_sample = (int)(1.0/p);
    sample[1] = false_sample;
    sample[0] = 0;
    losses[1] = 0;
    losses[0] = 1;
    count_losses[1] = 0;
    count_losses[0] = 0;
    sample_count++;
    if (printLoss_) {
        print_loss_all (sample);
        if (ShortIntervals_ == 1) {
            print_losses_all(losses);
            print_count_losses_all(count_losses);
        }
    }
    false_sample = -1 ;
    return p;
}

/*
 * Initialize data structures for weights.
 */
void TfrcSinkAgent::init_WALI ()
{
    int i;
    if (numsamples < 0)
        numsamples = DEFAULT_NUMSAMPLES ;
    if (smooth_ == 1) {
        numsamples = numsamples + 1;
    }
    sample = (int *)malloc((numsamples+1)*sizeof(int));
    losses = (int *)malloc((numsamples+1)*sizeof(int));
    count_losses = (int *)malloc((numsamples+1)*sizeof(int));
    weights = (double *)malloc((numsamples+1)*sizeof(double));
    mult = (double *)malloc((numsamples+1)*sizeof(double));
    for (i = 0 ; i < numsamples+1 ; i ++) {
        sample[i] = 0 ;
    }
    if (smooth_ == 1) {
        int mid = int(numsamples/2);
        for (i = 0; i < mid; i ++) {
            weights[i] = 1.0;
        }
        for (i = mid; i <= numsamples; i ++) {
            weights[i] = 1.0 - (i-mid)/(mid + 1.0);
        }
    } else {
        int mid = int(numsamples/2);
        for (i = 0; i < mid; i ++) {
            weights[i] = 1.0;
        }
        for (i = mid; i <= numsamples; i ++) {
            weights[i] = 1.0 - (i+1-mid)/(mid + 1.0);
        }
    }
    for (i = 0; i < numsamples+1; i ++) {
        mult[i] = 1.0 ;
    }
    init_WALI_flag = 1; /* initialization done */
}

///////////////////////////// EWMA ////////////////////////////////////////////

double TfrcSinkAgent::est_loss_EWMA ()
{
    double p1, p2 ;

    for (int i = last_sample; i <= maxseq ; i ++) {
        loss_int++;
        if (lossvec_[i%hsz] == LOST || lossvec_[i%hsz] == ECNLOST) {
            if (avg_loss_int < 0) {
                avg_loss_int = loss_int ;
            } else {
                avg_loss_int = history*avg_loss_int + (1-history)*loss_int ;
            }
            loss_int = 0 ;
        }
    }
    last_sample = maxseq+1 ;

    if (avg_loss_int < 0) {
        p1 = 0;
    } else {
        p1 = 1.0/avg_loss_int ;
    }
    if (loss_int == 0 || avg_loss_int < 0) {
        // XXX The avg_loss_int check was added only to stop a
        // floating-point divide by zero (seen with history=.75,
        // avg_loss_int=-1, loss_int=3) by someone unfamiliar with this
        // code.  If you know what should be here, please clean it up
        // and remove this comment.
        p2 = p1 ;
    } else {
        p2 = 1.0/(history*avg_loss_int + (1-history)*loss_int) ;
    }
    if (p2 < p1) {
        p1 = p2 ;
    }
    if (printLoss_ > 0) {
        if (p1 > 0)
            print_loss(loss_int, 1.0/p1);
        else
            print_loss(loss_int, 0.00001);
        print_loss_all(sample);
    }
    return p1 ;
}
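// --------------------------------------------------------------------
// Illustrative sketch (not part of the original ns-2 agent): the EWMA
// update used above, in isolation.  Each completed loss interval folds
// into the running average as avg = history*avg + (1-history)*interval,
// and the reported loss event rate is 1/avg.  The struct and function
// names are assumptions made only for this example; "history" plays the
// same role as the member bound above (e.g. 0.75).
struct EwmaLossSketch {
    double history;          // smoothing weight on the old average
    double avg_loss_int;     // -1 until the first loss interval completes
};

static double ewma_update_sketch(EwmaLossSketch *e, int completed_interval)
{
    if (e->avg_loss_int < 0)
        e->avg_loss_int = completed_interval;
    else
        e->avg_loss_int = e->history * e->avg_loss_int
                        + (1.0 - e->history) * completed_interval;
    if (e->avg_loss_int <= 0)
        return 0.0;                      // guard the degenerate case
    return 1.0 / e->avg_loss_int;        // loss event rate p
}
// Example: history = 0.75, avg = 40 packets, new interval = 80 packets
// gives avg = 0.75*40 + 0.25*80 = 50, i.e. p = 0.02.
// --------------------------------------------------------------------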
///////////////////////////// RBPH ////////////////////////////////////////////

double TfrcSinkAgent::est_loss_RBPH ()
{
    double numpkts = hsz ;
    double p ;

    // how many packets should we go back?
    if (sendrate > 0 && rtt_ > 0) {
        double x = b_to_p(sendrate, rtt_, tzero_, psize_, 1);
        if (x > 0)
            numpkts = minlc/x ;
        else
            numpkts = hsz ;
    }

    // that number must be below maxseq and hsz
    if (numpkts > maxseq)
        numpkts = maxseq ;
    if (numpkts > hsz)
        numpkts = hsz ;

    int lc = 0;
    int pc = 0;
    int i = maxseq ;

    // first see how many loss events we find in the last numpkts packets
    while (pc < numpkts) {
        pc ++ ;
        if (lossvec_[i%hsz] == LOST || lossvec_[i%hsz] == ECNLOST)
            lc ++ ;
        i -- ;
    }

    // if there are not enough loss events, keep going back ...
    if (lc < minlc) {
        // but only as far as the history allows ...
        numpkts = maxseq ;
        if (numpkts > hsz)
            numpkts = hsz ;
        while ((lc < minlc) && (pc < numpkts)) {
            pc ++ ;
            if (lossvec_[i%hsz] == LOST || lossvec_[i%hsz] == ECNLOST)
                lc ++ ;
            i -- ;
        }
    }

    if (pc == 0)
        p = 0;
    else
        p = (double)lc/(double)pc ;

    if (printLoss_ > 0) {
        if (p > 0)
            print_loss(0, 1.0/p);
        else
            print_loss(0, 0.00001);
        print_loss_all(sample);
    }
    return p ;
}

///////////////////////////// EBPH ////////////////////////////////////////////

double TfrcSinkAgent::est_loss_EBPH ()
{
    double numpkts = hsz ;
    double p ;
    int lc = 0;
    int pc = 0;
    int i = maxseq ;

    numpkts = maxseq ;
    if (numpkts > hsz)
        numpkts = hsz ;
    while ((lc < minlc) && (pc < numpkts)) {
        pc ++ ;
        if (lossvec_[i%hsz] == LOST || lossvec_[i%hsz] == ECNLOST)
            lc ++ ;
        i -- ;
    }

    if (pc == 0)
        p = 0;
    else
        p = (double)lc/(double)pc ;

    if (printLoss_ > 0) {
        if (p > 0)
            print_loss(0, 1.0/p);
        else
            print_loss(0, 0.00001);
        print_loss_all(sample);
    }
    return p ;
}
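// --------------------------------------------------------------------
// Illustrative sketch (not part of the original ns-2 agent): RBPH and
// EBPH above share the same core loop -- walk backwards through the
// receive history, count packets (pc) and loss events (lc), and report
// p = lc/pc.  RBPH first sizes the window from the current send rate
// via b_to_p() and widens it only if fewer than minlc loss events are
// found; EBPH simply scans until minlc events are seen or the history
// runs out.  The helper below shows that shared loop over a plain 0/1
// loss vector; its name and parameters are assumptions for this sketch.
static double history_loss_fraction_sketch(const char *lost, int history_len,
                                           int min_loss_events)
{
    int lc = 0;
    int pc = 0;
    int i = history_len - 1;         // newest packet is at the end
    while (i >= 0 && lc < min_loss_events) {
        pc ++ ;
        if (lost[i])
            lc ++ ;
        i -- ;
    }
    return (pc == 0) ? 0.0 : (double)lc/(double)pc;
}
// --------------------------------------------------------------------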