ebm.cpp

Gaussian Mixture Algorithm
Language: CPP
Page 1 of 3

// (page begins mid-listing, inside tanh_module::bbprop)
{
	idx_dtanh(in->x, in->ddx);
	idx_mul(in->ddx, in->ddx, in->ddx);
	idx_mul(in->ddx, out->ddx, in->ddx);
}

void tanh_module::forget(forget_param_linear& fp)
{
}

void tanh_module::normalize()
{
}

////////////////////////////////////////////////////////////////

addc_module::addc_module(parameter *p, intg size)
{
	bias = new state_idx(p, size);
}

addc_module::~addc_module()
{
	delete bias;
}

void addc_module::fprop(state_idx *in, state_idx *out)
{
	out->resize(bias->x.dim(0));
	idx_add(in->x, bias->x, out->x);
}

void addc_module::bprop(state_idx *in, state_idx *out)
{
	idx_copy(out->dx, in->dx);
	idx_copy(out->dx, bias->dx);
}

void addc_module::bbprop(state_idx *in, state_idx *out)
{
	idx_copy(out->ddx, in->ddx);
	idx_copy(out->ddx, bias->ddx);
}

void addc_module::forget(forget_param_linear& fp)
{
	idx_clear(bias->x);
}

void addc_module::normalize()
{
}

////////////////////////////////////////////////////////////////

nn_layer_full::nn_layer_full(parameter *p, intg ninputs, intg noutputs)
{
	linear = new linear_module(p, ninputs, noutputs);
	bias = new state_idx(p, noutputs);
	sum = new state_idx(noutputs);
	sigmoid = new tanh_module();
}

nn_layer_full::~nn_layer_full()
{
	delete sigmoid;
	delete sum;
	delete bias;
	delete linear;
}

void nn_layer_full::fprop(state_idx *in, state_idx *out)
{
	out->resize(bias->x.dim(0));
	linear->fprop(in, sum);
	idx_add(sum->x, bias->x, sum->x);
	sigmoid->fprop(sum, out);
}

void nn_layer_full::bprop(state_idx *in, state_idx *out)
{
	sigmoid->bprop(sum, out);
	idx_copy(sum->dx, bias->dx);
	linear->bprop(in, sum);
}

void nn_layer_full::bbprop(state_idx *in, state_idx *out)
{
	sigmoid->bbprop(sum, out);
	idx_copy(sum->ddx, bias->ddx);
	linear->bbprop(in, sum);
}

void nn_layer_full::forget(forget_param_linear &fp)
{
	linear->forget(fp);
	idx_clear(bias->x);
}

////////////////////////////////////////////////////////////////

f_layer::f_layer(parameter *p, intg tin, intg tout, intg si, intg sj,
		module_1_1<state_idx, state_idx> *sq)
{
	weight = new state_idx(p, tout, tin);
	bias = new state_idx(p, tout);
	sum = new state_idx(tout, si, sj);
	squash = sq;
}

f_layer::~f_layer()
{
	delete weight;
	delete bias;
	delete sum;
}

void f_layer::forget(forget_param_linear &fp)
{
	idx_clear(bias->x);
	// scale the initialization range by the layer fan-in
	double z = fp.value / pow(weight->x.dim(1), fp.exponent);
	if (!drand_ini)
		printf("You have not initialized the random sequence. "
		       "Please call init_drand() before using this function!\n");
	idx_aloop1(w, weight->x, double)
	{
		*w = drand(z);
	}
}
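
f_layer::forget above initializes each weight uniformly at random with a range z = value / fan_in^exponent, so wider layers start with proportionally smaller weights; drand appears to sample from a symmetric interval (c_layer::forget below calls the two-argument drand(-s, s) explicitly). A minimal standalone sketch of the same rule, assuming a symmetric [-z, z] draw (plain C++, not the eblearn API; names and seed are illustrative):

#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

// Fill `w` uniformly in [-z, z] with z = value / fan_in^exponent,
// mirroring the scaling used by f_layer::forget.
void forget_linear(std::vector<double> &w, int fan_in,
                   double value, double exponent)
{
	double z = value / std::pow((double)fan_in, exponent);
	std::mt19937 rng(42);                      // fixed seed for reproducibility
	std::uniform_real_distribution<double> u(-z, z);
	for (double &wi : w) wi = u(rng);
}

int main()
{
	std::vector<double> w(6);
	forget_linear(w, /*fan_in=*/3, /*value=*/1.0, /*exponent=*/0.5);
	for (double wi : w) printf("%f\n", wi);    // each |w_i| <= 1/sqrt(3)
}
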
void f_layer::fprop(state_idx *in, state_idx *out)
{
	intg inx_d1 = in->x.dim(1);
	intg inx_d2 = in->x.dim(2);
	intg ws = weight->x.dim(0);
	// resize sum and output
	sum->resize(ws, inx_d1, inx_d2);
	out->resize(ws, inx_d1, inx_d2);
	// main matrix multiplication
	{
		int tr[] = { 2, 1, 0 };
		Idx<double> inx(in->x.transpose(tr));
		Idx<double> outx(sum->x.transpose(tr));
		// loop over spatial dimensions
		idx_bloop2(linx, inx, double, loutx, outx, double)
		{
			idx_bloop2(llinx, linx, double, lloutx, loutx, double)
			{
				// multiply weight matrix by input
				idx_m2dotm1(weight->x, llinx, lloutx);
			}
		}
	}
	// add bias
	{
		idx_bloop2(sumx, sum->x, double, biasx, bias->x, double)
		{
			idx_addc(sumx, biasx.get(), sumx);
		}
	}
	// call squashing function
	squash->fprop(sum, out);
}

void f_layer::bprop(state_idx *in, state_idx *out)
{
	// backprop through squasher
	squash->bprop(sum, out);
	// compute gradient of bias
	{
		idx_bloop2(lha, sum->dx, double, lb, bias->dx, double)
		{
			*(lb.ptr()) += idx_sum(lha);
		}
	}
	// backprop through weight matrix
	int tr[] = { 2, 1, 0 };
	Idx<double> inx(in->x.transpose(tr));
	Idx<double> indx(in->dx.transpose(tr));
	Idx<double> outdx(sum->dx.transpose(tr));
	Idx<double> tkerx(weight->x.transpose(0, 1));
	idx_bloop3(linx, inx, double, lindx, indx, double, loutdx, outdx, double)
	{
		idx_bloop3(llinx, linx, double, llindx, lindx, double, lloutdx, loutdx, double)
		{
			// accumulate weight gradient (outer product of output gradient and input)
			idx_m1extm1acc(lloutdx, llinx, weight->dx);
			// propagate gradient to the input through the transposed weights
			// (an earlier version called cblas_dgemv directly because
			// idx_m2dotm1 did not handle transposed matrices)
			idx_m2dotm1(tkerx, lloutdx, llindx);
		}
	}
}

void f_layer::bbprop(state_idx *in, state_idx *out)
{
	// backprop through squasher
	squash->bbprop(sum, out);
	// compute second derivatives of bias
	{
		idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double)
		{
			idx_sumacc(lha, lb);
		}
	}
	// backprop through weight matrix
	int tr[] = { 2, 1, 0 };
	Idx<double> inx(in->x.transpose(tr));
	Idx<double> indx(in->ddx.transpose(tr));
	Idx<double> outdx(sum->ddx.transpose(tr));
	Idx<double> tkerx(weight->x.transpose(1, 0));
	idx_bloop3(linx, inx, double, lindx, indx, double, loutdx, outdx, double)
	{
		idx_bloop3(llinx, linx, double, llindx, lindx, double, lloutdx, loutdx, double)
		{
			idx_m1squextm1acc(lloutdx, llinx, weight->ddx);
			idx_m2squdotm1(tkerx, lloutdx, llindx);
		}
	}
}

////////////////////////////////////////////////////////////////

c_layer::c_layer(parameter *p, intg ki, intg kj, intg ri, intg rj, Idx<intg> *tbl,
		intg thick, intg si, intg sj, module_1_1<state_idx, state_idx> *sqsh)
{
	thickness = thick;
	stridei = ri;
	stridej = rj;
	kernel = new state_idx(p, tbl->dim(0), ki, kj);
	table = tbl;
	bias = new state_idx(p, thick);
	sum = new state_idx(thick, si, sj);
	squash = sqsh;
}

c_layer::~c_layer()
{
	delete kernel;
	delete bias;
	delete sum;
}

void c_layer::set_stride(intg ri, intg rj)
{
	stridei = ri;
	stridej = rj;
}
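
Stepping back to f_layer::bprop above: for a linear map y = Wx, the gradients are dW += dy xᵀ and dx = Wᵀ dy, which is what idx_m1extm1acc and idx_m2dotm1 compute at each spatial site. A minimal standalone sketch of that arithmetic (plain C++, not the eblearn API; sizes and values are illustrative):

#include <cstdio>

// Backprop through a linear map y = W x, as done per spatial site in
// f_layer::bprop: dW += dy * x^T (outer product) and dx = W^T dy
// (matrix-vector product with the transposed weights).
int main()
{
	const int NIN = 2, NOUT = 3;
	double W[NOUT][NIN] = {{1, 2}, {3, 4}, {5, 6}};
	double x[NIN] = {1, -1};
	double dy[NOUT] = {0.1, 0.2, 0.3};    // gradient arriving from above
	double dW[NOUT][NIN] = {{0}};
	double dx[NIN] = {0};

	for (int o = 0; o < NOUT; ++o)
		for (int t = 0; t < NIN; ++t) {
			dW[o][t] += dy[o] * x[t];     // outer-product accumulation
			dx[t] += W[o][t] * dy[o];     // transposed matrix-vector product
		}

	printf("dx = (%g, %g)\n", dx[0], dx[1]);  // (2.2, 2.8)
}
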
void c_layer::forget(forget_param_linear &fp)
{
	idx_clear(bias->x);
	Idx<double> kx(kernel->x);
	intg vsize = kx.dim(1);
	intg hsize = kx.dim(2);
	Idx<intg> ts(table->select(1, 1));
	Idx<int> fanin(1 + idx_max(ts));
	if (!drand_ini)
		printf("You have not initialized the random sequence. "
		       "Please call init_drand() before using this function!\n");
	idx_clear(fanin);
	// count the fan-in of each output feature map
	{
		idx_bloop1(tab, *table, intg)
		{
			fanin.set(1 + fanin.get(tab.get(1)), tab.get(1));
		}
	}
	// initialize each kernel uniformly, scaled by its fan-in
	{
		idx_bloop2(tab, *table, intg, x, kx, double)
		{
			double s = fp.value / pow((vsize * hsize * fanin.get(tab.get(1))), fp.exponent);
			idx_bloop1(lx, x, double)
			{
				idx_bloop1(llx, lx, double)
				{
					llx.set(drand(-s, s));
				}
			}
		}
	}
}

void c_layer::fprop(state_idx *in, state_idx *out)
{
	intg ki = kernel->x.dim(1);
	intg kj = kernel->x.dim(2);
	intg sini = in->x.dim(1);
	intg sinj = in->x.dim(2);
	if (((sini - (ki - stridei)) % stridei != 0) ||
	    ((sinj - (kj - stridej)) % stridej != 0))
		ylerror("inconsistent input size, kernel size, and subsampling ratio.");
	if ((stridei != 1) || (stridej != 1))
		ylerror("stride > 1 not implemented yet.");
	// unfold the input into overlapping ki x kj windows
	Idx<double> uuin(in->x.unfold(1, ki, stridei));
	uuin = uuin.unfold(2, kj, stridej);
	Idx<double> lki(kernel->x.dim(1), kernel->x.dim(2));
	// resize output if necessary
	sum->resize(thickness, uuin.dim(1), uuin.dim(2));
	out->resize(thickness, uuin.dim(1), uuin.dim(2));
	idx_clear(sum->x);
	// generic convolution
	{
		idx_bloop2(lk, kernel->x, double, lt, *table, intg)
		{
			Idx<double> suin(uuin.select(0, lt.get(0)));
			Idx<double> sout((sum->x).select(0, lt.get(1)));
			idx_m4dotm2acc(suin, lk, sout);
		}
	}
	// add bias
	{
		idx_bloop3(sumx, sum->x, double, biasx, bias->x, double, outx, out->x, double)
		{
			idx_addc(sumx, biasx.get(), sumx);
		}
	}
	// call squashing function
	squash->fprop(sum, out);
}

void c_layer::bprop(state_idx *in, state_idx *out)
{
	// backprop gradient through squasher
	squash->bprop(sum, out);
	// compute gradient of bias
	{
		idx_bloop2(lha, sum->dx, double, lb, bias->dx, double)
		{
			idx_sumacc(lha, lb);
		}
	}
	// backprop through convolution
	idx_clear(in->dx);
	Idx<double> uuin(in->dx.unfold(1, (kernel->dx).dim(1), stridei));
	uuin = uuin.unfold(2, (kernel->dx).dim(2), stridej);
	Idx<double> uuinf(in->x.unfold(1, (kernel->dx).dim(1), stridei));
	uuinf = uuinf.unfold(2, (kernel->dx).dim(2), stridej);
	int transp[5] = { 0, 3, 4, 1, 2 };
	Idx<double> borp(uuinf.transpose(transp));
	{
		idx_bloop3(lk, kernel->dx, double, lkf, kernel->x, double, lt, *table, intg)
		{
			intg islice = lt.get(0);
			Idx<double> suin(uuin.select(0, islice));
			Idx<double> sborp(borp.select(0, islice));
			Idx<double> sout((sum->dx).select(0, lt.get(1)));
			// backward convolution
			idx_m2extm2acc(sout, lkf, suin);
			// compute gradient for kernel
			idx_m4dotm2acc(sborp, sout, lk);
		}
	}
}
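
c_layer::fprop above computes a valid-mode convolution by unfolding the input into overlapping ki x kj windows and accumulating, for every (input, output) pair in the connection table, the dot product of each window with that pair's kernel (idx_m4dotm2acc). A minimal standalone sketch of the same valid-mode operation for a single map pair at stride 1 (plain C++, not the eblearn API; sizes are illustrative):

#include <cstdio>

// Valid-mode 2-D convolution (stride 1): each output pixel is the dot
// product of one KI x KJ input window with the kernel, the operation
// idx_m4dotm2acc performs on each unfolded input slice.
int main()
{
	const int SI = 4, SJ = 4, KI = 3, KJ = 3;
	const int OI = SI - KI + 1, OJ = SJ - KJ + 1;          // output is 2 x 2
	double in[SI][SJ] = {{1, 2, 3, 4}, {5, 6, 7, 8},
	                     {9, 10, 11, 12}, {13, 14, 15, 16}};
	double k[KI][KJ] = {{0, 0, 0}, {0, 1, 0}, {0, 0, 0}};  // identity kernel
	double out[OI][OJ] = {{0}};

	for (int i = 0; i < OI; ++i)
		for (int j = 0; j < OJ; ++j)
			for (int a = 0; a < KI; ++a)
				for (int b = 0; b < KJ; ++b)
					out[i][j] += in[i + a][j + b] * k[a][b];

	// with the identity kernel, out picks the window centers: 6 7 / 10 11
	for (int i = 0; i < OI; ++i)
		printf("%g %g\n", out[i][0], out[i][1]);
}
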
void c_layer::bbprop(state_idx *in, state_idx *out)
{
	// backprop gradient through squasher
	squash->bbprop(sum, out);
	// compute second derivatives of bias
	{
		idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double)
		{
			idx_sumacc(lha, lb);
		}
	}
	// backprop through convolution
	idx_clear(in->ddx);
	Idx<double> uuin(in->ddx.unfold(1, (kernel->ddx).dim(1), stridei));
	uuin = uuin.unfold(2, (kernel->ddx).dim(2), stridej);
	Idx<double> uuinf(in->x.unfold(1, (kernel->ddx).dim(1), stridei));
	uuinf = uuinf.unfold(2, (kernel->ddx).dim(2), stridej);
	int transp[5] = { 0, 3, 4, 1, 2 };
	Idx<double> borp(uuinf.transpose(transp));
	{
		idx_bloop3(lk, kernel->ddx, double, lkf, kernel->x, double, lt, *table, intg)
		{
			intg islice = lt.get(0);
			Idx<double> suin(uuin.select(0, islice));
			Idx<double> sborp(borp.select(0, islice));
			Idx<double> sout((sum->ddx).select(0, lt.get(1)));
			// backward convolution
			idx_m2squextm2acc(sout, lkf, suin);
			// compute second derivatives for kernel
			idx_m4squdotm2acc(sborp, sout, lk);
		}
	}
}

////////////////////////////////////////////////////////////////

#ifdef USE_IPP
// TODO: copy IPP into the project
// TODO: ipp 64 for doubles?
void c_layer_ipp::fprop(state_idx *in, state_idx *out)
{
	intg ki = kernel->x.dim(1);
	intg kj = kernel->x.dim(2);
	intg sini = in->x.dim(1);
	intg sinj = in->x.dim(2);
	if (((sini - (ki - stridei)) % stridei != 0) ||
	    ((sinj - (kj - stridej)) % stridej != 0))
		ylerror("inconsistent input size, kernel size, and subsampling ratio.");
	if ((stridei != 1) || (stridej != 1))
		ylerror("stride > 1 not implemented yet.");
	Idx<double> uuin = in->x.unfold(1, ki, stridei);
	uuin = uuin.spec.unfold_inplace(2, kj, stridej);
	Idx<double> lki = Idx<double>(kernel->x.dim(1), kernel->x.dim(2));
	// resize output if necessary
	sum->resize(thickness, uuin.dim(1), uuin.dim(2));
	out->resize(thickness, uuin.dim(1), uuin.dim(2));
	idx_clear(sum->x);
	// generic convolution
	Idx<double> tout = Idx<double>(sum->x.dim(1), sum->x.dim(2));
	{
		idx_bloop2(lk, kernel->x, double, lt, *table, intg)
		{
			rev_idx2_tr(*lk, lki);
			// ipp_convolution_float(in->x.select(0, lt.get(0)), lki, tout);
			// ipp_add_float(tout, sum->x.select(0, lt.get(1)));
		}
	}
	// add bias
	{
		idx_bloop3(sumx, sum->x, double, biasx, bias->x, double, outx, out->x, double)
		{
			// ipp_addc_nip_float(sumx, biasx.get(), outx);
		}
	}
	// call squashing function
	squash->fprop(sum, out);
}

void c_layer_ipp::bprop(state_idx *in, state_idx *out)
{
	// backprop gradient through squasher
	squash->bprop(sum, out);
	// compute gradient of bias
	{
		idx_bloop2(lha, sum->dx, double, lb, bias->dx, double)
		{
			idx_sumacc(lha, lb);
		}
	}
	// backprop through convolution
	idx_clear(in->dx);
	/* original Lush implementation, kept for reference:
	(let* ((ki (idx-dim :kernel:dx 1))
	       (kj (idx-dim :kernel:dx 2))
	       (ini (idx-dim :in:dx 1))
	       (inj (idx-dim :in:dx 2))
	       (outi (idx-dim :out:dx 1))
	       (outj (idx-dim :out:dx 2))
	       (sumi (idx-dim :sum:dx 1))
	       (sumj (idx-dim :sum:dx 2))
	       (souti (gbtype-matrix sumi sumj))
	       (tout (gbtype-matrix ki kj)))
	  (idx-bloop ((lk :kernel:dx) (lkf :kernel:x) (lt table))
	    (let* ((islice (lt 0))
	           (sout (select :sum:dx 0 (lt 1))))
	      ;; backward convolution
	      (ipp-convolution-full-float sout lkf (select :in:dx 0 islice))
	      ;; compute gradient for kernel
	      (rev-idx2-tr-float sout souti)
	      (ipp-convolution-float (select :in:x 0 islice) souti tout)
	      (ipp-add-float tout lk))))
	*/
}
#endif

////////////////////////////////////////////////////////////////

Idx<intg> full_table(intg a, intg b)
{
	// build the full connection table: every input feature map (column 0)
	// is connected to every output feature map (column 1)
	Idx<intg> m(a * b, 2);
	intg p = 0;
	for (intg j = 0; j < b; ++j) {
		for (intg i = 0; i < a; ++i) {
			m.set(i, p, 0);
			m.set(j, p, 1);
			p++;
		}
	}
	return m;
}

////////////////////////////////////////////////////////////////

s_layer::s_layer(parameter *p, intg ki, intg kj, intg thick, intg si, intg sj,
		module_1_1<state_idx, state_idx> *sqsh) :
	stridei(ki), stridej(kj), squash(sqsh)
{
	coeff = new state_idx(p, thick);
	bias = new state_idx(p, thick);
	sub = new state_idx(thick, si, sj);
	sum = new state_idx(thick, si, sj);
}

s_layer::~s_layer()
{
	delete coeff;
	delete bias;
	delete sub;
	delete sum;
}
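
full_table above enumerates every (input, output) pair, which is what makes a c_layer fully connected across feature maps; the input index varies fastest. A minimal standalone sketch reproducing its row order for full_table(2, 3) (plain C++, not the eblearn API; the real function returns an Idx<intg> of shape (a*b, 2)):

#include <cstdio>

// Replicates the row order produced by full_table(a, b): for each output
// map j, one row per input map i, so all a*b connections are listed.
int main()
{
	const int a = 2, b = 3;
	int m[a * b][2];
	int p = 0;
	for (int j = 0; j < b; ++j)
		for (int i = 0; i < a; ++i) {
			m[p][0] = i;   // input feature map
			m[p][1] = j;   // output feature map
			p++;
		}
	for (int r = 0; r < a * b; ++r)
		printf("(%d, %d)\n", m[r][0], m[r][1]);
	// prints (0,0) (1,0) (0,1) (1,1) (0,2) (1,2)
}
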
ylerror("inconsistent input size and subsampleing ratio");	sub->resize(sin_t, si, sj);	sum->resize(sin_t, si, sj);	out->resize(sin_t, si, sj);	// 1. subsampling ( coeff * average )	idx_clear(sub->x);	{ idx_bloop4(lix, in->x, double, lsx, sub->x, double,			lcx, coeff->x, double, ltx, sum->x, double) {		Idx<double> uuin(lix->unfold(1, stridej, stridej));		uuin = uuin.unfold(0, stridei, stridei);		{ idx_eloop1(z1, uuin, double) {			{ idx_eloop1(z2, z1, double) {				idx_add(z2, lsx, lsx);			 }			}		 }		}		idx_dotc(lsx, lcx.get(), ltx);	 }	}	// 2. add bias	{ idx_bloop3(sumx, sum->x, double, biasx, bias->x, double,			outx, out->x, double) {		idx_addc(sumx, biasx.get(), sumx);	 }	}	// 3. call squashing function	squash->fprop(sum, out);}void s_layer::bprop(state_idx *in, state_idx *out){	// 1.	squash->bprop(sum, out);	// 2.	{ idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {		idx_sumacc(lha, lb);	}}	// 3.	{ idx_bloop3(lcdx, coeff->dx, double, ltdx, sum->dx, double,			lsx, sub->x, double) {		idx_dotacc(lsx, ltdx, lcdx);	}}	// 4.	{ idx_bloop4(lidx, in->dx, double, lsdx, sub->dx, double,			lcx, coeff->x, double, ltdx2, sum->dx, double) {		idx_dotc(ltdx2, lcx.get(), lsdx);		idx_m2oversample(lsdx, stridei, stridej, lidx);	}}}void s_layer::bbprop(state_idx *in, state_idx *out){	// 1.	squash->bbprop(sum, out);	// 2.	{ idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double) {		idx_sumacc(lha, lb);	}}	// 3.	{ idx_bloop3(lcdx, coeff->ddx, double, ltdx, sum->ddx, double,			lsx, sub->x, double) {		idx_m2squdotm2acc(lsx, ltdx, lcdx);	}}	// 4.	{ idx_bloop4(lidx, in->ddx, double, lsdx, sub->ddx, double,			lcx, coeff->x, double, ltdx2, sum->ddx, double) {		double cf = lcx.get();		idx_dotc(ltdx2, cf * cf, lsdx);		idx_m2oversample(lsdx, stridei, stridej, lidx);	}}}void s_layer::forget(forget_param_linear &fp) {	double c = fp.value / pow(stridei * stridej, fp.exponent);	idx_clear(bias->x);	idx_fill(coeff->x, c);}////////////////////////////////////////////////////////////////////////logadd_layer::logadd_layer(intg thick, intg si, intg sj) {  expdist = Idx<double>(thick, si, sj);  sumexp = Idx<double>(thick);		// scaled partition function}void logadd_layer::fprop(state_idx *in, state_idx *out) {  intg thick = in->x.dim(0);	intg si = in->x.dim(1);	intg sj = in->x.dim(2);	expdist.resize(thick, si, sj);  out->x.resize(thick);	if (1 == (si * sj)) {		// save time and precision if no replication		Idx<double> inx(in->x.select(2, 0));		Idx<double> m(inx.select(1, 0));		Idx<double> ed(expdist.select(2, 0));		Idx<double> ed1(ed.select(1, 0));		idx_fill(ed1, 1.0);		idx_fill(sumexp, 1.0);
