
ebm.cpp
Gaussian Mixture Algorithm
CPP
Page 1 of 3
// Listing begins mid-file (page 1 of 3). From the surrounding code
// (idx_dtanh into ddx, followed by tanh_module::forget), this opening
// fragment is the body of tanh_module::bbprop; the signature below is
// reconstructed from that context.
void tanh_module::bbprop(state_idx* in, state_idx* out)
{
	idx_dtanh(in->x, in->ddx);
	idx_mul(in->ddx, in->ddx, in->ddx);
	idx_mul(in->ddx, out->ddx, in->ddx);
}

void tanh_module::forget(forget_param_linear& fp)
{
}

void tanh_module::normalize()
{
}

////////////////////////////////////////////////////////////////

addc_module::addc_module(parameter *p, intg size)
{
	bias = new state_idx(p, size);
}

addc_module::~addc_module()
{
	delete bias;
}

void addc_module::fprop(state_idx* in, state_idx* out)
{
	out->resize(bias->x.dim(0));
	idx_add(in->x, bias->x, out->x);
}

void addc_module::bprop(state_idx* in, state_idx* out)
{
	idx_copy(out->dx, in->dx);
	idx_copy(out->dx, bias->dx);
}

void addc_module::bbprop(state_idx* in, state_idx* out)
{
	idx_copy(out->ddx, in->ddx);
	idx_copy(out->ddx, bias->ddx);
}

void addc_module::forget(forget_param_linear& fp)
{
	idx_clear(bias->x);
}

void addc_module::normalize()
{
}

////////////////////////////////////////////////////////////////

nn_layer_full::nn_layer_full(parameter *p, intg ninputs, intg noutputs)
{
	linear = new linear_module(p, ninputs, noutputs);
	bias = new state_idx(p, noutputs);
	sum = new state_idx(noutputs);
	sigmoid = new tanh_module();
}

nn_layer_full::~nn_layer_full()
{
	delete sigmoid;
	delete sum;
	delete bias;
	delete linear;
}

void nn_layer_full::fprop(state_idx *in, state_idx *out)
{
	out->resize(bias->x.dim(0));
	linear->fprop(in, sum);
	idx_add(sum->x, bias->x, sum->x);
	sigmoid->fprop(sum, out);
}

void nn_layer_full::bprop(state_idx *in, state_idx *out)
{
	sigmoid->bprop(sum, out);
	idx_copy(sum->dx, bias->dx);
	linear->bprop(in, sum);
}

void nn_layer_full::bbprop(state_idx *in, state_idx *out)
{
	sigmoid->bbprop(sum, out);
	idx_copy(sum->ddx, bias->ddx);
	linear->bbprop(in, sum);
}

void nn_layer_full::forget(forget_param_linear &fp)
{
	linear->forget(fp);
	idx_clear(bias->x);
}

////////////////////////////////////////////////////////////////

f_layer::f_layer(parameter *p, intg tin, intg tout, intg si, intg sj,
		module_1_1<state_idx,state_idx> *sq)
{
	weight = new state_idx(p, tout, tin);
	bias = new state_idx(p, tout);
	sum = new state_idx(tout, si, sj);
	squash = sq;
}

f_layer::~f_layer()
{
	delete weight;
	delete bias;
	delete sum;
}

void f_layer::forget(forget_param_linear &fp)
{
	idx_clear(bias->x);
	// uniform init in [-z, z], scaled by fan-in raised to fp.exponent
	double z = fp.value / pow(weight->x.dim(1), fp.exponent);
	if (!drand_ini)
		printf("You have not initialized the random sequence. "
		       "Please call init_drand() before using this function!\n");
	idx_aloop1(w, weight->x, double) {
		*w = drand(z);
	}
}
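// ---------------------------------------------------------------
// [Illustrative sketch, not part of the original ebm.cpp] The three idx
// calls in tanh_module::bbprop above implement ddx_in = tanh'(x)^2 * ddx_out,
// the squared-Jacobian rule used for second-derivative backpropagation,
// where tanh'(x) = 1 - tanh(x)^2; the first-derivative (bprop) rule is
// dx_in = tanh'(x) * dx_out. A minimal self-contained version on plain
// arrays (demo_tanh_backprop is a hypothetical name, not library API):
#include <cmath>

static void demo_tanh_backprop(const double* x, const double* dy,
                               const double* ddy, double* dx, double* ddx,
                               int n)
{
	for (int i = 0; i < n; ++i) {
		double d = 1.0 - std::tanh(x[i]) * std::tanh(x[i]); // tanh'(x)
		dx[i]  = d * dy[i];       // bprop: chain rule through tanh
		ddx[i] = d * d * ddy[i];  // bbprop: squared Jacobian term
	}
}
// ---------------------------------------------------------------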
void f_layer::fprop(state_idx *in, state_idx *out)
{
	intg inx_d1 = in->x.dim(1);
	intg inx_d2 = in->x.dim(2);
	intg ws = weight->x.dim(0);
	// resize sum and output
	sum->resize(ws, inx_d1, inx_d2);
	out->resize(ws, inx_d1, inx_d2);
	// main matrix multiplication
	{
		int tr[] = { 2, 1, 0 };
		Idx<double> inx(in->x.transpose(tr));
		Idx<double> outx(sum->x.transpose(tr));
		// loop over spatial dimensions
		idx_bloop2(linx, inx, double, loutx, outx, double) {
			idx_bloop2(llinx, linx, double, lloutx, loutx, double) {
				// multiply weight matrix by input
				idx_m2dotm1(weight->x, llinx, lloutx);
			}
		}
	}
	// add bias
	{
		idx_bloop2(sumx, sum->x, double, biasx, bias->x, double) {
			idx_addc(sumx, biasx.get(), sumx);
		}
	}
	// call squashing function
	squash->fprop(sum, out);
}

void f_layer::bprop(state_idx *in, state_idx *out)
{
	// backprop through squasher
	squash->bprop(sum, out);
	// compute gradient of bias
	{
		idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {
			*(lb.ptr()) += idx_sum(lha);
		}
	}
	// backprop through weight matrix
	int tr[] = { 2, 1, 0 };
	Idx<double> inx(in->x.transpose(tr));
	Idx<double> indx(in->dx.transpose(tr));
	Idx<double> outdx(sum->dx.transpose(tr));
	Idx<double> tkerx(weight->x.transpose(0, 1));
	idx_bloop3(linx, inx, double, lindx, indx, double, loutdx, outdx, double) {
		idx_bloop3(llinx, linx, double, llindx, lindx, double, lloutdx, loutdx, double) {
			// accumulate weight gradient: dW += dout (outer product) in
			idx_m1extm1acc(lloutdx, llinx, weight->dx);
			// input gradient: din = W^T dout
			idx_m2dotm1(tkerx, lloutdx, llindx);
		}
	}
}

void f_layer::bbprop(state_idx *in, state_idx *out)
{
	// backprop through squasher
	squash->bbprop(sum, out);
	// compute second derivative of bias
	{
		idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double) {
			idx_sumacc(lha, lb);
		}
	}
	// backprop through weight matrix (squared versions of the bprop ops)
	int tr[] = { 2, 1, 0 };
	Idx<double> inx(in->x.transpose(tr));
	Idx<double> indx(in->ddx.transpose(tr));
	Idx<double> outdx(sum->ddx.transpose(tr));
	Idx<double> tkerx(weight->x.transpose(1, 0));
	idx_bloop3(linx, inx, double, lindx, indx, double, loutdx, outdx, double) {
		idx_bloop3(llinx, linx, double, llindx, lindx, double, lloutdx, loutdx, double) {
			idx_m1squextm1acc(lloutdx, llinx, weight->ddx);
			idx_m2squdotm1(tkerx, lloutdx, llindx);
		}
	}
}
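// ---------------------------------------------------------------
// [Illustrative sketch, not part of the original ebm.cpp] f_layer::fprop
// above applies one shared affine map at every spatial location of a
// (features x height x width) input: out[:,i,j] = squash(W * in[:,i,j] + b).
// A minimal self-contained version of that computation, assuming row-major
// arrays and a tanh squasher (demo_flayer_fprop is a hypothetical name):
#include <cmath>

static void demo_flayer_fprop(const double* in, int tin, int h, int w,
                              const double* W, /* tout x tin */
                              const double* b, /* tout */
                              double* out, int tout)
{
	for (int i = 0; i < h; ++i)
		for (int j = 0; j < w; ++j)
			for (int o = 0; o < tout; ++o) {
				double s = b[o];                  // bias
				for (int k = 0; k < tin; ++k)     // W * in[:,i,j]
					s += W[o * tin + k] * in[(k * h + i) * w + j];
				out[(o * h + i) * w + j] = std::tanh(s); // squash
			}
}
// ---------------------------------------------------------------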
////////////////////////////////////////////////////////////////

c_layer::c_layer(parameter *p, intg ki, intg kj, intg ri, intg rj, Idx<intg> *tbl,
		intg thick, intg si, intg sj, module_1_1<state_idx,state_idx> *sqsh)
{
	thickness = thick;
	stridei = ri;
	stridej = rj;
	kernel = new state_idx(p, tbl->dim(0), ki, kj);
	table = tbl;
	bias = new state_idx(p, thick);
	sum = new state_idx(thick, si, sj);
	squash = sqsh;
}

c_layer::~c_layer()
{
	delete kernel;
	delete bias;
	delete sum;
}

void c_layer::set_stride(intg ri, intg rj)
{
	stridei = ri;
	stridej = rj;
}

void c_layer::forget(forget_param_linear &fp)
{
	idx_clear(bias->x);
	Idx<double> kx(kernel->x);
	intg vsize = kx.dim(1);
	intg hsize = kx.dim(2);
	Idx<intg> ts(table->select(1, 1));
	Idx<int> fanin(1 + idx_max(ts));
	if (!drand_ini)
		printf("You have not initialized the random sequence. "
		       "Please call init_drand() before using this function!\n");
	idx_clear(fanin);
	// count the fan-in of every output feature map
	{
		idx_bloop1(tab, *table, intg) {
			fanin.set(1 + fanin.get(tab.get(1)), tab.get(1));
		}
	}
	// initialize each kernel uniformly in [-s, s], scaled by its fan-in
	{
		idx_bloop2(tab, *table, intg, x, kx, double) {
			double s = fp.value / pow((vsize * hsize * fanin.get(tab.get(1))), fp.exponent);
			idx_bloop1(lx, x, double) {
				idx_bloop1(llx, lx, double) {
					double n = drand(-s, s);
					llx.set(n);
				}
			}
		}
	}
}

void c_layer::fprop(state_idx *in, state_idx *out)
{
	intg ki = kernel->x.dim(1);
	intg kj = kernel->x.dim(2);
	intg sini = in->x.dim(1);
	intg sinj = in->x.dim(2);
	if (((sini - (ki - stridei)) % stridei != 0) ||
	    ((sinj - (kj - stridej)) % stridej != 0))
		ylerror("inconsistent input size, kernel size, and subsampling ratio.");
	if ((stridei != 1) || (stridej != 1))
		ylerror("stride > 1 not implemented yet.");
	Idx<double> uuin(in->x.unfold(1, ki, stridei));
	uuin = uuin.unfold(2, kj, stridej);
	Idx<double> lki(kernel->x.dim(1), kernel->x.dim(2));
	// resize output if necessary
	sum->resize(thickness, uuin.dim(1), uuin.dim(2));
	out->resize(thickness, uuin.dim(1), uuin.dim(2));
	idx_clear(sum->x);
	// generic convolution: each table row connects input slice table[k][0]
	// to output slice table[k][1] through kernel k
	{
		idx_bloop2(lk, kernel->x, double, lt, *table, intg) {
			Idx<double> suin(uuin.select(0, lt.get(0)));
			Idx<double> sout((sum->x).select(0, lt.get(1)));
			idx_m4dotm2acc(suin, lk, sout);
		}
	}
	// add bias
	{
		idx_bloop3(sumx, sum->x, double, biasx, bias->x, double,
			   outx, out->x, double) {
			idx_addc(sumx, biasx.get(), sumx);
		}
	}
	// call squashing function
	squash->fprop(sum, out);
}

void c_layer::bprop(state_idx *in, state_idx *out)
{
	// backprop gradient through squasher
	squash->bprop(sum, out);
	// compute gradient of bias
	{
		idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {
			idx_sumacc(lha, lb);
		}
	}
	// backprop through convolution
	idx_clear(in->dx);
	Idx<double> uuin(in->dx.unfold(1, (kernel->dx).dim(1), stridei));
	uuin = uuin.unfold(2, (kernel->dx).dim(2), stridej);
	Idx<double> uuinf(in->x.unfold(1, (kernel->dx).dim(1), stridei));
	uuinf = uuinf.unfold(2, (kernel->dx).dim(2), stridej);
	int transp[5] = { 0, 3, 4, 1, 2 };
	Idx<double> borp(uuinf.transpose(transp));
	{
		idx_bloop3(lk, kernel->dx, double, lkf, kernel->x, double, lt, *table, intg) {
			intg islice = lt.get(0);
			Idx<double> suin(uuin.select(0, islice));
			Idx<double> sborp(borp.select(0, islice));
			Idx<double> sout((sum->dx).select(0, lt.get(1)));
			// backward convolution
			idx_m2extm2acc(sout, lkf, suin);
			// compute gradient for kernel
			idx_m4dotm2acc(sborp, sout, lk);
		}
	}
}
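// ---------------------------------------------------------------
// [Illustrative sketch, not part of the original ebm.cpp] c_layer::fprop
// above performs a "valid" 2D convolution driven by a connection table:
// each table row (is, os) accumulates input slice is, filtered by one
// kernel, into output slice os. A minimal self-contained, stride-1 version
// of one such row (demo_conv2d_valid_acc is a hypothetical name):
static void demo_conv2d_valid_acc(const double* in, int ih, int iw,
                                  const double* k, int kh, int kw,
                                  double* out /* (ih-kh+1) x (iw-kw+1) */)
{
	int oh = ih - kh + 1, ow = iw - kw + 1;
	for (int i = 0; i < oh; ++i)
		for (int j = 0; j < ow; ++j) {
			double s = 0.0;
			for (int a = 0; a < kh; ++a)      // slide kernel window
				for (int b = 0; b < kw; ++b)
					s += k[a * kw + b] * in[(i + a) * iw + (j + b)];
			out[i * ow + j] += s;             // accumulate, like idx_m4dotm2acc
		}
}
// ---------------------------------------------------------------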
void c_layer::bbprop(state_idx *in, state_idx *out)
{
	// backprop gradient through squasher
	squash->bbprop(sum, out);
	// compute second derivative of bias
	{
		idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double) {
			idx_sumacc(lha, lb);
		}
	}
	// backprop through convolution (squared versions of the bprop ops)
	idx_clear(in->ddx);
	Idx<double> uuin(in->ddx.unfold(1, (kernel->ddx).dim(1), stridei));
	uuin = uuin.unfold(2, (kernel->ddx).dim(2), stridej);
	Idx<double> uuinf(in->x.unfold(1, (kernel->ddx).dim(1), stridei));
	uuinf = uuinf.unfold(2, (kernel->ddx).dim(2), stridej);
	int transp[5] = { 0, 3, 4, 1, 2 };
	Idx<double> borp(uuinf.transpose(transp));
	{
		idx_bloop3(lk, kernel->ddx, double, lkf, kernel->x, double, lt, *table, intg) {
			intg islice = lt.get(0);
			Idx<double> suin(uuin.select(0, islice));
			Idx<double> sborp(borp.select(0, islice));
			Idx<double> sout((sum->ddx).select(0, lt.get(1)));
			// backward convolution
			idx_m2squextm2acc(sout, lkf, suin);
			// compute second derivative for kernel
			idx_m4squdotm2acc(sborp, sout, lk);
		}
	}
}

////////////////////////////////////////////////////////////////

#ifdef USE_IPP
// TODO: Copy IPP in project
// TODO: ipp 64 for doubles?
void c_layer_ipp::fprop(state_idx *in, state_idx *out)
{
	intg ki = kernel->x.dim(1);
	intg kj = kernel->x.dim(2);
	intg sini = in->x.dim(1);
	intg sinj = in->x.dim(2);
	if (((sini - (ki - stridei)) % stridei != 0) ||
	    ((sinj - (kj - stridej)) % stridej != 0))
		ylerror("inconsistent input size, kernel size, and subsampling ratio.");
	if ((stridei != 1) || (stridej != 1))
		ylerror("stride > 1 not implemented yet.");
	Idx<double> uuin = in->x.unfold(1, ki, stridei);
	uuin = uuin.spec.unfold_inplace(2, kj, stridej);
	Idx<double> lki = Idx<double>(kernel->x.dim(1), kernel->x.dim(2));
	// resize output if necessary
	sum->resize(thickness, uuin.dim(1), uuin.dim(2));
	out->resize(thickness, uuin.dim(1), uuin.dim(2));
	idx_clear(sum->x);
	// generic convolution
	Idx<double> tout = Idx<double>(sum->x.dim(1), sum->x.dim(2));
	{
		idx_bloop2(lk, kernel->x, double, lt, *table, intg) {
			rev_idx2_tr(*lk, lki);
			// ipp_convolution_float(in->x.select(0, lt.get(0)), lki, tout);
			// ipp_add_float(tout, sum->x.select(0, lt.get(1)));
		}
	}
	// add bias
	{
		idx_bloop3(sumx, sum->x, double, biasx, bias->x, double,
			   outx, out->x, double) {
			// ipp_addc_nip_float(sumx, biasx.get(), outx);
		}
	}
	// call squashing function
	squash->fprop(sum, out);
}

void c_layer_ipp::bprop(state_idx *in, state_idx *out)
{
	// backprop gradient through squasher
	squash->bprop(sum, out);
	// compute gradient of bias
	{
		idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {
			idx_sumacc(lha, lb);
		}
	}
	// backprop through convolution
	idx_clear(in->dx);
	/* Lisp reference implementation kept from the original source:
	(let* ((ki (idx-dim :kernel:dx 1))
	       (kj (idx-dim :kernel:dx 2))
	       (ini (idx-dim :in:dx 1))
	       (inj (idx-dim :in:dx 2))
	       (outi (idx-dim :out:dx 1))
	       (outj (idx-dim :out:dx 2))
	       (sumi (idx-dim :sum:dx 1))
	       (sumj (idx-dim :sum:dx 2))
	       (souti (gbtype-matrix sumi sumj))
	       (tout (gbtype-matrix ki kj)))
	  (idx-bloop ((lk :kernel:dx) (lkf :kernel:x) (lt table))
	    (let* ((islice (lt 0))
	           (sout (select :sum:dx 0 (lt 1))))
	      ;; backward convolution
	      (ipp-convolution-full-float sout lkf (select :in:dx 0 islice))
	      ;; compute gradient for kernel
	      (rev-idx2-tr-float sout souti)
	      (ipp-convolution-float (select :in:x 0 islice) souti tout)
	      (ipp-add-float tout lk))))
	*/
}
#endif

////////////////////////////////////////////////////////////////

// build a full connection table between a input slices and b output slices
Idx<intg> full_table(intg a, intg b)
{
	Idx<intg> m(a * b, 2);
	intg p = 0;
	for (intg j = 0; j < b; ++j) {
		for (intg i = 0; i < a; ++i) {
			m.set(i, p, 0);
			m.set(j, p, 1);
			p++;
		}
	}
	return m;
}
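// ---------------------------------------------------------------
// [Illustrative sketch, not part of the original ebm.cpp] full_table(a, b)
// above enumerates every (input, output) pair, so full_table(2, 3) yields
// the rows (0,0) (1,0) (0,1) (1,1) (0,2) (1,2): a fully-connected layer
// expressed in table form. The same enumeration on a plain vector of pairs
// (demo_full_table is a hypothetical name):
#include <vector>
#include <utility>

static std::vector<std::pair<long, long> > demo_full_table(long a, long b)
{
	std::vector<std::pair<long, long> > m;
	m.reserve(a * b);
	for (long j = 0; j < b; ++j)          // every output slice j ...
		for (long i = 0; i < a; ++i)  // ... sees every input slice i
			m.push_back(std::make_pair(i, j));
	return m;
}
// ---------------------------------------------------------------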
ylerror("inconsistent input size and subsampleing ratio");	sub->resize(sin_t, si, sj);	sum->resize(sin_t, si, sj);	out->resize(sin_t, si, sj);	// 1. subsampling ( coeff * average )	idx_clear(sub->x);	{ idx_bloop4(lix, in->x, double, lsx, sub->x, double,			lcx, coeff->x, double, ltx, sum->x, double) {		Idx<double> uuin(lix->unfold(1, stridej, stridej));		uuin = uuin.unfold(0, stridei, stridei);		{ idx_eloop1(z1, uuin, double) {			{ idx_eloop1(z2, z1, double) {				idx_add(z2, lsx, lsx);			 }			}		 }		}		idx_dotc(lsx, lcx.get(), ltx);	 }	}	// 2. add bias	{ idx_bloop3(sumx, sum->x, double, biasx, bias->x, double,			outx, out->x, double) {		idx_addc(sumx, biasx.get(), sumx);	 }	}	// 3. call squashing function	squash->fprop(sum, out);}void s_layer::bprop(state_idx *in, state_idx *out){	// 1.	squash->bprop(sum, out);	// 2.	{ idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {		idx_sumacc(lha, lb);	}}	// 3.	{ idx_bloop3(lcdx, coeff->dx, double, ltdx, sum->dx, double,			lsx, sub->x, double) {		idx_dotacc(lsx, ltdx, lcdx);	}}	// 4.	{ idx_bloop4(lidx, in->dx, double, lsdx, sub->dx, double,			lcx, coeff->x, double, ltdx2, sum->dx, double) {		idx_dotc(ltdx2, lcx.get(), lsdx);		idx_m2oversample(lsdx, stridei, stridej, lidx);	}}}void s_layer::bbprop(state_idx *in, state_idx *out){	// 1.	squash->bbprop(sum, out);	// 2.	{ idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double) {		idx_sumacc(lha, lb);	}}	// 3.	{ idx_bloop3(lcdx, coeff->ddx, double, ltdx, sum->ddx, double,			lsx, sub->x, double) {		idx_m2squdotm2acc(lsx, ltdx, lcdx);	}}	// 4.	{ idx_bloop4(lidx, in->ddx, double, lsdx, sub->ddx, double,			lcx, coeff->x, double, ltdx2, sum->ddx, double) {		double cf = lcx.get();		idx_dotc(ltdx2, cf * cf, lsdx);		idx_m2oversample(lsdx, stridei, stridej, lidx);	}}}void s_layer::forget(forget_param_linear &fp) {	double c = fp.value / pow(stridei * stridej, fp.exponent);	idx_clear(bias->x);	idx_fill(coeff->x, c);}////////////////////////////////////////////////////////////////////////logadd_layer::logadd_layer(intg thick, intg si, intg sj) {  expdist = Idx<double>(thick, si, sj);  sumexp = Idx<double>(thick);		// scaled partition function}void logadd_layer::fprop(state_idx *in, state_idx *out) {  intg thick = in->x.dim(0);	intg si = in->x.dim(1);	intg sj = in->x.dim(2);	expdist.resize(thick, si, sj);  out->x.resize(thick);	if (1 == (si * sj)) {		// save time and precision if no replication		Idx<double> inx(in->x.select(2, 0));		Idx<double> m(inx.select(1, 0));		Idx<double> ed(expdist.select(2, 0));		Idx<double> ed1(ed.select(1, 0));		idx_fill(ed1, 1.0);		idx_fill(sumexp, 1.0);
