ebm.cpp
Gaussian Mixture Algorithm
CPP
Page 1 of 3
void tanh_module::bprop(state_idx *in, state_idx *out)
{
  idx_dtanh(in->x, in->dx);
  idx_mul(in->dx, out->dx, in->dx);
}

void tanh_module::bbprop(state_idx *in, state_idx *out)
{
  idx_dtanh(in->x, in->ddx);
  idx_mul(in->ddx, in->ddx, in->ddx);
  idx_mul(in->ddx, out->ddx, in->ddx);
}

void tanh_module::forget(forget_param_linear &fp)
{
}

void tanh_module::normalize()
{
}

////////////////////////////////////////////////////////////////

addc_module::addc_module(parameter *p, intg size)
{
  bias = new state_idx(p, size);
}

addc_module::~addc_module()
{
  delete bias;
}

void addc_module::fprop(state_idx *in, state_idx *out)
{
  out->resize(bias->x.dim(0));
  idx_add(in->x, bias->x, out->x);
}

void addc_module::bprop(state_idx *in, state_idx *out)
{
  idx_copy(out->dx, in->dx);
  idx_copy(out->dx, bias->dx);
}

void addc_module::bbprop(state_idx *in, state_idx *out)
{
  idx_copy(out->ddx, in->ddx);
  idx_copy(out->ddx, bias->ddx);
}

void addc_module::forget(forget_param_linear &fp)
{
  idx_clear(bias->x);
}

void addc_module::normalize()
{
}

////////////////////////////////////////////////////////////////

nn_layer_full::nn_layer_full(parameter *p, intg ninputs, intg noutputs)
{
  linear = new linear_module(p, ninputs, noutputs);
  bias = new state_idx(p, noutputs);
  sum = new state_idx(noutputs);
  sigmoid = new tanh_module();
}

nn_layer_full::~nn_layer_full()
{
  delete sigmoid;
  delete sum;
  delete bias;
  delete linear;
}

void nn_layer_full::fprop(state_idx *in, state_idx *out)
{
  out->resize(bias->x.dim(0));
  linear->fprop(in, sum);
  idx_add(sum->x, bias->x, sum->x);
  sigmoid->fprop(sum, out);
}

void nn_layer_full::bprop(state_idx *in, state_idx *out)
{
  sigmoid->bprop(sum, out);
  idx_copy(sum->dx, bias->dx);
  linear->bprop(in, sum);
}

void nn_layer_full::bbprop(state_idx *in, state_idx *out)
{
  sigmoid->bbprop(sum, out);
  idx_copy(sum->ddx, bias->ddx);
  linear->bbprop(in, sum);
}

void nn_layer_full::forget(forget_param_linear &fp)
{
  linear->forget(fp);
  idx_clear(bias->x);
}

////////////////////////////////////////////////////////////////

f_layer::f_layer(parameter *p, intg tin, intg tout, intg si, intg sj,
                 module_1_1<state_idx, state_idx> *sq)
{
  weight = new state_idx(p, tout, tin);
  bias = new state_idx(p, tout);
  sum = new state_idx(tout, si, sj);
  squash = sq;
}

f_layer::~f_layer()
{
  delete weight;
  delete bias;
  delete sum;
}

void f_layer::forget(forget_param_linear &fp)
{
  idx_clear(bias->x);
  double z = fp.value / pow(weight->x.dim(1), fp.exponent);
  if (!drand_ini)
    printf("You have not initialized random sequence. Please call init_drand() before using this function!\n");
  idx_aloop1(w, weight->x, double) {
    *w = drand(z);
  }
}

void f_layer::fprop(state_idx *in, state_idx *out)
{
  intg inx_d1 = in->x.dim(1);
  intg inx_d2 = in->x.dim(2);
  intg ws = weight->x.dim(0);
  // resize sum and output
  sum->resize(ws, inx_d1, inx_d2);
  out->resize(ws, inx_d1, inx_d2);
  // main matrix multiplication
  {
    int tr[] = { 2, 1, 0 };
    Idx<double> inx(in->x.transpose(tr));
    Idx<double> outx(sum->x.transpose(tr));
    // loop over spatial dimensions
    idx_bloop2(linx, inx, double, loutx, outx, double) {
      idx_bloop2(llinx, linx, double, lloutx, loutx, double) {
        // fprintf(stdout, "f_layer::fprop\n");
        // weight->x.pretty(stdout);
        // weight->x.fdump(stdout);
        // llinx.pretty(stdout);
        // llinx.fdump(stdout);
        // lloutx.pretty(stdout);
        // multiply weight matrix by input
        idx_m2dotm1(weight->x, llinx, lloutx);
        // lloutx.pretty(stdout);
        // lloutx.fdump(stdout);
      }
    }
  }
  { // add bias
    // fprintf(stdout, "f_layer::fprop adding bias\n");
    // bias->x.pretty(stdout);
    // bias->x.fdump(stdout);
    idx_bloop2(sumx, sum->x, double, biasx, bias->x, double) {
      idx_addc(sumx, biasx.get(), sumx);
    }
    // sum->x.pretty(stdout);
    // sum->x.fdump(stdout);
  }
  // call squashing function
  squash->fprop(sum, out);
}

void f_layer::bprop(state_idx *in, state_idx *out)
{
  // backprop through squasher
  squash->bprop(sum, out);
  // compute gradient of bias
  {
    idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {
      *(lb.ptr()) += idx_sum(lha);
    }
  }
  // backprop through weight matrix
  int tr[] = { 2, 1, 0 };
  Idx<double> inx(in->x.transpose(tr));
  Idx<double> indx(in->dx.transpose(tr));
  Idx<double> outdx(sum->dx.transpose(tr));
  Idx<double> tkerx(weight->x.transpose(0, 1));
  { idx_bloop3(linx, inx, double, lindx, indx, double, loutdx, outdx, double) {
      { idx_bloop3(llinx, linx, double, llindx, lindx, double, lloutdx, loutdx, double) {
          idx_m1extm1acc(lloutdx, llinx, weight->dx);
          idx_m2dotm1(tkerx, lloutdx, llindx);
          // direct call because idx_m2dotm1 didn't use to work on transposed matrices
          // cblas_dgemv(CblasRowMajor, CblasTrans, weight->x.dim(0), weight->x.dim(1),
          //             1.0, weight->x.idx_ptr(), weight->x.mod(0),
          //             lloutdx.idx_ptr(), lloutdx.mod(0),
          //             0.0, llindx.idx_ptr(), llindx.mod(0));
      }}
  }}
}

void f_layer::bbprop(state_idx *in, state_idx *out)
{
  // backprop through squasher
  squash->bbprop(sum, out);
  // compute gradient of bias
  {
    idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double) {
      idx_sumacc(lha, lb);
    }
  }
  // backprop through weight matrix
  int tr[] = { 2, 1, 0 };
  Idx<double> inx(in->x.transpose(tr));
  Idx<double> indx(in->ddx.transpose(tr));
  Idx<double> outdx(sum->ddx.transpose(tr));
  Idx<double> tkerx(weight->x.transpose(1, 0));
  idx_bloop3(linx, inx, double, lindx, indx, double, loutdx, outdx, double) {
    idx_bloop3(llinx, linx, double, llindx, lindx, double, lloutdx, loutdx, double) {
      idx_m1squextm1acc(lloutdx, llinx, weight->ddx);
      idx_m2squdotm1(tkerx, lloutdx, llindx);
    }
  }
}

////////////////////////////////////////////////////////////////

c_layer::c_layer(parameter *p, intg ki, intg kj, intg ri, intg rj, Idx<intg> *tbl,
                 intg thick, intg si, intg sj, module_1_1<state_idx, state_idx> *sqsh)
{
  thickness = thick;
  stridei = ri;
  stridej = rj;
  kernel = new state_idx(p, tbl->dim(0), ki, kj);
  table = tbl;
  bias = new state_idx(p, thick);
  sum = new state_idx(thick, si, sj);
  squash = sqsh;
}

c_layer::~c_layer()
{
  delete kernel;
  delete bias;
  delete sum;
}

void c_layer::set_stride(intg ri, intg rj)
{
  stridei = ri;
  stridej = rj;
}

void c_layer::forget(forget_param_linear &fp)
{
  idx_clear(bias->x);
  Idx<double> kx(kernel->x);
  intg vsize = kx.dim(1);
  intg hsize = kx.dim(2);
  Idx<intg> ts(table->select(1, 1));
  Idx<int> fanin(1 + idx_max(ts));
  if (!drand_ini)
    printf("You have not initialized random sequence. Please call init_drand() before using this function!\n");
  idx_clear(fanin);
  { idx_bloop1(tab, *table, intg) {
      fanin.set(1 + fanin.get(tab.get(1)), tab.get(1));
  }}
  { idx_bloop2(tab, *table, intg, x, kx, double) {
      double s = fp.value / pow((vsize * hsize * fanin.get(tab.get(1))), fp.exponent);
      { idx_bloop1(lx, x, double) {
          { idx_bloop1(llx, lx, double) {
              double n = drand(-s, s);
              llx.set(n);
          }}
      }}
  }}
}

void c_layer::fprop(state_idx *in, state_idx *out)
{
  intg ki = kernel->x.dim(1);
  intg kj = kernel->x.dim(2);
  intg sini = in->x.dim(1);
  intg sinj = in->x.dim(2);
  if (((sini - (ki - stridei)) % stridei != 0) ||
      ((sinj - (kj - stridej)) % stridej != 0))
    ylerror("inconsistent input size, kernel size, and subsampling ratio.");
  if ((stridei != 1) || (stridej != 1))
    ylerror("stride > 1 not implemented yet.");
  Idx<double> uuin(in->x.unfold(1, ki, stridei));
  uuin = uuin.unfold(2, kj, stridej);
  Idx<double> lki(kernel->x.dim(1), kernel->x.dim(2));
  // resize output if necessary
  sum->resize(thickness, uuin.dim(1), uuin.dim(2));
  out->resize(thickness, uuin.dim(1), uuin.dim(2));
  idx_clear(sum->x);
  // generic convolution
  { idx_bloop2(lk, kernel->x, double, lt, *table, intg) {
      Idx<double> suin(uuin.select(0, lt.get(0)));
      Idx<double> sout((sum->x).select(0, lt.get(1)));
      idx_m4dotm2acc(suin, lk, sout);
  }}
  // add bias
  { idx_bloop3(sumx, sum->x, double, biasx, bias->x, double,
               outx, out->x, double) {
      idx_addc(sumx, biasx.get(), sumx);
  }}
  // call squashing function
  squash->fprop(sum, out);
}

void c_layer::bprop(state_idx *in, state_idx *out)
{
  // backprop gradient through squasher
  squash->bprop(sum, out);
  // compute gradient of bias
  { idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {
      idx_sumacc(lha, lb);
  }}
  // backprop through convolution
  idx_clear(in->dx);
  Idx<double> uuin(in->dx.unfold(1, (kernel->dx).dim(1), stridei));
  uuin = uuin.unfold(2, (kernel->dx).dim(2), stridej);
  Idx<double> uuinf(in->x.unfold(1, (kernel->dx).dim(1), stridei));
  uuinf = uuinf.unfold(2, (kernel->dx).dim(2), stridej);
  int transp[5] = { 0, 3, 4, 1, 2 };
  Idx<double> borp(uuinf.transpose(transp));
  { idx_bloop3(lk, kernel->dx, double, lkf, kernel->x, double, lt, *table, intg) {
      intg islice = lt.get(0);
      Idx<double> suin(uuin.select(0, islice));
      Idx<double> sborp(borp.select(0, islice));
      Idx<double> sout((sum->dx).select(0, lt.get(1)));
      // backward convolution
      idx_m2extm2acc(sout, lkf, suin);
      // compute gradient for kernel
      idx_m4dotm2acc(sborp, sout, lk);
  }}
}

void c_layer::bbprop(state_idx *in, state_idx *out)
{
  // backprop gradient through squasher
  squash->bbprop(sum, out);
  // compute gradient of bias
  { idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double) {
      idx_sumacc(lha, lb);
  }}
  // backprop through convolution
  idx_clear(in->ddx);
  Idx<double> uuin(in->ddx.unfold(1, (kernel->ddx).dim(1), stridei));
  uuin = uuin.unfold(2, (kernel->ddx).dim(2), stridej);
  Idx<double> uuinf(in->x.unfold(1, (kernel->ddx).dim(1), stridei));
  uuinf = uuinf.unfold(2, (kernel->ddx).dim(2), stridej);
  int transp[5] = { 0, 3, 4, 1, 2 };
  Idx<double> borp(uuinf.transpose(transp));
  { idx_bloop3(lk, kernel->ddx, double, lkf, kernel->x, double, lt, *table, intg) {
      intg islice = lt.get(0);
      Idx<double> suin(uuin.select(0, islice));
      Idx<double> sborp(borp.select(0, islice));
      Idx<double> sout((sum->ddx).select(0, lt.get(1)));
      // backward convolution
      idx_m2squextm2acc(sout, lkf, suin);
      // compute gradient for kernel
      idx_m4squdotm2acc(sborp, sout, lk);
  }}
}

////////////////////////////////////////////////////////////////

#ifdef USE_IPP
// TODO: Copy IPP in project
// TODO: ipp 64 for doubles?
void c_layer_ipp::fprop(state_idx *in, state_idx *out)
{
  intg ki = kernel->x.dim(1);
  intg kj = kernel->x.dim(2);
  intg sini = in->x.dim(1);
  intg sinj = in->x.dim(2);
  if (((sini - (ki - stridei)) % stridei != 0) ||
      ((sinj - (kj - stridej)) % stridej != 0))
    ylerror("inconsistent input size, kernel size, and subsampling ratio.");
  if ((stridei != 1) || (stridej != 1))
    ylerror("stride > 1 not implemented yet.");
  Idx<double> uuin = in->x.unfold(1, ki, stridei);
  uuin = uuin.spec.unfold_inplace(2, kj, stridej);
  Idx<double> lki = Idx<double>(kernel->x.dim(1), kernel->x.dim(2));
  // resize output if necessary
  sum->resize(thickness, uuin.dim(1), uuin.dim(2));
  out->resize(thickness, uuin.dim(1), uuin.dim(2));
  idx_clear(sum->x);
  // generic convolution
  Idx<double> tout = Idx<double>(sum->x.dim(1), sum->x.dim(2));
  { idx_bloop2(lk, kernel->x, double, lt, *table, intg) {
      rev_idx2_tr(*lk, lki);
      // ipp_convolution_float(in->x.select(0, lt.get(0)), lki, tout);
      // ipp_add_float(tout, sum->x.select(0, lt.get(1)));
  }}
  // add bias
  { idx_bloop3(sumx, sum->x, double, biasx, bias->x, double,
               outx, out->x, double) {
      // ipp_addc_nip_float(sumx, biasx.get(), outx);
  }}
  // call squashing function
  squash->fprop(sum, out);
}

void c_layer_ipp::bprop(state_idx *in, state_idx *out)
{
  // backprop gradient through squasher
  squash->bprop(sum, out);
  // compute gradient of bias
  { idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {
      idx_sumacc(lha, lb);
  }}
  // backprop through convolution
  idx_clear(in->dx);
  /* Lisp reference implementation kept from the original source:
  (let* ((ki (idx-dim :kernel:dx 1))
         (kj (idx-dim :kernel:dx 2))
         (ini (idx-dim :in:dx 1))
         (inj (idx-dim :in:dx 2))
         (outi (idx-dim :out:dx 1))
         (outj (idx-dim :out:dx 2))
         (sumi (idx-dim :sum:dx 1))
         (sumj (idx-dim :sum:dx 2))
         (souti (gbtype-matrix sumi sumj))
         (tout (gbtype-matrix ki kj)))
    (idx-bloop ((lk :kernel:dx) (lkf :kernel:x) (lt table))
      (let* ((islice (lt 0))
             (sout (select :sum:dx 0 (lt 1))))
        ;; backward convolution
        (ipp-convolution-full-float sout lkf (select :in:dx 0 islice))
        ;; compute gradient for kernel
        (rev-idx2-tr-float sout souti)
        (ipp-convolution-float (select :in:x 0 islice) souti tout)
        (ipp-add-float tout lk))))
  */
}
#endif

////////////////////////////////////////////////////////////////

Idx<intg> full_table(intg a, intg b)
{
  Idx<intg> m(a * b, 2);
  intg p = 0;
  for (intg j = 0; j < b; ++j) {
    for (intg i = 0; i < a; ++i) {
      m.set(i, p, 0);
      m.set(j, p, 1);
      p++;
    }
  }
  return m;
}

////////////////////////////////////////////////////////////////

s_layer::s_layer(parameter *p, intg ki, intg kj, intg thick, intg si, intg sj,
                 module_1_1<state_idx, state_idx> *sqsh)
  : stridei(ki), stridej(kj), squash(sqsh)
{
  coeff = new state_idx(p, thick);
  bias = new state_idx(p, thick);
  sub = new state_idx(thick, si, sj);
  sum = new state_idx(thick, si, sj);
}

s_layer::~s_layer()
{
  delete coeff;
  delete bias;
  delete sub;
  delete sum;
}

void s_layer::fprop(state_idx *in, state_idx *out)
{
  intg sin_t = in->x.dim(0);
  intg sin_i = in->x.dim(1);
  intg sin_j = in->x.dim(2);
  intg si = sin_i / stridei;
  intg sj = sin_j / stridej;
  if ((sin_i % stridei) != 0 || (sin_j % stridej) != 0)
    ylerror("inconsistent input size and subsampling ratio");
  sub->resize(sin_t, si, sj);
  sum->resize(sin_t, si, sj);
  out->resize(sin_t, si, sj);
  // 1. subsampling (coeff * average)
  idx_clear(sub->x);
  { idx_bloop4(lix, in->x, double, lsx, sub->x, double,
               lcx, coeff->x, double, ltx, sum->x, double) {
      Idx<double> uuin(lix.unfold(1, stridej, stridej));
      uuin = uuin.unfold(0, stridei, stridei);
      { idx_eloop1(z1, uuin, double) {
          { idx_eloop1(z2, z1, double) {
              idx_add(z2, lsx, lsx);
          }}
      }}
      idx_dotc(lsx, lcx.get(), ltx);
  }}
  // 2. add bias
  { idx_bloop3(sumx, sum->x, double, biasx, bias->x, double,
               outx, out->x, double) {
      idx_addc(sumx, biasx.get(), sumx);
  }}
  // 3. call squashing function
  squash->fprop(sum, out);
}

void s_layer::bprop(state_idx *in, state_idx *out)
{
  // 1.
  squash->bprop(sum, out);
  // 2.
  { idx_bloop2(lha, sum->dx, double, lb, bias->dx, double) {
      idx_sumacc(lha, lb);
  }}
  // 3.
  { idx_bloop3(lcdx, coeff->dx, double, ltdx, sum->dx, double,
               lsx, sub->x, double) {
      idx_dotacc(lsx, ltdx, lcdx);
  }}
  // 4.
  { idx_bloop4(lidx, in->dx, double, lsdx, sub->dx, double,
               lcx, coeff->x, double, ltdx2, sum->dx, double) {
      idx_dotc(ltdx2, lcx.get(), lsdx);
      idx_m2oversample(lsdx, stridei, stridej, lidx);
  }}
}

void s_layer::bbprop(state_idx *in, state_idx *out)
{
  // 1.
  squash->bbprop(sum, out);
  // 2.
  { idx_bloop2(lha, sum->ddx, double, lb, bias->ddx, double) {
      idx_sumacc(lha, lb);
  }}
  // 3.
  { idx_bloop3(lcdx, coeff->ddx, double, ltdx, sum->ddx, double,
               lsx, sub->x, double) {
      idx_m2squdotm2acc(lsx, ltdx, lcdx);
  }}
  // 4.
  { idx_bloop4(lidx, in->ddx, double, lsdx, sub->ddx, double,
               lcx, coeff->x, double, ltdx2, sum->ddx, double) {
      double cf = lcx.get();
      idx_dotc(ltdx2, cf * cf, lsdx);
      idx_m2oversample(lsdx, stridei, stridej, lidx);
  }}
}

void s_layer::forget(forget_param_linear &fp)
{
  double c = fp.value / pow(stridei * stridej, fp.exponent);
  idx_clear(bias->x);
  idx_fill(coeff->x, c);
}

////////////////////////////////////////////////////////////////

logadd_layer::logadd_layer(intg thick, intg si, intg sj)
{
  expdist = Idx<double>(thick, si, sj);
  sumexp = Idx<double>(thick);  // scaled partition function
}

void logadd_layer::fprop(state_idx *in, state_idx *out)
{
  intg thick = in->x.dim(0);

(End of page 1 of 3; logadd_layer::fprop continues on the next page.)
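Because the listing depends on headers not shown here (Idx, state_idx, and the idx_* kernels), a standalone sketch may help clarify what f_layer::fprop computes at each spatial location: sum = weight * input + bias, then the squasher. The helper below is hypothetical, written in standard C++ purely for illustration, with std::tanh standing in for the squash module in the same role tanh_module plays above.

// Standalone illustration (standard C++ only) of the per-location arithmetic
// in f_layer::fprop: sum = weight * input + bias, out = tanh(sum).
// f_layer_fprop_demo is a hypothetical name; the real layer runs this over
// every spatial position of a 3D state via the nested idx_bloop2 loops.
#include <cmath>
#include <cstdio>
#include <vector>

static std::vector<double> f_layer_fprop_demo(
    const std::vector<std::vector<double>> &weight,  // tout x tin
    const std::vector<double> &input,                // tin
    const std::vector<double> &bias) {               // tout
  std::vector<double> out(weight.size());
  for (size_t t = 0; t < weight.size(); ++t) {
    double sum = bias[t];                  // idx_addc(sumx, biasx.get(), sumx)
    for (size_t i = 0; i < input.size(); ++i)
      sum += weight[t][i] * input[i];      // idx_m2dotm1(weight->x, llinx, lloutx)
    out[t] = std::tanh(sum);               // squash->fprop(sum, out)
  }
  return out;
}

int main() {
  std::vector<std::vector<double>> w = {{0.5, -0.25}, {1.0, 0.75}};
  std::vector<double> x = {1.0, 2.0}, b = {0.1, -0.1};
  for (double v : f_layer_fprop_demo(w, x, b)) std::printf("%f\n", v);
  return 0;
}

The transposes in the real fprop exist only to bring the two spatial dimensions outermost so the inner loop can apply this same matrix-vector product at each (i, j) position.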
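full_table above fills an (a*b) x 2 Idx in which row p holds the pair (input slice, output slice); a fully connected table feeds every input feature map into every output map of the convolution layer. A minimal standard-C++ sketch of the same layout follows (full_table_demo is a hypothetical stand-in, not a library function):

// Self-contained sketch mirroring the layout produced by full_table():
// one <input, output> row per connection, enumerated input-major.
#include <cstdio>
#include <utility>
#include <vector>

static std::vector<std::pair<long, long>> full_table_demo(long a, long b) {
  std::vector<std::pair<long, long>> m;
  m.reserve(static_cast<size_t>(a * b));
  for (long j = 0; j < b; ++j)
    for (long i = 0; i < a; ++i)
      m.emplace_back(i, j);  // column 0 = input slice, column 1 = output slice
  return m;
}

int main() {
  // A fully connected table for 2 inputs and 3 outputs has 6 rows.
  for (const auto &row : full_table_demo(2, 3))
    std::printf("in %ld -> out %ld\n", row.first, row.second);
  return 0;
}

c_layer::fprop walks these rows in lockstep with the kernels, reading from input slice lt.get(0) and accumulating into output slice lt.get(1), so a sparser connection table can be swapped in without changing the layer code.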
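s_layer::fprop subsamples by summing each stridei x stridej window, scaling the sum by a learned per-feature coefficient, adding a bias, and squashing. A self-contained sketch of that arithmetic for a single feature map, under the same tanh-squasher assumption as above (names hypothetical; the real code expresses the window sums with unfold and idx_eloop1):

// Standalone sketch (standard C++ only) of s_layer::fprop for one feature map:
// each stridei x stridej window is summed (sub->x), scaled by the feature's
// coefficient and biased (sum->x), then squashed.
#include <cmath>
#include <cstdio>
#include <vector>

typedef std::vector<std::vector<double>> Map;

static Map s_layer_fprop_demo(const Map &in, int stridei, int stridej,
                              double coeff, double bias) {
  size_t si = in.size() / stridei, sj = in[0].size() / stridej;
  Map out(si, std::vector<double>(sj));
  for (size_t i = 0; i < si; ++i)
    for (size_t j = 0; j < sj; ++j) {
      double sub = 0.0;  // window sum, as accumulated by the idx_eloop1 loops
      for (int di = 0; di < stridei; ++di)
        for (int dj = 0; dj < stridej; ++dj)
          sub += in[i * stridei + di][j * stridej + dj];
      out[i][j] = std::tanh(coeff * sub + bias);  // idx_dotc, idx_addc, squash
    }
  return out;
}

int main() {
  Map in = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}};
  // coeff = 0.25 turns a 2x2 window sum into an average.
  Map out = s_layer_fprop_demo(in, 2, 2, 0.25, 0.0);
  for (const auto &row : out) {
    for (double v : row) std::printf("%f ", v);
    std::printf("\n");
  }
  return 0;
}

Note that s_layer::forget initializes coeff to fp.value / pow(stridei * stridej, fp.exponent), so with exponent 1 the window sum acts as a scaled average, matching the "coeff * average" comment in fprop.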
