dct-a.asm

Absolutely good source code
ASM
;*****************************************************************************
;* dct.asm: h264 encoder library
;*****************************************************************************
;* Copyright (C) 2003 x264 project
;* $Id: dct.asm,v 1.1 2004/06/03 19:27:07 fenrir Exp $
;*
;* Authors: Min Chen <chenm001.163.com> (converted to nasm)
;*          Laurent Aimar <fenrir@via.ecp.fr> (initial version)
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
;*****************************************************************************

;*****************************************************************************
;*                                                                           *
;*  Revision history:                                                        *
;*                                                                           *
;*  2004.04.28  portab all 4x4 function to nasm (CM)                         *
;*                                                                           *
;*****************************************************************************

BITS 64

;=============================================================================
; Macros and other preprocessor constants
;=============================================================================

%include "amd64inc.asm"

%macro MMX_ZERO 1
    pxor    %1, %1
%endmacro

%macro MMX_LOAD_DIFF_4P 5
    movd        %1, %4
    punpcklbw   %1, %3
    movd        %2, %5
    punpcklbw   %2, %3
    psubw       %1, %2
%endmacro

%macro MMX_LOAD_DIFF_8P 5
    movq        %1, %4
    punpcklbw   %1, %3
    movq        %2, %5
    punpcklbw   %2, %3
    psubw       %1, %2
%endmacro

%macro MMX_SUMSUB_BA 2
    paddw   %1, %2
    paddw   %2, %2
    psubw   %2, %1
%endmacro

%macro MMX_SUMSUB_BADC 4
    paddw   %1, %2
    paddw   %3, %4
    paddw   %2, %2
    paddw   %4, %4
    psubw   %2, %1
    psubw   %4, %3
%endmacro

%macro MMX_SUMSUB2_AB 3
    movq    %3, %1
    paddw   %1, %1
    paddw   %1, %2
    psubw   %3, %2
    psubw   %3, %2
%endmacro

%macro MMX_SUMSUBD2_AB 4
    movq    %4, %1
    movq    %3, %2
    psraw   %2, 1
    psraw   %4, 1
    paddw   %1, %2
    psubw   %4, %3
%endmacro

%macro SBUTTERFLY 5
    mov%1       %5, %3
    punpckl%2   %3, %4
    punpckh%2   %5, %4
%endmacro

;-----------------------------------------------------------------------------
; input ABCD output ADTC
;-----------------------------------------------------------------------------
%macro MMX_TRANSPOSE 5
    SBUTTERFLY q, wd, %1, %2, %5
    SBUTTERFLY q, wd, %3, %4, %2
    SBUTTERFLY q, dq, %1, %3, %4
    SBUTTERFLY q, dq, %5, %2, %3
%endmacro

;-----------------------------------------------------------------------------
; input ABCDEFGH output AFHDTECB
;-----------------------------------------------------------------------------
%macro SSE2_TRANSPOSE8x8 9
    SBUTTERFLY dqa, wd, %1, %2, %9
    SBUTTERFLY dqa, wd, %3, %4, %2
    SBUTTERFLY dqa, wd, %5, %6, %4
    SBUTTERFLY dqa, wd, %7, %8, %6
    SBUTTERFLY dqa, dq, %1, %3, %8
    SBUTTERFLY dqa, dq, %9, %2, %3
    SBUTTERFLY dqa, dq, %5, %7, %2
    SBUTTERFLY dqa, dq, %4, %6, %7
    SBUTTERFLY dqa, qdq, %1, %5, %6
    SBUTTERFLY dqa, qdq, %9, %4, %5
    SBUTTERFLY dqa, qdq, %8, %2, %4
    SBUTTERFLY dqa, qdq, %3, %7, %2
%endmacro

%macro MMX_STORE_DIFF_4P 5
    paddw       %1, %3
    psraw       %1, 6
    movd        %2, %5
    punpcklbw   %2, %4
    paddsw      %1, %2
    packuswb    %1, %1
    movd        %5, %1
%endmacro

%macro MMX_STORE_DIFF_8P 4
    psraw       %1, 6
    movq        %2, %4
    punpcklbw   %2, %3
    paddsw      %1, %2
    packuswb    %1, %1
    movq        %4, %1
%endmacro

;=============================================================================
; Constants
;=============================================================================

SECTION .rodata align=16
pw_1:  times 8 dw 1
pw_32: times 8 dw 32

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal x264_dct4x4dc_mmx

ALIGN 16
;-----------------------------------------------------------------------------
;   void x264_dct4x4dc_mmx( int16_t d[4][4] )
;-----------------------------------------------------------------------------
x264_dct4x4dc_mmx:
    movq    mm0,        [parm1q+ 0]
    movq    mm1,        [parm1q+ 8]
    movq    mm2,        [parm1q+16]
    movq    mm3,        [parm1q+24]

    MMX_SUMSUB_BADC     mm1, mm0, mm3, mm2          ; mm1=s01  mm0=d01  mm3=s23  mm2=d23
    MMX_SUMSUB_BADC     mm3, mm1, mm2, mm0          ; mm3=s01+s23  mm1=s01-s23  mm2=d01+d23  mm0=d01-d23

    MMX_TRANSPOSE       mm3, mm1, mm0, mm2, mm4     ; in: mm3, mm1, mm0, mm2  out: mm3, mm2, mm4, mm0

    MMX_SUMSUB_BADC     mm2, mm3, mm0, mm4          ; mm2=s01  mm3=d01  mm0=s23  mm4=d23
    MMX_SUMSUB_BADC     mm0, mm2, mm4, mm3          ; mm0=s01+s23  mm2=s01-s23  mm4=d01+d23  mm3=d01-d23

    movq    mm6,        [pw_1 GLOBAL]
    paddw   mm0,        mm6
    paddw   mm2,        mm6
    psraw   mm0,        1
    movq    [parm1q+ 0],mm0
    psraw   mm2,        1
    movq    [parm1q+ 8],mm2
    paddw   mm3,        mm6
    paddw   mm4,        mm6
    psraw   mm3,        1
    movq    [parm1q+16],mm3
    psraw   mm4,        1
    movq    [parm1q+24],mm4
    ret

cglobal x264_idct4x4dc_mmx

ALIGN 16
;-----------------------------------------------------------------------------
;   void x264_idct4x4dc_mmx( int16_t d[4][4] )
;-----------------------------------------------------------------------------
x264_idct4x4dc_mmx:
    movq    mm0, [parm1q+ 0]
    movq    mm1, [parm1q+ 8]
    movq    mm2, [parm1q+16]
    movq    mm3, [parm1q+24]

    MMX_SUMSUB_BADC     mm1, mm0, mm3, mm2          ; mm1=s01  mm0=d01  mm3=s23  mm2=d23
    MMX_SUMSUB_BADC     mm3, mm1, mm2, mm0          ; mm3=s01+s23 mm1=s01-s23 mm2=d01+d23 mm0=d01-d23

    MMX_TRANSPOSE       mm3, mm1, mm0, mm2, mm4     ; in: mm3, mm1, mm0, mm2  out: mm3, mm2, mm4, mm0

    MMX_SUMSUB_BADC     mm2, mm3, mm0, mm4          ; mm2=s01  mm3=d01  mm0=s23  mm4=d23
    MMX_SUMSUB_BADC     mm0, mm2, mm4, mm3          ; mm0=s01+s23  mm2=s01-s23  mm4=d01+d23  mm3=d01-d23

    movq    [parm1q+ 0], mm0
    movq    [parm1q+ 8], mm2
    movq    [parm1q+16], mm3
    movq    [parm1q+24], mm4
    ret

cglobal x264_sub4x4_dct_mmx

ALIGN 16
;-----------------------------------------------------------------------------
;   void x264_sub4x4_dct_mmx( int16_t dct[4][4], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
x264_sub4x4_dct_mmx:
    MMX_ZERO    mm7

    ; Load 4 lines
    MMX_LOAD_DIFF_4P    mm0, mm6, mm7, [parm2q+0*FENC_STRIDE], [parm3q+0*FDEC_STRIDE]
    MMX_LOAD_DIFF_4P    mm1, mm6, mm7, [parm2q+1*FENC_STRIDE], [parm3q+1*FDEC_STRIDE]
    MMX_LOAD_DIFF_4P    mm2, mm6, mm7, [parm2q+2*FENC_STRIDE], [parm3q+2*FDEC_STRIDE]
    MMX_LOAD_DIFF_4P    mm3, mm6, mm7, [parm2q+3*FENC_STRIDE], [parm3q+3*FDEC_STRIDE]

    MMX_SUMSUB_BADC     mm3, mm0, mm2, mm1          ; mm3=s03  mm0=d03  mm2=s12  mm1=d12
    MMX_SUMSUB_BA       mm2, mm3                    ; mm2=s03+s12      mm3=s03-s12
    MMX_SUMSUB2_AB      mm0, mm1, mm4               ; mm0=2.d03+d12    mm4=d03-2.d12

    ; transpose in: mm2, mm0, mm3, mm4, out: mm2, mm4, mm1, mm3
    MMX_TRANSPOSE       mm2, mm0, mm3, mm4, mm1

    MMX_SUMSUB_BADC     mm3, mm2, mm1, mm4          ; mm3=s03  mm2=d03  mm1=s12  mm4=d12
    MMX_SUMSUB_BA       mm1, mm3                    ; mm1=s03+s12      mm3=s03-s12
    MMX_SUMSUB2_AB      mm2, mm4, mm0               ; mm2=2.d03+d12    mm0=d03-2.d12

    movq    [parm1q+ 0], mm1
    movq    [parm1q+ 8], mm2
    movq    [parm1q+16], mm3
    movq    [parm1q+24], mm0
    ret

cglobal x264_add4x4_idct_mmx

ALIGN 16
;-----------------------------------------------------------------------------
;   void x264_add4x4_idct_mmx( uint8_t *p_dst, int16_t dct[4][4] )
;-----------------------------------------------------------------------------
x264_add4x4_idct_mmx:
    ; Load dct coeffs
    movq    mm0, [parm2q+ 0] ; dct
    movq    mm1, [parm2q+ 8]
    movq    mm2, [parm2q+16]
    movq    mm3, [parm2q+24]

    MMX_SUMSUB_BA       mm2, mm0                        ; mm2=s02  mm0=d02
    MMX_SUMSUBD2_AB     mm1, mm3, mm5, mm4              ; mm1=s13  mm4=d13 ( well 1 + 3>>1 and 1>>1 + 3)
    MMX_SUMSUB_BADC     mm1, mm2, mm4, mm0              ; mm1=s02+s13  mm2=s02-s13  mm4=d02+d13  mm0=d02-d13

    ; in: mm1, mm4, mm0, mm2  out: mm1, mm2, mm3, mm0
    MMX_TRANSPOSE       mm1, mm4, mm0, mm2, mm3

    MMX_SUMSUB_BA       mm3, mm1                        ; mm3=s02  mm1=d02
    MMX_SUMSUBD2_AB     mm2, mm0, mm5, mm4              ; mm2=s13  mm4=d13 ( well 1 + 3>>1 and 1>>1 + 3)
    MMX_SUMSUB_BADC     mm2, mm3, mm4, mm1              ; mm2=s02+s13  mm3=s02-s13  mm4=d02+d13  mm1=d02-d13

    MMX_ZERO            mm7
    movq                mm6, [pw_32 GLOBAL]

    MMX_STORE_DIFF_4P   mm2, mm0, mm6, mm7, [parm1q+0*FDEC_STRIDE]
    MMX_STORE_DIFF_4P   mm4, mm0, mm6, mm7, [parm1q+1*FDEC_STRIDE]
    MMX_STORE_DIFF_4P   mm1, mm0, mm6, mm7, [parm1q+2*FDEC_STRIDE]
    MMX_STORE_DIFF_4P   mm3, mm0, mm6, mm7, [parm1q+3*FDEC_STRIDE]
    ret

; =============================================================================
; 8x8 Transform
; =============================================================================

; in:  ABCDEFGH
; out: FBCGEDHI
%macro DCT8_1D 10
    MMX_SUMSUB_BA  %8, %1 ; %8=s07, %1=d07
    MMX_SUMSUB_BA  %7, %2 ; %7=s16, %2=d16
    MMX_SUMSUB_BA  %6, %3 ; %6=s25, %3=d25
    MMX_SUMSUB_BA  %5, %4 ; %5=s34, %4=d34

    MMX_SUMSUB_BA  %5, %8 ; %5=a0, %8=a2
    MMX_SUMSUB_BA  %6, %7 ; %6=a1, %7=a3

    movdqa  %9, %1
    psraw   %9, 1
    paddw   %9, %1
    paddw   %9, %2
    paddw   %9, %3 ; %9=a4

    movdqa  %10, %4
    psraw   %10, 1
    paddw   %10, %4
    paddw   %10, %2
    psubw   %10, %3 ; %10=a7

    MMX_SUMSUB_BA  %4, %1
    psubw   %1, %3
    psubw   %4, %2
    psraw   %3, 1
    psraw   %2, 1
    psubw   %1, %3 ; %1=a5
    psubw   %4, %2 ; %4=a6

    MMX_SUMSUB_BA  %6, %5 ; %6=b0, %5=b4

    movdqa  %2, %10
    psraw   %2, 2
    paddw   %2, %9 ; %2=b1
    psraw   %9, 2
    psubw   %9, %10 ; %9=b7

    movdqa  %3, %7
    psraw   %3, 1
    paddw   %3, %8 ; %3=b2
    psraw   %8, 1
    psubw   %8, %7 ; %8=b6

    movdqa  %7, %4
    psraw   %7, 2
    paddw   %7, %1 ; %7=b3
    psraw   %1, 2
    psubw   %4, %1 ; %4=b5
%endmacro

cglobal x264_sub8x8_dct8_sse2

ALIGN 16
;-----------------------------------------------------------------------------
;   void __cdecl x264_sub8x8_dct8_sse2( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
x264_sub8x8_dct8_sse2:
    MMX_ZERO  xmm9

    MMX_LOAD_DIFF_8P  xmm0, xmm8, xmm9, [parm2q+0*FENC_STRIDE], [parm3q+0*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm1, xmm8, xmm9, [parm2q+1*FENC_STRIDE], [parm3q+1*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm2, xmm8, xmm9, [parm2q+2*FENC_STRIDE], [parm3q+2*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm3, xmm8, xmm9, [parm2q+3*FENC_STRIDE], [parm3q+3*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm4, xmm8, xmm9, [parm2q+4*FENC_STRIDE], [parm3q+4*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm5, xmm8, xmm9, [parm2q+5*FENC_STRIDE], [parm3q+5*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm6, xmm8, xmm9, [parm2q+6*FENC_STRIDE], [parm3q+6*FDEC_STRIDE]
    MMX_LOAD_DIFF_8P  xmm7, xmm8, xmm9, [parm2q+7*FENC_STRIDE], [parm3q+7*FDEC_STRIDE]

    DCT8_1D           xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9
    SSE2_TRANSPOSE8x8 xmm5, xmm1, xmm2, xmm6, xmm4, xmm3, xmm7, xmm8, xmm0
    DCT8_1D           xmm5, xmm3, xmm8, xmm6, xmm0, xmm4, xmm2, xmm1, xmm7, xmm9

    movdqa  [parm1q+0x00], xmm4
    movdqa  [parm1q+0x10], xmm3
    movdqa  [parm1q+0x20], xmm8
    movdqa  [parm1q+0x30], xmm2
    movdqa  [parm1q+0x40], xmm0
    movdqa  [parm1q+0x50], xmm6
    movdqa  [parm1q+0x60], xmm1
    movdqa  [parm1q+0x70], xmm7
    ret

; in:  ABCDEFGH
; out: IBHDEACG
%macro IDCT8_1D 10
    MMX_SUMSUB_BA  %5, %1 ; %5=a0, %1=a2

    movdqa  %10, %3
    psraw   %3, 1
    psubw   %3, %7 ; %3=a4
    psraw   %7, 1
    paddw   %7, %10 ; %7=a6

    movdqa  %9, %2
    psraw   %9, 1
    paddw   %9, %2
    paddw   %9, %4
    paddw   %9, %6 ; %9=a7

    movdqa  %10, %6
    psraw   %10, 1
    paddw   %10, %6
    paddw   %10, %8
    psubw   %10, %2 ; %10=a5

    psubw   %2, %4
    psubw   %6, %4
    paddw   %2, %8
    psubw   %6, %8
    psraw   %4, 1
    psraw   %8, 1
    psubw   %2, %4 ; %2=a3
    psubw   %6, %8 ; %6=a1

    MMX_SUMSUB_BA %7, %5 ; %7=b0, %5=b6
    MMX_SUMSUB_BA %3, %1 ; %3=b2, %1=b4

    movdqa  %4, %9
    psraw   %4, 2
    paddw   %4, %6 ; %4=b1
    psraw   %6, 2
    psubw   %9, %6 ; %9=b7

    movdqa  %8, %10
    psraw   %8, 2
    paddw   %8, %2 ; %8=b3
    psraw   %2, 2
    psubw   %2, %10 ; %2=b5

    MMX_SUMSUB_BA %9, %7 ; %9=c0, %7=c7
    MMX_SUMSUB_BA %2, %3 ; %2=c1, %3=c6
    MMX_SUMSUB_BA %8, %1 ; %8=c2, %1=c5
    MMX_SUMSUB_BA %4, %5 ; %4=c3, %5=c4
%endmacro

cglobal x264_add8x8_idct8_sse2

ALIGN 16
;-----------------------------------------------------------------------------
;   void __cdecl x264_add8x8_idct8_sse2( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
x264_add8x8_idct8_sse2:
    movdqa  xmm0, [parm2q+0x00]
    movdqa  xmm1, [parm2q+0x10]
    movdqa  xmm2, [parm2q+0x20]
    movdqa  xmm3, [parm2q+0x30]
    movdqa  xmm4, [parm2q+0x40]
    movdqa  xmm5, [parm2q+0x50]
    movdqa  xmm6, [parm2q+0x60]
    movdqa  xmm7, [parm2q+0x70]

    IDCT8_1D          xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm9, xmm8
    SSE2_TRANSPOSE8x8 xmm9, xmm1, xmm7, xmm3, xmm4, xmm0, xmm2, xmm6, xmm5
    paddw             xmm9, [pw_32 GLOBAL] ; rounding for the >>6 at the end
    IDCT8_1D          xmm9, xmm0, xmm6, xmm3, xmm5, xmm4, xmm7, xmm1, xmm8, xmm2

    MMX_ZERO  xmm15
    MMX_STORE_DIFF_8P   xmm8, xmm14, xmm15, [parm1q+0*FDEC_STRIDE]
    MMX_STORE_DIFF_8P   xmm0, xmm14, xmm15, [parm1q+1*FDEC_STRIDE]
    MMX_STORE_DIFF_8P   xmm1, xmm14, xmm15, [parm1q+2*FDEC_STRIDE]
    MMX_STORE_DIFF_8P   xmm3, xmm14, xmm15, [parm1q+3*FDEC_STRIDE]
    MMX_STORE_DIFF_8P   xmm5, xmm14, xmm15, [parm1q+4*FDEC_STRIDE]
    MMX_STORE_DIFF_8P   xmm9, xmm14, xmm15, [parm1q+5*FDEC_STRIDE]
    MMX_STORE_DIFF_8P   xmm6, xmm14, xmm15, [parm1q+6*FDEC_STRIDE]
    MMX_STORE_DIFF_8P   xmm7, xmm14, xmm15, [parm1q+7*FDEC_STRIDE]
    ret
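
For readers tracing the 4x4 routines above, here is a scalar C sketch of the arithmetic that x264_sub4x4_dct_mmx performs, using the same s03/d03/s12/d12 names as the assembly comments. It is only an illustrative reference, not code from x264 itself: the function name and the explicit stride parameters are invented here, whereas the assembly takes its strides from the FENC_STRIDE/FDEC_STRIDE constants defined in the included amd64inc.asm.

/* Scalar reference sketch (assumed helper, not part of x264) of the 4x4
 * forward transform done by x264_sub4x4_dct_mmx: subtract the prediction,
 * then run the s03/d03/s12/d12 butterfly over rows and then columns. */
#include <stdint.h>

static void sub4x4_dct_ref( int16_t dct[4][4],
                            const uint8_t *pix1, int i_pix1,  /* source block */
                            const uint8_t *pix2, int i_pix2 ) /* prediction   */
{
    int16_t diff[4][4], tmp[4][4];

    /* MMX_LOAD_DIFF_4P: residual widened to 16 bits */
    for( int y = 0; y < 4; y++ )
        for( int x = 0; x < 4; x++ )
            diff[y][x] = (int16_t)( pix1[y*i_pix1+x] - pix2[y*i_pix2+x] );

    /* first 1-D pass over each row, result written transposed
     * (the asm gets the same effect with MMX_TRANSPOSE) */
    for( int i = 0; i < 4; i++ )
    {
        int s03 = diff[i][0] + diff[i][3], d03 = diff[i][0] - diff[i][3];
        int s12 = diff[i][1] + diff[i][2], d12 = diff[i][1] - diff[i][2];
        tmp[0][i] = (int16_t)(   s03 +   s12 );
        tmp[1][i] = (int16_t)( 2*d03 +   d12 );
        tmp[2][i] = (int16_t)(   s03 -   s12 );
        tmp[3][i] = (int16_t)(   d03 - 2*d12 );
    }

    /* second 1-D pass over the transposed intermediate */
    for( int i = 0; i < 4; i++ )
    {
        int s03 = tmp[i][0] + tmp[i][3], d03 = tmp[i][0] - tmp[i][3];
        int s12 = tmp[i][1] + tmp[i][2], d12 = tmp[i][1] - tmp[i][2];
        dct[i][0] = (int16_t)(   s03 +   s12 );
        dct[i][1] = (int16_t)( 2*d03 +   d12 );
        dct[i][2] = (int16_t)(   s03 -   s12 );
        dct[i][3] = (int16_t)(   d03 - 2*d12 );
    }
}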
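
The inverse path can be sketched the same way. The code below mirrors the comments in x264_add4x4_idct_mmx: the asymmetric s02/d02/s13/d13 butterflies with their >>1 halved terms, the (r + 32) >> 6 rounding implemented by pw_32 and MMX_STORE_DIFF_4P, and the saturated add of the residual onto the prediction (paddsw + packuswb). Again, the function name, the clip helper, and the explicit destination stride are assumptions for illustration; the assembly walks the destination with FDEC_STRIDE.

/* Scalar reference sketch (assumed helper, not part of x264) of the 4x4
 * inverse transform plus reconstruction done by x264_add4x4_idct_mmx. */
#include <stdint.h>

static uint8_t clip_uint8( int x )                 /* packuswb-style clamp */
{
    return (uint8_t)( x < 0 ? 0 : x > 255 ? 255 : x );
}

static void add4x4_idct_ref( uint8_t *dst, int i_dst, const int16_t dct[4][4] )
{
    int16_t tmp[4][4], d[4][4];

    /* first 1-D pass down the columns of dct, written transposed */
    for( int i = 0; i < 4; i++ )
    {
        int s02 =  dct[0][i]       +  dct[2][i];
        int d02 =  dct[0][i]       -  dct[2][i];
        int s13 =  dct[1][i]       + (dct[3][i] >> 1);
        int d13 = (dct[1][i] >> 1) -  dct[3][i];

        tmp[i][0] = (int16_t)(s02 + s13);
        tmp[i][1] = (int16_t)(d02 + d13);
        tmp[i][2] = (int16_t)(d02 - d13);
        tmp[i][3] = (int16_t)(s02 - s13);
    }

    /* second 1-D pass plus the final rounding: (r + 32) >> 6 */
    for( int i = 0; i < 4; i++ )
    {
        int s02 =  tmp[0][i]       +  tmp[2][i];
        int d02 =  tmp[0][i]       -  tmp[2][i];
        int s13 =  tmp[1][i]       + (tmp[3][i] >> 1);
        int d13 = (tmp[1][i] >> 1) -  tmp[3][i];

        d[0][i] = (int16_t)((s02 + s13 + 32) >> 6);
        d[1][i] = (int16_t)((d02 + d13 + 32) >> 6);
        d[2][i] = (int16_t)((d02 - d13 + 32) >> 6);
        d[3][i] = (int16_t)((s02 - s13 + 32) >> 6);
    }

    /* add the reconstructed residual to the prediction and clamp to 8 bits */
    for( int y = 0; y < 4; y++ )
        for( int x = 0; x < 4; x++ )
            dst[y*i_dst + x] = clip_uint8( dst[y*i_dst + x] + d[y][x] );
}

The 8x8 path (DCT8_1D / IDCT8_1D) follows the same two-pass structure: an 8-point butterfly over the rows, an SSE2 register transpose, and a second 8-point pass before the stores.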
